def test_spherical():
    """RandomRotate: repr formatting and a fixed 90-degree 2D rotation."""
    # A scalar degree d is normalized to the interval (-d, d) in the repr.
    assert repr(RandomRotate(-180)) == 'RandomRotate((-180, 180))'

    points = torch.tensor([[1, 0], [0, 1]], dtype=torch.float)
    rotated = RandomRotate((90, 90))(Data(pos=points)).pos

    # Rotating the unit axes by exactly 90 degrees maps
    # (1, 0) -> (0, 1) and (0, 1) -> (-1, 0).
    assert approx(rotated.view(-1).tolist()) == [0, 1, -1, 0]
Ejemplo n.º 2
0
    def get_data(self):
        """Build train/validation DataLoaders over ModelNet40.

        Both loaders apply the same transform pipeline (point sampling,
        scale normalization, translation jitter, random rotation) and use
        self.batch_size with incomplete final batches dropped. Also stores
        len(train set) in self.nr_classes.
        """
        from torch_geometric.datasets.modelnet import ModelNet
        from torch_geometric.transforms import SamplePoints, Compose, NormalizeScale, RandomRotate, RandomTranslate
        from torch_geometric.data import DataLoader

        transform = Compose((SamplePoints(self.nr_points), NormalizeScale(),
                             RandomTranslate(0.01), RandomRotate(180)))

        train_set = ModelNet('data/mn40', name='40', train=True,
                             transform=transform)
        # NOTE(review): this is the number of samples, not classes — confirm.
        self.nr_classes = len(train_set)

        train_loader = DataLoader(train_set.shuffle(),
                                  batch_size=self.batch_size,
                                  drop_last=True)

        val_set = ModelNet('data/mn40',
                           name='40',
                           train=False,
                           transform=transform)
        val_loader = DataLoader(val_set,
                                batch_size=self.batch_size,
                                drop_last=True)

        return train_loader, val_loader
Ejemplo n.º 3
0
    def __init__(self, cfg):
        """Build the Semantic3D training pipeline from config ``cfg``.

        Sets up augmentation transforms, the whole-scene dataset and its
        dataloaders, per-tree test probability buffers, the model, an SGD
        optimizer with exponential LR decay, and running metrics.
        """
        self.cfg = cfg
        self.device = torch.device(cfg.device)
        torch.cuda.set_device(cfg.device_id)

        # The room version dataset
        # Train-time augmentation, then AddFeatsByKeys packs pos+rgb into
        # x (rgb is deleted from the data object afterwards, pos is kept).
        self.train_transform = Compose([
            RandomRotate(degrees=180, axis=2),
            RandomScaleAnisotropic(scales=[0.8, 1.2], anisotropic=True),
            RandomSymmetry(axis=[True, False, False]),
            RandomNoise(sigma=0.001),
            DropFeature(drop_proba=0.2, feature_name='rgb'),
            AddFeatsByKeys(list_add_to_x=[True, True],
                           feat_names=['pos', 'rgb'],
                           delete_feats=[False, True])
        ])
        # Test-time: no augmentation, only the pos+rgb feature packing.
        self.test_transform = Compose([
            AddFeatsByKeys(list_add_to_x=[True, True],
                           feat_names=['pos', 'rgb'],
                           delete_feats=[False, True])
        ])

        self.dataset = Semantic3DWholeDataset(
            root=cfg.root,
            grid_size=cfg.grid_size,
            num_points=cfg.sample_num,
            train_sample_per_epoch=cfg.train_samples_per_epoch,
            test_sample_per_epoch=cfg.test_samples_per_epoch,
            train_transform=self.train_transform,
            test_transform=self.test_transform)

        # NOTE(review): num_workers=0 presumably because multi-scale
        # neighborhoods are precomputed in the main process — confirm.
        self.dataset.create_dataloader(batch_size=cfg.batch_size,
                                       shuffle=True,
                                       num_workers=0,
                                       precompute_multi_scale=True,
                                       num_scales=5)

        # One per-point class-probability buffer per validation tree.
        self.test_probs = [
            np.zeros(shape=(t.data.shape[0], cfg.num_classes),
                     dtype=np.float32)
            for t in self.dataset.val_set.input_trees
        ]

        # in_channels=6 presumably matches pos (3) + rgb (3) packed by
        # AddFeatsByKeys above — verify against the model definition.
        self.model = getattr(models, cfg.model_name)(in_channels=6,
                                                     n_classes=cfg.num_classes,
                                                     use_crf=cfg.use_crf,
                                                     steps=cfg.steps)

        # self.optimizer = torch.optim.Adam(params=self.model.parameters(),
        #                                   lr=cfg.lr,
        #                                   weight_decay=cfg.weight_decay)
        self.optimizer = torch.optim.SGD(params=self.model.parameters(),
                                         lr=cfg.lr,
                                         momentum=cfg.momentum,
                                         weight_decay=cfg.weight_decay)
        self.scheduler = torch.optim.lr_scheduler.ExponentialLR(
            self.optimizer, gamma=cfg.gamma)
        self.metrics = runningScore(cfg.num_classes,
                                    ignore_index=cfg.ignore_index)
Ejemplo n.º 4
0
def test_random_rotate():
    """Exercise RandomRotate in 2D and about each of the three 3D axes."""
    assert repr(RandomRotate([-180, 180])) == ('RandomRotate('
                                               '[-180, 180], axis=0)')

    pos = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])

    # A zero-degree rotation must leave positions untouched.
    data = RandomRotate(0)(Data(pos=pos))
    assert len(data) == 1
    assert data.pos.tolist() == pos.tolist()

    # A fixed 180-degree rotation negates both 2D coordinates.
    data = RandomRotate([180, 180])(Data(pos=pos))
    assert len(data) == 1
    assert data.pos.tolist() == [[1, 1], [1, -1], [-1, 1], [-1, -1]]

    pos = torch.Tensor([[-1, -1, 1], [-1, 1, 1], [1, -1, -1], [1, 1, -1]])

    # In 3D, a 180-degree rotation about an axis negates the other two
    # coordinates and leaves the axis coordinate unchanged.
    expected = {
        0: [[-1, 1, -1], [-1, -1, -1], [1, 1, 1], [1, -1, 1]],
        1: [[1, -1, -1], [1, 1, -1], [-1, -1, 1], [-1, 1, 1]],
        2: [[1, 1, 1], [1, -1, 1], [-1, 1, -1], [-1, -1, -1]],
    }
    for axis, rotated in expected.items():
        data = RandomRotate([180, 180], axis=axis)(Data(pos=pos))
        assert len(data) == 1
        assert data.pos.tolist() == rotated
Ejemplo n.º 5
0
    def get_data(self):
        """Build train/validation DataLoaders over GeometricShapes.

        Both loaders iterate the SAME shuffled dataset with the same
        augmentation pipeline (point sampling, scale normalization,
        translation jitter, random rotation). Also stores len(dataset)
        in self.nr_classes.
        """
        from torch_geometric.datasets.geometry import GeometricShapes
        from torch_geometric.transforms import SamplePoints, Compose, NormalizeScale, RandomRotate, RandomTranslate
        from torch_geometric.data import DataLoader

        transform = Compose((SamplePoints(self.nr_points), NormalizeScale(),
                             RandomTranslate(0.01), RandomRotate(180)))

        shapes = GeometricShapes('data/geometric',
                                 train=True,
                                 transform=transform)
        self.nr_classes = len(shapes)

        shapes = shapes.shuffle()

        val_loader = DataLoader(shapes,
                                batch_size=self.batch_size,
                                drop_last=True)
        train_loader = DataLoader(shapes,
                                  batch_size=self.batch_size,
                                  drop_last=True)

        return train_loader, val_loader
Ejemplo n.º 6
0
from torch_geometric.transforms import SamplePoints, Compose, RandomRotate
from torch_geometric.data import DataLoader, Data
from torch_geometric.nn import EdgeConv, knn_graph, SplineConv, graclus, fps, GraphConv
from torch_geometric.nn import global_mean_pool as gavgp, max_pool_x

from torch_geometric.datasets.geometry import GeometricShapes
from utility.utility import plot_point_cloud, graclus_out
from my_nn_viz import DirectionalSplineConvNoF, DirectionalEdgeConv

#### Load Data ####
# Experiment hyper-parameters: batch size, points sampled per mesh, and
# neighborhood size k (used by the model below).
batch_size = 5
nr_points = 1000
k = 50

# Sample a fixed-size point cloud from each mesh, then apply a random
# rotation of up to 180 degrees as augmentation.
trans = Compose((SamplePoints(nr_points),
        RandomRotate(180)))

#dataset = ModelNet(root='MN', name="10", train=True, transform=trans)
dataset = GeometricShapes('data/geometric', train=True, transform=trans)
# NOTE(review): this is the number of samples, not classes — confirm.
nr_clases = len(dataset)

dataset = dataset.shuffle()

# Both loaders iterate the same shuffled dataset.
test_loader = DataLoader(dataset, batch_size=batch_size)
train_loader = DataLoader(dataset, batch_size=batch_size)
#### Define Model ####

class Net(torch.nn.Module):
    def __init__(self):
        # NOTE(review): super().__init__() is not called before assigning
        # attributes — torch.nn.Module requires it. Confirm against the
        # full definition (truncated in this excerpt).
        self.k = k
Ejemplo n.º 7
0
sys.path.insert(0, '..')

from torch_geometric.datasets import Cuneiform  # noqa
from torch_geometric.transforms import (RandomRotate, RandomScale,
                                        RandomTranslate, CartesianAdj)  # noqa
from torch_geometric.utils import DataLoader  # noqa
from torch_geometric.nn.modules import SplineConv  # noqa
from torch_geometric.nn.functional import batch_average  # noqa

# Resolve the Cuneiform dataset directory relative to this script.
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(path, '..', 'data', 'Cuneiform')
# NOTE(review): perm and split are computed but unused in this excerpt
# (both datasets are built with split=None) — confirm downstream usage.
n = 267
perm = torch.randperm(n)
split = torch.arange(0, n + 27, 27, out=torch.LongTensor())
# Train-time augmentation; test data only gets Cartesian edge attributes.
train_transform = Compose([
    RandomRotate(0.6),
    RandomScale(1.4),
    RandomTranslate(0.1),
    CartesianAdj(),
])
test_transform = CartesianAdj()
# BUG FIX: the training set previously received test_transform, which left
# train_transform — and therefore all training augmentation — unused.
train_dataset = Cuneiform(path, split=None, transform=train_transform)
test_dataset = Cuneiform(path, split=None, transform=test_transform)


class Net(nn.Module):
    # Stacked SplineConv feature extractor over 2D pseudo-coordinates
    # (dim=2 matches the CartesianAdj edge attributes built above).
    def __init__(self):
        super(Net, self).__init__()
        # Channel widths 8 -> 32 -> 64 -> 124.
        # NOTE(review): 124 looks like a possible typo for 128 — confirm.
        self.conv1 = SplineConv(8, 32, dim=2, kernel_size=5)
        self.conv2 = SplineConv(32, 64, dim=2, kernel_size=5)
        self.conv3 = SplineConv(64, 124, dim=2, kernel_size=5)
Ejemplo n.º 8
0
model.to(device)
# Adam with weight decay taken from the hyper-parameter namespace `p`.
optimizer = torch.optim.Adam(model.parameters(),
                             lr=learn_rate,
                             weight_decay=p.weight_decay)

# TensorBoard run tag encodes the experiment configuration.
writer = SummaryWriter(
    comment='model:exp2_3hops_lr:{}_shuffle:{}_seed:{}'.format(
        learn_rate, p.shuffle_dataset, p.random_seed))

# Best validation ROC-AUC seen so far (updated later; loop body is
# truncated in this excerpt).
max_roc_auc = 0

# ---- Training ----
print('Training...')
for epoch in range(1, epochs + 1):
    # Re-build the augmentation each epoch: center, rotate 90 degrees about
    # the axis selected by epoch % 3, re-attach positions, add 2-hop edges.
    trainset.transform = Compose(
        (Center(), RandomRotate(90, epoch % 3), AddPositionalData(), TwoHop()))
    validset.transform = Compose(
        (Center(), RandomRotate(90, epoch % 3), AddPositionalData(), TwoHop()))
    train_loader = DataLoader(trainset,
                              shuffle=p.shuffle_dataset,
                              batch_size=p.batch_size)
    val_loader = DataLoader(validset,
                            shuffle=False,
                            batch_size=p.test_batch_size)

    learn_rate = optimizer.param_groups[0][
        'lr']  # for when it may be modified during run
    model.train()
    # Per-epoch accumulators for predictions, sample weights, and losses.
    pred = torch.Tensor()
    tr_weights = torch.Tensor()
    loss = []
Ejemplo n.º 9
0
model.to(device)
# Adam with weight decay taken from the hyper-parameter namespace `p`.
optimizer = torch.optim.Adam(model.parameters(),
                             lr=learn_rate,
                             weight_decay=p.weight_decay)

# TensorBoard run tag encodes the experiment configuration.
writer = SummaryWriter(
    comment='model:exp2_4hops_lr:{}_shuffle:{}_seed:{}'.format(
        learn_rate, p.shuffle_dataset, p.random_seed))

# Best validation ROC-AUC seen so far (updated later; loop body is
# truncated in this excerpt).
max_roc_auc = 0

# ---- Training ----
print('Training...')
for epoch in range(1, epochs + 1):
    # Re-build augmentation each epoch; TwoHop applied twice gives the
    # 4-hop connectivity named in the writer comment.
    trainset.transform = Compose((Center(), RandomRotate(90, epoch % 3),
                                  AddPositionalData(), TwoHop(), TwoHop()))
    validset.transform = Compose((Center(), RandomRotate(90, epoch % 3),
                                  AddPositionalData(), TwoHop(), TwoHop()))
    train_loader = DataLoader(trainset,
                              shuffle=p.shuffle_dataset,
                              batch_size=p.batch_size)
    val_loader = DataLoader(validset,
                            shuffle=False,
                            batch_size=p.test_batch_size)

    learn_rate = optimizer.param_groups[0][
        'lr']  # for when it may be modified during run
    model.train()
    # Per-epoch accumulators for predictions and sample weights.
    pred = torch.Tensor()
    tr_weights = torch.Tensor()
Ejemplo n.º 10
0
                       p.shuffle_dataset,
                       p.random_seed))

# Best ROC-AUC seen so far on the plain and masked validation sets.
max_roc_auc = 0
max_roc_masked = 0

# ---- Training ----
# Rotation axis cycles x, y, z across epochs.
axes = [0,1,2]

for model_n, model in enumerate(models):
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learn_rate, weight_decay=p.weight_decay)
# ------------ TRAINING NEW BLOCK --------------------------
    print('Training block {}'.format(model_n))
    for epoch in range(1, epochs+1):
        # Re-apply augmentation each epoch: shape index + centering + a
        # 90-degree rotation about the epoch-dependent axis.
        trainset.transform = Compose((AddShapeIndex(), Center(), RandomRotate(90, axes[epoch%3]),
                                      AddPositionalData()))

        # if model > 0:


        train_loader = DataLoader(trainset, shuffle=p.shuffle_dataset, batch_size=p.batch_size)  # redefine train_loader to use data out.
        val_loader = DataLoader(validset, shuffle=False, batch_size=p.test_batch_size)
        masked_loader = DataLoader(maskedset, shuffle=False, batch_size=p.test_batch_size)

        model.train()
        # Fresh per-epoch accumulators for labels, predictions, and losses.
        first_batch_labels = torch.Tensor()
        pred = torch.Tensor()
        # BUG FIX: the original line read "loss = []\\" — the stray trailing
        # backslashes are a Python syntax error.
        loss = []

        for batch_n, batch in enumerate(train_loader):
Ejemplo n.º 11
0
import numpy as np
from torch_geometric.transforms import SamplePoints, Compose, RandomRotate
from pyntcloud import PyntCloud
from torch_geometric.datasets.geometry import GeometricShapes

# Number of points sampled from each mesh surface.
nr_points = 512

# Sample a point cloud, then randomly rotate it up to 180 degrees.
trans = Compose((SamplePoints(nr_points), RandomRotate(180)))

#dataset = ModelNet(root='MN', name="10", train=True, transform=trans)
dataset = GeometricShapes('gm', train=True, transform=trans)
dataset = dataset.shuffle()

# Point positions of the first (transformed) sample.
x = dataset[0]['pos']

from utility import plot_point_cloud, plot_voxel

# Render the sampled cloud on a 32^3 voxel grid.
plot_voxel(x, d=32)
Ejemplo n.º 12
0
def train_pointnet(dataset):
    """Train a PointNet classifier on GeometricShapes point clouds, then
    probe how random rotations degrade its test accuracy.

    Args:
        dataset: dataset whose ``num_classes`` sizes the classifier head.
            NOTE(review): the name is shadowed below by the rotation-test
            dataset — consider renaming one of them.
    """
    # load data for train and test
    train_dataset = GeometricShapes(root='data/GeometricShapes',
                                    train=True,
                                    transform=SamplePoints(128))
    test_dataset = GeometricShapes(root='data/GeometricShapes',
                                   train=False,
                                   transform=SamplePoints(128))

    train_loader = DataLoader(train_dataset, batch_size=10, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=10)

    # setup model
    model = PointNet(dataset.num_classes)
    print(model)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    criterion = torch.nn.CrossEntropyLoss()  # Define loss criterion.

    def train(model, optimizer, loader):
        """Run one epoch over `loader`; return the per-sample mean loss."""
        model.train()

        total_loss = 0
        for data in loader:
            optimizer.zero_grad()  # Clear gradients.
            logits = model(data.pos, data.batch)  # Forward pass.
            loss = criterion(logits, data.y)  # Loss computation.
            loss.backward()  # Backward pass.
            optimizer.step()  # Update model parameters.
            total_loss += loss.item() * data.num_graphs

        # BUG FIX: average over the loader actually iterated, not the
        # closed-over train_loader, so the helper is correct for any loader.
        return total_loss / len(loader.dataset)

    @torch.no_grad()
    def test(model, loader):
        """Return classification accuracy of `model` over `loader`."""
        model.eval()

        total_correct = 0
        for data in loader:
            logits = model(data.pos, data.batch)
            pred = logits.argmax(dim=-1)
            total_correct += int((pred == data.y).sum())

        return total_correct / len(loader.dataset)

    for epoch in range(1, 96):
        loss = train(model, optimizer, train_loader)
        test_acc = test(model, test_loader)
        print(
            f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Test Accuracy: {test_acc:.4f}'
        )

    ### Test rotation impact to PointNet
    torch.manual_seed(123)
    # Compose a random rotation about each of the three axes.
    random_rotate = Compose([
        RandomRotate(degrees=180, axis=0),
        RandomRotate(degrees=180, axis=1),
        RandomRotate(degrees=180, axis=2),
    ])

    dataset = GeometricShapes(root='data/GeometricShapes',
                              transform=random_rotate)

    # Visualize two rotated meshes to sanity-check the augmentation.
    data = dataset[0]
    print(data)
    visualize_mesh(data.pos, data.face)

    data = dataset[4]
    print(data)
    visualize_mesh(data.pos, data.face)

    torch.manual_seed(42)

    # Evaluate the trained model on rotated, re-sampled test clouds.
    transform = Compose([
        random_rotate,
        SamplePoints(num=128),
    ])

    test_dataset = GeometricShapes(root='data/GeometricShapes',
                                   train=False,
                                   transform=transform)

    test_loader = DataLoader(test_dataset, batch_size=10)

    test_acc = test(model, test_loader)
    print(f'Test Accuracy: {test_acc:.4f}')
Ejemplo n.º 13
0
# ---- Training ----
print('Training...')
for epoch in range(1, epochs+1):
    # rotate the structures between epochs

    if 'pos' in p.dataset:  # Is there positional data in the features?
        # Ramp the rotation angle from 0 to 90 degrees between epochs 200
        # and 700, then hold at 90; cycle the axis with epoch % 3.
        degrees = 0
        if epoch > 200:
            if epoch < 700:
                degrees = 90*((epoch-200)/500)
            else:
                degrees = 90
        rotation_axis = axes[epoch % 3]  # only for structural data.
        # NOTE(review): these transforms are unconditionally overwritten a
        # few lines below — confirm whether this branch is dead code.
        trainset.transform = Compose((RemovePositionalData(),
                                      RandomRotate(degrees, axis=rotation_axis),
                                      AddPositionalData(),
                                      RemoveXYZ()))
        validset.transform = RemoveXYZ()

    # Using shape index data:
    # BUG FIX: AddShapeIndex() was passed as a second positional argument
    # to Compose (outside the transform sequence), so it was never applied;
    # it belongs inside the sequence after TwoHop().
    trainset.transform = Compose((Center(), FaceAttributes(),
                                  NodeCurvature(), FaceToEdge(),
                                  TwoHop(), AddShapeIndex()))
    validset.transform = Compose((Center(), FaceAttributes(),
                                  NodeCurvature(), FaceToEdge(),
                                  TwoHop(), AddShapeIndex()))
    train_loader = DataListLoader(trainset, shuffle=p.shuffle_dataset, batch_size=p.batch_size)
    val_loader = DataListLoader(validset, shuffle=False, batch_size=p.test_batch_size)

    learn_rate = optimizer.param_groups[0]['lr']  # for when it may be modified during run
Ejemplo n.º 14
0
 def get_loader(phase, bz = 10):
     """Wrap ModalDataset for `phase` in a DataLoader of batch size `bz`.

     Always random-samples 2048 points; for the 'dis' data type, also adds
     a random 180-degree rotation about each of the three axes.
     """
     tfms = [RandomSample(2048)]
     if data_type == 'dis':
         tfms.extend(RandomRotate(180, axis) for axis in (0, 1, 2))
     ds = ModalDataset(args.dataset + phase, transforms=tfms)
     return DataLoader(ds, batch_size=bz, num_workers=10)