Code example #1
File: experiments.py Project: fclairec/geometric-ifc
def transform_setup(graph_u=False,
                    graph_gcn=False,
                    rotation=(180, 1),  # (degrees, axis) pair consumed by T.RandomRotate below
                    samplePoints=1024,
                    mesh=False,
                    node_translation=0.01):
    if not graph_u and not graph_gcn:
        # Default transformations: scale normalization, centering, point sampling and random rotation
        pretransform = T.Compose([T.NormalizeScale(), T.Center()])
        transform = T.Compose([
            T.SamplePoints(samplePoints),
            T.RandomRotate(rotation[0], rotation[1])
        ])
        print("pointnet rotation {}".format(rotation))
    elif graph_u:
        pretransform = T.Compose([T.NormalizeScale(), T.Center()])
        transform = T.Compose([
            T.NormalizeScale(),
            T.Center(),
            T.SamplePoints(samplePoints, True, True),
            T.RandomRotate(rotation[0], rotation[1]),
            T.KNNGraph(k=graph_u)
        ])
    elif graph_gcn:

        pretransform = T.Compose([T.NormalizeScale(), T.Center()])

        if mesh:
            # The 'extraFeatures' case currently applies the same chain as the plain mesh case.
            transform = T.Compose([
                T.RandomRotate(rotation[0], rotation[1]),
                T.GenerateMeshNormals(),
                T.FaceToEdge(True),
                T.Distance(norm=True),
                T.TargetIndegree(cat=True)
            ])
        else:
            transform = T.Compose([
                T.SamplePoints(samplePoints, True, True),
                T.KNNGraph(k=graph_gcn),
                T.Distance(norm=True)
            ])
            print("no mesh")
        print("Rotation {}".format(rotation))
        print("Meshing {}".format(mesh))

    else:
        print('no transform')

    return transform, pretransform
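
A minimal usage sketch for the function above; the dataset root and the graph_gcn value are illustrative, not taken from the original project:

from torch_geometric.datasets import ModelNet

# Hypothetical wiring of transform_setup into a PyG dataset.
transform, pretransform = transform_setup(graph_gcn=9, rotation=(180, 1))
train_ds = ModelNet('data/ModelNet10', name='10', train=True,
                    transform=transform, pre_transform=pretransform)
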
Code example #2
File: modelnet.py Project: Chen-Cai-OSU/Esme
def load_modelnet(version='10', point_flag=False):
    """
    :param point_flag: sample points if True; otherwise load the mesh
    :return: train_dataset, test_dataset
    """
    assert version in ['10', '40']
    if point_flag:
        pre_transform, transform = T.NormalizeScale(), T.SamplePoints(1024)
    else:
        pre_transform, transform = T.FaceToEdge(), None

    # path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data/ModelNet' + version)
    path = '/home/cai.507/Documents/DeepLearning/ModelNet' + version

    train_dataset = ModelNet(path,
                             version,
                             True,
                             transform=transform,
                             pre_transform=pre_transform)
    test_dataset = ModelNet(path,
                            version,
                            False,
                            transform=transform,
                            pre_transform=pre_transform)
    return train_dataset, test_dataset
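
A minimal usage sketch for the loader above (batch size is illustrative):

from torch_geometric.data import DataLoader

train_dataset, test_dataset = load_modelnet(version='10', point_flag=True)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)
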
Code example #3
File: datasets.py Project: saha223311/dgcnn_full
    def __init__(self, root, **kwargs):
        self.root = os.path.join(root, 'ModelNet40')
        self.name = '40'
        self.pre_transform = T.NormalizeScale()
        self.transform = T.SamplePoints(1024)
        self.label_parser = lambda data: data.y
        self.num_classes = 40
Code example #4
File: modelnet.py Project: yunyoonaer/cogdl
    def __init__(self, train):
        dataset = "ModelNet40"
        pre_transform, transform = T.NormalizeScale(), T.SamplePoints(1024)
        path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
        if not osp.exists(path):
            # Instantiate once to trigger download and preprocessing; keyword
            # arguments are required here, since ModelNet's third positional
            # parameter is the train flag, not the transform.
            ModelNet(path, "40", transform=transform, pre_transform=pre_transform)
        super(ModelNet40, self).__init__(path, name="40", train=train,
                                         transform=transform, pre_transform=pre_transform)
Code example #5
    def __init__(self, dataset_opt, training_opt):
        super().__init__(dataset_opt, training_opt)

        number = dataset_opt.number
        if str(number) not in AVAILABLE_NUMBERS:
            raise Exception("Only ModelNet10 and ModelNet40 are available")
        name = "ModelNet{}".format(number)
        self._data_path = osp.join(osp.dirname(osp.realpath(__file__)), "..",
                                   "data", name)
        pre_transform = T.Compose([T.NormalizeScale(), MeshToNormal()])
        transform = (T.SamplePoints(dataset_opt.num_points) if contains_key(
            dataset_opt, "num_points") else None)

        train_dataset = ModelNet(
            self._data_path,
            name=str(number),
            train=True,
            transform=transform,
            pre_transform=pre_transform,
        )

        test_dataset = ModelNet(
            self._data_path,
            name=str(number),
            train=False,
            transform=transform,
            pre_transform=pre_transform,
        )

        self._create_dataloaders(train_dataset, test_dataset, validation=None)
Code example #6
def augment_transforms(args):
    """
    define transformation
    """
    pre_transform = None
    if args.norm == 'scale':
        pre_transform = T.NormalizeScale()
    elif args.norm == 'bbox':
        pre_transform = NormalizeBox()
    elif args.norm == 'sphere':
        pre_transform = NormalizeSphere(center=True)
    elif args.norm == 'sphere_wo_center':
        pre_transform = NormalizeSphere(center=False)
    else:
        pass

    transform = []
    # Shapenet
    if args.task == 'segmentation':
        transform.append(T.FixedPoints(args.num_pts))
    # Modelnet
    if args.task == 'classification':
        transform.append(T.SamplePoints(args.num_pts))

    transform = T.Compose(transform)
    return pre_transform, transform
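
A sketch of how this is typically driven from parsed arguments; the SimpleNamespace below stands in for argparse output and its values are illustrative:

from types import SimpleNamespace

args = SimpleNamespace(norm='scale', task='classification', num_pts=1024)
pre_transform, transform = augment_transforms(args)
# pre_transform normalizes each mesh once at preprocessing time;
# transform samples num_pts points on every dataset access.
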
Code example #7
    def get_view_transform(self, k, num_pts):
        R = rotation_matrix(np.pi / 3., 0., np.pi / 6. * k)
        transformation = TG.Compose([
            TG.NormalizeScale(),
            TG.LinearTransformation(R),
            TG.SamplePoints(num=num_pts, include_normals=self.generate_norms)
        ])
        return transformation
Code example #8
def getTransform():
    transform = None

    if SAMPLING_METHOD == 'ImportanceSampling':
        transform = T.SamplePoints(SAMPLE_NUM, remove_faces=True, include_normals=False)

    elif SAMPLING_METHOD == 'PoissonDiskSampling':
        transform = Poisson.PoissonDiskSampling(SAMPLE_NUM, remove_faces=True)

    return transform
Code example #9
def get_dataset(num_points):
    name = 'ModelNet10'
    path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', name)
    pre_transform = T.NormalizeScale()
    transform = T.SamplePoints(num_points)

    train_dataset = ModelNet(path, name='10', train=True, transform=transform,
                             pre_transform=pre_transform)
    test_dataset = ModelNet(path, name='10', train=False, transform=transform,
                            pre_transform=pre_transform)

    return train_dataset, test_dataset
Code example #10
    def __init__(self, root='data/ModelNet', name='40', train=True):
        # Default setting
        pre_transform = T.NormalizeScale()
        transform = T.SamplePoints(1024)
        pre_filter = None

        super().__init__(root + name,
                         name=name,
                         train=train,
                         transform=transform,
                         pre_transform=pre_transform,
                         pre_filter=pre_filter)
Code example #11
def getSampler(name, dataset_name):
    transform = None

    if dataset_name == 'ShapeNet':
        transform = T.FixedPoints(NUM_POINT)

    elif name == 'ImportanceSampling':
        transform = T.SamplePoints(NUM_POINT,
                                   remove_faces=True,
                                   include_normals=USE_NORMALS)

    elif name == 'PoissonDiskSampling':
        transform = PoissonDiskSampling(NUM_POINT, remove_faces=True)

    return transform
Code example #12
def load_dataset(args):

    # load ShapeNet dataset
    if args.dataset == 'shapenet':
        pre_transform, transform = augment_transforms(args)

        categories = args.categories.split(',')
        train_dataset = ShapeNet('../data_root/ShapeNet_normal', categories, split='trainval', include_normals=False,
                                 pre_transform=pre_transform, transform=transform)
        test_dataset = ShapeNet('../data_root/ShapeNet_normal', categories, split='test', include_normals=False,
                                pre_transform=pre_transform, transform=T.FixedPoints(args.num_pts))
        train_dataloader = DataLoader(train_dataset, batch_size=args.bsize, shuffle=True,
                                      num_workers=6, drop_last=True)
        test_dataloader = DataLoader(test_dataset, batch_size=args.bsize, shuffle=True,
                                     num_workers=6, drop_last=True)

    # load ModelNet dataset
    if args.dataset == 'modelnet':
        pre_transform, transform = augment_transforms(args)

        train_dataset = ModelNet('../data_root/ModelNet40', name='40', train=True,
                                 pre_transform=pre_transform, transform=transform)
        test_dataset = ModelNet('../data_root/ModelNet40', name='40', train=False,
                                pre_transform=pre_transform, transform=T.SamplePoints(args.num_pts))
        train_dataloader = DataLoader(train_dataset, batch_size=args.bsize, shuffle=True,
                                      num_workers=6, drop_last=True)
        test_dataloader = DataLoader(test_dataset, batch_size=args.bsize, shuffle=True,
                                     num_workers=6, drop_last=True)

    # load completion3D dataset
    if args.dataset == 'completion3D':
        pre_transform, transform = augment_transforms(args)

        categories = args.categories.split(',')
        train_dataset = completion3D_class('../data_root/completion3D', categories, split='train',
                            include_normals=False, pre_transform=pre_transform, transform=transform)
        test_dataset = completion3D_class('../data_root/completion3D', categories, split='val',
                            include_normals=False, pre_transform=pre_transform, transform=transform)
        train_dataloader = DataLoader(train_dataset, batch_size=args.bsize, shuffle=True,
                                      num_workers=8, drop_last=True)
        test_dataloader = DataLoader(test_dataset, batch_size=args.bsize, shuffle=False,
                                     num_workers=8, drop_last=True)

    return train_dataloader, test_dataloader
Code example #13
def get_dataloader(num_points, b_size, name='10'):
    path = 'ModelNet' + name
    pre_transform = T.NormalizeScale()
    transform = T.SamplePoints(num_points)

    train_dataset = ModelNet('dataset/' + path,
                             name=name,
                             train=True,
                             transform=transform,
                             pre_transform=pre_transform)
    test_dataset = ModelNet('dataset/' + path,
                            name=name,
                            train=False,
                            transform=transform,
                            pre_transform=pre_transform)

    train_loader = DataLoader(train_dataset, batch_size=b_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=b_size, shuffle=False)

    return train_loader, test_loader
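
For reference, each batch these loaders yield is a torch_geometric Batch object; a small sketch of its fields (sizes assume num_points=1024 and b_size=32):

train_loader, test_loader = get_dataloader(num_points=1024, b_size=32, name='10')
batch = next(iter(train_loader))
print(batch.pos.shape)    # [32 * 1024, 3]: sampled xyz coordinates, concatenated
print(batch.batch.shape)  # [32 * 1024]: index of the shape each point belongs to
print(batch.y.shape)      # [32]: one class label per shape
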
Code example #14
File: model_utils.py Project: Arcane-Wang/545Project
def augment_transforms(args):
    """
    define transformation
    """
    pre_transform = None
    if args.norm == 'scale':
        pre_transform = T.NormalizeScale()
    elif args.norm == 'sphere':
        pre_transform = NormalizeSphere(center=True)
    elif args.norm == 'sphere_wo_center':
        pre_transform = NormalizeSphere(center=False)
    else:
        pass

    transform = []
    if args.dataset == 'shapenet':
        transform.append(T.FixedPoints(args.num_pts))
    if args.dataset == 'modelnet':
        transform.append(T.SamplePoints(args.num_pts))

    # if args.is_randRotY:
    #     transform.append(T.RandomRotate(180, axis=1))
    transform = T.Compose(transform)
    return pre_transform, transform
Code example #15
    def __init__(self,
                 data_dir,
                 batch_size,
                 shuffle,
                 validation_split,
                 num_workers,
                 num_points,
                 training=True):
        # trsfm = transforms.Compose([
        #     transforms.ToTensor(),
        #     transforms.Normalize((0.1307,), (0.3081,))
        # ])

        self.data_dir = data_dir
        path = osp.join(self.data_dir, 'ModelNet10')
        pre_transform, transform = T.NormalizeScale(), T.SamplePoints(
            num_points)

        train_dataset = ModelNet(path, '10', training, transform,
                                 pre_transform)

        super(MyModelNetDataLoader, self).__init__(train_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=shuffle)
Code example #16
def evaluate(dataloader):  # header implied by the body; name is illustrative, `model` is a global
    model.eval()

    correct = []
    for i_batch, batch in enumerate(dataloader):
        x = batch.pos.reshape((-1, 1024, 3)).to(DEVICE)
        pred = model(x)
        pred = torch.argmax(pred, dim=-1)
        pred, y = pred.cpu().detach().numpy(), batch.y.detach().numpy()
        correct.append(np.mean(pred == y))
    return np.mean(correct)


DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device(
    'cpu')

pre_transform, transform = T.NormalizeScale(), T.SamplePoints(1024)
dataset = ModelNet('./data/model-net', '10', True, transform, pre_transform)
dataloader = DataLoader(dataset, num_workers=2, batch_size=16, shuffle=True)

loss_dict = {}
for lr in (5e-3, ):
    model = PointNet(3, [64, 64], [128, 128, 128], 10).to(DEVICE)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()

    key = lr
    loss_dict[key] = []

    for epoch in range(5):
        loss = train_epoch()
        loss_dict[key].append(loss)
Code example #17

def test(loader):
    model.eval()
    correct = 0
    for data in loader:
        data = data.to(device)
        with torch.no_grad():
            pred = model(data).max(1)[1]
        correct += pred.eq(data.y).sum().item()
    return correct / len(loader.dataset)


if __name__ == '__main__':
    path = 'dataset'
    pre_transform, transform = T.NormalizeScale(), T.SamplePoints(
        128, include_normals=True)
    train_dataset = ModelNet(path, '10', True, transform=transform,
                             pre_transform=pre_transform)
    test_dataset = ModelNet(path, '10', False, transform=transform,
                            pre_transform=pre_transform)
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)
    model = Net().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

    losses = []
    accs = []
    try:
        for epoch in range(200):
            loss = train(epoch)
            test_acc = test(test_loader)
            print('epoch', epoch, 'loss', loss, 'acc', test_acc)
            losses.append(loss)
Code example #18
import torch  # needed by the tensor indexing just below
from torch_geometric.datasets import GeometricShapes
from torch_geometric.data import DataLoader

dataset = GeometricShapes(root='/tmp/geometric_shapes')

# Filter dataset to only contain a circle and a square.
dataset = dataset[torch.tensor([0, 4])]

loader = DataLoader(dataset, batch_size=2, shuffle=False)

data = next(iter(loader))  # Get first mini-batch.

import torch_geometric.transforms as T

dataset.transform = T.SamplePoints(num=128)
data = next(iter(loader))  # Get first mini-batch.

from torch_geometric.nn import fps

mask = fps(data.pos, data.batch, ratio=0.25)

# Create radius graph.
from torch_geometric.nn import radius

assign_index = radius(data.pos, data.pos[mask], 0.4, data.batch,
                      data.batch[mask])

import torch
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import MessagePassing
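
The snippet stops right after importing MessagePassing; a minimal sketch of the kind of point-convolution layer this sets up (the layer name, channel size, and knn_graph wiring are illustrative, not from the original):

from torch_geometric.nn import MessagePassing, knn_graph


class PointConv(MessagePassing):
    def __init__(self, out_channels=32):
        super().__init__(aggr='max')  # max-aggregate messages from neighbors
        self.mlp = Sequential(Linear(3, out_channels), ReLU(),
                              Linear(out_channels, out_channels))

    def forward(self, pos, edge_index):
        return self.propagate(edge_index, pos=pos)

    def message(self, pos_j, pos_i):
        # Encode each neighbor by its position relative to the center point.
        return self.mlp(pos_j - pos_i)


edge_index = knn_graph(data.pos, k=16, batch=data.batch)
out = PointConv()(data.pos, edge_index)  # [num_points, out_channels] features
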
Code example #19
import argparse

from dataset import Dataset
from utils import read_data_objects

import torch_geometric.transforms as T

if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('--path', required=True)
    parser.add_argument('--num_points', required=False, type=int, default=1024)
    dataset_args = parser.parse_args()

    num_points = dataset_args.num_points

    pre_transform, transform = T.NormalizeScale(), T.SamplePoints(num_points)

    data_objects = read_data_objects()

    Dataset(
        dataset_args.path,
        transform,
        pre_transform,
        data_objects
    )
Code example #20
# ## Data loading
# Let's get the dataset.

import torch
from torch_geometric.datasets import ModelNet
import torch_geometric.transforms as T
import time
from tqdm import tqdm_notebook

pre_transform = T.NormalizeScale()
transform = T.Compose([
    T.SamplePoints(1024),
    T.RandomRotate(30),
    T.RandomScale((0.5, 2)),
])
name = '40'

train_ds = ModelNet(root='./',
                    train=True,
                    name=name,
                    pre_transform=pre_transform,
                    transform=transform)

test_ds = ModelNet(root='./',
                   train=False,
                   name=name,
                   pre_transform=pre_transform,
                   transform=transform)
Code example #21
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    if args.random_seed:
        args.seed = np.random.randint(0, 1000)  # scalar seed, usable by torch.manual_seed below

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # dataset modelnet
    pre_transform, transform = T.NormalizeScale(), T.SamplePoints(
        args.num_points)
    train_dataset = GeoData.ModelNet(os.path.join(args.data, 'modelnet10'),
                                     '10', True, transform, pre_transform)
    train_queue = DenseDataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.batch_size // 2)
    test_dataset = GeoData.ModelNet(os.path.join(args.data, 'modelnet10'),
                                    '10', False, transform, pre_transform)
    valid_queue = DenseDataLoader(test_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.batch_size // 2)
    n_classes = train_queue.dataset.num_classes

    criterion = torch.nn.CrossEntropyLoss().cuda()
    model = Network(args.init_channels,
                    n_classes,
                    args.num_cells,
                    criterion,
                    args.n_steps,
                    in_channels=args.in_channels,
                    emb_dims=args.emb_dims,
                    dropout=args.dropout,
                    k=args.k).cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    num_edges = model._steps * 2
    post_train = 5
    # import pdb;pdb.set_trace()
    args.epochs = args.warmup_dec_epoch + args.decision_freq * (
        num_edges - 1) + post_train + 1
    logging.info("total epochs: %d", args.epochs)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs), eta_min=args.learning_rate_min)

    architect = Architect(model, args)

    normal_selected_idxs = torch.tensor(len(model.alphas_normal) * [-1],
                                        requires_grad=False,
                                        dtype=torch.int).cuda()
    normal_candidate_flags = torch.tensor(len(model.alphas_normal) * [True],
                                          requires_grad=False,
                                          dtype=torch.bool).cuda()
    logging.info('normal_selected_idxs: {}'.format(normal_selected_idxs))
    logging.info('normal_candidate_flags: {}'.format(normal_candidate_flags))
    model.normal_selected_idxs = normal_selected_idxs
    model.normal_candidate_flags = normal_candidate_flags

    print(F.softmax(torch.stack(model.alphas_normal, dim=0), dim=-1).detach())

    count = 0
    normal_probs_history = []
    train_losses, valid_losses = utils.AverageMeter(), utils.AverageMeter()
    for epoch in range(args.epochs):
        lr = scheduler.get_lr()[0]
        logging.info('epoch %d lr %e', epoch, lr)
        # training
        # import pdb;pdb.set_trace()
        att = model.show_att()
        beta = model.show_beta()
        train_acc, train_losses = train(train_queue, valid_queue, model,
                                        architect, criterion, optimizer, lr,
                                        train_losses)
        valid_overall_acc, valid_class_acc, valid_losses = infer(
            valid_queue, model, criterion, valid_losses)

        logging.info(
            'train_acc %f\tvalid_overall_acc %f \t valid_class_acc %f',
            train_acc, valid_overall_acc, valid_class_acc)
        logging.info('beta %s', beta.cpu().detach().numpy())
        logging.info('att %s', att.cpu().detach().numpy())
        # make edge decisions
        saved_memory_normal, model.normal_selected_idxs, \
        model.normal_candidate_flags = edge_decision('normal',
                                                     model.alphas_normal,
                                                     model.normal_selected_idxs,
                                                     model.normal_candidate_flags,
                                                     normal_probs_history,
                                                     epoch,
                                                     model,
                                                     args)

        if saved_memory_normal:
            del train_queue, valid_queue
            torch.cuda.empty_cache()

            count += 1
            new_batch_size = args.batch_size + args.batch_increase * count
            logging.info("new_batch_size = {}".format(new_batch_size))
            train_queue = DenseDataLoader(train_dataset,
                                          batch_size=new_batch_size,
                                          shuffle=True,
                                          num_workers=args.batch_size // 2)
            valid_queue = DenseDataLoader(test_dataset,
                                          batch_size=new_batch_size,
                                          shuffle=False,
                                          num_workers=args.batch_size // 2)
            # post validation
            if args.post_val:
                post_valid_overall_acc, post_valid_class_acc, valid_losses = infer(
                    valid_queue, model, criterion, valid_losses)
                logging.info('post_valid_overall_acc %f',
                             post_valid_overall_acc)

        writer.add_scalar('stats/train_acc', train_acc, epoch)
        writer.add_scalar('stats/valid_overall_acc', valid_overall_acc, epoch)
        writer.add_scalar('stats/valid_class_acc', valid_class_acc, epoch)
        utils.save(model, os.path.join(args.save, 'weights.pt'))
        scheduler.step()

    logging.info("#" * 30 + " Done " + "#" * 30)
    logging.info('genotype = %s', model.get_genotype())
Code example #22
#         train=True,
#         download=True,
#         transform=transforms.Compose(
#             [transforms.Resize(opt.img_size), transforms.ToTensor(
#             ), transforms.Normalize([0.5], [0.5])]
#         ),
#     ),
#     batch_size=opt.batch_size,
#     shuffle=True,
# )

# train_db = MNISTSummation(set_size=opt.set_size, train=True, transform=transforms.Compose([transforms.Resize(opt.img_size), transforms.ToTensor(),
# transforms.Normalize([0.5], [0.5])]))
# train_db = Parametric(set_size=opt.set_size)
path = osp.join('..', 'data/ModelNet10')
pre_transform, transform = T.NormalizeScale(), T.SamplePoints(opt.set_size)
train_db = ModelNet(path, '10', True, transform, pre_transform)
test_db = ModelNet(path, '10', False, transform, pre_transform)
# train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True, num_workers=6)
# test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False, num_workers=6)

# dataloader = torch.utils.data.DataLoader(train_db, batch_size=opt.batch_size, shuffle=True, num_workers=6)
dataloader = DataLoader(train_db,
                        batch_size=opt.batch_size,
                        shuffle=True,
                        num_workers=6)

# Optimizers
optimizer_E1 = torch.optim.Adam(encoder1.parameters(),
                                lr=opt.lr,
                                betas=(opt.b1, opt.b2))