Example #1
def transform_setup(graph_u=False,
                    graph_gcn=False,
                    rotation=(180, 1),  # (degrees, axis) pair consumed by T.RandomRotate below
                    samplePoints=1024,
                    mesh=False,
                    node_translation=0.01):
    if not graph_u and not graph_gcn:
        # Default transforms: scale normalization, centering, point sampling and rotation
        pretransform = T.Compose([T.NormalizeScale(), T.Center()])
        transform = T.Compose([
            T.SamplePoints(samplePoints),
            T.RandomRotate(rotation[0], rotation[1])
        ])
        print("pointnet rotation {}".format(rotation))
    elif graph_u:
        pretransform = T.Compose([T.NormalizeScale(), T.Center()])
        transform = T.Compose([
            T.NormalizeScale(),
            T.Center(),
            T.SamplePoints(samplePoints, True, True),
            T.RandomRotate(rotation[0], rotation[1]),
            T.KNNGraph(k=graph_u)
        ])
    elif graph_gcn:

        pretransform = T.Compose([T.NormalizeScale(), T.Center()])

        if mesh:
            if mesh == "extraFeatures":
                transform = T.Compose([
                    T.RandomRotate(rotation[0], rotation[1]),
                    T.GenerateMeshNormals(),
                    T.FaceToEdge(True),
                    T.Distance(norm=True),
                    T.TargetIndegree(cat=True)
                ])
            else:
                # NOTE: currently identical to the "extraFeatures" branch above
                transform = T.Compose([
                    T.RandomRotate(rotation[0], rotation[1]),
                    T.GenerateMeshNormals(),
                    T.FaceToEdge(True),
                    T.Distance(norm=True),
                    T.TargetIndegree(cat=True)
                ])
        else:
            transform = T.Compose([
                T.SamplePoints(samplePoints, True, True),
                T.KNNGraph(k=graph_gcn),
                T.Distance(norm=True)
            ])
            print("no mesh")
        print("Rotation {}".format(rotation))
        print("Meshing {}".format(mesh))

    else:
        print('no transform')

    return transform, pretransform
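A minimal usage sketch, not part of the original snippet: since the function indexes `rotation[0]` and `rotation[1]`, the argument is expected as a `(degrees, axis)` pair, and the returned transforms plug into a PyTorch Geometric dataset. The dataset path and parameter values below are placeholders.

# Hypothetical usage of transform_setup; path and values are placeholders.
import torch_geometric.transforms as T
from torch_geometric.datasets import ModelNet

transform, pretransform = transform_setup(rotation=(180, 1), samplePoints=1024)
dataset = ModelNet('data/ModelNet40', name='40', train=True,
                   transform=transform, pre_transform=pretransform)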
Example #2
def main():
    opt = OptInit().get_args()
    logging.info('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.data_dir,
                                  opt.area,
                                  True,
                                  pre_transform=T.NormalizeScale())
    train_loader = DenseDataLoader(train_dataset,
                                   batch_size=opt.batch_size,
                                   shuffle=True,
                                   num_workers=4)
    test_dataset = GeoData.S3DIS(opt.data_dir,
                                 opt.area,
                                 train=False,
                                 pre_transform=T.NormalizeScale())
    test_loader = DenseDataLoader(test_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=False,
                                  num_workers=0)
    opt.n_classes = train_loader.dataset.num_classes

    logging.info('===> Loading the network ...')
    model = DenseDeepGCN(opt).to(opt.device)
    if opt.multi_gpus:
        model = DataParallel(DenseDeepGCN(opt)).to(opt.device)
    logging.info('===> loading pre-trained ...')
    model, opt.best_value, opt.epoch = load_pretrained_models(
        model, opt.pretrained_model, opt.phase)
    logging.info(model)

    logging.info('===> Init the optimizer ...')
    criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq,
                                                opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(
        opt.pretrained_model, optimizer, scheduler, opt.lr)

    logging.info('===> Init Metric ...')
    opt.losses = AverageMeter()
    opt.test_value = 0.

    logging.info('===> start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        logging.info('Epoch:{}'.format(opt.epoch))
        train(model, train_loader, optimizer, scheduler, criterion, opt)
        if opt.epoch % opt.eval_freq == 0 and opt.eval_freq != -1:
            test(model, test_loader, opt)
        scheduler.step()
    logging.info('Saving the final model. Finished!')
Example #3
 def __init__(self, train):
     dataset = "ModelNet40"
     pre_transform, transform = T.NormalizeScale(), T.SamplePoints(1024)
     path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
     if not osp.exists(path):
         ModelNet(path, "40", transform, pre_transform)
     super(ModelNet40, self).__init__(path, name="40", train=train, transform=transform, pre_transform=pre_transform)
Example #4
def load_modelnet(version='10', point_flag=False):
    """
    :param point_flag: Sample points if point_flag true. Otherwise load mesh
    :return: train_dataset, test_dataset
    """
    assert version in ['10', '40']
    if point_flag:
        pre_transform, transform = T.NormalizeScale(), T.SamplePoints(1024)
    else:
        pre_transform, transform = FaceToEdge(), None

    # path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data/ModelNet' + version)
    path = '/home/cai.507/Documents/DeepLearning/ModelNet' + version

    train_dataset = ModelNet(path,
                             version,
                             True,
                             transform=transform,
                             pre_transform=pre_transform)
    test_dataset = ModelNet(path,
                            version,
                            False,
                            transform=transform,
                            pre_transform=pre_transform)
    return train_dataset, test_dataset
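A short usage sketch, assumed rather than taken from the original file: wrapping the returned datasets in PyTorch Geometric data loaders (the `DataLoader` import path is `torch_geometric.loader` in recent PyG versions, `torch_geometric.data` in older ones).

# Hypothetical call site for load_modelnet; the batch size is a placeholder.
from torch_geometric.loader import DataLoader

train_dataset, test_dataset = load_modelnet(version='10', point_flag=True)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)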
Example #5
class MNISTSuperpixels(LightningDataModule):
    def __init__(
        self,
        data_dir: str = "data/",
        batch_size: int = 32,
        num_workers: int = 0,
        pin_memory: bool = False,
        train_val_test_split: Sequence[int] = (55_000, 5_000, 10_000),
        n_segments: int = 75,
        k: int = 10,
        loop: bool = True,
        **kwargs,
    ):
        super().__init__()

        self.data_dir = data_dir
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.pin_memory = pin_memory
        self.train_val_test_split = train_val_test_split
        self.n_segments = n_segments
        self.k = k
        self.loop = loop
        self.slic_kwargs = kwargs

        assert 1 <= n_segments <= 28 * 28

        self.pre_transform = T.Compose([
            T.NormalizeScale(),
        ])
        self.transform = None

        self.data_train: Optional[Dataset] = None
        self.data_val: Optional[Dataset] = None
        self.data_test: Optional[Dataset] = None
Example #6
 def __init__(self, dataset_opt, training_opt):
     super().__init__(dataset_opt, training_opt)
     self._data_path = os.path.join(dataset_opt.dataroot, "ShapeNet")
     try:
         self._category = dataset_opt.category
     except KeyError:
         self._category = None
     pre_transform = T.NormalizeScale()
     train_transform = T.Compose(
         [T.FixedPoints(dataset_opt.num_points),
          RandomNoise()])
     test_transform = T.FixedPoints(dataset_opt.num_points)
     train_dataset = ShapeNet(
         self._data_path,
         self._category,
         include_normals=dataset_opt.normal,
         split="trainval",
         pre_transform=pre_transform,
         transform=train_transform,
     )
     test_dataset = ShapeNet(
         self._data_path,
         self._category,
         include_normals=dataset_opt.normal,
         split="test",
         pre_transform=pre_transform,
         transform=test_transform,
     )
     self._categories = train_dataset.categories
     self._create_dataloaders(train_dataset, test_dataset)
Example #7
def augment_transforms(args):
    """
    define transformation
    """
    pre_transform = None
    if args.norm == 'scale':
        pre_transform = T.NormalizeScale()
    elif args.norm == 'bbox':
        pre_transform = NormalizeBox()
    elif args.norm == 'sphere':
        pre_transform = NormalizeSphere(center=True)
    elif args.norm == 'sphere_wo_center':
        pre_transform = NormalizeSphere(center=False)
    else:
        pass

    transform = []
    # Shapenet
    if args.task == 'segmentation':
        transform.append(T.FixedPoints(args.num_pts))
    # Modelnet
    if args.task == 'classification':
        transform.append(T.SamplePoints(args.num_pts))

    transform = T.Compose(transform)
    return pre_transform, transform
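A hedged call-site sketch: `args` is assumed to be an `argparse.Namespace` carrying `norm`, `task` and `num_pts`, and the resulting transforms are handed to a PyG dataset in the usual way (ShapeNet shown for the segmentation task).

# Hypothetical usage of augment_transforms; all argument values are placeholders.
from argparse import Namespace
from torch_geometric.datasets import ShapeNet

args = Namespace(norm='scale', task='segmentation', num_pts=2048)
pre_transform, transform = augment_transforms(args)
dataset = ShapeNet('data/ShapeNet', split='trainval',
                   transform=transform, pre_transform=pre_transform)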
Example #8
 def __init__(self, root, **kwargs):
     self.root = os.path.join(root, 'ModelNet40')
     self.name = '40'
     self.pre_transform = T.NormalizeScale()
     self.transform = T.SamplePoints(1024)
     self.label_parser = lambda data: data.y
     self.num_classes = 40
Example #9
    def __init__(self, dataset_opt, training_opt):
        super().__init__(dataset_opt, training_opt)

        number = dataset_opt.number
        if str(number) not in AVAILABLE_NUMBERS:
            raise Exception("Only ModelNet10 and ModelNet40 are available")
        name = "ModelNet{}".format(number)
        self._data_path = osp.join(osp.dirname(osp.realpath(__file__)), "..",
                                   "data", name)
        pre_transform = T.Compose([T.NormalizeScale(), MeshToNormal()])
        transform = (T.SamplePoints(dataset_opt.num_points) if contains_key(
            dataset_opt, "num_points") else None)

        train_dataset = ModelNet(
            self._data_path,
            name=str(number),
            train=True,
            transform=transform,
            pre_transform=pre_transform,
        )

        test_dataset = ModelNet(
            self._data_path,
            name=str(number),
            train=False,
            transform=transform,
            pre_transform=pre_transform,
        )

        self._create_dataloaders(train_dataset, test_dataset, validation=None)
Example #10
 def get_view_transform(self, k, num_pts):
     R = rotation_matrix(np.pi / 3., 0., np.pi / 6. * k)
     transformation = TG.Compose([
         TG.NormalizeScale(),
         TG.LinearTransformation(R),
         TG.SamplePoints(num=num_pts, include_normals=self.generate_norms)
     ])
     return transformation
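`rotation_matrix` is defined elsewhere in the original project; below is a minimal sketch of what it might look like, assuming it composes rotations about the x-, y- and z-axes (angles in radians). Note that `TG.LinearTransformation` may require a `torch.Tensor`, in which case the matrix needs a `torch.from_numpy(R).float()` conversion first.

# A sketch of the rotation_matrix helper assumed above (not the original code):
# builds a 3x3 matrix composing rotations about the x-, y- and z-axes.
import numpy as np

def rotation_matrix(rx, ry, rz):
    cx, sx = np.cos(rx), np.sin(rx)
    cy, sy = np.cos(ry), np.sin(ry)
    cz, sz = np.cos(rz), np.sin(rz)
    Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    return Rz @ Ry @ Rx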
Example #11
    def __init__(self,
                 root='data/ShapeNet',
                 train=True,
                 categories=None,
                 include_normals=True,
                 split='trainval',
                 transform=None,
                 pre_transform=None,
                 pre_filter=None,
                 repeat_to=None):  # Modified here to add repeat_to
        if categories is None:
            categories = list(self.category_ids.keys())
        if isinstance(categories, str):
            categories = [categories]
        assert all(category in self.category_ids for category in categories)
        self.categories = categories

        # Default settings
        pre_transform = T.NormalizeScale()
        pre_filter = None
        include_normals = True

        if train:
            transform = T.Compose([
                T.RandomTranslate(0.01),
                T.RandomRotate(15, axis=0),
                T.RandomRotate(15, axis=1),
                T.RandomRotate(15, axis=2)
            ])
            split = 'trainval'
        else:
            transform = None
            split = 'test'

        super().__init__(root, transform, pre_transform,
                         pre_filter)  # Modified here to add repeat_to

        if split == 'train':
            path = self.processed_paths[0]
        elif split == 'val':
            path = self.processed_paths[1]
        elif split == 'test':
            path = self.processed_paths[2]
        elif split == 'trainval':
            path = self.processed_paths[3]
        else:
            raise ValueError((f'Split {split} found, but expected either '
                              'train, val, trainval or test'))

        self.data, self.slices = torch.load(path)
        self.data.x = self.data.x if include_normals else None

        self.y_mask = torch.zeros((len(self.seg_classes.keys()), 50),
                                  dtype=torch.bool)
        for i, labels in enumerate(self.seg_classes.values()):
            self.y_mask[i, labels] = 1

        self.repeat_to = repeat_to  # Modified here to add repeat_to
Example #12
def get_s3dis_dataloaders(root_dir, phases, batch_size, category=5, augment=False):
    """
    Create Dataset and Dataloader classes of the S3DIS dataset, for
    the phases required (`train`, `test`).

    :param root_dir: Directory with the h5 files
    :param phases: List of phases. Should be from {`train`, `test`}
    :param batch_size: Batch size
    :param category: Area used for test set (1, 2, 3, 4, 5, or 6)

    :return: 2 dictionaries, each containing Dataset or Dataloader for all phases
    """
    datasets = {
        'train': S3DIS(root_dir, category, True, pre_transform=T.NormalizeScale()),
        'test': S3DIS(root_dir, category, False, pre_transform=T.NormalizeScale())
    }

    dataloaders = {x: DenseDataLoader(datasets[x], batch_size=batch_size, num_workers=4, shuffle=(x == 'train'))
                   for x in phases}
    return datasets, dataloaders, datasets['train'].num_classes
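An assumed usage sketch (not from the original file): requesting both phases and iterating one dense batch; `root_dir` and the batch size are placeholders.

# Hypothetical usage of get_s3dis_dataloaders.
datasets, dataloaders, num_classes = get_s3dis_dataloaders(
    root_dir='data/S3DIS', phases=['train', 'test'], batch_size=16, category=5)
for batch in dataloaders['train']:
    print(batch.pos.shape, batch.y.shape)  # dense [B, N, ...] tensors from DenseDataLoader
    break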
Example #13
def main():
    opt = OptInit().initialize()
    opt.printer.info('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.train_path,
                                  5,
                                  True,
                                  pre_transform=T.NormalizeScale())
    if opt.multi_gpus:
        train_loader = DataListLoader(train_dataset,
                                      batch_size=opt.batch_size,
                                      shuffle=True,
                                      num_workers=4)
    else:
        train_loader = DataLoader(train_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    opt.n_classes = train_loader.dataset.num_classes

    opt.printer.info('===> Loading the network ...')
    model = SparseDeepGCN(opt).to(opt.device)
    if opt.multi_gpus:
        model = DataParallel(SparseDeepGCN(opt)).to(opt.device)
    opt.printer.info('===> loading pre-trained ...')
    model, opt.best_value, opt.epoch = load_pretrained_models(
        model, opt.pretrained_model, opt.phase)

    opt.printer.info('===> Init the optimizer ...')
    criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    if opt.optim.lower() == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    elif opt.optim.lower() == 'radam':
        optimizer = optim.RAdam(model.parameters(), lr=opt.lr)
    else:
        raise NotImplementedError('opt.optim is not supported')
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq,
                                                opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(
        opt.pretrained_model, optimizer, scheduler, opt.lr)

    opt.printer.info('===> Init Metric ...')
    opt.losses = AverageMeter()
    # opt.test_metric = miou
    # opt.test_values = AverageMeter()
    opt.test_value = 0.

    opt.printer.info('===> start training ...')
    for _ in range(opt.total_epochs):
        opt.epoch += 1
        train(model, train_loader, optimizer, scheduler, criterion, opt)
        # test_value = test(model, test_loader, test_metric, opt)
        scheduler.step()
    opt.printer.info('Saving the final model. Finished!')
Example #14
    def __init__(self, root='data/ModelNet', name='40', train=True):
        # Default setting
        pre_transform = T.NormalizeScale()
        transform = T.SamplePoints(1024)
        pre_filter = None

        super().__init__(root + name,
                         name=name,
                         train=train,
                         transform=transform,
                         pre_transform=pre_transform,
                         pre_filter=pre_filter)
Example #15
def get_dataset(num_points):
    name = 'ModelNet10'
    path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', name)
    pre_transform = T.NormalizeScale()
    transform = T.SamplePoints(num_points)

    train_dataset = ModelNet(path, name='10', train=True, transform=transform,
                             pre_transform=pre_transform)
    test_dataset = ModelNet(path, name='10', train=False, transform=transform,
                            pre_transform=pre_transform)

    return train_dataset, test_dataset
Example #16
 def __init__(self, root, category, **kwargs):
     self.root = os.path.join(root, 'ShapeNet')
     self.category = category
     self.categories = [category]
     self.pre_transform = T.NormalizeScale()
     self.train_transform = T.Compose([
         T.RandomTranslate(0.01),
         T.RandomRotate(15, axis=0),
         T.RandomRotate(15, axis=1),
         T.RandomRotate(15, axis=2),
     ])
     self.label_parser = lambda data: data.y
Example #17
def predict_age(file_path="/media/original/data/vtps/sub-CC00050XX01_ses-7201_hemi-L_inflated_reduce50.vtp"):
    torch.manual_seed(0)
    if osp.isfile(file_path):

        # mesh = read(file_path)
        # reader = vtk.vtkPolyDataReader()
        reader = vtk.vtkXMLPolyDataReader()
        reader.SetFileName(file_path)
        reader.Update()
        # output = reader.GetOutput()

        points = torch.tensor(np.array(reader.GetOutput().GetPoints().GetData()))

        local_features = ['corrected_thickness', 'curvature', 'sulcal_depth']


        x = get_features(local_features, reader)
        transform = T.NormalizeScale()
        # transform_samp = T.FixedPoints(10000)
        data = Data(batch=torch.zeros_like(x[:, 0]).long(), x=x, pos=points)
        data = transform(data)
        # data = transform_samp(data)
        # data = Data(batch=torch.zeros_like(x[:, 0]).long(), x=x, pos=points)
        # data = Data(x=x, pos=points)

        try:
            nvidia_smi.nvmlInit()
            handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)
            mem_res = nvidia_smi.nvmlDeviceGetMemoryInfo(handle)
            free_mem = mem_res.free / 1024 ** 2
        except Exception:
            free_mem = 0

        device = torch.device('cuda' if torch.cuda.is_available() and free_mem >= GPU_MEM_LIMIT else 'cpu')

        numb_local_features = x.size(1)
        numb_global_features = 0

        model = Net(numb_local_features, numb_global_features).to(device)

        model.load_state_dict(torch.load(MODEL_PATH, map_location=device))
        model.eval()

        # data_loader = DataLoader([data], batch_size=1, shuffle=False)
        # print(len(data_loader))
        # pred = model(next(iter(data_loader)).to(device))
        pred = model(data.to(device))

        return pred.item()
    else:
        return 'Unable to predict..'
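`get_features`, `Net`, `MODEL_PATH` and `GPU_MEM_LIMIT` come from the surrounding module; a minimal sketch of the `get_features` helper is given below, under the assumption that it stacks the named VTK point-data arrays into an (N, F) float tensor.

# A sketch of the get_features helper assumed above (not the original code).
import numpy as np
import torch
from vtk.util.numpy_support import vtk_to_numpy

def get_features(names, reader):
    point_data = reader.GetOutput().GetPointData()
    arrays = [vtk_to_numpy(point_data.GetArray(name)) for name in names]
    return torch.from_numpy(np.stack(arrays, axis=-1)).float()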
Example #18
def main():
    opt = OptInit().initialize()
    opt.batch_size = 1
    os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpuNum

    print('===> Creating dataloader...')
    # def __init__(self,
    #              root,
    #              is_train=True,
    #              is_validation=False,
    #              is_test=False,
    #              num_channel=5,
    #              pre_transform=None,
    #              pre_filter=None)
    test_dataset = BigredDataset(root=opt.test_path,
                                 is_train=False,
                                 is_validation=False,
                                 is_test=True,
                                 num_channel=5,
                                 pre_transform=T.NormalizeScale())
    test_loader = DenseDataLoader(test_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=False,
                                  num_workers=32)
    opt.n_classes = 2

    print('len(test_loader):', len(test_loader))
    print('phase: ', opt.phase)
    print('batch_size: ', opt.batch_size)
    print('use_cpu: ', opt.use_cpu)
    print('gpuNum: ', opt.gpuNum)
    print('multi_gpus: ', opt.multi_gpus)
    print('test_path: ', opt.test_path)
    print('in_channels: ', opt.in_channels)
    print('device: ', opt.device)

    print('===> Loading the network ...')
    model = DenseDeepGCN(opt).to(opt.device)
    load_package = torch.load(opt.pretrained_model)
    model, opt.best_value, opt.epoch = load_pretrained_models(
        model, opt.pretrained_model, opt.phase)
    pdb.set_trace()
    for item in load_package.keys():
        if (item != 'optimizer_state_dict' and item != 'state_dict'
                and item != 'scheduler_state_dict'):
            print(str(item), load_package[item])

    print('===> Start Evaluation ...')
    test(model, test_loader, opt)
Example #19
 def __init__(self, root, classification=False, **kwargs):
     self.root = os.path.join(root, 'ShapeNet')
     self.pre_transform = T.NormalizeScale()
     self.train_transform = T.Compose([
         RandomSamplePoints(2048),
         T.RandomTranslate(0.01),
         T.RandomRotate(15, axis=0),
         T.RandomRotate(15, axis=1),
         T.RandomRotate(15, axis=2),
     ])
     self.val_transform = RandomSamplePoints(2048)
     self.num_classes = len(self.categories)
     if not classification:
         self.label_parser = lambda data: data.y
     else:
         self.label_parser = lambda data: data.cid
Example #20
def main():
    opt = OptInit().initialize()

    print('===> Creating dataloader...')
    test_dataset = GeoData.S3DIS(opt.test_path, 5, False, pre_transform=T.NormalizeScale())
    test_loader = DataLoader(test_dataset, batch_size=opt.batch_size, shuffle=False, num_workers=0)
    opt.n_classes = test_loader.dataset.num_classes
    if opt.no_clutter:
        opt.n_classes -= 1

    print('===> Loading the network ...')
    model = getattr(models, opt.model_name)(opt).to(opt.device)
    model, opt.best_value, opt.epoch = load_pretrained_models(model, opt.pretrained_model, opt.phase)

    print('===> Start Evaluation ...')
    test(model, test_loader, opt)
Example #21
    def __init__(self, dataset_opt, training_opt):
        super().__init__(dataset_opt, training_opt)
        self._data_path = os.path.join(dataset_opt.dataroot, 'ShapeNet')
        self._category = dataset_opt.shapenet.category
        transform = T.Compose([
            T.RandomTranslate(0.01),
            T.RandomRotate(15, axis=0),
            T.RandomRotate(15, axis=1),
            T.RandomRotate(15, axis=2)
        ])
        pre_transform = T.NormalizeScale()
        train_dataset = ShapeNet(self._data_path, self._category, train=True, transform=transform,
                                pre_transform=pre_transform)
        test_dataset = ShapeNet(self._data_path, self._category, train=False,
                                pre_transform=pre_transform)

        self.create_dataloaders(train_dataset, test_dataset, validation=None)
Example #22
def main():
    opt = OptInit().initialize()

    print('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.train_path,
                                  5,
                                  True,
                                  pre_transform=T.NormalizeScale())
    if opt.multi_gpus:
        train_loader = DataListLoader(train_dataset,
                                      batch_size=opt.batch_size,
                                      shuffle=True,
                                      num_workers=4)
    else:
        train_loader = DataLoader(train_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    opt.n_classes = train_loader.dataset.num_classes

    print('===> Loading the network ...')
    opt.model = getattr(models, opt.model_name)(opt).to(opt.device)
    if opt.multi_gpus:
        opt.model = DataParallel(getattr(models,
                                         opt.model_name)(opt)).to(opt.device)
    print('===> loading pre-trained ...')
    load_pretrained_models(opt)

    print('===> Init the optimizer ...')
    opt.criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    opt.valid_metric = miou
    opt.optimizer = torch.optim.Adam(opt.model.parameters(), lr=opt.lr)
    opt.scheduler = torch.optim.lr_scheduler.StepLR(opt.optimizer,
                                                    opt.lr_adjust_freq, 0.5)
    load_pretrained_optimizer(opt)

    print('===> start training ...')
    for _ in range(opt.total_epochs):
        opt.epoch += 1
        train(train_loader, opt)
        # valid(train_loader, opt)
        opt.scheduler.step()
    print('Saving the final model. Finished!')
Example #23
def get_dataloader(num_points, b_size, name='10'):
    path = 'ModelNet' + name
    pre_transform = T.NormalizeScale()
    transform = T.SamplePoints(num_points)

    train_dataset = ModelNet('dataset/' + path,
                             name=name,
                             train=True,
                             transform=transform,
                             pre_transform=pre_transform)
    test_dataset = ModelNet('dataset/' + path,
                            name=name,
                            train=False,
                            transform=transform,
                            pre_transform=pre_transform)

    train_loader = DataLoader(train_dataset, batch_size=b_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=b_size, shuffle=False)

    return train_loader, test_loader
Example #24
def main(epochs, lr, classification, pool, heatmap):
    print('pool', pool)
    data_path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'coseg', 'vases')
    arguments = 'c{}_e{}_lr{}_p{}'.format(classification, epochs, lr, '-'.join([str(x) for x in pool]))
    run_path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'runs', arguments)
    if os.path.exists(run_path):
        shutil.rmtree(run_path)
    os.makedirs(run_path)

    # pre_filter = lambda x: x.shape_id.item() < 10
    pre_transform = T.Compose(
        [T.NormalizeScale(), FaceToEdge(remove_faces=False), CalcEdgeFeaturesTransform(), AddMeshStructureTransform()])
    train_dataset = COSEG(data_path, classification=classification, train=True, pre_transform=pre_transform)
    test_dataset = COSEG(data_path, classification=classification, train=False, pre_transform=pre_transform)
    train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=1)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)
    model = Net(train_dataset.num_classes, pool, run_path, heatmap).to(device)

    train_writer = SummaryWriter(run_path + '/train')
    test_writer = SummaryWriter(run_path + '/test')
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    visualize_gt(10, train_loader, test_loader, run_path, heatmap)

    for epoch in range(epochs):
        train_acc, train_loss = train(model, device, optimizer, train_loader)
        train_writer.add_scalar('accuracy', train_acc, epoch)
        train_writer.add_scalar('loss', train_loss, epoch)
        test_acc, test_loss = test(model, device, test_loader)
        test_writer.add_scalar('accuracy', test_acc, epoch)
        test_writer.add_scalar('loss', test_loss, epoch)
        print('epoch: {:04d}, train acc: {:.4f}, test acc: {:.4f}'.format(epoch + 1, train_acc, test_acc))

    train_writer.close()
    test_writer.close()

    visualize_pred(10, train_loader, test_loader, run_path, model, device)
Example #25
def main():
    opt = OptInit().get_args()

    logging.info('===> Creating dataloader...')
    test_dataset = GeoData.S3DIS(opt.data_dir,
                                 opt.area,
                                 train=False,
                                 pre_transform=T.NormalizeScale())
    test_loader = DenseDataLoader(test_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=False,
                                  num_workers=0)
    opt.n_classes = test_loader.dataset.num_classes
    if opt.no_clutter:
        opt.n_classes -= 1

    logging.info('===> Loading the network ...')
    model = DenseDeepGCN(opt).to(opt.device)
    model, opt.best_value, opt.epoch = load_pretrained_models(
        model, opt.pretrained_model, opt.phase)

    logging.info('===> Start Evaluation ...')
    test(model, test_loader, opt)
Example #26
    def __init__(self,
                 data_dir,
                 batch_size,
                 shuffle,
                 validation_split,
                 num_workers,
                 num_points,
                 training=True):
        # trsfm = transforms.Compose([
        #     transforms.ToTensor(),
        #     transforms.Normalize((0.1307,), (0.3081,))
        # ])

        self.data_dir = data_dir
        path = osp.join(self.data_dir, 'ModelNet10')
        pre_transform, transform = T.NormalizeScale(), T.SamplePoints(
            num_points)

        train_dataset = ModelNet(path, '10', training, transform,
                                 pre_transform)

        super(MyModelNetDataLoader, self).__init__(train_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=shuffle)
Example #27
def augment_transforms(args):
    """
    define transformation
    """
    pre_transform = None
    if args.norm == 'scale':
        pre_transform = T.NormalizeScale()
    elif args.norm == 'sphere':
        pre_transform = NormalizeSphere(center=True)
    elif args.norm == 'sphere_wo_center':
        pre_transform = NormalizeSphere(center=False)
    else:
        pass

    transform = []
    if args.dataset == 'shapenet':
        transform.append(T.FixedPoints(args.num_pts))
    if args.dataset == 'modelnet':
        transform.append(T.SamplePoints(args.num_pts))

    # if args.is_randRotY:
    #     transform.append(T.RandomRotate(180, axis=1))
    transform = T.Compose(transform)
    return pre_transform, transform
Example #28
import os.path as osp

import torch
from torchmetrics.functional import jaccard_index

import torch_geometric.transforms as T
from torch_geometric.datasets import ShapeNet
from torch_geometric.loader import DataLoader
from torch_geometric.nn import MLP, DynamicEdgeConv

category = 'Airplane'  # Pass in `None` to train on all categories.
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'ShapeNet')
transform = T.Compose([
    T.RandomTranslate(0.01),
    T.RandomRotate(15, axis=0),
    T.RandomRotate(15, axis=1),
    T.RandomRotate(15, axis=2)
])
pre_transform = T.NormalizeScale()
train_dataset = ShapeNet(path, category, split='trainval', transform=transform,
                         pre_transform=pre_transform)
test_dataset = ShapeNet(path, category, split='test',
                        pre_transform=pre_transform)
train_loader = DataLoader(train_dataset, batch_size=10, shuffle=True,
                          num_workers=6)
test_loader = DataLoader(test_dataset, batch_size=10, shuffle=False,
                         num_workers=6)


class Net(torch.nn.Module):
    def __init__(self, out_channels, k=30, aggr='max'):
        super().__init__()

        self.conv1 = DynamicEdgeConv(MLP([2 * 6, 64, 64]), k, aggr)
Example #29
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    if args.random_seed:
        args.seed = int(np.random.randint(0, 1000))

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # dataset modelnet
    pre_transform, transform = T.NormalizeScale(), T.SamplePoints(
        args.num_points)
    train_dataset = GeoData.ModelNet(os.path.join(args.data, 'modelnet10'),
                                     '10', True, transform, pre_transform)
    train_queue = DenseDataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.batch_size // 2)
    test_dataset = GeoData.ModelNet(os.path.join(args.data, 'modelnet10'),
                                    '10', False, transform, pre_transform)
    valid_queue = DenseDataLoader(test_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.batch_size // 2)
    n_classes = train_queue.dataset.num_classes

    criterion = torch.nn.CrossEntropyLoss().cuda()
    model = Network(args.init_channels,
                    n_classes,
                    args.num_cells,
                    criterion,
                    args.n_steps,
                    in_channels=args.in_channels,
                    emb_dims=args.emb_dims,
                    dropout=args.dropout,
                    k=args.k).cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    num_edges = model._steps * 2
    post_train = 5
    # import pdb;pdb.set_trace()
    args.epochs = args.warmup_dec_epoch + args.decision_freq * (
        num_edges - 1) + post_train + 1
    logging.info("total epochs: %d", args.epochs)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs), eta_min=args.learning_rate_min)

    architect = Architect(model, args)

    normal_selected_idxs = torch.tensor(len(model.alphas_normal) * [-1],
                                        requires_grad=False,
                                        dtype=torch.int).cuda()
    normal_candidate_flags = torch.tensor(len(model.alphas_normal) * [True],
                                          requires_grad=False,
                                          dtype=torch.bool).cuda()
    logging.info('normal_selected_idxs: {}'.format(normal_selected_idxs))
    logging.info('normal_candidate_flags: {}'.format(normal_candidate_flags))
    model.normal_selected_idxs = normal_selected_idxs
    model.normal_candidate_flags = normal_candidate_flags

    print(F.softmax(torch.stack(model.alphas_normal, dim=0), dim=-1).detach())

    count = 0
    normal_probs_history = []
    train_losses, valid_losses = utils.AverageMeter(), utils.AverageMeter()
    for epoch in range(args.epochs):
        lr = scheduler.get_lr()[0]
        logging.info('epoch %d lr %e', epoch, lr)
        # training
        # import pdb;pdb.set_trace()
        att = model.show_att()
        beta = model.show_beta()
        train_acc, train_losses = train(train_queue, valid_queue, model,
                                        architect, criterion, optimizer, lr,
                                        train_losses)
        valid_overall_acc, valid_class_acc, valid_losses = infer(
            valid_queue, model, criterion, valid_losses)

        logging.info(
            'train_acc %f\tvalid_overall_acc %f \t valid_class_acc %f',
            train_acc, valid_overall_acc, valid_class_acc)
        logging.info('beta %s', beta.cpu().detach().numpy())
        logging.info('att %s', att.cpu().detach().numpy())
        # make edge decisions
        saved_memory_normal, model.normal_selected_idxs, \
        model.normal_candidate_flags = edge_decision('normal',
                                                     model.alphas_normal,
                                                     model.normal_selected_idxs,
                                                     model.normal_candidate_flags,
                                                     normal_probs_history,
                                                     epoch,
                                                     model,
                                                     args)

        if saved_memory_normal:
            del train_queue, valid_queue
            torch.cuda.empty_cache()

            count += 1
            new_batch_size = args.batch_size + args.batch_increase * count
            logging.info("new_batch_size = {}".format(new_batch_size))
            train_queue = DenseDataLoader(train_dataset,
                                          batch_size=new_batch_size,
                                          shuffle=True,
                                          num_workers=args.batch_size // 2)
            valid_queue = DenseDataLoader(test_dataset,
                                          batch_size=new_batch_size,
                                          shuffle=False,
                                          num_workers=args.batch_size // 2)
            # post validation
            if args.post_val:
                post_valid_overall_acc, post_valid_class_acc, valid_losses = infer(
                    valid_queue, model, criterion, valid_losses)
                logging.info('post_valid_overall_acc %f',
                             post_valid_overall_acc)

        writer.add_scalar('stats/train_acc', train_acc, epoch)
        writer.add_scalar('stats/valid_overall_acc', valid_overall_acc, epoch)
        writer.add_scalar('stats/valid_class_acc', valid_class_acc, epoch)
        utils.save(model, os.path.join(args.save, 'weights.pt'))
        scheduler.step()

    logging.info("#" * 30 + " Done " + "#" * 30)
    logging.info('genotype = %s', model.get_genotype())
Example #30
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    np.random.seed(args.seed)
    num_classes = 2
    transforms = []
    if args.max_points > 0:
        transforms.append(T.FixedPoints(args.max_points))
    if args.augment:
        transforms.append(T.RandomRotate((-180, 180),
                                         axis=2))  # Rotate around z axis
        transforms.append(T.RandomFlip(0))  # Flip about x axis
        transforms.append(T.RandomFlip(1))  # Flip about y axis
        transforms.append(T.RandomTranslate(0.0001))  # Random jitter
    if args.norm:
        transforms.append(T.NormalizeScale())
    transform = T.Compose(transforms=transforms) if transforms else None
    train_dataset = EventDataset(args.dataset,
                                 "trainval",
                                 include_proton=True,
                                 task="separation",
                                 cleanliness=args.clean,
                                 pre_transform=None,
                                 transform=transform,
                                 balanced_classes=True,
                                 fraction=0.001)
    test_dataset = EventDataset(args.dataset,
                                "test",
                                include_proton=True,
                                task="separation",
                                cleanliness=args.clean,