def prepare_data(self):
    """Create the ModelNet40 classification datasets.

    The training split gets the full point-cloud augmentation pipeline;
    the validation split is loaded without any transforms.
    """
    augmentations = transforms.Compose([
        d_utils.PointcloudToTensor(),
        d_utils.PointcloudScale(),
        d_utils.PointcloudRotate(),
        d_utils.PointcloudRotatePerturbation(),
        d_utils.PointcloudTranslate(),
        d_utils.PointcloudJitter(),
        d_utils.PointcloudRandomInputDropout(),
    ])

    num_points = self.hparams["num_points"]
    self.train_dset = ModelNet40Cls(
        num_points, transforms=augmentations, train=True)
    self.val_dset = ModelNet40Cls(
        num_points, transforms=None, train=False)
    def prepare_data(self):
        """Create the ModelNet40 datasets, prepending RandAugment to the
        usual training augmentation pipeline.

        The validation split is loaded without any transforms.
        """
        from pointnet2.data.RandAugment import RandAugment

        n_ops = 4      # RandAugment N: number of augmentations applied
        magnitude = 4  # RandAugment M: strength of each augmentation
        augmentations = transforms.Compose([
            d_utils.PointcloudToTensor(),
            RandAugment(n_ops, magnitude),
            d_utils.PointcloudScale(),
            d_utils.PointcloudRotate(),
            d_utils.PointcloudRotatePerturbation(),
            d_utils.PointcloudTranslate(),
            d_utils.PointcloudJitter(),
            d_utils.PointcloudRandomInputDropout(),
        ])

        num_points = self.hparams.num_points
        self.train_dset = ModelNet40Cls(
            num_points, transforms=augmentations, train=True)
        self.val_dset = ModelNet40Cls(
            num_points, transforms=None, train=False)
    return parser.parse_args()


# Presumably lower bounds ("floors") applied when decaying the learning rate
# and the batch-norm momentum — confirm against the scheduler/optimizer setup.
lr_clip = 1e-5   # minimum learning rate after decay
bnm_clip = 1e-2  # minimum batch-norm momentum after decay

if __name__ == "__main__":
    args = parse_args()

    transforms = transforms.Compose([
        d_utils.PointcloudToTensor(),
        d_utils.PointcloudScale(),
        d_utils.PointcloudRotate(),
        d_utils.PointcloudRotatePerturbation(),
        d_utils.PointcloudTranslate(),
        d_utils.PointcloudJitter(),
        d_utils.PointcloudRandomInputDropout(),
    ])

    num_envs = 100
    num_views = 100
    dataset = ActiveVisionDataset(args.data_dir, num_envs, num_views, 750)

    # Create data indices for training and validation splits
    validation_split = .2
    shuffle_dataset = True
    random_seed = 42
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_split * dataset_size))
    if shuffle_dataset:
# ----- Beispiel #4 (scraped example separator; original stray lines kept as comments) -----
# 0
        gallery = os.listdir(cpath)
        for gname in gallery:
            gpath = os.path.join(cpath, gname)
            pt_list.append(gpath)
            class_list.append(cname)
    return pt_list, class_list


if __name__ == "__main__":
    args = parse_args()
    # NOTE(review): `log_file` is not defined anywhere in this view —
    # presumably a module-level constant elsewhere; verify before running.
    # NOTE(review): the handle is never closed in the visible code; prefer
    # `with open(...)` (or an explicit close) once logging is done.
    f = open(log_file, 'w')
    # NOTE(review): this rebinds the name `transforms`, shadowing the imported
    # transforms module for the rest of the script.
    transforms = transforms.Compose([
        d_utils.PointcloudRotate(axis=np.array([1, 0, 0])),
        d_utils.PointcloudRotate(axis=np.array([0, 1, 0])),
        d_utils.PointcloudRotate(axis=np.array([0, 0, 1])),
        d_utils.PointcloudJitter(std=0.002),
    ])
    # NOTE(review): the composed transforms above are unused — the dataset is
    # built with `transforms=None`; confirm whether that is intended.
    train_dataset = TripletFaceDataset(root='',
                                       n_triplets=args.num_triplet,
                                       n_points=args.num_points,
                                       class_nums=500,
                                       transforms=None,
                                       extensions='bcnc')

    train_loader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )
def main(cfg):
    '''Evaluate a pretrained PointNetCls checkpoint on ModelNet40.

    Loads model weights from a (hard-coded) checkpoint, iterates once over
    the dataset, reports running overall accuracy via `progress_bar`, and
    accumulates per-class statistics in `class_acc`.

    Parameters
    ----------
    cfg : hydra config node, e.g.::

        {'gpus': [0],
         'optimizer': {'weight_decay': 0.0, 'lr': 0.001, 'lr_decay': 0.7,
                       'bn_momentum': 0.5, 'bnm_decay': 0.5,
                       'decay_step': 20000.0},
         'task_model': {'class': 'pointnet2.models.PointNet2ClassificationSSG',
                        'name': 'cls-ssg'},
         'model': {'use_xyz': True}, 'distrib_backend': 'dp',
         'num_points': 1024, 'epochs': 200, 'batch_size': 32}

        hydra_params_to_dotdict(cfg) flattens this to dotted keys, e.g.
        {'optimizer.weight_decay': 0.0, ..., 'task_model.name': 'cls-ssg',
         'model.use_xyz': True, 'num_points': 1024, 'batch_size': 32}

    NOTE(review): `cfg` is not actually read anywhere in the visible body.

    Returns
    -------
    None — results are only reported through `progress_bar`.
    '''
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # 40 ModelNet classes; `bool(1)` is just True — presumably the model's
    # feature-transform flag, confirm against the PointNetCls signature.
    model = PointNetCls(40, bool(1)).to(device)
    # NOTE(review): hard-coded absolute checkpoint path — should come from cfg.
    checkpoint = torch.load(
        '/home/jinwei/Pointnet2_PyTorch/pointnet2/checkpoints/example.pth')
    model.load_state_dict(checkpoint['model_state_dict'])
    # early_stop_callback = pl.callbacks.EarlyStopping(patience=5)
    # checkpoint_callback = pl.callbacks.ModelCheckpoint(
    #     monitor="val_acc",
    #     mode="max",
    #     save_top_k=2,
    #     filepath=os.path.join(
    #         cfg.task_model.name, "{epoch}-{val_loss:.2f}-{val_acc:.3f}"
    #     ),
    #     verbose=True,
    # )

    train_transforms = transforms.Compose([
        d_utils.PointcloudToTensor(),
        d_utils.PointcloudScale(),
        d_utils.PointcloudRotate(),
        d_utils.PointcloudRotatePerturbation(),
        d_utils.PointcloudTranslate(),
        d_utils.PointcloudJitter(),
        d_utils.PointcloudRandomInputDropout(),
    ])
    # NOTE(review): built with train=True (and random augmentations) yet
    # consumed as `test_loader` below — confirm which split and transforms
    # evaluation should really use.
    dset = ModelNet40Cls(2048, transforms=train_transforms, train=True)

    test_loader = DataLoader(
        dset,
        batch_size=8,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
        drop_last=False,
    )
    model.eval()
    correct = 0
    total = 0
    # class_acc[c] = [summed per-batch accuracy, number of batches containing
    # class c, <unused in the visible code>]
    class_acc = np.zeros((40, 3))
    # NOTE(review): no torch.no_grad() around this loop — autograd state is
    # tracked needlessly during evaluation.
    for j, data in enumerate(test_loader, 0):
        points, label = data
        points, label = points.to(device), label.to(device)

        points = points.transpose(2, 1)  # to be shape batch_size*3*N
        points = points[:, :3, :]  # keep only the first 3 channels (xyz)
        pred, trans_feat = model(points)
        pred_choice = pred.data.max(1)[1]  # argmax over class scores
        for cat in np.unique(label.cpu()):
            # print(pred_choice[target==cat].long().data)
            # correct predictions among this batch's samples of class `cat`
            classacc = pred_choice[label == cat].eq(
                label[label == cat].long().data).cpu().sum()
            # add this batch's accuracy for class `cat` (averaged later,
            # presumably, by dividing column 0 by column 1)
            class_acc[cat, 0] += classacc.item() / float(
                points[label == cat].size()[0])
            class_acc[cat, 1] += 1
        # print(pred.shape, label.shape)

        # NOTE(review): recomputed — identical to the `pred_choice` above.
        pred_choice = pred.data.max(1)[1]
        correct += pred_choice.eq(label.data).cpu().sum()
        total += label.size(0)
        progress_bar(
            j, len(test_loader), ' Test Acc: %.3f%% (%d/%d)' %
            (100. * correct.item() / total, correct, total))