Example no. 1
def build_dataset(args):
    """Build and return the (train, test) datasets selected by args.dataset_type."""
    if args.dataset_type == 'shapenet':
        dataset = ShapeNetDataset(
            root=args.dataset,
            classification=True,
            npoints=args.num_points,
        )
        test_dataset = ShapeNetDataset(
            root=args.dataset,
            classification=True,
            split='test',
            npoints=args.num_points,
            data_augmentation=False,
        )
    elif args.dataset_type == 'modelnet40':
        dataset = ModelNetDataset(
            root=args.dataset,
            npoints=args.num_points,
            split='trainval',
        )
        test_dataset = ModelNetDataset(
            root=args.dataset,
            split='test',
            npoints=args.num_points,
            data_augmentation=False,
        )
    else:
        exit('wrong dataset type')
    return dataset, test_dataset
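A minimal usage sketch, assuming an argparse namespace with the three fields read above (dataset, dataset_type, num_points); the flag names and batch size below are illustrative:

import argparse
import torch

parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, required=True, help='dataset root directory')
parser.add_argument('--dataset_type', type=str, default='shapenet', choices=['shapenet', 'modelnet40'])
parser.add_argument('--num_points', type=int, default=2500)
args = parser.parse_args()

dataset, test_dataset = build_dataset(args)
# Wrap the returned datasets in DataLoaders for training and evaluation.
train_loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False)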
Example no. 2
def main():

    best_prec1 = 0
    model = MVConv()
    if use_gpu:
        model.cuda()
    print(model)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    print(lr, weight_decay)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr,
                                weight_decay=weight_decay)

    cudnn.benchmark = True

    transformed_train_dataset = ModelNetDataset(root_dir=train_data_root,
                                                phase='train',
                                                transform=transforms.Compose(
                                                    [ToTensor()]))

    transformed_valid_dataset = ModelNetDataset(root_dir=train_data_root,
                                                phase='test',
                                                transform=transforms.Compose(
                                                    [ToTensor()]))

    # Loading dataset into dataloader
    train_loader = torch.utils.data.DataLoader(transformed_train_dataset,
                                               batch_size=train_batch_size,
                                               shuffle=True,
                                               num_workers=num_workers)

    val_loader = torch.utils.data.DataLoader(transformed_valid_dataset,
                                             batch_size=test_batch_size,
                                             shuffle=True,
                                             num_workers=num_workers)

    start_time = time.time()

    # Train for all epochs from 0 to epochs - 1
    for epoch in range(epochs):
        adjust_learning_rate(optimizer, epoch)

        # train on train dataset
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(model.state_dict(), is_best, trained_model_path)

    end_time = time.time()
    duration = (end_time - start_time) / 3600  # hours
    print("Duration: {:.2f} hours".format(duration))
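adjust_learning_rate is not shown above; a minimal sketch, assuming the common step schedule that divides the base lr by 10 every 30 epochs (the author's actual schedule may differ):

def adjust_learning_rate(optimizer, epoch):
    # Assumed step decay: base lr scaled by 0.1 every 30 epochs.
    new_lr = lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr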
Example no. 3
def train(cfg):
    print(cfg.pretty())

    ds = ModelNetDataset(cfg)
    dataloader = torch.utils.data.DataLoader(ds,
                                             batch_size=cfg.batch_size,
                                             num_workers=0,
                                             pin_memory=True,
                                             shuffle=True,
                                             drop_last=True)

    model = Model(cfg).cuda()
    model.train()

    crit = SpreadLoss(cfg.min_margin)
    opt = torch.optim.Adam(model.parameters(), lr=1e-3)

    for e in range(cfg.max_epoch):
        td = tqdm(dataloader)
        for i, batch in enumerate(td):
            pts, lrfs, labels = batch

            pts = pts.cuda()
            lrfs = lrfs.cuda()
            labels = labels.cuda()

            opt.zero_grad()

            # with torch.autograd.detect_anomaly():
            output_cap, output_a = model(pts, lrfs)
            loss = crit(output_a, labels)
            loss.backward()
            # torch.nn.utils.clip_grad_norm_(model.parameters(), 0.01)

            # Guard against NaN gradients before the optimizer step;
            # parameters unused in this forward pass have grad=None and are skipped.
            for p in model.parameters():
                if p.grad is not None:
                    assert not torch.any(torch.isnan(p.grad))
            opt.step()

            td.set_description('iter {}/{}'.format(i, len(td)))
            td.set_postfix({'loss': loss.item()})
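SpreadLoss is constructed from cfg.min_margin but not defined here; a minimal sketch of the standard spread loss from matrix-capsule networks, assuming output_a holds per-class activations of shape (batch, num_classes) and ignoring any margin-growth schedule:

import torch

class SpreadLoss(torch.nn.Module):
    def __init__(self, margin):
        super().__init__()
        self.margin = margin

    def forward(self, activations, labels):
        # Activation of the target class, shape (batch, 1).
        a_t = activations.gather(1, labels.view(-1, 1))
        # Penalize wrong classes whose activation comes within `margin` of the target's.
        gap = torch.clamp(self.margin - (a_t - activations), min=0) ** 2
        gap = gap.scatter(1, labels.view(-1, 1), 0.0)  # zero out the target class
        return gap.sum() / activations.size(0)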
Example no. 4
# Parameter settings
MAX_EPOCH = int(args.epoch)
BATCH_SIZE = int(args.batch)
LR = float(args.lr)
L2_REG = float(args.reg)
log_interval = 1  # print training progress every this many batches
val_interval = 1  # run validation every this many epochs
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # select device

# ============================ step 1/6 Data ============================

root = '/root/CYX_Space/3d/modelnet40_ply_hdf5_2048/'
train_data_list = 'train_files.txt'
test_data_list = 'test_files.txt'
train_dataset = ModelNetDataset(root, train_data_list)
test_dataset = ModelNetDataset(root, test_data_list)
print('The number of samples in training set / testing set:')
print(len(train_dataset))
print(len(test_dataset), '\n')

trainloader = DataLoader(dataset=train_dataset,
                         batch_size=BATCH_SIZE,
                         shuffle=True,
                         num_workers=4)
testloader = DataLoader(dataset=test_dataset,
                        batch_size=BATCH_SIZE,
                        shuffle=True,
                        num_workers=4)

# ============================ step 2/6 Model ============================
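The snippet ends at the model step; a hypothetical continuation in the same step layout (the class name PointNetCls and the wiring below are assumptions, not the original code):

model = PointNetCls(k=40).to(device)  # hypothetical classifier over the 40 ModelNet classes
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LR, weight_decay=L2_REG)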
Example no. 5
    # Load the trained weights of multi-view VGG16_bn CNN model
    if len(sys.argv) > 1:
        trained_model = sys.argv[1]
    else:
        trained_model = trained_model_path_best

    model = MVConv()
    # map_location keeps CPU-only machines working when the checkpoint was saved on GPU.
    model.load_state_dict(torch.load(trained_model,
                                     map_location='cuda' if use_gpu else 'cpu'))

    if use_gpu:
        model.cuda()

    print(model)
    cudnn.benchmark = True

    criterion = nn.CrossEntropyLoss().cuda()

    # Loading the test data
    transformed_valid_dataset = ModelNetDataset(root_dir=train_data_root,
                                                phase='test',
                                                transform=transforms.Compose(
                                                    [ToTensor()]))

    # Loading dataset into dataloader
    test_loader = torch.utils.data.DataLoader(transformed_valid_dataset,
                                              batch_size=test_batch_size,
                                              shuffle=True,
                                              num_workers=num_workers)

    prec1 = test(test_loader, model, criterion)
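The test helper is not included; a minimal sketch, assuming it returns top-1 accuracy in percent (the author's version may also report loss):

def test(loader, model, criterion):
    # criterion is kept only to match the call signature above.
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for inputs, targets in loader:
            if use_gpu:
                inputs, targets = inputs.cuda(), targets.cuda()
            outputs = model(inputs)
            correct += (outputs.argmax(dim=1) == targets).sum().item()
            total += targets.size(0)
    return 100.0 * correct / total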
Example no. 6
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

if opt.dataset_type == 'shapenet':
    dataset = ShapeNetDataset(root=opt.dataset,
                              classification=True,
                              npoints=opt.num_points)

    test_dataset = ShapeNetDataset(root=opt.dataset,
                                   classification=True,
                                   split='test',
                                   npoints=opt.num_points,
                                   data_augmentation=False)
elif opt.dataset_type == 'modelnet40':
    dataset = ModelNetDataset(root=opt.dataset,
                              npoints=opt.num_points,
                              split='trainval')

    test_dataset = ModelNetDataset(root=opt.dataset,
                                   split='test',
                                   npoints=opt.num_points,
                                   data_augmentation=False)
else:
    exit('wrong dataset type')

dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchSize,
                                         shuffle=True,
                                         num_workers=int(opt.workers))

testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))
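The snippet stops before the training loop; a minimal sketch of one epoch over dataloader, assuming a PointNet-style classifier that returns log-probabilities — classifier, optimizer, and opt.nepoch are assumptions here, not shown in the original:

import torch.nn.functional as F

for epoch in range(opt.nepoch):
    for points, target in dataloader:
        points = points.transpose(2, 1).cuda()  # (B, 3, N) layout expected by PointNet
        target = target.view(-1).cuda()
        optimizer.zero_grad()
        pred = classifier(points)[0]  # assumed: model returns (log_probs, ...)
        loss = F.nll_loss(pred, target)
        loss.backward()
        optimizer.step()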