# --- Command-line options (continuation of the parser built above) ---
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
# BUGFIX: was `type=bool`, which argparse treats as `bool(string)` — any
# non-empty value (even "--eval False") parsed as True. `store_true` gives
# the intended flag semantics while keeping the default of False.
parser.add_argument('--eval', action='store_true', default=False,
                    help='evaluate the model')
parser.add_argument('--num_points', type=int, default=1024,
                    help='num of points to use')
parser.add_argument('--dropout', type=float, default=0.5,
                    help='dropout rate')
parser.add_argument('--model_path', type=str, default='', metavar='N',
                    help='Pretrained model path')
args = parser.parse_args()

# Create the checkpoint directory, then log the full configuration.
_init_()
io = IOStream('checkpoints/' + args.exp_name + '/run.log')
io.cprint(str(args))

# Decide device and seed both CPU and (if available) GPU RNGs so runs
# are reproducible under the same --seed.
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    io.cprint(
        'Using GPU : ' + str(torch.cuda.current_device()) + ' from '
        + str(torch.cuda.device_count()) + ' devices')
    torch.cuda.manual_seed(args.seed)
else:
    io.cprint('Using CPU')

# Dispatch: training by default, evaluation when --eval is given.
if not args.eval:
    train(args, io)
else:
    test(args, io)
# Training settings: build the CLI for the point-cloud recognition run.
parser = argparse.ArgumentParser(description='Point Cloud Recognition')
parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
                    help='Name of the experiment')
# BUGFIX: help text had a stray closing paren ('Size of batch)').
parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
                    help='Size of batch')
parser.add_argument('--epochs', type=int, default=250, metavar='N',
                    help='number of episode to train ')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                    help='learning rate (default: 0.001)')
args = parser.parse_args()

# Create the checkpoint directory, log the configuration, and start training.
_init_()
io = IOStream('checkpoints/' + args.exp_name + '/run.log')
io.cprint(str(args))
train(args, io)
# NOTE(review): this span is the interior of a larger training routine — the
# enclosing `def` and the `for`-loop header that owns the first statements are
# outside this chunk. Code below is unchanged; only comments were added.

# Tail of the per-batch TRAINING step: compute loss, backprop, update weights.
loss = criterion(logits, label)
loss.backward()
opt.step()
# Predicted class = argmax over the class-logit dimension.
preds = logits.max(dim=1)[1]
# Accumulate per-batch statistics (loss is averaged per sample, so re-weight
# by batch size before summing).
count += batch_size
train_loss += loss.item() * batch_size
train_true.append(label.cpu().numpy())
train_pred.append(preds.detach().cpu().numpy())

# End of the epoch: flatten the per-batch arrays and report overall and
# class-balanced accuracy for the training set.
train_true = np.concatenate(train_true)
train_pred = np.concatenate(train_pred)
outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (
    epoch,
    train_loss * 1.0 / count,
    metrics.accuracy_score(train_true, train_pred),
    metrics.balanced_accuracy_score(train_true, train_pred))
io.cprint(outstr)

####################
# Test
####################
# Reset the accumulators and switch to inference mode for the test pass.
# NOTE(review): there is no `torch.no_grad()` around this evaluation loop, so
# the forward passes still build autograd graphs — confirm whether that is
# intentional (it wastes memory during evaluation).
test_loss = 0.0
count = 0.0
model.eval()
test_pred = []
test_true = []
for data, label in tqdm(test_loader):
    # DataLoader yields (B, N, C); the model expects channels-first (B, C, N).
    data = data.permute(0, 2, 1)
    data, label = data.cuda(), label.squeeze().cuda()
    batch_size = data.size()[0]
    logits = model(data)
    loss = criterion(logits, label)
def experiment(n_points_choices, path):
    """Train DGCNN on ScanObject_coseg with variable-size training batches,
    evaluating after every epoch on fixed test resolutions (256/512/1024/2048
    points).

    Args:
        n_points_choices: point-count choices handed to TrainingBatchSampler
            (stored on the global ``args``).
        path: experiment sub-directory name under ``checkpoints/``.
    """
    args.n_points_choices = n_points_choices
    args.path = path
    # make path
    _init_()
    # record args
    io = IOStream('checkpoints/' + args.path + '/run.log')
    io.cprint(str(args))
    io.cprint('Using GPU : ' + str(torch.cuda.current_device()) + ' from '
              + str(torch.cuda.device_count()) + ' devices')
    # set seeds (fixed at 1 for reproducibility across experiments)
    torch.manual_seed(1)
    torch.cuda.manual_seed(1)

    # initialize train_loader: the sampler picks a point count per batch
    train_dataset = ScanObject_coseg(partition='training')
    train_sampler = TrainingBatchSampler(train_dataset, args.n_points_choices,
                                         args.batch_size)
    train_loader = DataLoader(train_dataset, batch_sampler=train_sampler,
                              num_workers=32)

    # One test loader per fixed evaluation resolution. Built in a single
    # comprehension instead of four copy-pasted blocks (the copy-paste
    # previously let a wrong point count — 515 instead of 512 — slip into
    # one of the test() calls below; that is fixed here).
    test_loaders = {
        n: DataLoader(ScanObject_coseg(partition='test', n_points=n),
                      num_workers=16, batch_size=args.test_batch_size)
        for n in (256, 512, 1024, 2048)
    }

    # Load model and spread it over all visible GPUs.
    device = torch.device("cuda")
    model = DGCNN_cls(args).to(device)
    model = nn.DataParallel(model)

    # Use SGD and CosineAnnealingLR to train
    print("Use SGD")
    opt = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
                    weight_decay=1e-4)
    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=1e-3)

    # start training
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    for i in range(args.epochs):
        io.cprint('Epoch [%d]' % (i + 1))
        # train model
        train(args, io, model, train_loader, opt)
        # adjust learning rate
        scheduler.step()
        # test at every resolution (labels kept byte-identical to the
        # original log strings, incl. the trailing-space padding).
        test(args, io, model, test_loaders[256], 'Test 256 ', 256)
        # BUGFIX: last argument was 515; it must match the loader's 512 points.
        test(args, io, model, test_loaders[512], 'Test 512 ', 512)
        test(args, io, model, test_loaders[1024], 'Test 1024', 1024)
        test(args, io, model, test_loaders[2048], 'Test 2048', 2048)