        train_pred_cls.append(pred_np.reshape(-1))
        train_true_seg.append(seg_np)
        train_pred_seg.append(pred_np)

    train_true_cls = np.concatenate(train_true_cls)
    train_pred_cls = np.concatenate(train_pred_cls)
    train_acc = metrics.accuracy_score(train_true_cls, train_pred_cls)
    avg_per_class_acc = metrics.balanced_accuracy_score(
        train_true_cls, train_pred_cls)
    train_true_seg = np.concatenate(train_true_seg, axis=0)
    train_pred_seg = np.concatenate(train_pred_seg, axis=0)
    train_ious = calculate_sem_IoU(train_pred_seg, train_true_seg)
    # Epoch-level training summary.
    print(' * Train [%d] * loss: %.6f '
          'train acc: %.6f '
          'train avg acc: %.6f '
          'train iou: %.6f' % (epoch,
                               train_loss * 1.0 / count,
                               train_acc,
                               avg_per_class_acc,
                               np.mean(train_ious)))
    return


# def test(args, io, SampleNetModel, DGCNNModel, train_loader, opt):
#     pass


if __name__ == "__main__":
    torch.manual_seed(1)
    torch.cuda.manual_seed(1)
    args = get_args()
    io = IOStream('run.log')
    main(args, io)
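# calculate_sem_IoU is called above but not defined in this fragment. Below is
# a minimal sketch of a per-class semantic-segmentation IoU, assuming integer
# labels in [0, num_classes) and arrays shaped (num_samples, num_points); the
# name and signature come from the call site, the body and the default class
# count are assumptions. In a real script this would live above the entry
# point (e.g., in a shared utility module).
import numpy as np


def calculate_sem_IoU(pred_seg, true_seg, num_classes=13):
    ious = []
    for cls in range(num_classes):
        intersection = np.sum((pred_seg == cls) & (true_seg == cls))
        union = np.sum((pred_seg == cls) | (true_seg == cls))
        # A class absent from both prediction and ground truth counts as IoU 1.
        ious.append(intersection / union if union > 0 else 1.0)
    return np.array(ious)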
                        help='enables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    # Note: argparse's type=bool treats any non-empty string as True, so
    # '--eval False' still evaluates to True; only the default is reliable here.
    parser.add_argument('--eval', type=bool, default=False,
                        help='evaluate the model')
    parser.add_argument('--num_points', type=int, default=1024,
                        help='num of points to use')
    parser.add_argument('--dropout', type=float, default=0.5,
                        help='dropout rate')
    parser.add_argument('--model_path', type=str, default='', metavar='N',
                        help='Pretrained model path')
    args = parser.parse_args()

    _init_()

    io = IOStream('checkpoints/' + args.exp_name + '/run.log')
    io.cprint(str(args))

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        io.cprint('Using GPU : ' + str(torch.cuda.current_device()) +
                  ' from ' + str(torch.cuda.device_count()) + ' devices')
        torch.cuda.manual_seed(args.seed)
    else:
        io.cprint('Using CPU')

    if not args.eval:
        train(args, io)
    else:
        test(args, io)
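# IOStream is used throughout these scripts but not defined in this fragment.
# A minimal sketch of a logger that echoes to stdout and appends to a log
# file; the class name and the cprint/close methods are taken from the call
# sites, the body is an assumption. It would normally sit in a shared utility
# module imported at the top of each script.
class IOStream:
    def __init__(self, path):
        self.f = open(path, 'a')

    def cprint(self, text):
        # Print to the console and persist the same line to the log file.
        print(text)
        self.f.write(text + '\n')
        self.f.flush()

    def close(self):
        self.f.close()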
def main():
    arg_bool = lambda x: x.lower() in ['true', 't', '1']

    parser = argparse.ArgumentParser(description='Point Cloud Registration')
    parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
                        help='Name of the experiment')
    parser.add_argument('--num_iter', type=int, default=4, metavar='N',
                        help='Number of iterations inside the network')
    parser.add_argument('--emb_nn', type=str, default='GNN', metavar='N',
                        help='Feature extraction method. [GNN]')
    parser.add_argument('--emb_dims', type=int, default=64, metavar='N',
                        help='Dimension of embeddings')
    parser.add_argument('--batch_size', type=int, default=16, metavar='batch_size',
                        help='Size of batch')
    parser.add_argument('--test_batch_size', type=int, default=16, metavar='batch_size',
                        help='Size of batch')
    parser.add_argument('--epochs', type=int, default=50, metavar='N',
                        help='number of epochs to train')
    parser.add_argument('--unseen', type=arg_bool, default='True',
                        help='Test on unseen categories')
    parser.add_argument('--gaussian_noise', type=arg_bool, default='True',
                        help='Whether to add Gaussian noise')
    parser.add_argument('--alpha', type=float, default=0.75, metavar='N',
                        help='Fraction of points when sampling partial point cloud')
    parser.add_argument('--factor', type=float, default=4, metavar='N',
                        help='Division factor for rotations')  # rotation angles are limited to within pi/4
    parser.add_argument('--pretrained', type=arg_bool, default='False',
                        help='Load pretrained weights')
    args = parser.parse_args()
    print(args)

    ##### make checkpoint directory and backup #####
    if not os.path.exists('checkpoints'):
        os.makedirs('checkpoints')
    if not os.path.exists('checkpoints/' + args.exp_name):
        os.makedirs('checkpoints/' + args.exp_name)
    if not os.path.exists('checkpoints/' + args.exp_name + '/' + 'models'):
        os.makedirs('checkpoints/' + args.exp_name + '/' + 'models')
    os.system('cp main.py checkpoints' + '/' + args.exp_name + '/' + 'main.py.backup')
    os.system('cp model.py checkpoints' + '/' + args.exp_name + '/' + 'model.py.backup')
    os.system('cp data.py checkpoints' + '/' + args.exp_name + '/' + 'data.py.backup')
    ##### make checkpoint directory and backup #####

    io = IOStream('checkpoints/' + args.exp_name + '/log.txt')
    io.cprint(str(args))

    ##### load data #####
    train_loader = DataLoader(ModelNet40(partition='train',
                                         alpha=args.alpha,
                                         gaussian_noise=args.gaussian_noise,
                                         unseen=args.unseen,
                                         factor=args.factor),
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=True,
                              num_workers=8)
    test_loader = DataLoader(ModelNet40(partition='test',
                                        alpha=args.alpha,
                                        gaussian_noise=args.gaussian_noise,
                                        unseen=args.unseen,
                                        factor=args.factor),
                             batch_size=args.test_batch_size,
                             shuffle=False,
                             drop_last=False,
                             num_workers=8)

    ##### load model #####
    net = MFGNet(GNN(args.emb_dims), args).cuda()
    # net.load_state_dict(torch.load('model_gaussian.t7'))
    # for param in net.parameters():
    #     print(param.name, param.size())

    ##### train #####
    train(args, net, train_loader, test_loader, io)

    io.close()
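# The shell 'cp' backup inside main() above assumes a POSIX environment. A
# portable alternative sketch using shutil; backup_sources is a hypothetical
# helper, not part of the original script.
import os
import shutil


def backup_sources(exp_name, files=('main.py', 'model.py', 'data.py')):
    target_dir = os.path.join('checkpoints', exp_name)
    os.makedirs(target_dir, exist_ok=True)
    for fname in files:
        # Keep a snapshot of the sources next to the checkpoints for this run.
        shutil.copy(fname, os.path.join(target_dir, fname + '.backup'))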
    train_loader = DataLoader(ModelNet40(partition='train', num_points=1024),
                              num_workers=8, batch_size=32,
                              shuffle=True, drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test', num_points=1024),
                             num_workers=8, batch_size=32,
                             shuffle=True, drop_last=False)

    model = PointNet().cuda()
    criterion = nn.CrossEntropyLoss()
    opt = optim.SGD(model.parameters(), lr=0.0001, momentum=0.9)
    io = IOStream('checkpoints/run.log')

    for epoch in range(100):
        #################
        # Train
        #################
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        for data, label in tqdm(train_loader):
            data = data.permute(0, 2, 1)
            data, label = data.cuda(), label.squeeze().cuda()
            batch_size = data.size()[0]
            opt.zero_grad()
            logits = model(data)
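            # The fragment above ends right after the forward pass. A typical
            # remainder of this training step (loss, backward, optimizer update,
            # and prediction bookkeeping) might look like the sketch below; it
            # is an assumption about the elided code, not the original
            # continuation.
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())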
    # Training settings
    parser = argparse.ArgumentParser(description='Point Cloud Recognition')
    parser.add_argument('--exp_name', type=str, default='exp', metavar='N',
                        help='Name of the experiment')
    parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
                        help='Size of batch')
    parser.add_argument('--epochs', type=int, default=250, metavar='N',
                        help='number of epochs to train')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    args = parser.parse_args()

    _init_()

    io = IOStream('checkpoints/' + args.exp_name + '/run.log')
    io.cprint(str(args))

    train(args, io)
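# _init_() is called above but not defined in this fragment. In scripts of
# this style it typically just prepares the checkpoint directory tree. A
# minimal sketch under that assumption, relying on the module-level 'args'
# parsed above; the body is hypothetical and would normally be defined before
# it is called.
def _init_():
    os.makedirs(os.path.join('checkpoints', args.exp_name, 'models'), exist_ok=True)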
        # Calculate Accuracy
        acc = calculate_ACC(pred, labels)

        ##########################
        ###### Record Losses #####
        ##########################
        batch_size = SamplePoints.shape[0]
        TOTALLOSS.update(float(loss), batch_size)
        ACC50.update(acc, batch_size)

        #########################
        ###### Print Losses #####
        #########################
        print(' * [{0}/{1}] * '
              'ACC@0.5: {ACC50.val:.4f} ({ACC50.avg:.4f}) '
              'loss {total.val:.4f} ({total.avg:.4f})'.format(
                  i, len(test_loader), ACC50=ACC50, total=TOTALLOSS),
              end=' \r')
    print(' ' * 180, end='\r')
    io.cprint(' * Test * Loss: %.6f ACC@0.5: %.6f' % (TOTALLOSS.avg, ACC50.avg))
    return TOTALLOSS.avg


if __name__ == "__main__":
    torch.manual_seed(1)
    torch.cuda.manual_seed(1)
    args = get_args()
    io = IOStream('SampleNetCheckPoint/run.log')
    main(args, io)
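# calculate_ACC is used above but not defined in this fragment. A minimal
# sketch under the assumption that 'pred' holds per-point foreground
# probabilities, 'labels' is the matching binary mask, and the metric is plain
# accuracy at a 0.5 threshold (hence the ACC@0.5 name); the real helper may
# compute something different, and this would normally live in a utility
# module.
import torch


def calculate_ACC(pred, labels, threshold=0.5):
    # Binarize the predictions and compare element-wise with the ground truth.
    pred_mask = (pred >= threshold).float()
    return (pred_mask == labels.float()).float().mean().item()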
def experiment(n_points_choices, path):
    args.n_points_choices = n_points_choices
    args.path = path

    # make path
    _init_()

    # record args
    io = IOStream('checkpoints/' + args.path + '/run.log')
    io.cprint(str(args))
    io.cprint('Using GPU : ' + str(torch.cuda.current_device()) + ' from ' +
              str(torch.cuda.device_count()) + ' devices')

    # set seeds
    torch.manual_seed(1)
    torch.cuda.manual_seed(1)

    # initialize train_loader
    train_dataset = ScanObject_coseg(partition='training')
    train_sampler = TrainingBatchSampler(train_dataset, args.n_points_choices,
                                         args.batch_size)
    train_loader = DataLoader(train_dataset,
                              batch_sampler=train_sampler,
                              num_workers=32)

    # initialize test data loaders
    test_dataset256 = ScanObject_coseg(partition='test', n_points=256)
    test_loader256 = DataLoader(test_dataset256, num_workers=16,
                                batch_size=args.test_batch_size)
    test_dataset512 = ScanObject_coseg(partition='test', n_points=512)
    test_loader512 = DataLoader(test_dataset512, num_workers=16,
                                batch_size=args.test_batch_size)
    test_dataset1024 = ScanObject_coseg(partition='test', n_points=1024)
    test_loader1024 = DataLoader(test_dataset1024, num_workers=16,
                                 batch_size=args.test_batch_size)
    test_dataset2048 = ScanObject_coseg(partition='test', n_points=2048)
    test_loader2048 = DataLoader(test_dataset2048, num_workers=16,
                                 batch_size=args.test_batch_size)

    # Load models
    device = torch.device("cuda")
    model = DGCNN_cls(args).to(device)
    model = nn.DataParallel(model)

    # Use SGD and CosineAnnealingLR to train
    print("Use SGD")
    opt = optim.SGD(model.parameters(), lr=args.lr,
                    momentum=args.momentum, weight_decay=1e-4)
    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=1e-3)

    # start training
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    for i in range(args.epochs):
        io.cprint('Epoch [%d]' % (i + 1))

        # train model
        train(args, io, model, train_loader, opt)

        # adjust learning rate
        scheduler.step()

        # test at each resolution
        test(args, io, model, test_loader256, 'Test 256 ', 256)
        test(args, io, model, test_loader512, 'Test 512 ', 512)
        test(args, io, model, test_loader1024, 'Test 1024', 1024)
        test(args, io, model, test_loader2048, 'Test 2048', 2048)
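# TrainingBatchSampler is referenced above but not defined in this fragment.
# From its constructor arguments (dataset, point-count choices, batch size) it
# presumably yields fixed-size shuffled batches and picks one resolution from
# n_points_choices per batch. A minimal sketch of the batching part only; how
# the chosen resolution is handed to the dataset is dataset-specific and is
# elided here. Names and behaviour are assumptions.
import random
from torch.utils.data import Sampler


class TrainingBatchSampler(Sampler):
    def __init__(self, dataset, n_points_choices, batch_size):
        self.dataset = dataset
        self.n_points_choices = n_points_choices
        self.batch_size = batch_size

    def __iter__(self):
        # Shuffle once per epoch and emit full batches of indices.
        indices = list(range(len(self.dataset)))
        random.shuffle(indices)
        for start in range(0, len(indices) - self.batch_size + 1, self.batch_size):
            yield indices[start:start + self.batch_size]

    def __len__(self):
        return len(self.dataset) // self.batch_size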