args.feature_transform, args.augment = bool(args.feature_transform), bool(args.augment)

### Set random seed
args.seed = args.seed if args.seed > 0 else random.randint(1, 10000)

# dataset path
DATA_PATH = './data/modelnet40_normal_resampled/'

########################################
## Initiate model
########################################
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
num_classes = 40
if args.model == 'dgcnn_kcutmix':
    model = DGCNN(num_classes)
    model = model.to(device)
    model = nn.DataParallel(model)
    # DGCNN is trained with SGD (scaled learning rate) and cosine annealing.
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr * 100,
                                momentum=0.9, weight_decay=1e-4)
    scheduler_c = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 250, eta_min=1e-3)
else:
    if args.model == 'pointnet_kcutmix':
        model = PointNetCls(num_classes, args.feature_transform)
        model = model.to(device)
    elif args.model == 'pointnet2_kcutmix':
        model = PointNet2ClsMsg(num_classes)
        model = model.to(device)
        model = nn.DataParallel(model)
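# The seed selected above is not actually applied in this excerpt. A minimal
# sketch of how it would typically be propagated to the relevant RNGs follows;
# the set of generators seeded here (random, numpy, torch) is an assumption,
# not confirmed by the original script.
import random

import numpy as np
import torch


def set_seed(seed):
    """Seed the Python, NumPy and PyTorch RNGs so runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        # Seed every visible GPU as well (relevant when using nn.DataParallel).
        torch.cuda.manual_seed_all(seed)

# Example usage: set_seed(args.seed)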
def train(cfg):
    name = cfg.class_name
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    log_dir = os.path.curdir

    train_dataset = KeypointDataset(cfg, 'train')
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=4,
                                                   shuffle=True, num_workers=4,
                                                   drop_last=True)
    val_dataset = KeypointDataset(cfg, 'val')
    val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=4, num_workers=4)

    # Load different models
    if cfg.network == 'rsnet':
        model = RSNet(train_dataset.nclasses, rg=2.0).to(device)
    elif cfg.network == 'pointnet':
        model = PointNetDenseCls(train_dataset.nclasses).to(device)
    elif cfg.network == 'pointnet2':
        model = Pointnet2SSG(train_dataset.nclasses).to(device)
    elif cfg.network == 'dgcnn':
        model = DGCNN(train_dataset.nclasses).to(device)
    elif cfg.network == 'graphcnn':
        model = GraphConvNet([3, 1024, 5, 1024, 5], [512, train_dataset.nclasses]).to(device)
    elif cfg.network == 'spidercnn':
        model = Spidercnn_seg_fullnet(train_dataset.nclasses).to(device)
    elif cfg.network == 'rscnn':
        model = RSCNN_MSN(train_dataset.nclasses).to(device)
    elif cfg.network == 'pointconv':
        model = PointConvDensityClsSsg(train_dataset.nclasses).to(device)
    else:
        logger.error('unrecognized network name')
        exit()

    logger.info('Start training on binary keypoint detection...')
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    # Binary keypoint / non-keypoint labels; -1 marks points to ignore.
    criterion = torch.nn.CrossEntropyLoss(ignore_index=-1).to(device)

    accum_iter, tot_iter = 0, 0
    accum_loss = {'loss': 0}
    writer = SummaryWriter(log_dir)
    best_loss = 1e10

    for epoch in range(cfg.max_epoch + 1):
        train_iter = train_dataloader.__iter__()

        # Training
        model.train()
        for i, data in enumerate(train_iter):
            pc, label = data
            logits = model(pc.transpose(1, 2).to(device))
            loss = criterion(logits.reshape(-1, 2), label.view(-1).to(device))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            accum_loss['loss'] += loss.item()
            accum_iter += 1
            tot_iter += 1

            if tot_iter % 10 == 0 or tot_iter == 1:
                for key in accum_loss:
                    writer.add_scalar('Train/%s' % key, accum_loss[key] / accum_iter, tot_iter)
                    writer.flush()
                    logger.info(
                        f'Iter: {tot_iter}, Epoch: {epoch}, {key}: {accum_loss[key] / accum_iter}'
                    )
                    accum_loss[key] = 0
                accum_iter = 0

        model.eval()

        # Validation loss
        val_loss = {'loss': 0}
        for i, data in enumerate(val_dataloader):
            pc, label = data
            with torch.no_grad():
                logits = model(pc.transpose(1, 2).to(device))
                loss = criterion(logits.reshape(-1, 2), label.view(-1).to(device))
            val_loss['loss'] += loss.item()

        # Keep the checkpoint with the lowest mean validation loss.
        if val_loss['loss'] / len(val_dataloader) < best_loss:
            logger.info("best epoch: {}".format(epoch))
            best_loss = val_loss['loss'] / len(val_dataloader)
            torch.save(model.state_dict(), os.path.join(log_dir, 'pck_best.pth'))

        for key in val_loss:
            writer.add_scalar('Val/%s' % key, val_loss[key] / len(val_dataloader), epoch)
            # writer.add_scalar('ValPck0.01', corr[0])
            writer.flush()
            logger.info(
                f'Epoch: {epoch}, Val {key}: {val_loss[key] / len(val_dataloader)}'
            )

    writer.close()
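# Hypothetical driver for train(cfg). Only the cfg fields read above
# (class_name, network, max_epoch) are grounded in the excerpt; the argparse
# wiring and the default values below are illustrative assumptions, and the
# real project may build cfg from its own config system instead.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--class_name', type=str, default='chair')  # hypothetical default
    parser.add_argument('--network', type=str, default='pointnet2',
                        choices=['rsnet', 'pointnet', 'pointnet2', 'dgcnn',
                                 'graphcnn', 'spidercnn', 'rscnn', 'pointconv'])
    parser.add_argument('--max_epoch', type=int, default=100)  # hypothetical default
    cfg = parser.parse_args()
    train(cfg)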
########################################
## Initiate model
########################################
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
num_classes = 40
if args.model == 'pointnet_kcutmix':
    model = PointNetCls(num_classes, args.feature_transform)
    model = model.to(device)
elif args.model == 'pointnet2_kcutmix':
    model = PointNet2ClsMsg(num_classes)
    model = model.to(device)
    model = nn.DataParallel(model)
elif args.model == 'dgcnn_kcutmix':
    model = DGCNN(num_classes)
    model = model.to(device)
    model = nn.DataParallel(model)
elif args.model == 'rscnn_kcutmix':
    from models.rscnn import RSCNN
    import models.rscnn_utils.pointnet2_utils as pointnet2_utils
    import models.rscnn_utils.pytorch_utils as pt_utils
    model = RSCNN(num_classes)
    model = model.to(device)
    model = nn.DataParallel(model)

if len(args.resume) > 1:
    print('=====> Loading from checkpoint...')
    checkpoint = torch.load('checkpoints/%s.pth' % args.resume)
    args = checkpoint['args']
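# The resume branch above stops after restoring args. A sketch of how the rest
# of the checkpoint might be restored follows; the 'model_state_dict' and
# 'epoch' keys are assumptions about the checkpoint layout (only the 'args'
# key is confirmed by the excerpt).
def resume_from_checkpoint(model, checkpoint):
    """Restore model weights and return the epoch to resume from."""
    model.load_state_dict(checkpoint['model_state_dict'])  # assumed key
    return checkpoint.get('epoch', 0) + 1                  # assumed key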