def evaluate(args):
    """Evaluate a pretrained point-cloud classifier on the ModelNet40 test split.

    Loads the HDF5 test set, builds the model selected by ``args.model_name``
    ('pointnet' or the PointNet++ MSG variant), restores weights from
    ``args.pretrain`` and logs the resulting test accuracy.
    """
    test_data, test_label = load_data(
        'experiment/data/modelnet40_ply_hdf5_2048/', train=False)
    test_set = ModelNetDataLoader(test_data, test_label)
    test_loader = torch.utils.data.DataLoader(
        test_set, batch_size=args.batch_size, shuffle=False)

    log.debug('Building Model', args.model_name)
    if args.model_name == 'pointnet':
        num_class = 40  # ModelNet40 has 40 shape categories
        model = PointNetCls(num_class, args.feature_transform)
    else:
        model = PointNet2ClsMsg()

    torch.backends.cudnn.benchmark = True
    model = torch.nn.DataParallel(model).cuda()
    log.debug('Using gpu:', args.gpu)

    # Without a checkpoint there is nothing meaningful to evaluate.
    if args.pretrain is None:
        log.err('No pretrain model')
        return

    log.debug('Loading pretrain model...')
    state_dict = torch.load(args.pretrain)
    model.load_state_dict(state_dict)

    acc = test_clf(model.eval(), test_loader)
    log.msg(Test_Accuracy='%.5f' % (acc))
def test_kitti_semseg(model, loader, model_name, num_classes, class_names):
    """Evaluate a semantic-segmentation model on a Semantic-KITTI loader.

    Args:
        model: network in eval mode; for 'pointnet' it returns (pred, trans_feat),
            otherwise a single prediction tensor.
        loader: DataLoader yielding (points, target) batches.
        model_name: 'pointnet' or any other name for the PointNet++ variant.
        num_classes: number of semantic classes (class 0 is treated as
            background/unlabeled and excluded from the returned mIOU).
        class_names: labels used as the index of the logged per-class table.

    Returns:
        (acc, miou): mean per-batch point accuracy, and mean IoU over
        classes 1..num_classes-1 averaged over batches.
    """
    ious = np.zeros((num_classes,), dtype=np.float32)
    count = np.zeros((num_classes,), dtype=np.uint32)
    accuracy = []

    for points, target in tqdm(loader, total=len(loader), smoothing=0.9, dynamic_ncols=True):
        batch_size, num_point, _ = points.size()
        points = points.float().transpose(2, 1).cuda()
        target = target.long().cuda()
        with torch.no_grad():
            if model_name == 'pointnet':
                pred, _ = model(points)
            else:
                pred = model(points)
        pred_choice = pred.argmax(-1)
        target = target.squeeze(-1)

        for class_id in range(num_classes):
            I = torch.sum((pred_choice == class_id) & (target == class_id)).cpu().item()
            U = torch.sum((pred_choice == class_id) | (target == class_id)).cpu().item()
            # Convention: a class absent from both prediction and target scores IoU 1.
            iou = 1 if U == 0 else I / U
            ious[class_id] += iou
            count[class_id] += 1

        correct = (pred_choice == target).sum().cpu().item()
        accuracy.append(correct / (batch_size * num_point))

    # BUG FIX: the original seeded count[0] = 1 before the loop, so class 0's
    # accumulated IoU was divided by (num_batches + 1) and under-reported.
    # Every class's count is incremented once per batch, so a uniform guard
    # against an empty loader is all that is needed.
    categorical_iou = ious / np.maximum(count, 1)

    df = pd.DataFrame(categorical_iou, columns=['mIOU'], index=class_names)
    df = df.sort_values(by='mIOU', ascending=False)
    log.info('categorical mIOU')
    log.msg(df)

    acc = np.mean(accuracy)
    miou = np.mean(categorical_iou[1:])  # skip class 0 (unlabeled)
    return acc, miou
def train(args):
    """Train a point-cloud semantic-segmentation model on Semantic-KITTI.

    Builds the dataset for ``args.subset`` ('inview' or 'all'), trains
    the model selected by ``args.model_name`` for ``args.epoch`` epochs,
    evaluates after every epoch and checkpoints whenever mIOU improves.

    Raises:
        ValueError: if ``args.subset`` is not 'inview' or 'all'.
    """
    mkdir('experiment/')
    checkpoints_dir = mkdir('experiment/%s/' % (args.model_name))

    kitti_utils = Semantic_KITTI_Utils(KITTI_ROOT, subset=args.subset)
    class_names = kitti_utils.class_names
    num_classes = kitti_utils.num_classes

    if args.subset == 'inview':
        train_npts = 8000
        test_npts = 24000
    elif args.subset == 'all':
        train_npts = 50000
        test_npts = 100000
    else:
        # BUG FIX: the original fell through here, crashing later with a
        # confusing NameError on train_npts. Fail fast with a clear message.
        raise ValueError("Unknown subset '%s', expected 'inview' or 'all'" % args.subset)

    log.info(subset=args.subset, train_npts=train_npts, test_npts=test_npts)

    dataset = SemKITTI_Loader(KITTI_ROOT, train_npts, train=True, subset=args.subset)
    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True,
                            num_workers=args.workers, pin_memory=True)
    test_dataset = SemKITTI_Loader(KITTI_ROOT, test_npts, train=False, subset=args.subset)
    testdataloader = DataLoader(test_dataset, batch_size=int(args.batch_size / 2),
                                shuffle=False, num_workers=args.workers, pin_memory=True)

    if args.model_name == 'pointnet':
        # input_dims=4: x, y, z plus reflectance
        model = PointNetSeg(num_classes, input_dims=4, feature_transform=True)
    else:
        model = PointNet2SemSeg(num_classes, feature_dims=1)

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate,
                                     betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-4)

    torch.backends.cudnn.benchmark = True
    model = torch.nn.DataParallel(model)
    model.cuda()
    log.info('Using gpu:', args.gpu)

    if args.pretrain is not None:
        log.info('Use pretrain model...')
        model.load_state_dict(torch.load(args.pretrain))
        # Checkpoint names end in '-<epoch>.pth'; recover the epoch to resume from.
        init_epoch = int(args.pretrain[:-4].split('-')[-1])
        log.info('Restart training', epoch=init_epoch)
    else:
        log.msg('Training from scratch')
        init_epoch = 0

    # Hoisted out of the batch loop: no need to rebuild the loss module per step.
    criterion = nn.CrossEntropyLoss()

    best_acc = 0
    best_miou = 0
    for epoch in range(init_epoch, args.epoch):
        model.train()
        lr = calc_decay(args.learning_rate, epoch)
        log.info(subset=args.subset, model=args.model_name, gpu=args.gpu,
                 epoch=epoch, lr=lr)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        for points, target in tqdm(dataloader, total=len(dataloader),
                                   smoothing=0.9, dynamic_ncols=True):
            points = points.float().transpose(2, 1).cuda()
            target = target.long().cuda()
            if args.model_name == 'pointnet':
                logits, trans_feat = model(points)
            else:
                logits = model(points)
            # (batch, npoint, nclass) -> (batch, nclass, npoint) for CrossEntropyLoss
            logits = logits.transpose(2, 1)
            loss = criterion(logits, target)
            if args.model_name == 'pointnet':
                # Regularize the feature-transform matrix toward orthogonality.
                loss += feature_transform_reguliarzer(trans_feat) * 0.001
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        torch.cuda.empty_cache()
        acc, miou = test_kitti_semseg(model.eval(), testdataloader,
                                      args.model_name, num_classes, class_names)

        save_model = False
        if acc > best_acc:
            best_acc = acc
        if miou > best_miou:
            best_miou = miou
            save_model = True  # checkpoint only on mIOU improvement

        if save_model:
            fn_pth = '%s-%s-%.5f-%04d.pth' % (args.model_name, args.subset, best_miou, epoch)
            log.info('Save model...', fn=fn_pth)
            torch.save(model.state_dict(), os.path.join(checkpoints_dir, fn_pth))
        else:
            log.info('No need to save model')
        log.warn('Curr', accuracy=acc, mIOU=miou)
        log.warn('Best', accuracy=best_acc, mIOU=best_miou)
def train(args):
    """Train a Semantic-KITTI segmentation model and plot per-epoch metrics.

    NOTE(review): this is a second ``train`` definition in the same file and
    shadows the earlier one at import time — confirm which is intended.

    Same training loop as the sibling version, but additionally records loss,
    accuracy and mIOU per epoch and draws a matplotlib trend chart at the end.

    Raises:
        ValueError: if ``args.subset`` is not 'inview' or 'all'.
    """
    mkdir('experiment/')
    checkpoints_dir = mkdir('experiment/%s/' % (args.model_name))

    kitti_utils = Semantic_KITTI_Utils(ROOT, subset=args.subset)
    class_names = kitti_utils.class_names
    num_classes = kitti_utils.num_classes

    if args.subset == 'inview':
        train_npts = 2000
        test_npts = 2500
    elif args.subset == 'all':
        train_npts = 50000
        test_npts = 100000
    else:
        # BUG FIX: the original fell through here, crashing later with a
        # confusing NameError on train_npts. Fail fast with a clear message.
        raise ValueError("Unknown subset '%s', expected 'inview' or 'all'" % args.subset)

    log.info(subset=args.subset, train_npts=train_npts, test_npts=test_npts)

    dataset = SemKITTI_Loader(ROOT, train_npts, train=True, subset=args.subset)
    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True,
                            num_workers=args.workers, pin_memory=True)
    test_dataset = SemKITTI_Loader(ROOT, test_npts, train=False, subset=args.subset)
    testdataloader = DataLoader(test_dataset, batch_size=int(args.batch_size / 2),
                                shuffle=False, num_workers=args.workers, pin_memory=True)

    if args.model_name == 'pointnet':
        # input_dims=4: x, y, z plus reflectance
        model = PointNetSeg(num_classes, input_dims=4, feature_transform=True)
    else:
        model = PointNet2SemSeg(num_classes, feature_dims=1)

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate,
                                     betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-4)

    torch.backends.cudnn.benchmark = True
    model = torch.nn.DataParallel(model)  # use more than 1 gpu
    model.cuda()
    log.info('Using gpu:', args.gpu)

    if args.pretrain is not None:
        log.info('Use pretrain model...')
        model.load_state_dict(torch.load(args.pretrain))
        # Checkpoint names end in '-<epoch>.pth'; recover the epoch to resume from.
        init_epoch = int(args.pretrain[:-4].split('-')[-1])
        log.info('Restart training', epoch=init_epoch)
    else:
        log.msg('Training from scratch')
        init_epoch = 0

    # Hoisted out of the batch loop: no need to rebuild the loss module per step.
    criterion = nn.CrossEntropyLoss()

    best_acc = 0
    best_miou = 0
    # Per-epoch history for the trend chart drawn after training.
    loss_list = []
    miou_list = []
    acc_list = []
    epoch_time = []
    lr_list = []

    for epoch in range(init_epoch, args.epoch):
        model.train()
        lr = calc_decay(args.learning_rate, epoch)
        log.info(model=args.model_name, gpu=args.gpu, epoch=epoch, lr=lr)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        for points, target in tqdm(dataloader, total=len(dataloader),
                                   smoothing=0.9, dynamic_ncols=True):
            points = points.float().transpose(2, 1).cuda()
            target = target.long().cuda()
            if args.model_name == 'pointnet':
                logits, trans_feat = model(points)
            else:
                logits = model(points)
            # (batch, npoint, nclass) -> (batch, nclass, npoint) for CrossEntropyLoss
            logits = logits.transpose(2, 1)
            loss = criterion(logits, target)
            if args.model_name == 'pointnet':
                # Regularize the feature-transform matrix toward orthogonality.
                loss += feature_transform_reguliarzer(trans_feat) * 0.001
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        torch.cuda.empty_cache()
        acc, miou = test_kitti_semseg(model.eval(), testdataloader,
                                      args.model_name, num_classes, class_names)

        save_model = False
        if acc > best_acc:
            best_acc = acc
        if miou > best_miou:
            best_miou = miou
            save_model = True  # checkpoint only on mIOU improvement

        # Record the last batch's loss plus this epoch's eval metrics.
        # BUG FIX: np.asscalar was removed in NumPy 1.23; use float() instead.
        loss_list.append(loss.item())
        miou_list.append(float(miou))
        acc_list.append(float(acc))
        epoch_time.append(epoch)
        lr_list.append(lr)

        if save_model:
            fn_pth = '%s-%.5f-%04d.pth' % (args.model_name, best_miou, epoch)
            log.info('Save model...', fn=fn_pth)
            torch.save(model.state_dict(), os.path.join(checkpoints_dir, fn_pth))
        else:
            log.info('No need to save model')
        log.warn('Curr', accuracy=acc, mIOU=miou)
        log.warn('Best', accuracy=best_acc, mIOU=best_miou)

    # Draw the training trend chart once all epochs are done.
    label_size = {"size": 40}
    fig = plt.figure()
    plt.plot(epoch_time, loss_list, label="loss")
    plt.plot(epoch_time, miou_list, label="mIOU")
    plt.plot(epoch_time, acc_list, label="accuracy")
    plt.xlabel("epoch time", fontsize=40)
    plt.ylabel("value", fontsize=40)
    plt.title("training trendency", fontsize=60)
    plt.tick_params(labelsize=40)
    plt.legend(prop=label_size)
    plt.show()