def evaluate(args):
    """Evaluate a pretrained classifier on the ModelNet40 test split.

    Builds the test DataLoader, instantiates the requested network
    (PointNet or PointNet++), loads the pretrained weights from
    ``args.pretrain`` and reports test accuracy through the logger.
    """
    # Test split only; keep sample order deterministic for evaluation.
    test_points, test_labels = load_data(
        'experiment/data/modelnet40_ply_hdf5_2048/', train=False)
    dataset = ModelNetDataLoader(test_points, test_labels)
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=args.batch_size, shuffle=False)

    log.debug('Building Model', args.model_name)
    if args.model_name == 'pointnet':
        classifier = PointNetCls(40, args.feature_transform)
    else:
        classifier = PointNet2ClsMsg()

    # cudnn autotuner is safe here: input shapes are fixed per batch.
    torch.backends.cudnn.benchmark = True
    classifier = torch.nn.DataParallel(classifier).cuda()
    log.debug('Using gpu:', args.gpu)

    # A checkpoint is mandatory — there is nothing to evaluate otherwise.
    if args.pretrain is None:
        log.err('No pretrain model')
        return
    log.debug('Loading pretrain model...')
    classifier.load_state_dict(torch.load(args.pretrain))

    accuracy = test_clf(classifier.eval(), loader)
    log.msg(Test_Accuracy='%.5f' % accuracy)
def vis(args):
    """Visualize test-set point clouds with ground-truth and predicted
    class names, one Open3D window per sample.

    Space (key code 32) exits the process; Q advances to the next frame.
    """
    # NOTE(review): `root` is not defined in this function — presumably a
    # module-level data path; confirm it is set before calling vis().
    test_data, test_label = load_data(root, train=False)
    log.info(test_data=test_data.shape, test_label=test_label.shape)

    log.debug('Building Model', args.model_name)
    if args.model_name == 'pointnet':
        num_class = 40
        model = PointNetCls(num_class, args.feature_transform).cuda()
    else:
        model = PointNet2ClsMsg().cuda()

    torch.backends.cudnn.benchmark = True
    model = torch.nn.DataParallel(model)
    model.cuda()
    log.info('Using multi GPU:', args.gpu)

    if args.pretrain is None:
        log.err('No pretrain model')
        return
    log.debug('Loading pretrain model...')
    checkpoint = torch.load(args.pretrain)
    model.load_state_dict(checkpoint)
    model.eval()

    log.info('Press space to exit, press Q for next frame')
    for idx in range(test_data.shape[0]):
        point_np = test_data[idx:idx + 1]
        gt = test_label[idx][0]

        points = torch.from_numpy(point_np)
        # (1, N, 3) -> (1, 3, N), the layout the networks expect.
        points = points.transpose(2, 1).cuda()
        # Fix: inference only — disable autograd bookkeeping.
        with torch.no_grad():
            pred, trans_feat = model(points)
        pred_choice = pred.data.max(1)[1]
        log.info(gt=class_names[gt],
                 pred_choice=class_names[pred_choice.cpu().numpy().item()])

        point_cloud = open3d.geometry.PointCloud()
        point_cloud.points = open3d.utility.Vector3dVector(point_np[0])

        # Fix: renamed from `vis` so the visualizer no longer shadows this
        # function (the lambda parameter was shadowing it as well).
        visualizer = open3d.visualization.VisualizerWithKeyCallback()
        visualizer.create_window()
        visualizer.get_render_option().background_color = np.asarray([0, 0, 0])
        visualizer.add_geometry(point_cloud)
        visualizer.register_key_callback(32, lambda w: exit())
        visualizer.run()
        visualizer.destroy_window()
def main(args):
    """Evaluate the split ModelNet40 classifier: a 39-way "k but 1" model
    (two classes merged into one composite label) plus a binary model that
    disambiguates the composite class. Their log-probabilities are combined
    into 40-way predictions and summarized with a confusion matrix.
    """
    '''HYPER PARAMETER'''
    # Restrict visible GPUs to those requested on the command line.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    datapath = './data/ModelNet/'

    '''CREATE DIR'''
    # Timestamped per-run output directory:
    # eval_experiment/<model>ModelNet40-<YYYY-MM-DD_HH-MM>/
    experiment_dir = Path('./eval_experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(
        str(experiment_dir) + '/%sModelNet40-' % args.model_name +
        str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    # Snapshot the "k but 1" checkpoint into the run directory.
    os.system('cp %s %s' % (args.kb1checkpoint, checkpoints_dir))
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    # NOTE(review): re-parsing the CLI here overwrites the `args` parameter.
    args = parse_args()
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # NOTE(review): str(log_dir) has no trailing separator, so this log file
    # likely lands beside the logs dir as "...logseval_<model>_cls.txt" —
    # confirm and fix the path join if unintended.
    file_handler = logging.FileHandler(
        str(log_dir) + 'eval_%s_cls.txt' % args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------EVAL---------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)

    '''DATA LOADING'''
    logger.info('Load dataset ...')
    train_data, train_label, test_data, test_label = load_data(
        datapath, classification=True)
    logger.info("The number of training data is: %d", train_data.shape[0])
    logger.info("The number of test data is: %d", test_data.shape[0])
    testDataset = ModelNetDataLoader(test_data, test_label)
    # Evaluation keeps the original sample order (shuffle=False).
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batchsize,
                                                 shuffle=False)

    '''MODEL LOADING'''
    # 39 = the 40 ModelNet classes with two of them merged into one
    # composite label (see the split logic below).
    num_class = 39
    kb1classifier = PointConvClsSsg(num_class).cuda()
    if args.kb1checkpoint is not None:
        print('Load k but 1 CheckPoint...')
        logger.info('Load k but 1 CheckPoint')
        kb1checkpoint = torch.load(args.kb1checkpoint)
        start_epoch = kb1checkpoint['epoch']
        kb1classifier.load_state_dict(kb1checkpoint['model_state_dict'])
    else:
        # The 39-way checkpoint is mandatory for this evaluation.
        print('Please load k but 1 Checkpoint to eval...')
        sys.exit(0)
    start_epoch = 0

    # Binary classifier that splits the composite class into its two members.
    num_class1 = 2
    binaryclassifier = PointConvClsSsg(num_class1).cuda()
    if args.binarycheckpoint is not None:
        print('Load binary CheckPoint...')
        logger.info('Load binary CheckPoint')
        binarycheckpoint = torch.load(args.binarycheckpoint)
        start_epoch = binarycheckpoint['epoch']
        binaryclassifier.load_state_dict(binarycheckpoint['model_state_dict'])
    else:
        print('Please load binary Checkpoint to eval...')
        sys.exit(0)
    start_epoch2 = 0
    blue = lambda x: '\033[94m' + x + '\033[0m'

    '''EVAL'''
    logger.info('Start evaluating...')
    print('Start evaluating...')
    total_correct = 0
    total_seen = 0
    preds = []  # per-batch predicted labels, kept for the confusion matrix
    for batch_id, data in tqdm(enumerate(testDataLoader, 0),
                               total=len(testDataLoader), smoothing=0.9):
        pointcloud, target = data
        target = target[:, 0]
        #import ipdb; ipdb.set_trace()
        # Scores accumulated over args.num_view randomly generated views.
        pred_view = torch.zeros(pointcloud.shape[0], num_class).cuda()
        binary_view = torch.zeros(pointcloud.shape[0], num_class1).cuda()
        for _ in range(args.num_view):
            pointcloud = generate_new_view(pointcloud)
            #import ipdb; ipdb.set_trace()
            #points = torch.from_numpy(pointcloud).permute(0, 2, 1)
            # (B, N, 3) -> (B, 3, N), the layout the networks expect.
            points = pointcloud.permute(0, 2, 1)
            points, target = points.cuda(), target.cuda()
            kb1classifier = kb1classifier.eval()
            binaryclassifier = binaryclassifier.eval()
            with torch.no_grad():
                pred = kb1classifier(points)
                pred_binary = binaryclassifier(points)
            pred_view += pred
            binary_view += pred_binary
        kb1_logprob = pred_view.data
        binary_logprob = binary_view.data
        ## since we assigned the composite class the largest label, we will split the log-probability for the last label to two part, one for binary 0 and one for binary 1.
        # Broadcast the composite-class column to two columns and add the
        # binary scores: log p(composite) + log p(member | composite).
        binary_pred_logprob = kb1_logprob[:, -1].reshape(
            1, len(kb1_logprob[:, -1])).transpose(0, 1).repeat(1, 2).view(
                -1, 2) + binary_logprob
        ## concatenate to get log-probability for all (40) classes
        pred_logprob = torch.from_numpy(
            np.c_[kb1_logprob[:, 0:-1].cpu().detach().numpy(),
                  binary_pred_logprob.cpu().detach().numpy()]).to('cuda')
        pred_choices = pred_logprob.max(1)[1]
        ## reset labels
        # Map the concatenated 40-way indices back to the original
        # ModelNet40 label ids (the composite members sit at the end) —
        # presumably matching the label shuffling done at training time.
        mapper_dict = {
            **{key: key + 1 for key in range(12, 32)},
            **{key: key + 2 for key in range(32, 38)},
            **{
                38: 33,
                39: 12
            }
        }

        def mp(entry):
            # Identity mapping for labels that were never remapped.
            return mapper_dict[entry] if entry in mapper_dict else entry

        mp = np.vectorize(mp)
        pred_choice = torch.from_numpy(
            np.array(mp(pred_choices.cpu().detach().numpy()))).to('cuda')
        preds.append(pred_choice.cpu().detach().numpy())
        correct = pred_choice.eq(
            target.long().data).cpu().detach().numpy().sum()
        total_correct += correct.item()
        total_seen += float(points.size()[0])

    accuracy = total_correct / total_seen
    ## confusion matrix
    # Row-normalize so each row shows per-class recall; the diagonal is the
    # per-class accuracy reported below.
    cm = confusion_matrix(test_label.ravel(), np.concatenate(preds).ravel())
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    t = pd.read_table('data/ModelNet/shape_names.txt', names=['label'])
    d = {key: val for key, val in zip(t.label, cm.diagonal())}
    print('Total Accuracy: %f' % accuracy)
    print('Accuracy per class:', d)
    logger.info('Total Accuracy: %f' % accuracy)
    logger.info('End of evaluation...')
def main(args):
    """Train a PointConv classifier (PointConvClsSsg) on ModelNet40.

    Creates a timestamped experiment directory, configures file logging,
    loads the dataset, optionally resumes from ``args.pretrain`` and runs
    the train/eval loop, checkpointing on new best test accuracy.
    """
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    datapath = './data/ModelNet/'

    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(
        str(experiment_dir) + '/%sModelNet40-' % args.model_name +
        str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    # NOTE(review): re-parsing the CLI here overwrites the `args` parameter.
    args = parse_args()
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # Fix: join the log file name onto the logs directory; the original
    # concatenation (str(log_dir) + 'train_...') created the file as
    # "...logstrain_<model>_cls.txt" *beside* the logs dir.
    file_handler = logging.FileHandler(
        os.path.join(str(log_dir), 'train_%s_cls.txt' % args.model_name))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------TRANING---------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)

    '''DATA LOADING'''
    logger.info('Load dataset ...')
    train_data, train_label, test_data, test_label = load_data(
        datapath, classification=True)
    logger.info("The number of training data is: %d", train_data.shape[0])
    logger.info("The number of test data is: %d", test_data.shape[0])
    trainDataset = ModelNetDataLoader(train_data, train_label)
    testDataset = ModelNetDataLoader(test_data, test_label)
    trainDataLoader = torch.utils.data.DataLoader(trainDataset,
                                                  batch_size=args.batchsize,
                                                  shuffle=True)
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batchsize,
                                                 shuffle=False)

    '''MODEL LOADING'''
    num_class = 40
    classifier = PointConvClsSsg(num_class).cuda()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01,
                                    momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    # Halve the learning rate every 30 epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30,
                                                gamma=0.5)

    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'

    '''TRANING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        logger.info('Epoch %d (%d/%s):', global_epoch + 1, epoch + 1,
                    args.epoch)

        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target = data
            target = target[:, 0]
            # (B, N, 3) -> (B, 3, N), the layout the network expects.
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            classifier = classifier.train()
            pred = classifier(points)
            loss = F.nll_loss(pred, target.long())
            loss.backward()
            optimizer.step()
            global_step += 1

        # Fix: step the LR schedule *after* the epoch's optimizer updates.
        # Calling it before optimizer.step() is deprecated since PyTorch 1.1
        # and skips the first value of the schedule.
        scheduler.step()

        train_acc = test(classifier.eval(), trainDataLoader) if args.train_metric else None
        acc = test(classifier, testDataLoader)

        print('\r Loss: %f' % loss.data)
        logger.info('Loss: %.2f', loss.data)
        if args.train_metric:
            print('Train Accuracy: %f' % train_acc)
            logger.info('Train Accuracy: %f', (train_acc))
        print(
            '\r Test %s: %f *** %s: %f' %
            (blue('Accuracy'), acc, blue('Best Accuracy'), best_tst_accuracy))
        logger.info('Test Accuracy: %f *** Best Test Accuracy: %f', acc,
                    best_tst_accuracy)

        # Checkpoint only after the warm-up epochs and on improvement.
        if (acc >= best_tst_accuracy) and epoch > 5:
            best_tst_accuracy = acc
            logger.info('Save model...')
            save_checkpoint(global_epoch + 1,
                            train_acc if args.train_metric else 0.0, acc,
                            classifier, optimizer, str(checkpoints_dir),
                            args.model_name)
            print('Saving model....')
        global_epoch += 1

    print('Best Accuracy: %f' % best_tst_accuracy)
    logger.info('End of training...')
def main(args):
    """Evaluate a pretrained PointConvClsSsg classifier on the ModelNet40
    test split, summing predictions over ``args.num_view`` random views.
    """
    """HYPER PARAMETER"""
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    datapath = './data/ModelNet/'

    '''CREATE DIR'''
    experiment_dir = Path('./eval_experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(str(experiment_dir) + '/%sModelNet40-' % args.model_name +
                    str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    # Snapshot the evaluated checkpoint into the run directory.
    # NOTE(review): shells out with an unquoted path; fine for trusted CLI
    # arguments, but shutil.copy would be more robust.
    os.system('cp %s %s' % (args.checkpoint, checkpoints_dir))
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    # NOTE(review): re-parsing the CLI here overwrites the `args` parameter.
    args = parse_args()
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # Fix: place the log file inside the logs directory; the original string
    # concatenation created "...logseval_<model>_cls.txt" beside it.
    file_handler = logging.FileHandler(
        os.path.join(str(log_dir), 'eval_%s_cls.txt' % args.model_name))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------EVAL---------------------------------------------------')
    logger.info('PARAMETER ...')
    logger.info(args)

    '''DATA LOADING'''
    logger.info('Load dataset ...')
    train_data, train_label, test_data, test_label = load_data(
        datapath, classification=True)
    logger.info("The number of training data is: %d", train_data.shape[0])
    logger.info("The number of test data is: %d", test_data.shape[0])
    test_dataset = ModelNetDataLoader(test_data, test_label)
    # Evaluation keeps the original sample order (shuffle=False).
    test_data_loader = torch.utils.data.DataLoader(test_dataset,
                                                   batch_size=args.batchsize,
                                                   shuffle=False)

    '''MODEL LOADING'''
    num_class = 40
    classifier = PointConvClsSsg(num_class).cuda()
    # A checkpoint is mandatory — there is nothing to evaluate otherwise.
    if args.checkpoint is None:
        print('Please load Checkpoint to eval...')
        sys.exit(0)
    print('Load CheckPoint...')
    logger.info('Load CheckPoint')
    checkpoint = torch.load(args.checkpoint)
    classifier.load_state_dict(checkpoint['model_state_dict'])

    '''EVAL'''
    logger.info('Start evaluating...')
    print('Start evaluating...')
    total_correct = 0
    total_seen = 0
    for batch_id, data in tqdm(enumerate(test_data_loader, 0),
                               total=len(test_data_loader), smoothing=0.9):
        pointcloud, target = data
        target = target[:, 0]
        # Class scores accumulated across the sampled views.
        pred_view = torch.zeros(pointcloud.shape[0], num_class).cuda()
        for _ in range(args.num_view):
            pointcloud = generate_new_view(pointcloud)
            # (B, N, 3) -> (B, 3, N), the layout the network expects.
            points = pointcloud.permute(0, 2, 1)
            points, target = points.cuda(), target.cuda()
            classifier = classifier.eval()
            with torch.no_grad():
                pred = classifier(points)
            pred_view += pred
        # Vote: pick the class with the highest summed score.
        pred_choice = pred_view.data.max(1)[1]
        correct = pred_choice.eq(target.long().data).cpu().sum()
        total_correct += correct.item()
        total_seen += float(points.size()[0])

    accuracy = total_correct / total_seen
    print('Total Accuracy: %f' % accuracy)
    logger.info('Total Accuracy: %f' % accuracy)
    logger.info('End of evaluation...')
def train(args):
    """Train a PointNet / PointNet++ classifier on the ModelNet40 HDF5
    dataset, evaluating on the test split after every epoch and saving a
    checkpoint whenever test accuracy reaches a new best.
    """
    experiment_dir = mkdir('./experiment/')
    checkpoints_dir = mkdir('./experiment/clf/%s/' % (args.model_name))
    train_data, train_label, test_data, test_label = load_data(
        'experiment/data/modelnet40_ply_hdf5_2048/')
    # Augmentation is applied to the training split only.
    trainDataset = ModelNetDataLoader(train_data, train_label,
                                      data_augmentation=args.augment)
    trainDataLoader = DataLoader(trainDataset, batch_size=args.batch_size,
                                 shuffle=True)
    testDataset = ModelNetDataLoader(test_data, test_label)
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False)
    log.info('Building Model', args.model_name)
    if args.model_name == 'pointnet':
        num_class = 40
        model = PointNetCls(num_class, args.feature_transform).cuda()
    else:
        model = PointNet2ClsMsg().cuda()
    # cudnn autotuner is safe here: input shapes are fixed per batch.
    torch.backends.cudnn.benchmark = True
    model = torch.nn.DataParallel(model).cuda()
    log.debug('Using gpu:', args.gpu)
    if args.pretrain is not None:
        log.info('Use pretrain model...')
        state_dict = torch.load(args.pretrain)
        model.load_state_dict(state_dict)
        # Resume epoch is encoded in the checkpoint file name
        # ("clf-<model>-<acc>-<epoch>.pth", see the save path below).
        init_epoch = int(args.pretrain[:-4].split('-')[-1])
        log.info('start epoch from', init_epoch)
    else:
        log.info('Training from scratch')
        init_epoch = 0
    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    # Halve the learning rate every 20 epochs, but never below the clip.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20,
                                                gamma=0.5)
    LEARNING_RATE_CLIP = 1e-5
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    log.info('Start training...')
    for epoch in range(init_epoch, args.epoch):
        # NOTE(review): scheduler.step() before the epoch's optimizer.step()
        # calls is the pre-1.1 PyTorch convention; modern versions expect it
        # after the updates — confirm the intended schedule.
        scheduler.step()
        lr = max(optimizer.param_groups[0]['lr'], LEARNING_RATE_CLIP)
        log.debug(job='clf', model=args.model_name, gpu=args.gpu,
                  epoch='%d/%s' % (epoch, args.epoch), lr=lr)
        # Re-apply the (possibly clipped) learning rate to every param group.
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target = data
            target = target[:, 0]
            # (B, N, 3) -> (B, 3, N), the layout the networks expect.
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            model = model.train()
            pred, trans_feat = model(points)
            loss = F.nll_loss(pred, target.long())
            # PointNet only: regularize the learned feature-transform matrix.
            if args.feature_transform and args.model_name == 'pointnet':
                loss += feature_transform_reguliarzer(trans_feat) * 0.001
            loss.backward()
            optimizer.step()
            global_step += 1
        log.debug('clear cuda cache')
        torch.cuda.empty_cache()
        acc = test_clf(model, testDataLoader)
        log.info(loss='%.5f' % (loss.data))
        log.info(Test_Accuracy='%.5f' % acc)
        if acc >= best_tst_accuracy:
            best_tst_accuracy = acc
            # File name encodes model, accuracy and epoch (parsed on resume).
            fn_pth = 'clf-%s-%.5f-%04d.pth' % (args.model_name, acc, epoch)
            log.debug('Saving model....', fn_pth)
            torch.save(model.state_dict(),
                       os.path.join(checkpoints_dir, fn_pth))
        global_epoch += 1
    log.info(Best_Accuracy=best_tst_accuracy)
    log.info('End of training...')
def main(args):
    """Run a single test pass of a pretrained PointNet / PointNet++
    classifier over ModelNet40 and log the resulting accuracy.
    """
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # datapath = './data/ModelNet/'
    datapath = './data/modelnet40_ply_hdf5_2048/'

    # Rotation argument has the form "dd-dd": two two-digit bounds.
    if args.rotation is None:
        ROTATION = None
    else:
        ROTATION = (int(args.rotation[0:2]), int(args.rotation[3:5]))

    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    checkpoints_dir = Path('./experiment/checkpoints/')
    log_dir = Path('./experiment/logs/')
    for folder in (experiment_dir, checkpoints_dir, log_dir):
        folder.mkdir(exist_ok=True)

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("PointNet2")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(
        './experiment/logs/test_%s_' % args.model_name +
        str(datetime.datetime.now().strftime('%Y-%m-%d %H-%M')) + '.txt')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------Test---------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)

    '''DATA LOADING'''
    logger.info('Load dataset ...')
    train_data, train_label, test_data, test_label = load_data(
        datapath, classification=True)
    logger.info("The number of training data is: %d", train_data.shape[0])
    logger.info("The number of test data is: %d", test_data.shape[0])

    trainDataset = ModelNetDataLoader(train_data, train_label,
                                      rotation=ROTATION)
    if ROTATION is not None:
        print('The range of training rotation is', ROTATION)
    testDataset = ModelNetDataLoader(test_data, test_label, rotation=ROTATION)
    trainDataLoader = torch.utils.data.DataLoader(trainDataset,
                                                  batch_size=args.batchsize,
                                                  shuffle=True)
    # Keep the test set in its original order (no shuffling).
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batchsize,
                                                 shuffle=False)

    '''MODEL LOADING'''
    num_class = 40
    ###################### PointNetCls ######################
    if args.model_name == 'pointnet':
        classifier = PointNetCls(num_class, args.feature_transform).cuda()
    else:
        classifier = PointNet2ClsMsg().cuda()

    # A pretrained checkpoint is required — this script only tests.
    if args.pretrain is None:
        print('Please Input the pretrained model ***.pth')
        return
    print('Use pretrain model...')
    logger.info('Use pretrain model')
    checkpoint = torch.load(args.pretrain)
    start_epoch = checkpoint['epoch']
    classifier.load_state_dict(checkpoint['model_state_dict'])

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01,
                                    momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20,
                                                gamma=0.5)

    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'

    '''TestING'''
    logger.info('Start testing...')
    scheduler.step()
    acc = test(classifier.eval(), testDataLoader)
    print('\r Test %s: %f' % (blue('Accuracy'), acc))
    logger.info('Test Accuracy: %f', acc)
    logger.info('End of testing...')