Example #1
def vis(args):
    test_data, test_label = load_data(root, train=False)
    log.info(test_data=test_data.shape, test_label=test_label.shape)

    log.debug('Building Model', args.model_name)
    if args.model_name == 'pointnet':
        num_class = 40
        model = PointNetCls(num_class, args.feature_transform).cuda()
    else:
        model = PointNet2ClsMsg().cuda()

    torch.backends.cudnn.benchmark = True
    model = torch.nn.DataParallel(model)
    model.cuda()
    log.info('Using multi GPU:', args.gpu)

    if args.pretrain is None:
        log.err('No pretrain model')
        return

    log.debug('Loading pretrain model...')
    # NOTE: the model was wrapped in nn.DataParallel above, so the checkpoint's
    # state_dict keys must carry the 'module.' prefix for this load to succeed.
    checkpoint = torch.load(args.pretrain)
    model.load_state_dict(checkpoint)
    model.eval()

    log.info('Press space to exit, press Q for next frame')

    for idx in range(test_data.shape[0]):
        point_np = test_data[idx:idx + 1]
        gt = test_label[idx][0]

        points = torch.from_numpy(point_np)
        points = points.transpose(2, 1).cuda()

        pred, trans_feat = model(points)
        pred_choice = pred.data.max(1)[1]
        log.info(gt=class_names[gt],
                 pred_choice=class_names[pred_choice.cpu().numpy().item()])

        point_cloud = open3d.geometry.PointCloud()
        point_cloud.points = open3d.utility.Vector3dVector(point_np[0])

        vis = open3d.visualization.VisualizerWithKeyCallback()
        vis.create_window()
        vis.get_render_option().background_color = np.asarray([0, 0, 0])
        vis.add_geometry(point_cloud)
        # GLFW key code 32 is the space bar; exit() ends the whole script, while
        # Open3D's default Q/Esc handling only closes this window, so the loop
        # moves on to the next sample (matching the hint printed above).
        vis.register_key_callback(32, lambda vis: exit())
        vis.run()
        vis.destroy_window()
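The loop above opens and destroys one window per sample: the space bar is wired to exit(), while closing the window with Q/Esc lets the loop advance. Below is a minimal, self-contained sketch of the same callback pattern; show_cloud and the stop-flag dict are hypothetical and only illustrate the wiring, they are not part of the repository.

import numpy as np
import open3d


def show_cloud(points_xyz, stop_flag):
    """Show one (N, 3) point cloud; space sets stop_flag, Q/Esc just closes the window."""
    cloud = open3d.geometry.PointCloud()
    cloud.points = open3d.utility.Vector3dVector(points_xyz)

    vis = open3d.visualization.VisualizerWithKeyCallback()
    vis.create_window()
    vis.get_render_option().background_color = np.asarray([0, 0, 0])
    vis.add_geometry(cloud)

    def on_space(_vis):                 # GLFW key code 32 == space bar
        stop_flag['stop'] = True
        return False                    # no re-render needed

    vis.register_key_callback(32, on_space)
    vis.run()                           # blocks until the window is closed (Q/Esc)
    vis.destroy_window()


stop = {'stop': False}
for cloud_xyz in np.random.rand(3, 1024, 3):
    show_cloud(cloud_xyz, stop)
    if stop['stop']:
        break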
Example #2
is_resume = 0
if args.load_model and args.load_epoch != -1:
    is_resume = 1

if is_resume or args.mode == 'test':
    model = torch.load(args.load_model,
                       map_location='cuda:{}'.format(args.gpu))
    model.device_ids = [args.gpu]
    print('load model {}'.format(args.load_model))
else:
    model = PointNetCls(num_points=grasp_points_num,
                        input_chann=point_channel,
                        k=2)
if args.cuda:
    if args.gpu != -1:
        torch.cuda.set_device(args.gpu)
        model = model.cuda()
    else:
        device_id = [0, 1, 2, 3]
        torch.cuda.set_device(device_id[0])
        model = nn.DataParallel(model, device_ids=device_id).cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=30, gamma=0.5)


def train(model, loader, epoch):
    # Since PyTorch 1.1 the recommended order is to call scheduler.step() after
    # optimizer.step(); stepping it here once per epoch follows the older convention.
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
Example #3
import torch.utils.data
# import torch.nn as nn
# import torch.nn.functional as F
import numpy as np
import sys
from os import path
from model.pointnet import PointNetCls
import torch.multiprocessing as mp

# NOTE: path.abspath("__file__") (a string literal) resolves against the current
# working directory, and this append runs after the `model.pointnet` import above,
# so that import only succeeds if the repo root is already on sys.path.
sys.path.append(path.dirname(path.dirname(path.abspath("__file__"))))

# torch.cuda.manual_seed(1)  # don't delete

grasp_points_num = 1024
model = PointNetCls(num_points=grasp_points_num, input_chann=3, k=2)
model.cuda()

model.eval()
torch.set_grad_enabled(False)


def load_weight(weights_path):
    print('[Python] Load weight: {}'.format(weights_path))
    model.load_state_dict(torch.load(weights_path))


def classify_pcs(local_pcs, output_cls=0):
    """ Classify point clouds FPS:650 """
    print("[Python] Classify point clouds...")

    #     print(local_pcs)
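The snippet above is cut off inside classify_pcs. A minimal sketch of how a batch of local point clouds could be pushed through the module-level PointNetCls model, mirroring the (batch, points, 3) to (batch, 3, points) transpose and the argmax used in Example #1; the helper name, the assumed input shape, and the use of torch.no_grad() are illustrative assumptions, not code from the source.

import numpy as np
import torch


def classify_point_clouds(local_pcs):
    """Hypothetical helper: local_pcs is assumed to be a (B, N, 3) array of points."""
    with torch.no_grad():
        points = torch.from_numpy(np.asarray(local_pcs, dtype=np.float32))
        points = points.transpose(2, 1).cuda()   # (B, 3, N), as in Example #1
        pred, _ = model(points)                  # the model returns two values, as in Example #1
        return pred.max(1)[1].cpu().numpy()      # predicted class index per cloud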
Example #4
    worker_init_fn=worker_init_fn,
    collate_fn=my_collate,
)

is_resume = 0
if args.load_model and args.load_epoch != -1:
    is_resume = 1

model = PointNetCls(num_points=grasp_points_num,
                    input_chann=point_channel,
                    k=2)
if is_resume or args.mode == 'test':
    model.load_state_dict(torch.load(args.load_model))
    print('load weights {}'.format(args.load_model))
if args.cuda:
    base_model = model.cuda()
    model = nn.DataParallel(base_model)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=30, gamma=0.5)


def train(model, dataset, loader, epoch):
    scheduler.step()
    dataset.train()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long()
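The batch loops in Examples #2 and #4 are both truncated at this point. The sketch below shows one plausible way such an epoch could continue, written as a standalone function; it is modelled on the commented-out training loop in Example #5 and on standard NLL-loss classification, not on the authors' exact code, and train_one_epoch, the device argument, and the returned accuracy are assumptions.

import torch
import torch.nn.functional as F


def train_one_epoch(model, loader, optimizer, device='cuda'):
    """Hypothetical epoch loop for a PointNet-style classifier."""
    model.train()
    correct, dataset_size = 0, 0
    for data, target in loader:
        dataset_size += data.shape[0]
        data = data.float().to(device)
        target = target.long().view(-1).to(device)   # 1-D tensor of class indices

        optimizer.zero_grad()
        output, _ = model(data)            # the classifier returns (log-probs, trans_feat)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()

        pred = output.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).sum().item()
    return float(correct) / max(dataset_size, 1)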
Example #5
def main(testDataset):
    args = parse_args()
    '''HYPER PARAMETER'''
    # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu if args.multi_gpu is None else '0,1,2,3'

    datapath = './data/ModelNet/'

    if args.rotation is not None:
        ROTATION = (int(args.rotation[0:2]),int(args.rotation[3:5]))
    else:
        ROTATION = None

    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = Path('./experiment/checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = Path('./experiment/logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    logger = logging.getLogger("PointNet2")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('./experiment/logs/train_%s_'%args.model_name+ str(datetime.datetime.now().strftime('%Y-%m-%d %H-%M'))+'.txt')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info('---------------------------------------------------TRAINING---------------------------------------------------')
    logger.info('PARAMETER ...')
    logger.info(args)

    '''DATA LOADING'''
    logger.info('Load dataset ...')
    # train_data, train_label, test_data, test_label = load_data(datapath, classification=True)
    # logger.info("The number of training data is: %d",train_data.shape[0])
    # logger.info("The number of test data is: %d", test_data.shape[0])
    # trainDataset = ModelNetDataLoader(train_data, train_label, rotation=ROTATION)
    #if ROTATION is not None:
    #    print('The range of training rotation is',ROTATION)
    # testDataset = ModelNetDataLoader(test_data, test_label, rotation=ROTATION)
#     trainDataLoader = torch.utils.data.DataLoader(trainDataset, batch_size=args.batchsize, shuffle=True)
    testDataLoader = torch.utils.data.DataLoader(testDataset, batch_size=args.batchsize, shuffle=False)

    '''MODEL LOADING'''
    num_class = 40
    classifier = PointNetCls(num_class,args.feature_transform).cuda() if args.model_name == 'pointnet' else PointNet2ClsMsg().cuda()

    '''GPU selection and multi-GPU'''
    if args.multi_gpu is not None:
        device_ids = [int(x) for x in args.multi_gpu.split(',')]
        torch.backends.cudnn.benchmark = True
        classifier.cuda(device_ids[0])
        classifier = torch.nn.DataParallel(classifier, device_ids=device_ids)
    else:
        classifier.cuda()

    
    
    
    '''Use pretrained model'''
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0


    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'
    
    # Force a single pass through the epoch loop below, regardless of any loaded checkpoint.
    start_epoch = 0
    args.epoch = 1
    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch,args.epoch):
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        logger.info('Epoch %d (%d/%s):', global_epoch + 1, epoch + 1, args.epoch)

#         scheduler.step()
#         for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
#             points, target = data
#             target = target[:, 0]
#             points = points.transpose(2, 1)
#             points, target = points.cuda(), target.cuda()
#             optimizer.zero_grad()
#             classifier = classifier.train()
#             pred, trans_feat = classifier(points)
#             loss = F.nll_loss(pred, target.long())
#             if args.feature_transform and args.model_name == 'pointnet':
#                 loss += feature_transform_reguliarzer(trans_feat) * 0.001
#             loss.backward()
#             optimizer.step()
#             global_step += 1

        # NOTE: trainDataLoader is commented out above, so args.train_metric must be
        # falsy here or this line raises NameError.
        train_acc = test(classifier.eval(), trainDataLoader) if args.train_metric else None
        acc, fts = test(classifier, testDataLoader)
        # return fts
        # print('\r Loss: %f' % loss.data)
        # logger.info('Loss: %.2f', loss.data)
        if args.train_metric:
            print('Train Accuracy: %f' % train_acc)
            logger.info('Train Accuracy: %f', (train_acc))
        print('\r Test %s: %f' % (blue('Accuracy'),acc))
        logger.info('Test Accuracy: %f', acc)
        # Returning the extracted features here makes the checkpoint-saving block below unreachable.
        return fts
        if (acc >= best_tst_accuracy) and epoch > 5:
            best_tst_accuracy = acc
            logger.info('Save model...')
            save_checkpoint(
                global_epoch + 1,
                train_acc if args.train_metric else 0.0,
                acc,
                classifier,
                optimizer,
                str(checkpoints_dir),
                args.model_name)
            print('Saving model....')
        global_epoch += 1
    print('Best Accuracy: %f'%best_tst_accuracy)

    logger.info('End of training...')
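Examples #1 and #5 rely on an args object produced by parse_args(). The sketch below reconstructs a parser covering only the attributes these snippets actually read; the flag names come from that usage, but the defaults and help strings are assumptions, and the real parser in the repository may differ.

import argparse


def parse_args():
    """Hypothetical parser covering only the attributes used in Examples #1 and #5."""
    parser = argparse.ArgumentParser('PointNet/PointNet++ classification')
    parser.add_argument('--model_name', default='pointnet', choices=['pointnet', 'pointnet2'])
    parser.add_argument('--batchsize', type=int, default=24)
    parser.add_argument('--epoch', type=int, default=200)
    parser.add_argument('--learning_rate', type=float, default=0.001)
    parser.add_argument('--decay_rate', type=float, default=1e-4)
    parser.add_argument('--optimizer', default='Adam', choices=['Adam', 'SGD'])
    parser.add_argument('--gpu', default='0', help='GPU index for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--multi_gpu', default=None, help='comma-separated ids, e.g. "0,1,2,3"')
    parser.add_argument('--pretrain', default=None, help='path to a checkpoint to load')
    parser.add_argument('--rotation', default=None, help='rotation range string, e.g. "00-90"')
    parser.add_argument('--feature_transform', action='store_true')
    parser.add_argument('--train_metric', action='store_true')
    return parser.parse_args()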
Example #6
def main():

    train_loader = torch.utils.data.DataLoader(
        PointGraspOneViewDataset(
            grasp_points_num=grasp_points_num,
            path=args.data_path,
            tag='train',
            grasp_amount_per_file=6500,
            thresh_good=thresh_good,
            thresh_bad=thresh_bad,
        ),
        batch_size=args.batch_size,
        num_workers=32,
        pin_memory=True,
        shuffle=True,
        worker_init_fn=worker_init_fn,
        collate_fn=my_collate,
    )

    test_loader = torch.utils.data.DataLoader(
        PointGraspOneViewDataset(
            grasp_points_num=grasp_points_num,
            path=args.data_path,
            tag='test',
            grasp_amount_per_file=500,
            thresh_good=thresh_good,
            thresh_bad=thresh_bad,
            with_obj=True,
        ),
        batch_size=args.batch_size,
        num_workers=32,
        pin_memory=True,
        shuffle=True,
        worker_init_fn=worker_init_fn,
        collate_fn=my_collate,
    )

    is_resume = 0
    if args.load_model and args.load_epoch != -1:
        is_resume = 1

    if is_resume or args.mode == 'test':
        model = torch.load(args.load_model,
                           map_location='cuda:{}'.format(args.gpu))
        model.device_ids = [args.gpu]
        print('load model {}'.format(args.load_model))
    else:
        model = PointNetCls(num_points=grasp_points_num,
                            input_chann=point_channel,
                            k=2)
    if args.cuda:
        if args.gpu != -1:
            torch.cuda.set_device(args.gpu)
            model = model.cuda()
        else:
            device_id = [0, 1, 2, 3]
            torch.cuda.set_device(device_id[0])
            model = nn.DataParallel(model, device_ids=device_id).cuda()

    if args.mode == 'train':
        # When resuming, continue from the loaded epoch; otherwise start from 0.
        for epoch in range(is_resume * args.load_epoch, args.epoch):
            acc_train = train(model, train_loader, epoch)
            print('Train done, acc={}'.format(acc_train))
            acc, loss = test(model, test_loader)
            print('Test done, acc={}, loss={}'.format(acc, loss))
            logger.add_scalar('train_acc', acc_train, epoch)
            logger.add_scalar('test_acc', acc, epoch)
            logger.add_scalar('test_loss', loss, epoch)
            if epoch % args.save_interval == 0:
                path = os.path.join(args.model_path,
                                    args.tag + '_{}.model'.format(epoch))
                torch.save(model, path)
                print('Save model @ {}'.format(path))
    else:
        print('testing...')
        acc, loss = test(model, test_loader)
        print('Test done, acc={}, loss={}'.format(acc, loss))
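Note that the examples use three incompatible checkpoint formats: Example #6 pickles the whole model object, Examples #1 and #4 load a bare state_dict, and Example #5 expects a dict with 'epoch' and 'model_state_dict' keys. A small sketch of the more portable state_dict style follows; save_state and load_state are hypothetical helpers, unrelated to the save_checkpoint called in Example #5.

import torch


def save_state(path, epoch, model, optimizer):
    """Save weights and optimizer state instead of the pickled model object."""
    # If the model is wrapped in nn.DataParallel, its keys carry a 'module.' prefix;
    # saving model.module.state_dict() instead keeps the checkpoint wrapper-agnostic.
    torch.save({
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }, path)


def load_state(path, model, optimizer, device='cuda:0'):
    """Restore weights onto an already-constructed model; returns the saved epoch."""
    checkpoint = torch.load(path, map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    return checkpoint['epoch']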