Example #1
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    torch.backends.cudnn.enabled = False
    
    '''DATA LOADING'''
    TEST_DATASET = ModelNetDataLoader(root=args.data_root,
                                      tasks=args.test_tasks,
                                      labels=args.test_labels,
                                      partition='test',
                                      npoint=args.num_point,
                                      normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=2)

    '''MODEL LOADING'''
    num_class = 40
    files = os.listdir(args.model_dir+'/logs')
    for f in files:
        if f.endswith('txt'):
            model_name = f.split('.')[0]

    MODEL = importlib.import_module(model_name)

    classifier = MODEL.get_model(num_class, normal_channel=args.normal).cuda()
    checkpoint = torch.load(str(args.model_dir) + '/checkpoints/best_model.pth')
    classifier.load_state_dict(checkpoint['model_state_dict'])

    with torch.no_grad():
        instance_acc, class_acc = test(classifier.eval(), testDataLoader, vote_num=args.num_votes)
        print('Test Instance Accuracy: %f, Class Accuracy: %f' % (instance_acc, class_acc))
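
Several of these examples call a test(...) helper that is not shown in the snippet. Below is a minimal sketch, assuming the voting-based evaluation loop conventional in Pointnet_Pointnet2_pytorch-style repositories; the actual helper lives elsewhere in each source file, and other examples below use repo-specific variants with different signatures.

import numpy as np
import torch
from tqdm import tqdm

def test(model, loader, num_class=40, vote_num=1):
    mean_correct = []
    class_acc = np.zeros((num_class, 3))
    for _, (points, target) in tqdm(enumerate(loader), total=len(loader)):
        points, target = points.cuda(), target.cuda()
        points = points.transpose(2, 1)
        # average the logits over vote_num stochastic forward passes
        vote_pool = torch.zeros(target.size()[0], num_class).cuda()
        for _ in range(vote_num):
            pred, _ = model(points)
            vote_pool += pred
        pred = vote_pool / vote_num
        pred_choice = pred.data.max(1)[1]
        # per-class accuracy bookkeeping
        for cat in np.unique(target.cpu()):
            classacc = pred_choice[target == cat].eq(
                target[target == cat].long().data).cpu().sum()
            class_acc[cat, 0] += classacc.item() / float(points[target == cat].size()[0])
            class_acc[cat, 1] += 1
        correct = pred_choice.eq(target.long().data).cpu().sum()
        mean_correct.append(correct.item() / float(points.size()[0]))
    class_acc[:, 2] = class_acc[:, 0] / class_acc[:, 1]
    return np.mean(mean_correct), np.mean(class_acc[:, 2])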
Example #2
def evaluate(args):
    test_data, test_label = load_data(
        'experiment/data/modelnet40_ply_hdf5_2048/', train=False)
    testDataset = ModelNetDataLoader(test_data, test_label)
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False)

    log.debug('Building Model', args.model_name)
    if args.model_name == 'pointnet':
        num_class = 40
        model = PointNetCls(num_class, args.feature_transform)
    else:
        model = PointNet2ClsMsg()

    torch.backends.cudnn.benchmark = True
    model = torch.nn.DataParallel(model).cuda()
    log.debug('Using gpu:', args.gpu)

    if args.pretrain is None:
        log.err('No pretrain model')
        return

    log.debug('Loading pretrain model...')
    state_dict = torch.load(args.pretrain)
    model.load_state_dict(state_dict)

    acc = test_clf(model.eval(), testDataLoader)
    log.msg(Test_Accuracy='%.5f' % (acc))
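Example #3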
def run_trainer_evaluator(simclr, classifier, optimizer, args):
    data_train = ModelNetDataLoader(root=DATA_PATH,
                                    npoint=1024,
                                    split='train',
                                    normal_channel=False)
    data_test = ModelNetDataLoader(root=DATA_PATH,
                                   npoint=1024,
                                   split='test',
                                   normal_channel=False)
    best_vloss = INFINITE  # INFINITE is presumably float('inf'), defined elsewhere in the source

    for epoch in range(args.epochs):
        print(f'Starting epoch [{epoch}/{args.epochs}]')
        # create the data loaders for train and validation (note: these could be built once, outside the epoch loop)
        train_loader = torch.utils.data.DataLoader(data_train,
                                                   batch_size=args.batch_size,
                                                   shuffle=True)
        test_loader = torch.utils.data.DataLoader(data_test,
                                                  batch_size=args.batch_size,
                                                  shuffle=False)

        # train and retrieve training loss and accuracy
        t_loss, t_acc = train_validate(simclr,
                                       classifier,
                                       optimizer,
                                       train_loader,
                                       args,
                                       is_train=True)

        # retrieve validation loss and accuracy
        v_loss, v_acc = train_validate(simclr,
                                       classifier,
                                       optimizer,
                                       test_loader,
                                       args,
                                       is_train=False)

        print(
            f'\nTotal epoch losses: train: {round(t_loss,4)} - validation: {round(v_loss,4)}'
        )
        print(
            f'\nTotal epoch acc: train: {round(t_acc,4)} - validation: {round(v_acc,4)}\n',
            end='\r')

        # if the current loss is the new best, remember it (a checkpoint save, e.g. torch.save(...), would presumably follow in the full source)
        if v_loss < best_vloss:
            best_vloss = v_loss
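Example #4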
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    experiment_dir = 'log/classification/' + args.log_dir
    '''LOG'''
    args = parse_args()  # note: this re-parses the CLI, overwriting the args passed into main()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/eval.txt' % experiment_dir)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    '''DATA LOADING'''
    log_string('Load dataset ...')
    # classification uses the ModelNet40 dataset
    data_path = '/home/wgk/dataset/Pointnet_Pointnet2_pytorch/modelnet40_normal_resampled/'

    test_dataset = ModelNetDataLoader(root=data_path,
                                      args=args,
                                      split='test',
                                      process_data=False)
    testDataLoader = torch.utils.data.DataLoader(test_dataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=10)
    '''MODEL LOADING'''
    num_class = args.num_category
    model_name = os.listdir(experiment_dir + '/logs')[0].split('.')[0]
    model = importlib.import_module(model_name)

    classifier = model.get_model(num_class, normal_channel=args.use_normals)
    if not args.use_cpu:
        classifier = classifier.cuda()

    checkpoint = torch.load(
        str(experiment_dir) + '/checkpoints/best_model.pth')
    classifier.load_state_dict(checkpoint['model_state_dict'])

    with torch.no_grad():
        instance_acc, class_acc = test(classifier.eval(),
                                       testDataLoader,
                                       vote_num=args.num_votes,
                                       num_class=num_class)
        log_string('Test Instance Accuracy: %f, Class Accuracy: %f' %
                   (instance_acc, class_acc))
Example #5
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    experiment_dir = 'log/classification/' + args.log_dir
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/eval.txt' % experiment_dir)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    '''DATA LOADING'''
    log_string('Load dataset ...')
    DATA_PATH = 'data/modelnet40_normal_resampled/'
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                      npoint=args.num_point,
                                      split='test',
                                      normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=4)
    '''MODEL LOADING'''
    num_class = 40
    model_name = os.listdir(experiment_dir + '/logs')[0].split('.')[0]
    MODEL = importlib.import_module(model_name)

    classifier = MODEL.get_model(num_class, normal_channel=args.normal)

    checkpoint = torch.load(str(experiment_dir) +
                            '/checkpoints/best_model.pth',
                            map_location=torch.device('cpu'))
    classifier.load_state_dict(checkpoint['model_state_dict'])

    with torch.no_grad():
        start_overall = time.time()
        instance_acc, class_acc = test(classifier.eval(),
                                       testDataLoader,
                                       vote_num=args.num_votes)
        log_string('Test Instance Accuracy: %f, Class Accuracy: %f' %
                   (instance_acc, class_acc))
        overall_time = time.time() - start_overall
        with open("overall.txt", 'a') as f:
            f.write(str(overall_time) + "\n")
Example #6
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    experiment_dir = args.log_dir
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/eval.txt' % experiment_dir)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    '''DATA LOADING'''
    log_string('Load dataset ...')
    DATA_PATH = 'data/modelnet40_normal_resampled/'
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                      npoint=args.num_point,
                                      split='test',
                                      normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=4)
    '''MODEL LOADING'''
    num_class = 40
    model_name = "scratchpad__fixed_bug"
    MODEL = importlib.import_module(model_name)

    classifier = MODEL.get_model(num_class, normal_channel=args.normal).cuda()

    checkpoint = torch.load(str(experiment_dir) + '/seq_conv_best_model.pth')
    classifier.load_state_dict(checkpoint['model_state_dict'])

    with torch.no_grad():
        instance_acc, class_acc = test(classifier.eval(),
                                       testDataLoader,
                                       vote_num=args.num_votes)
        log_string('Test Instance Accuracy: %f, Class Accuracy: %f' %
                   (instance_acc, class_acc))
Example #7
from models.pointnet import PointNetCls, feature_transform_regularizer
from models.pointnet2 import PointNet2ClsMsg
from models.dgcnn import DGCNN
from models.pointcnn import PointCNNCls

from utils import progress_bar, log_row

sys.path.append("./emd/")
import emd_module as emd

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

DATA_PATH = '/data/datasets/modelnet40_normal_resampled/'
TEST_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                  npoint=1024,
                                  split='test',
                                  normal_channel=False)
test_loader = torch.utils.data.DataLoader(TEST_DATASET,
                                          batch_size=1,
                                          shuffle=False,
                                          num_workers=4,
                                          drop_last=False)
PointcloudScaleAndTranslate = PointcloudScaleAndTranslate()  # note: rebinds the class name to an instance; later code presumably calls the instance
print('======> Successfully loaded!')


def cal_loss(pred, gold, smoothing=True):
    ''' Calculate cross entropy loss, apply label smoothing if needed. '''
    gold = gold.contiguous().view(-1)

    if smoothing:
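        # (the source snippet is truncated here; a standard label-smoothing
        # body, as in DGCNN-style repositories, would plausibly continue:)
        eps = 0.2
        n_class = pred.size(1)
        # one-hot targets, smoothed: 1-eps on the true class, eps spread over the rest
        one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
        one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
        log_prb = F.log_softmax(pred, dim=1)  # assumes F = torch.nn.functional
        loss = -(one_hot * log_prb).sum(dim=1).mean()
    else:
        loss = F.cross_entropy(pred, gold, reduction='mean')
    return loss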
Example #8
def main(args):
    config = yaml.load(open("./config/config.yaml", "r"),
                       Loader=yaml.FullLoader)

    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('classification')
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    '''DATA LOADING'''
    log_string('Load dataset ...')
    DATA_PATH = 'data/modelnet40_normal_resampled/'

    TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                       npoint=args.num_point,
                                       split='train',
                                       normal_channel=args.normal)
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                      npoint=args.num_point,
                                      split='test',
                                      normal_channel=args.normal)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=8)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=8)
    '''MODEL LOADING'''
    num_class = 40
    MODEL = importlib.import_module(args.model)
    shutil.copy('./models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('./models/pointnet_util.py', str(experiment_dir))

    # online network
    online_network = MODEL.get_model(num_class,
                                     normal_channel=args.normal).cuda()
    criterion = MODEL.get_loss().cuda()
    # predictor network
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    predictor = MLPHead(
        in_channels=online_network.projetion.net[-1].out_features,  # 'projetion' [sic]: as named in the source model
        **config['network']['projection_head']).to(device)

    # target encoder
    target_network = MODEL.get_model(num_class,
                                     normal_channel=args.normal).cuda()
    # load pre-trained model if defined

    try:
        checkpoint = torch.load('checkpoints/model.pth')
        online_network.load_state_dict(checkpoint['online_network_state_dict'])
        target_network.load_state_dict(checkpoint['target_network_state_dict'])
        log_string('Use pretrain model')
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0

#    if args.optimizer == 'Adam':
#        optimizer = torch.optim.Adam(
#            classifier.parameters(),
#            lr=args.learning_rate,
#            betas=(0.9, 0.999),
#            eps=1e-08,
#            weight_decay=args.decay_rate
#        )
#    else:
#        optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)

    optimizer = torch.optim.SGD(
        list(online_network.parameters()) + list(predictor.parameters()),
        **config['optimizer']['params'])
    trainer = BYOLTrainer(online_network=online_network,
                          target_network=target_network,
                          predictor=predictor,
                          optimizer=optimizer,
                          device=device,
                          **config['trainer'])

    trainer.train_pointnet(trainDataLoader, testDataLoader)
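
Example #8 builds its predictor from an MLPHead class that is not shown. A minimal sketch, assuming the common BYOL-style two-layer head; the class name matches the call site, but the internals and the keyword names (mlp_hidden_size, projection_size) are assumptions:

import torch.nn as nn

class MLPHead(nn.Module):
    # assumed BYOL-style projection/prediction head: linear -> BN -> ReLU -> linear
    def __init__(self, in_channels, mlp_hidden_size, projection_size):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(in_channels, mlp_hidden_size),
            nn.BatchNorm1d(mlp_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(mlp_hidden_size, projection_size),
        )

    def forward(self, x):
        return self.net(x)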
Example #9
def main(args):
    # helper that writes to both the logger and the console
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu  # 设置GPU的编号(可以多GPU运行)
    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))  # 获取当前时间
    experiment_dir = Path('./log/')  # 对目录初始化path类
    experiment_dir.mkdir(exist_ok=True)  # 创建目录./log/,exist_ok=True目录存在不报错
    # 创建classification目录
    experiment_dir = experiment_dir.joinpath('classification')
    experiment_dir.mkdir(exist_ok=True)  # 目录存在不报错
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath(
        'checkpoints/')  # create the checkpoints directory
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    # log level INFO: emit info, warning, error and critical messages
    logger.setLevel(logging.INFO)
    # configure the log format
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # log file path: log_dir/<args.model>.txt
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    '''DATA LOADING'''
    log_string('Load dataset ...')
    DATA_PATH = 'data/modelnet40_normal_resampled/'

    TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                       npoint=args.num_point,
                                       split='train',
                                       normal_channel=args.normal)
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                      npoint=args.num_point,
                                      split='test',
                                      normal_channel=args.normal)
    import torch.utils.data.dataloader
    # wrap TRAIN_DATASET with the given batch_size; shuffle=True randomizes the order, num_workers enables multi-process loading
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=4)
    # the test set does not need shuffling
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=4)
    '''MODEL LOADING'''
    num_class = 40
    # dynamically import the model module, equivalent to "import <args.model>"
    MODEL = importlib.import_module(args.model)
    # copy the files (with permissions) into experiment_dir, e.g. 'log/classification/pointnet2_cls_msg'
    shutil.copy('./models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('./models/pointnet_util.py', str(experiment_dir))

    # call the factory functions in the model module (e.g. pointnet_cls.py)
    classifier = MODEL.get_model(num_class, normal_channel=args.normal).cuda()
    criterion = MODEL.get_loss().cuda()

    # resume training after an interruption
    try:
        # read the .pth file; its contents are stored as a dict
        # the model state_dict maps each layer to its parameters (weights, biases, ...)
        # (note: only layers with trainable parameters, such as conv and linear layers, appear in a model's state_dict)
        # the optimizer also has a state_dict, holding its state and hyperparameters (lr, momentum, weight_decay, ...)
        checkpoint = torch.load(
            str(experiment_dir) + '/checkpoints/best_model.pth')
        # read the epoch
        start_epoch = checkpoint['epoch']
        # read model_state_dict from the checkpoint and restore the parameters with load_state_dict
        classifier.load_state_dict(checkpoint['model_state_dict'])
        # write to the log
        log_string('Use pretrain model')
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0

    # create the optimizer, which holds the current parameter state and updates parameters from computed gradients
    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            # parameters to optimize
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)
    # todo: https://blog.csdn.net/qyhaill/article/details/103043637
    # decay the learning rate once every step_size epochs
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.7)
    global_epoch = 0
    global_step = 0
    best_instance_acc = 0.0
    best_class_acc = 0.0
    mean_correct = []
    '''TRAINING'''
    # write to the log
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))

        # step the LR scheduler once per epoch (note: recent PyTorch expects scheduler.step() after the epoch's optimizer steps)
        scheduler.step()
        # tqdm: total = number of iterations (defaults to len of the iterable); smoothing in [0, 1]: 0 = average speed, 1 = instantaneous speed
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            # unpack the batch: points = point sets, target = corresponding labels
            points, target = data
            # convert the tensor to numpy
            points = points.data.numpy()
            '''data augmentation'''
            # randomly drop points
            points = provider.random_point_dropout(points)
            # random scaling
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :,
                                                                         0:3])
            # randomly shift the point cloud
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            # back to a tensor
            points = torch.Tensor(points)
            # extract the target
            target = target[:, 0]
            # the network expects channels-first input, so transpose to (B, C, N)
            points = points.transpose(2, 1)
            # move to GPU memory
            points, target = points.cuda(), target.cuda()
            # zero the gradients
            optimizer.zero_grad()
            # training mode
            classifier = classifier.train()
            # forward pass returns the two outputs of pointnet_cls (predictions and the feature transform)
            pred, trans_feat = classifier(points)
            # loss
            loss = criterion(pred, target.long(), trans_feat)
            # index of the max logit
            pred_choice = pred.data.max(1)[1]
            # compare pred_choice against the target labels (boolean) and sum to count correct predictions
            correct = pred_choice.eq(target.long().data).cpu().sum()
            # correct.item() extracts the scalar; append the batch accuracy to mean_correct
            mean_correct.append(correct.item() / float(points.size()[0]))
            # backpropagate
            loss.backward()
            # apply the parameter update
            optimizer.step()
            global_step += 1

        train_instance_acc = np.mean(mean_correct)
        log_string('Train Instance Accuracy: %f' % train_instance_acc)

        # no gradient computation is needed inside this block (test mode)
        with torch.no_grad():
            # classifier.eval(): disables dropout and switches BN to its running statistics
            instance_acc, class_acc = test(classifier.eval(), testDataLoader)

            if (instance_acc >= best_instance_acc):
                best_instance_acc = instance_acc
                best_epoch = epoch + 1

            if (class_acc >= best_class_acc):
                best_class_acc = class_acc
            log_string('Test Instance Accuracy: %f, Class Accuracy: %f' %
                       (instance_acc, class_acc))
            log_string('Best Instance Accuracy: %f, Class Accuracy: %f' %
                       (best_instance_acc, best_class_acc))

            if (instance_acc >= best_instance_acc):
                logger.info('Save model...')
                savepath = str(checkpoints_dir) + '/best_model.pth'
                log_string('Saving at %s' % savepath)
                state = {
                    'epoch': best_epoch,
                    'instance_acc': instance_acc,
                    'class_acc': class_acc,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, savepath)
            global_epoch += 1

    logger.info('End of training...')
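
Example #9 augments batches with provider.random_point_dropout, provider.random_scale_point_cloud and provider.shift_point_cloud. For reference, a sketch of the shift augmentation in the usual provider.py style (input shaped BxNx3; the real provider module ships with the source repository):

import numpy as np

def shift_point_cloud(batch_data, shift_range=0.1):
    """Randomly shift each point cloud in the batch. Input: BxNx3 ndarray."""
    B, N, C = batch_data.shape
    shifts = np.random.uniform(-shift_range, shift_range, (B, 3))
    for b in range(B):
        # add the same random offset to every point of cloud b
        batch_data[b, :, :] += shifts[b, :]
    return batch_data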
Example #10
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    datapath = './data/ModelNet/'
    auxiliarypath = os.path.join(datapath, 'auxiliary')
    if not os.path.exists(auxiliarypath):  # 'not', not '~': bitwise NOT of a bool is always truthy
        try:
            os.mkdir(auxiliarypath)
        except:
            pass
    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(
        str(experiment_dir) + '/%sModelNet40-' % args.model_name +
        str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(
        str(log_dir) + 'train_%s_cls.txt' % args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------TRAINING---------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)
    '''MODEL LOADING'''
    num_class = 40
    classifier = MainNet(num_class,
                         normal=args.with_normal,
                         random_sp=args.random_sample,
                         random_nb=args.random_neighbor).cuda()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=30,
                                                gamma=0.5)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'
    '''DATA LOADING'''
    logger.info('Load dataset ...')
    train_data, train_label, test_data, test_label = load_normal_data(
        './data/modelnet40_normal_resampled/')
    # logger.info('construct_MLS for train data...')
    # construct_MLS_multi(train_data, auxiliarypath, classifier.point_num)
    # logger.info('construct_MLS for test data...')
    # construct_MLS_multi(test_data, auxiliarypath, classifier.point_num, phase='test')

    train_neighbor_lists, train_data_idx_lists, train_local_axises = loadAuxiliaryInfo(
        auxiliarypath, classifier.point_num)
    logger.info("The number of training data is: %d", train_data.shape[0])
    logger.info("The number of test data is: %d", test_data.shape[0])
    trainDataset = ModelNetDataLoader(train_data, train_label,
                                      train_neighbor_lists,
                                      train_data_idx_lists, train_local_axises)
    test_neighbor_lists, test_data_idx_lists, test_local_axises = loadAuxiliaryInfo(
        auxiliarypath, classifier.point_num, phase='test')
    testDataset = ModelNetDataLoader(test_data, test_label,
                                     test_neighbor_lists, test_data_idx_lists,
                                     test_local_axises)
    trainDataLoader = torch.utils.data.DataLoader(trainDataset,
                                                  batch_size=args.batchsize,
                                                  shuffle=True)
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batchsize,
                                                 shuffle=False)
    '''TRAINING'''
    logger.info('Start training...')
    first_time = True
    for epoch in range(start_epoch, args.epoch):
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        logger.info('Epoch %d (%d/%s):', global_epoch + 1, epoch + 1,
                    args.epoch)

        scheduler.step()
        losses = 0
        lc_stds = 0
        lc_consistences = 0
        mean_correct = []
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target, neighbor_lists, data_idx_lists, local_axises = data
            # target = target[:, 0]
            # points = points.transpose(2, 1)
            if args.with_normal:
                points, target, neighbor_lists, data_idx_lists, local_axises = \
                    points.float().cuda(), target.cuda(), neighbor_lists.cuda(),  data_idx_lists.cuda(), local_axises.cuda()
            else:
                points, target, neighbor_lists, data_idx_lists, local_axises = \
                    points[:,:,0:3].float().cuda(), target.cuda(), neighbor_lists.cuda(), data_idx_lists.cuda(), local_axises.cuda()

            optimizer.zero_grad()
            classifier = classifier.train()
            pred, lc_std, lc_consistence = classifier(points, neighbor_lists,
                                                      data_idx_lists,
                                                      local_axises)
            loss = F.nll_loss(pred, target.long())
            total_loss = loss - lc_std - lc_consistence
            total_loss.backward()
            # print(classifier.axis_net.fc1.weight.grad)
            optimizer.step()
            global_step += 1
            losses += loss.item()
            lc_stds += lc_std.item()
            lc_consistences += lc_consistence.item()

            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            mean_correct.append(correct.item() / float(points.size()[0]))
        if epoch % 10 == 9:
            train_acc = test(classifier.eval(), trainDataLoader,
                             args.with_normal) if args.train_metric else None
            writer3.add_scalar('quadratic', train_acc, global_step=epoch)
        acc = test(classifier.eval(), testDataLoader, args.with_normal)
        writer1.add_scalar('quadratic',
                           losses / (batch_id + 1),
                           global_step=epoch)
        writer2.add_scalar('quadratic', acc, global_step=epoch)

        print('\r Loss: %f' % float(losses / (batch_id + 1)),
              'lc_std: %f' % float(lc_stds / (batch_id + 1)),
              'lc_consistences: %f' % float(lc_consistences / (batch_id + 1)),
              'acc: %f' % np.mean(mean_correct))
        logger.info('Loss: %.2f', losses / (batch_id + 1))
        if args.train_metric and epoch % 10 == 9:
            print('Train Accuracy: %f' % train_acc)
            logger.info('Train Accuracy: %f', (train_acc))
        print(
            '\r Test %s: %f   ***  %s: %f' %
            (blue('Accuracy'), acc, blue('Best Accuracy'), best_tst_accuracy))
        logger.info('Test Accuracy: %f  *** Best Test Accuracy: %f', acc,
                    best_tst_accuracy)

        if (acc >= best_tst_accuracy) and epoch > 10:
            best_tst_accuracy = acc
            logger.info('Save model...')
            save_checkpoint(global_epoch + 1,
                            train_acc if args.train_metric else 0.0, acc,
                            classifier, optimizer, str(checkpoints_dir),
                            args.model_name)
            print('Saving model....')
        global_epoch += 1
    print('Best Accuracy: %f' % best_tst_accuracy)

    logger.info('End of training...')
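
Examples #10 and #12 persist their best model through a save_checkpoint(...) helper that is not shown. A minimal sketch matching the call sites' argument order; the filename format is an assumption:

import torch

def save_checkpoint(epoch, train_accuracy, test_accuracy, model, optimizer,
                    path, modelnet='checkpoint'):
    # bundle everything the examples later read back via torch.load(...)
    state = {
        'epoch': epoch,
        'train_accuracy': train_accuracy,
        'test_accuracy': test_accuracy,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    savepath = '%s/%s-%04d.pth' % (path, modelnet, epoch)  # assumed naming scheme
    torch.save(state, savepath)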
Example #11
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    datapath = './data/ModelNet/'
    '''CREATE DIR'''
    experiment_dir = Path('./eval_experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(
        str(experiment_dir) + '/%sModelNet40-' % args.model_name +
        str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    os.system('cp %s %s' % (args.kb1checkpoint, checkpoints_dir))
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(
        str(log_dir) + 'eval_%s_cls.txt' % args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------EVAL---------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)
    '''DATA LOADING'''
    logger.info('Load dataset ...')
    train_data, train_label, test_data, test_label = load_data(
        datapath, classification=True)
    logger.info("The number of training data is: %d", train_data.shape[0])
    logger.info("The number of test data is: %d", test_data.shape[0])
    testDataset = ModelNetDataLoader(test_data, test_label)
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batchsize,
                                                 shuffle=False)
    '''MODEL LOADING'''
    num_class = 39
    kb1classifier = PointConvClsSsg(num_class).cuda()
    if args.kb1checkpoint is not None:
        print('Load k but 1 CheckPoint...')
        logger.info('Load k but 1 CheckPoint')
        kb1checkpoint = torch.load(args.kb1checkpoint)
        start_epoch = kb1checkpoint['epoch']
        kb1classifier.load_state_dict(kb1checkpoint['model_state_dict'])
    else:
        print('Please load k but 1 Checkpoint to eval...')
        sys.exit(0)
        start_epoch = 0  # unreachable after sys.exit()

    num_class1 = 2
    binaryclassifier = PointConvClsSsg(num_class1).cuda()
    if args.binarycheckpoint is not None:
        print('Load binary CheckPoint...')
        logger.info('Load binary CheckPoint')
        binarycheckpoint = torch.load(args.binarycheckpoint)
        start_epoch = binarycheckpoint['epoch']
        binaryclassifier.load_state_dict(binarycheckpoint['model_state_dict'])
    else:
        print('Please load binary Checkpoint to eval...')
        sys.exit(0)
        start_epoch2 = 0  # unreachable after sys.exit()

    blue = lambda x: '\033[94m' + x + '\033[0m'
    '''EVAL'''
    logger.info('Start evaluating...')
    print('Start evaluating...')

    total_correct = 0
    total_seen = 0
    preds = []
    for batch_id, data in tqdm(enumerate(testDataLoader, 0),
                               total=len(testDataLoader),
                               smoothing=0.9):
        pointcloud, target = data
        target = target[:, 0]
        #import ipdb; ipdb.set_trace()
        pred_view = torch.zeros(pointcloud.shape[0], num_class).cuda()
        binary_view = torch.zeros(pointcloud.shape[0], num_class1).cuda()

        for _ in range(args.num_view):
            pointcloud = generate_new_view(pointcloud)
            #import ipdb; ipdb.set_trace()
            #points = torch.from_numpy(pointcloud).permute(0, 2, 1)
            points = pointcloud.permute(0, 2, 1)
            points, target = points.cuda(), target.cuda()
            kb1classifier = kb1classifier.eval()
            binaryclassifier = binaryclassifier.eval()
            with torch.no_grad():
                pred = kb1classifier(points)
                pred_binary = binaryclassifier(points)
            pred_view += pred
            binary_view += pred_binary

        kb1_logprob = pred_view.data
        binary_logprob = binary_view.data
        ## since we assigned the composite class the largest label, we split the log-probability for the last label into two parts, one for binary 0 and one for binary 1.
        binary_pred_logprob = kb1_logprob[:, -1].reshape(
            1, len(kb1_logprob[:, -1])).transpose(0, 1).repeat(1, 2).view(
                -1, 2) + binary_logprob
        ## concatenate to get log-probability for all (40) classes
        pred_logprob = torch.from_numpy(
            np.c_[kb1_logprob[:, 0:-1].cpu().detach().numpy(),
                  binary_pred_logprob.cpu().detach().numpy()]).to('cuda')
        pred_choices = pred_logprob.max(1)[1]

        ## reset labels
        mapper_dict = {
            **{key: key + 1
               for key in range(12, 32)},
            **{key: key + 2
               for key in range(32, 38)},
            **{
                38: 33,
                39: 12
            }
        }

        def mp(entry):
            return mapper_dict[entry] if entry in mapper_dict else entry

        mp = np.vectorize(mp)

        pred_choice = torch.from_numpy(
            np.array(mp(pred_choices.cpu().detach().numpy()))).to('cuda')
        preds.append(pred_choice.cpu().detach().numpy())
        correct = pred_choice.eq(
            target.long().data).cpu().detach().numpy().sum()
        total_correct += correct.item()
        total_seen += float(points.size()[0])

    accuracy = total_correct / total_seen
    ## confusion matrix
    cm = confusion_matrix(test_label.ravel(), np.concatenate(preds).ravel())
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    t = pd.read_table('data/ModelNet/shape_names.txt', names=['label'])
    d = {key: val for key, val in zip(t.label, cm.diagonal())}
    print('Total Accuracy: %f' % accuracy)
    print('Accuracy per class:', d)

    logger.info('Total Accuracy: %f' % accuracy)
    logger.info('End of evaluation...')
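Example #12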
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    datapath = './data/ModelNet/'
    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(
        str(experiment_dir) + '/%sModelNet40-' % args.model_name +
        str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(
        str(log_dir) + 'train_%s_cls.txt' % args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------TRAINING---------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)
    '''DATA LOADING'''
    logger.info('Load dataset ...')
    train_data, train_label, test_data, test_label = load_data(
        datapath, classification=True)
    logger.info("The number of training data is: %d", train_data.shape[0])
    logger.info("The number of test data is: %d", test_data.shape[0])
    trainDataset = ModelNetDataLoader(train_data, train_label)
    testDataset = ModelNetDataLoader(test_data, test_label)
    trainDataLoader = torch.utils.data.DataLoader(trainDataset,
                                                  batch_size=args.batchsize,
                                                  shuffle=True)
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batchsize,
                                                 shuffle=False)
    '''MODEL LOADING'''
    num_class = 40
    classifier = PointConvClsSsg(num_class).cuda()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=30,
                                                gamma=0.5)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'
    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        logger.info('Epoch %d (%d/%s):', global_epoch + 1, epoch + 1,
                    args.epoch)

        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target = data
            target = target[:, 0]
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            classifier = classifier.train()
            pred = classifier(points)
            loss = F.nll_loss(pred, target.long())

            loss.backward()
            optimizer.step()
            global_step += 1

        train_acc = test(classifier.eval(),
                         trainDataLoader) if args.train_metric else None
        acc = test(classifier, testDataLoader)

        print('\r Loss: %f' % loss.data)
        logger.info('Loss: %.2f', loss.data)
        if args.train_metric:
            print('Train Accuracy: %f' % train_acc)
            logger.info('Train Accuracy: %f', (train_acc))
        print(
            '\r Test %s: %f   ***  %s: %f' %
            (blue('Accuracy'), acc, blue('Best Accuracy'), best_tst_accuracy))
        logger.info('Test Accuracy: %f  *** Best Test Accuracy: %f', acc,
                    best_tst_accuracy)

        if (acc >= best_tst_accuracy) and epoch > 5:
            best_tst_accuracy = acc
            logger.info('Save model...')
            save_checkpoint(global_epoch + 1,
                            train_acc if args.train_metric else 0.0, acc,
                            classifier, optimizer, str(checkpoints_dir),
                            args.model_name)
            print('Saving model....')
        global_epoch += 1
    print('Best Accuracy: %f' % best_tst_accuracy)

    logger.info('End of training...')
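Example #13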
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    exp_dir = Path('./log/')
    exp_dir.mkdir(exist_ok=True)
    exp_dir = exp_dir.joinpath('classification')
    exp_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        exp_dir = exp_dir.joinpath(timestr)
    else:
        exp_dir = exp_dir.joinpath(args.log_dir)
    exp_dir.mkdir(exist_ok=True)
    checkpoints_dir = exp_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = exp_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    '''DATA LOADING'''
    log_string('Load dataset ...')
    # dataset path
    data_path = '/home/wgk/dataset/Pointnet_Pointnet2_pytorch/modelnet40_normal_resampled/'
    # training set
    train_dataset = ModelNetDataLoader(root=data_path,
                                       args=args,
                                       split='train',
                                       process_data=args.process_data)
    # test set
    test_dataset = ModelNetDataLoader(root=data_path,
                                      args=args,
                                      split='test',
                                      process_data=args.process_data)
    # training data loader
    trainDataLoader = torch.utils.data.DataLoader(train_dataset,
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=1,
                                                  drop_last=True)
    testDataLoader = torch.utils.data.DataLoader(test_dataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=1)
    '''MODEL LOADING'''
    num_class = args.num_category
    # by default this imports pointnet2_cls_msg.py; on the importlib usage see https://www.bilibili.com/read/cv5891176/
    model = importlib.import_module(args.model)

    shutil.copy('./models/%s.py' % args.model, str(exp_dir))
    shutil.copy('models/pointnet2_utils.py', str(exp_dir))
    shutil.copy('./train_classification.py', str(exp_dir))
    # build the classifier (instantiate the get_model class)
    classifier = model.get_model(num_class, normal_channel=args.use_normals)
    # instantiate the loss
    criterion = model.get_loss()
    # .apply() runs inplace_relu on every submodule
    classifier.apply(inplace_relu)

    if not args.use_cpu:
        # move the model to the GPU
        classifier = classifier.cuda()
        criterion = criterion.cuda()

    try:
        checkpoint = torch.load(str(exp_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrain model')
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0

    # configure the optimizer
    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.7)
    global_epoch = 0
    global_step = 0
    best_instance_acc = 0.0
    best_class_acc = 0.0
    '''TRAINING'''
    logger.info('Start training...')
    # train for 200 epochs by default
    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))
        mean_correct = []
        classifier = classifier.train()

        scheduler.step()
        for batch_id, (points, target) in tqdm(enumerate(trainDataLoader, 0),
                                               total=len(trainDataLoader),
                                               smoothing=0.9):
            optimizer.zero_grad()

            # one batch of data, shaped (batch_size, num_points=1024, channel)
            points = points.data.numpy()
            #print(points.shape)
            points = provider.random_point_dropout(points)
            # randomly scale the point clouds
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :,
                                                                         0:3])
            # randomly shift the point clouds
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            # convert the ndarray back to a tensor
            points = torch.Tensor(points)
            # transpose to (batch_size, channel, num_points)
            points = points.transpose(2, 1)

            if not args.use_cpu:
                # move the data to the GPU first
                points, target = points.cuda(), target.cuda()
            # forward pass; pred.shape=[batchsize,40], trans_feat.shape=[batchsize,1024,1]
            pred, trans_feat = classifier(points)
            #print(pred.shape,trans_feat.shape)
            # the loss is supposed to add a regularization term, but here it actually does not
            loss = criterion(pred, target.long(), trans_feat)
            pred_choice = pred.data.max(1)[1]
            #print(pred_choice)

            # compare predictions with the ground truth on the CPU and count how many match
            correct = pred_choice.eq(target.long().data).cpu().sum()
            # fraction of correct predictions; points.size()[0] == batchsize
            mean_correct.append(correct.item() / float(points.size()[0]))
            # loss is a tensor here, so backward() applies
            loss.backward()
            optimizer.step()
            global_step += 1

        train_instance_acc = np.mean(mean_correct)
        log_string('Train Instance Accuracy: %f' % train_instance_acc)

        with torch.no_grad():
            instance_acc, class_acc = test(classifier.eval(),
                                           testDataLoader,
                                           num_class=num_class)

            if (instance_acc >= best_instance_acc):
                best_instance_acc = instance_acc
                best_epoch = epoch + 1

            if (class_acc >= best_class_acc):
                best_class_acc = class_acc
            log_string('Test Instance Accuracy: %f, Class Accuracy: %f' %
                       (instance_acc, class_acc))
            log_string('Best Instance Accuracy: %f, Class Accuracy: %f' %
                       (best_instance_acc, best_class_acc))

            if (instance_acc >= best_instance_acc):
                logger.info('Save model...')
                savepath = str(checkpoints_dir) + '/best_model.pth'
                log_string('Saving at %s' % savepath)
                state = {
                    'epoch': best_epoch,
                    'instance_acc': instance_acc,
                    'class_acc': class_acc,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, savepath)
            global_epoch += 1

    logger.info('End of training...')
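
Example #13 calls classifier.apply(inplace_relu) without showing the helper. A sketch of the usual Pointnet_Pointnet2_pytorch-style helper it presumably refers to:

def inplace_relu(m):
    # switch every ReLU module to in-place mode to save activation memory
    classname = m.__class__.__name__
    if classname.find('ReLU') != -1:
        m.inplace = True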
Example #14
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''SET THE SEED'''
    setup_seed(args.seed)
    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('classification')
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    # log_dir = experiment_dir.joinpath('logs/')
    log_dir = experiment_dir.joinpath('./')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    '''DATA TYPE'''
    if args.use_voxel:
        assert "voxel" in args.dataset
        assert "mink" in args.model
        assert args.voxel_size > 0
    '''AUX SUPERVISION TYPE'''
    if args.aux == "pred":
        assert args.pred_path is not None

    if args.pred_path is not None:
        assert args.aux == "pred"

    args.with_pred = None
    args.with_instance = False
    args.with_seg = False
    if args.aux is not None:
        args.with_aux = True
        assert "scannet" in args.dataset
        if args.aux == "pred":
            args.with_pred = args.pred_path
        elif args.aux == "instance":
            args.with_instance = True
        elif args.aux == "seg":
            args.with_seg = True
        else:
            raise NotImplementedError
    else:
        args.with_aux = False
    '''DATA LOADING'''
    if "modelnet" in args.dataset:
        '''
        ModelNet40 loading; supports both the point and the ME-point versions.
        '''
        if not "voxel" in args.dataset:
            log_string('Load dataset {}'.format(args.dataset))
            num_class = 40
            DATA_PATH = './data/modelnet40_normal_resampled/'
            TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                               npoint=args.num_point,
                                               split='train',
                                               normal_channel=args.normal,
                                               apply_aug=True)
            TEST_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                              npoint=args.num_point,
                                              split='test',
                                              normal_channel=args.normal)

            trainDataLoader = torch.utils.data.DataLoader(
                TRAIN_DATASET,
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=args.num_worker)
            testDataLoader = torch.utils.data.DataLoader(
                TEST_DATASET,
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.num_worker)
        else:
            assert args.dataset == 'modelnet_voxel'
            '''
            Uses the ModelNet example dataloader from the ME engine; however,
            it still appears to be point-based: it returns point features and
            feeds them into a TensorField, so it is not truly a voxel ModelNet.
            '''
            log_string('Load dataset {}'.format(args.dataset))
            num_class = 40
            DATA_PATH = './data/modelnet40_ply_hdf5_2048'

            trainset = ModelNet40H5(
                phase="train",
                transform=CoordinateTransformation(trans=0.2),
                data_root=DATA_PATH,
            )
            testset = ModelNet40H5(
                phase="test",
                transform=None,  # no transform for test
                data_root=DATA_PATH,
            )

            trainDataLoader = DataLoader(
                trainset,
                num_workers=args.num_worker,
                shuffle=True,
                batch_size=args.batch_size,
                collate_fn=minkowski_collate_fn,
                pin_memory=True,
            )

            testDataLoader = DataLoader(
                testset,
                num_workers=args.num_worker,
                shuffle=False,
                batch_size=args.batch_size,
                collate_fn=minkowski_collate_fn,
                pin_memory=True,
            )

    elif args.dataset == "scanobjnn":
        log_string('Load dataset {}'.format(args.dataset))
        num_class = 15
        DATA_PATH = './data/scanobjnn/main_split_nobg'
        TRAIN_DATASET = ScanObjectNNDataLoader(root=DATA_PATH,
                                               npoint=args.num_point,
                                               split='train',
                                               normal_channel=args.normal)
        TEST_DATASET = ScanObjectNNDataLoader(root=DATA_PATH,
                                              npoint=args.num_point,
                                              split='test',
                                              normal_channel=args.normal)
        trainDataLoader = torch.utils.data.DataLoader(
            TRAIN_DATASET,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.num_worker)
        testDataLoader = torch.utils.data.DataLoader(
            TEST_DATASET,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=args.num_worker)

    elif "scannet" in args.dataset:
        num_class = 21
        if not "voxel" in args.dataset:
            if args.mode == "train":
                trainset = ScannetDataset(
                    root='./data/scannet_v2/scannet_pickles',
                    npoints=args.num_point,
                    split='train',
                    with_seg=args.with_seg,
                    with_instance=args.with_instance,
                    with_pred=args.pred_path)
                trainDataLoader = torch.utils.data.DataLoader(
                    trainset,
                    batch_size=args.batch_size,
                    shuffle=True,
                    num_workers=args.num_worker,
                    pin_memory=True)
            if args.mode == 'export':
                final_trainset = ScannetDatasetWholeScene_evaluation(
                    root='./data/scannet_v2/scannet_pickles',
                    scene_list_dir='./data/scannet_v2/metadata',
                    split='train',
                    block_points=args.num_point,
                    with_rgb=True,
                    with_norm=True,
                    with_seg=args.with_seg,
                    with_instance=args.with_instance,
                    with_pred=args.pred_path,
                    delta=2.0)
                final_train_loader = torch.utils.data.DataLoader(
                    final_trainset,
                    batch_size=args.batch_size,
                    shuffle=False,
                    num_workers=0,
                    pin_memory=True)


            final_testset = ScannetDatasetWholeScene_evaluation(
                root='./data/scannet_v2/scannet_pickles',
                scene_list_dir='./data/scannet_v2/metadata',
                split='eval',
                block_points=args.num_point,
                with_rgb=True,
                with_norm=True,
                with_seg=args.with_seg,
                with_instance=args.with_instance,
                with_pred=args.pred_path,
                delta=1.0)  # DEBUG: change to 1.0 to acquire proper
            final_test_loader = torch.utils.data.DataLoader(
                final_testset,
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=0,
                pin_memory=True)

            # the whole-scene trainset above is generated for the export mode
        else:
            trainDataLoader = initialize_data_loader(
                DatasetClass=ScannetSparseVoxelizationDataset,
                data_root='data/scannet_v2/scannet_pickles',
                phase="train",
                threads=4,  # num-workers
                shuffle=True,
                repeat=False,
                augment_data=True,
                batch_size=16,
                limit_numpoints=1200000,
            )

            # TODO: add the corresponding test loader

    else:
        raise NotImplementedError
    '''MODEL LOADING'''
    # copy files
    if args.mode == "train":
        if not os.path.exists(os.path.join(str(experiment_dir), 'model')):
            os.mkdir(os.path.join(str(experiment_dir), 'model'))
        for filename in os.listdir('./model'):
            if ".py" in filename:
                shutil.copy(os.path.join("./model", filename),
                            os.path.join(str(experiment_dir), 'model'))
        shutil.copy("./train_cls.py", str(experiment_dir))

    if "mink" not in args.model:
        # no use mink-net
        if "seg" in args.model:
            N = args.num_point
        else:
            N = args.num_point
        MODEL = importlib.import_module(args.model)
        classifier = MODEL.get_model(num_class,
                                     normal_channel=args.normal,
                                     N=N).cuda()
        criterion = MODEL.get_loss().cuda()
        classifier.loss = criterion
    else:
        '''
        Voxel-based networks built on the MinkowskiEngine.
        '''
        # TODO: should align with the above and use importlib.import_module; maybe fix later
        # classifier = ResNet14(in_channels=3, out_channels=num_class, D=3)  # D is the conv spatial dimension; 3 means 3-d shapes
        if "pointnet" in args.model:
            classifier = MinkowskiPointNet(in_channel=3,
                                           out_channel=41,
                                           embedding_channel=1024,
                                           dimension=3).cuda()
        elif "trans" in args.model:
            classifier = MinkowskiTransformer(in_channel=3,
                                              out_channel=41,
                                              num_class=num_class,
                                              embedding_channel=1024,
                                              dimension=3).cuda()
        criterion = nn.CrossEntropyLoss().cuda()
        classifier.loss = criterion
    '''Loading existing ckpt'''
    try:
        if args.pretrain:
            # FIXME: currently only loads best_model.pth; should support an arbitrary path, maybe later
            checkpoint = torch.load(
                str(experiment_dir) + '/checkpoints/best_model.pth')
            classifier.load_state_dict(checkpoint['model_state_dict'])
            log_string('Use pretrain model')
            # fine-tune from the pretrained weights but restart the epoch count
            start_epoch = 0
        else:
            start_epoch = 0
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    elif args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(), \
                                    lr=args.learning_rate, momentum=0.9,\
                                    weight_decay=args.decay_rate)
    else:
        raise NotImplementedError

    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7)
    # Use MultiStepLR as in the paper, decaying by 10x at epochs [120, 160]:
    # scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[120, 160], gamma=0.1)

    # FIXME: for ScanNet, using cosine annealing for now
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                           args.epoch,
                                                           eta_min=0.0)
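    # CosineAnnealingLR sweeps the learning rate from its initial value down to
    # eta_min along a half cosine over args.epoch epochs.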

    global_epoch = 0
    global_step = 0
    if "scannet" in args.dataset:
        best_mIoU = 0.0
    else:
        best_instance_acc = 0.0
        best_class_acc = 0.0
    mean_correct = []

    # run only a single epoch in eval/export mode
    if args.mode == "eval" or args.mode == "export":
        assert args.pretrain
        start_epoch = 0
        args.epoch = 1

    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))

        scheduler.step()
        log_string('Cur LR: {:.5f}'.format(optimizer.param_groups[0]['lr']))
        # in eval-only mode, skip the training part below

        if args.mode == "train":
            '''The main training-loop'''
            for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                       total=len(trainDataLoader),
                                       smoothing=0.9):
                if not args.use_voxel:
                    if "modelnet" in args.dataset:
                        # point input: standard (points, target) unpacking
                        points, target = data
                        points = points.data.numpy()
                        points = provider.random_point_dropout(points)
                        # points[:,:, 0:3] = provider.random_scale_point_cloud(points[:,:, 0:3])
                        # points[:,:, 0:3] = provider.shift_point_cloud(points[:,:, 0:3])
                        points = torch.Tensor(points)
                        target = target[:, 0]

                        points = points.transpose(2, 1)
                        points, target = points.cuda(), target.cuda()
                    elif "scanobjnn" in args.dataset:
                        points, target, mask = data
                        points = points.data.numpy()
                        # TODO: move the augmentation into the dataset instead of doing it here
                        # points = provider.random_point_dropout(points)
                        points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
                        points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
                        points = torch.Tensor(points)
                        points = points.transpose(2, 1)
                        points, target = points.cuda(), target.cuda()
                    elif "scannet" in args.dataset:
                        # TODO: fill in the ScanNet loading here
                        # TODO: maybe implement gradient accumulation, or simply skip it
                        if args.aux is not None:
                            points, target, sample_weight, aux = data
                            points = points.float().transpose(1, 2).cuda()
                            target = target.cuda()
                            sample_weight = sample_weight.cuda()
                            aux = aux.cuda()
                        else:
                            points, target, sample_weight = data
                            points = points.float().transpose(1, 2).cuda()
                            target = target.cuda()
                            sample_weight = sample_weight.cuda()

                else:
                    if "modelnet" in args.dataset:
                        # use voxel
                        # points = create_input_batch(data, True, 'cuda', quantization_size=args.voxel_size)
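                        # rescale the coordinates by the voxel size; column 0 of
                        # the batched coordinates is the batch index, so skip it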
                        data['coordinates'][:, 1:] = data['coordinates'][:, 1:] / args.voxel_size
                        points = ME.TensorField(
                            coordinates=(data['coordinates'].cuda()),
                            features=data['features'].cuda())
                        target = data['labels'].cuda()
                    elif "scannet" in args.dataset:
                        # assign to 'points' so the forward pass below sees the sparse tensor
                        points = ME.SparseTensor(features=data[1],
                                                 coordinates=data[0]).cuda()
                        target = data[2].cuda()

                optimizer.zero_grad()
                '''save the intermediate attention map'''
                # WARNING: DISABLED FOR NOW!!!
                SAVE_INTERVAL = 50
                NUM_PER_EPOCH = 1

                if (epoch + 1) % SAVE_INTERVAL == 0:
                    if batch_id < NUM_PER_EPOCH:
                        classifier.save_flag = True
                    elif batch_id == NUM_PER_EPOCH:
                        intermediate_dict = classifier.save_intermediate()
                        intermediate_path = os.path.join(
                            experiment_dir, "attn")
                        if not os.path.exists(intermediate_path):
                            os.mkdir(intermediate_path)
                        torch.save(
                            intermediate_dict,
                            os.path.join(intermediate_path,
                                         "epoch_{}".format(epoch)))
                        log_string('Saved Intermediate at {}'.format(epoch))
                    else:
                        classifier.save_flag = False
                else:
                    classifier.save_flag = False

                classifier = classifier.train()
                # with instance supervision, use the instance labels to guide the point-transformer training
                if args.aux is not None:
                    pred = classifier(points, aux)
                else:
                    pred = classifier(points)
                # if use_voxel, get the feature from the SparseTensor
                if args.use_voxel:
                    pred = pred.F
                if 'scannet' in args.dataset:
                    loss = criterion(pred, target.long(), sample_weight)
                else:
                    loss = criterion(pred, target.long())
                loss.backward()
                optimizer.step()
                global_step += 1

                if "scannet" in args.dataset:
                    pred_choice = torch.argmax(pred,
                                               dim=2).cpu().numpy()  # B,N
                    target = target.cpu().numpy()
                    correct = np.sum(pred_choice == target)
                    mean_correct.append(correct / pred_choice.size)
                else:
                    pred_choice = pred.data.max(1)[1]
                    correct = pred_choice.eq(target.long().data).cpu().sum()
                    mean_correct.append(correct.item() /
                                        float(points.size()[0]))

            train_instance_acc = np.mean(mean_correct)
            log_string('Train Instance Accuracy: %f' % train_instance_acc)
            '''TEST'''
            if not "scannet" in args.dataset:
                # WARNING: Temporarily disable eval for scannet for now, just test at last
                if (epoch + 1) % 20 == 0:
                    with torch.no_grad():
                        returned_metric = test(classifier.eval(),
                                               testDataLoader,
                                               num_class=num_class,
                                               log_string=log_string)

                    if 'scannet' in args.dataset:  # NOTE: unreachable here given the enclosing "not scannet" check
                        mIoU = returned_metric
                        if (mIoU >= best_mIoU):
                            best_mIoU = mIoU
                            best_epoch = epoch + 1

                        if (mIoU >= best_mIoU):
                            logger.info('Save model...')
                            savepath = str(checkpoints_dir) + '/best_model.pth'
                            log_string('Saving at %s' % savepath)
                            state = {
                                'epoch': best_epoch,
                                'mIoU': mIoU,
                                'model_state_dict': classifier.state_dict(),
                                'optimizer_state_dict': optimizer.state_dict(),
                            }
                            torch.save(state, savepath)
                    else:
                        instance_acc, class_acc = returned_metric

                        if (instance_acc >= best_instance_acc):
                            best_instance_acc = instance_acc
                            best_epoch = epoch + 1

                        if (class_acc >= best_class_acc):
                            best_class_acc = class_acc

                        log_string(
                            'Test Instance Accuracy: %f, Class Accuracy: %f' %
                            (instance_acc, class_acc))
                        log_string(
                            'Best Instance Accuracy: %f, Class Accuracy: %f' %
                            (best_instance_acc, best_class_acc))

                        if (instance_acc >= best_instance_acc):
                            logger.info('Save model...')
                            savepath = str(checkpoints_dir) + '/best_model.pth'
                            log_string('Saving at %s' % savepath)
                            state = {
                                'epoch': best_epoch,
                                'instance_acc': instance_acc,
                                'class_acc': class_acc,
                                'model_state_dict': classifier.state_dict(),
                                'optimizer_state_dict': optimizer.state_dict(),
                            }
                            torch.save(state, savepath)

        global_epoch += 1

    # final save of the model
    logger.info('Save model...')
    savepath = str(checkpoints_dir) + '/final_model.pth'
    log_string('Saving at %s' % savepath)
    state = {
        'epoch': global_epoch,
        # 'mIoU': mIoU,
        'model_state_dict': classifier.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    torch.save(state, savepath)

    # for the ScanNet dataset, run the test at the very end
    if args.dataset == 'scannet':
        if not os.path.exists(os.path.join(str(experiment_dir), 'pred')):
            os.mkdir(os.path.join(str(experiment_dir), 'pred'))
        if args.mode == "export":
            test_scannet(args,
                         classifier.eval(),
                         final_test_loader,
                         log_string,
                         with_aux=args.with_aux,
                         save_dir=os.path.join(str(experiment_dir), 'pred'),
                         split='eval')
            test_scannet(args,
                         classifier.eval(),
                         final_train_loader,
                         log_string,
                         with_aux=args.with_aux,
                         save_dir=os.path.join(str(experiment_dir), 'pred'),
                         split='train')
        else:
            test_scannet(args,
                         classifier.eval(),
                         final_test_loader,
                         log_string,
                         with_aux=args.with_aux,
                         split='eval')

    # save once more as best_model.pth (note: this overwrites any earlier best checkpoint with the final weights)
    logger.info('Save model...')
    savepath = str(checkpoints_dir) + '/best_model.pth'
    log_string('Saving at %s' % savepath)
    state = {
        'epoch': global_epoch,
        # 'mIoU': mIoU,
        'model_state_dict': classifier.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    torch.save(state, savepath)

    logger.info('End of training...')
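
The voxel branch of the example above passes minkowski_collate_fn to its DataLoader without showing the helper. As a reference, here is a minimal sketch of such a collate function built on MinkowskiEngine's ME.utils.sparse_collate; the per-item dict keys are an assumption, and the project's real helper may differ:

import MinkowskiEngine as ME

def minkowski_collate_fn(batch):
    # each dataset item is assumed to be a dict with per-point "coordinates",
    # per-point "features", and a scalar "label"
    coordinates, features, labels = ME.utils.sparse_collate(
        [item["coordinates"] for item in batch],
        [item["features"] for item in batch],
        [item["label"] for item in batch],
    )
    # sparse_collate prepends the batch index as column 0 of the coordinates
    return {"coordinates": coordinates, "features": features, "labels": labels}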
Example #15
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''CUDA ENV SETTINGS'''
    if args.gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    if args.cudnn_off:
        torch.backends.cudnn.enabled = False  # needed on gypsum!

    # --------------------------------------------------------------------------
    '''CREATE DIR'''
    # --------------------------------------------------------------------------
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('pretrain_part_seg')
    experiment_dir.mkdir(exist_ok=True)
    dir_name = args.model + '_ShapeNet' + \
                '_k-%d_seed-%d_lr-%.6f_lr-step-%d_lr-decay-%.2f_wt-decay-%.6f_l2norm-%d' \
                % ( args.k_shot, args.seed, args.learning_rate,
                    args.step_size, args.lr_decay, args.decay_rate,
                    int(args.l2_norm) )
    if args.normal:
        dir_name = dir_name + '_normals'
    if args.selfsup:
        dir_name = dir_name + 'selfsup-%s_selfsup_margin-%.2f_lambda-%.2f' \
                    % (args.ss_dataset, args.margin, args.lmbda)
    if args.rotation_z:
        dir_name = dir_name + '_rotation-z'

    if args.rotation_z_45:
        dir_name = dir_name + '_rotation-z-45'

    if args.random_anisotropic_scale:
        dir_name = dir_name + '_aniso-scale'

    experiment_dir = experiment_dir.joinpath(dir_name)

    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    # --------------------------------------------------------------------------
    '''LOG'''
    # --------------------------------------------------------------------------
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    configure(log_dir)  # tensorboard logdir
    log_string('OUTPUT DIR: %s' % experiment_dir)

    # --------------------------------------------------------------------------
    '''DATA LOADERS'''
    # --------------------------------------------------------------------------
    root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'

    TRAIN_DATASET = PartNormalDataset(root=root,
                                      npoints=args.npoint,
                                      split='trainval',
                                      normal_channel=args.normal,
                                      k_shot=args.k_shot)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=4)
    trainDataIterator = iter(trainDataLoader)

    TEST_DATASET = PartNormalDataset(root=root,
                                     npoints=args.npoint,
                                     split='test',
                                     normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=4)
    log_string("The number of training data is: %d" % len(TRAIN_DATASET))
    log_string("The number of test data is: %d" % len(TEST_DATASET))
    num_classes = 16
    num_part = 50

    if args.selfsup:
        log_string('Use self-supervision - alternate batches')
        if not args.retain_overlaps:
            log_string(
                '\tRemove overlaps between labeled and self-sup datasets')
            labeled_fns = list(itertools.chain(*TEST_DATASET.meta.values())) \
                            + list(itertools.chain(*TRAIN_DATASET.meta.values()))
        else:
            log_string('\tUse all files in self-sup dataset')
            labeled_fns = []

        if args.ss_dataset == 'dummy':
            log_string(
                'Using "dummy" self-supervision dataset (rest of labeled ShapeNetSeg)'
            )
            SELFSUP_DATASET = SelfSupPartNormalDataset(
                root=root,
                npoints=args.npoint,
                split='trainval',
                normal_channel=args.normal,
                k_shot=args.n_cls_selfsup,
                labeled_fns=labeled_fns)
        elif args.ss_dataset == 'acd':
            log_string('Using "ACD" self-supervision dataset (ShapeNet Seg)')
            ACD_ROOT = args.ss_path
            SELFSUP_DATASET = ACDSelfSupDataset(root=ACD_ROOT,
                                                npoints=args.npoint,
                                                normal_channel=args.normal,
                                                k_shot=args.n_cls_selfsup,
                                                exclude_fns=labeled_fns,
                                                use_val=True)
            log_string('\t %d samples' % len(SELFSUP_DATASET))
            selfsup_train_fns = list(
                itertools.chain(*SELFSUP_DATASET.meta.values()))
            log_string('Val dataset for self-sup')
            SELFSUP_VAL = ACDSelfSupDataset(root=ACD_ROOT,
                                            npoints=args.npoint,
                                            normal_channel=args.normal,
                                            class_choice='Airplane',
                                            k_shot=args.n_cls_selfsup,
                                            use_val=False,
                                            exclude_fns=selfsup_train_fns +
                                            labeled_fns)
            log_string('\t %d samples' % len(SELFSUP_VAL))

        selfsupDataLoader = torch.utils.data.DataLoader(
            SELFSUP_DATASET,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=4)
        selfsupIterator = iter(selfsupDataLoader)
        selfsupValLoader = torch.utils.data.DataLoader(
            SELFSUP_VAL,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=4)

    log_string('Load ModelNet dataset for validation')
    DATA_PATH = 'data/modelnet40_normal_resampled/'
    MN_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                    npoint=args.npoint,
                                    split='train',
                                    normal_channel=args.normal)
    modelnetLoader = torch.utils.data.DataLoader(MN_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 num_workers=4)

    # --------------------------------------------------------------------------
    '''MODEL LOADING'''
    # --------------------------------------------------------------------------
    MODEL = importlib.import_module(args.model)
    shutil.copy('models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('models/pointnet_util.py', str(experiment_dir))

    if args.model == 'dgcnn':
        classifier = MODEL.get_model(num_part,
                                     normal_channel=args.normal,
                                     k=args.dgcnn_k).cuda()
    else:
        classifier = MODEL.get_model(num_part,
                                     normal_channel=args.normal).cuda()

    criterion = MODEL.get_loss().cuda()

    if args.selfsup:
        selfsupCriterion = MODEL.get_selfsup_loss(margin=args.margin).cuda()
        log_string("The number of self-sup data is: %d" % len(SELFSUP_DATASET))

    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)
        elif classname.find('Linear') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)
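    # Xavier initialization for Conv2d/Linear layers; applied below only when no
    # checkpoint is found and training starts from scratch.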

    try:
        checkpoint = torch.load(
            str(experiment_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrain model')
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0
        classifier = classifier.apply(weights_init)

    # --------------------------------------------------------------------------
    '''OPTIMIZER SETTINGS'''
    # --------------------------------------------------------------------------
    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=args.learning_rate,
                                    momentum=0.9)

    def bn_momentum_adjust(m, momentum):
        if isinstance(m, torch.nn.BatchNorm2d) or isinstance(
                m, torch.nn.BatchNorm1d):
            m.momentum = momentum

    # LEARNING_RATE_CLIP = 1e-5
    LEARNING_RATE_CLIP = args.lr_clip
    MOMENTUM_ORIGINAL = 0.1
    MOMENTUM_DECAY = 0.5
    MOMENTUM_DECAY_STEP = args.step_size
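    # BN momentum starts at MOMENTUM_ORIGINAL (0.1) and is halved every
    # MOMENTUM_DECAY_STEP epochs, floored at 0.01 in the loop below.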

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        classifier = nn.DataParallel(classifier)

    # --------------------------------------------------------------------------
    '''TRAINING LOOP'''
    # --------------------------------------------------------------------------
    best_val_loss = 99999
    global_epoch = 0

    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))
        '''Adjust learning rate and BN momentum'''
        lr = max(
            args.learning_rate * (args.lr_decay**(epoch // args.step_size)),
            LEARNING_RATE_CLIP)
        log_string('Learning rate:%f' % lr)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        mean_loss = []
        momentum = MOMENTUM_ORIGINAL * (MOMENTUM_DECAY
                                        **(epoch // MOMENTUM_DECAY_STEP))
        if momentum < 0.01:
            momentum = 0.01
        print('BN momentum updated to: %f' % momentum)
        classifier = classifier.apply(
            lambda x: bn_momentum_adjust(x, momentum))
        '''learning one epoch'''
        num_iters = len(
            selfsupDataLoader)  # an epoch is defined by the length of the self-sup dataset

        for i in tqdm(list(range(num_iters)), total=num_iters, smoothing=0.9):
            '''applying the self-supervised contrastive (pairwise) loss'''
            try:
                data_ss = next(selfsupIterator)
            except StopIteration:
                # reached end of this dataloader
                selfsupIterator = iter(selfsupDataLoader)
                data_ss = next(selfsupIterator)

            # DEBUG
            if DEBUG and i > 10:
                break

            points, label, target = data_ss  # (points: bs x 3 x n_pts, label: bs x 1, target: bs x n_pts)
            points = points.data.numpy()
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])

            if args.random_anisotropic_scale:
                points[:, :, 0:3] = provider.random_anisotropic_scale_point_cloud(
                    points[:, :, 0:3], scale_low=0.8, scale_high=1.25)

            # pts = torch.Tensor(points)
            # pts = pts.transpose(2,1)
            # np.save(osp.join(experiment_dir, 'pts.npy'), pts.cpu().numpy())

            if args.rotation_z:
                points[:, :, 0:3] = provider.rotate_point_cloud_y(points[:, :, 0:3])

            if args.rotation_z_45:
                points[:, :, 0:3] = provider.rotate_point_cloud_y_pi4(points[:, :, 0:3])

            points = torch.Tensor(points)
            points = points.float().cuda()
            label = label.long().cuda()
            target = target.long().cuda()
            points = points.transpose(2, 1)
            # np.save(osp.join(experiment_dir, 'pts_z-rot.npy'), points.cpu().numpy())
            # np.save(osp.join(experiment_dir, 'target.npy'), target.cpu().numpy())

            # for self-sup, the category label is always unknown, so use zeros:
            category_label = torch.zeros([label.shape[0], 1,
                                          num_classes]).cuda()

            optimizer.zero_grad()
            classifier = classifier.train()

            _, _, feat = classifier(points,
                                    category_label)  # feat: [bs x ndim x npts]

            ss_loss = selfsupCriterion(feat, target) * args.lmbda
            ss_loss.backward()
            optimizer.step()
            mean_loss.append(ss_loss.item())
            log_value('selfsup_loss_iter', ss_loss.data,
                      epoch * num_iters + i + 1)

        train_loss_epoch = np.mean(mean_loss)
        log_string('Self-sup loss is: %.5f' % train_loss_epoch)
        log_value('selfsup_loss_epoch', train_loss_epoch, epoch)

        # # # DEBUG:
        # with torch.no_grad():
        #     sa3_wt = classifier.sa3.mlp_convs[2].weight.mean()
        #     log_string('SA3 avg wt is: %.5f' % sa3_wt.item())
        #     log_value('sa3_conv2_wt', sa3_wt.item(), epoch)
        '''validation after one epoch'''
        log_string('Validation: ACD on ShapeNet')
        with torch.no_grad():
            total_val_loss = 0
            for batch_id, (points, label,
                           target) in tqdm(enumerate(selfsupValLoader),
                                           total=len(selfsupValLoader),
                                           smoothing=0.9):
                if DEBUG and batch_id > 10:
                    break
                cur_batch_size, NUM_POINT, _ = points.size()
                points = points.float().cuda()
                label = label.long().cuda()
                target = target.long().cuda()
                points = points.transpose(2, 1)
                category_label = torch.zeros([label.shape[0], 1,
                                              num_classes]).cuda()
                classifier = classifier.eval()
                _, _, feat = classifier(points, category_label)
                val_loss = selfsupCriterion(feat, target)
                total_val_loss += val_loss.data.cpu().item()
            avg_val_loss = total_val_loss / len(selfsupValLoader)
        log_value('selfsup_loss_val', avg_val_loss, epoch)
        '''(optional) validation on ModelNet40'''
        if args.modelnet_val:
            log_string('Validation: SVM on ModelNet40')
            with torch.no_grad():
                log_string('Extract features on ModelNet40')
                if args.model == 'pointnet_part_seg':
                    feat_train, label_train = extract_feats_pointnet(
                        classifier, modelnetLoader, subset=0.5)
                elif args.model == 'pointnet2_part_seg_msg':
                    feat_train, label_train = extract_feats(classifier,
                                                            modelnetLoader,
                                                            subset=0.5)
                else:
                    raise ValueError
                log_string('Training data: %d samples, %d features' %
                           feat_train.shape)
                start_time = time.time()
                log_string('Training SVM on ModelNet40')
                svm, best_C, best_score = cross_val_svm(feat_train,
                                                        label_train,
                                                        c_min=100,
                                                        c_max=501,
                                                        c_step=20,
                                                        verbose=False)
                elapsed_time = time.time() - start_time
            log_string('ModelNet val Accuracy: %f (elapsed: %f seconds)' %
                       (best_score, elapsed_time))
            log_value('modelnet_val', best_score, epoch)

        # save a checkpoint every 5 epochs
        if epoch % 5 == 0:
            savepath = str(checkpoints_dir) + ('/model_%03d.pth' % epoch)
            log_string('Saving model at %s' % savepath)
            state = {
                'epoch': epoch,
                'selfsup_loss': ss_loss.data,
                'val_loss': avg_val_loss,
                'model_state_dict': classifier.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, savepath)
            log_string('Saved model.')

        # save best model
        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            savepath = str(checkpoints_dir) + '/best_model.pth'
            log_string('Saving best model at %s' % savepath)
            state = {
                'epoch': epoch,
                'selfsup_loss': ss_loss.data,
                'val_loss': avg_val_loss,
                'model_state_dict': classifier.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, savepath)
            log_string('Saved model.')

        log_value('train_lr', lr, epoch)
        log_value('train_bn_momentum', momentum, epoch)

        log_string('Epoch %d Self-sup train loss: %f  Val loss: %f ' %
                   (epoch + 1, train_loss_epoch, avg_val_loss))

        global_epoch += 1
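
The ModelNet validation above fits a linear SVM on frozen features via cross_val_svm, which is not shown. A minimal scikit-learn sketch with the same call signature follows; the project's actual helper may differ in solver and cross-validation details:

import numpy as np
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score

def cross_val_svm(feat, label, c_min=100, c_max=501, c_step=20, verbose=False):
    # grid-search the regularization constant C with 3-fold cross-validation
    best_svm, best_C, best_score = None, None, -np.inf
    for C in range(c_min, c_max, c_step):
        svm = LinearSVC(C=C)
        score = cross_val_score(svm, feat, label, cv=3).mean()
        if verbose:
            print('C=%d acc=%.4f' % (C, score))
        if score > best_score:
            best_svm, best_C, best_score = svm, C, score
    best_svm.fit(feat, label)  # refit the winner on all features
    return best_svm, best_C, best_score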
Example #16
def train(args):
    experiment_dir = mkdir('./experiment/')
    checkpoints_dir = mkdir('./experiment/clf/%s/' % (args.model_name))
    train_data, train_label, test_data, test_label = load_data(
        'experiment/data/modelnet40_ply_hdf5_2048/')

    trainDataset = ModelNetDataLoader(train_data,
                                      train_label,
                                      data_augmentation=args.augment)
    trainDataLoader = DataLoader(trainDataset,
                                 batch_size=args.batch_size,
                                 shuffle=True)

    testDataset = ModelNetDataLoader(test_data, test_label)
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False)

    log.info('Building Model', args.model_name)
    if args.model_name == 'pointnet':
        num_class = 40
        model = PointNetCls(num_class, args.feature_transform).cuda()
    else:
        model = PointNet2ClsMsg().cuda()

    torch.backends.cudnn.benchmark = True
    model = torch.nn.DataParallel(model).cuda()
    log.debug('Using gpu:', args.gpu)

    if args.pretrain is not None:
        log.info('Use pretrain model...')
        state_dict = torch.load(args.pretrain)
        model.load_state_dict(state_dict)
        init_epoch = int(args.pretrain[:-4].split('-')[-1])
        log.info('start epoch from', init_epoch)
    else:
        log.info('Training from scratch')
        init_epoch = 0

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.5)
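    # StepLR halves the learning rate every 20 epochs (gamma=0.5); the value is
    # floored at LEARNING_RATE_CLIP inside the epoch loop.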
    LEARNING_RATE_CLIP = 1e-5

    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0

    log.info('Start training...')
    for epoch in range(init_epoch, args.epoch):
        scheduler.step()
        lr = max(optimizer.param_groups[0]['lr'], LEARNING_RATE_CLIP)

        log.debug(job='clf',
                  model=args.model_name,
                  gpu=args.gpu,
                  epoch='%d/%s' % (epoch, args.epoch),
                  lr=lr)

        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target = data
            target = target[:, 0]
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            model = model.train()
            pred, trans_feat = model(points)
            loss = F.nll_loss(pred, target.long())
            if args.feature_transform and args.model_name == 'pointnet':
                loss += feature_transform_reguliarzer(trans_feat) * 0.001
            loss.backward()
            optimizer.step()
            global_step += 1

        log.debug('clear cuda cache')
        torch.cuda.empty_cache()

        acc = test_clf(model, testDataLoader)
        log.info(loss='%.5f' % (loss.data))
        log.info(Test_Accuracy='%.5f' % acc)

        if acc >= best_tst_accuracy:
            best_tst_accuracy = acc
            fn_pth = 'clf-%s-%.5f-%04d.pth' % (args.model_name, acc, epoch)
            log.debug('Saving model....', fn_pth)
            torch.save(model.state_dict(),
                       os.path.join(checkpoints_dir, fn_pth))
        global_epoch += 1

    log.info(Best_Accuracy=best_tst_accuracy)
    log.info('End of training...')
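
The training loop above regularizes PointNet with feature_transform_reguliarzer(trans_feat) * 0.001, a helper that is not shown. A sketch consistent with the widely used pointnet.pytorch implementation (from which the misspelled name originates):

import torch

def feature_transform_reguliarzer(trans):
    # pushes each predicted feature-transform matrix A towards orthogonality:
    # loss = mean_b || I - A_b A_b^T ||_F
    d = trans.size(1)
    I = torch.eye(d, device=trans.device).unsqueeze(0)
    loss = torch.mean(
        torch.norm(torch.bmm(trans, trans.transpose(2, 1)) - I, dim=(1, 2)))
    return loss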
Example #17
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # datapath = './data/ModelNet/'
    datapath = './data/modelnet40_ply_hdf5_2048/'
    if args.rotation is not None:
        ROTATION = (int(args.rotation[0:2]), int(args.rotation[3:5]))
    else:
        ROTATION = None
    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = Path('./experiment/checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = Path('./experiment/logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("PointNet2")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(
        './experiment/logs/test_%s_' % args.model_name +
        str(datetime.datetime.now().strftime('%Y-%m-%d %H-%M')) + '.txt')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------Test---------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)
    '''DATA LOADING'''
    logger.info('Load dataset ...')
    train_data, train_label, test_data, test_label = load_data(
        datapath, classification=True)
    logger.info("The number of training data is: %d", train_data.shape[0])
    logger.info("The number of test data is: %d", test_data.shape[0])
    trainDataset = ModelNetDataLoader(train_data,
                                      train_label,
                                      rotation=ROTATION)
    if ROTATION is not None:
        print('The range of training rotation is', ROTATION)
    testDataset = ModelNetDataLoader(test_data, test_label, rotation=ROTATION)

    trainDataLoader = torch.utils.data.DataLoader(trainDataset,
                                                  batch_size=args.batchsize,
                                                  shuffle=True)
    testDataLoader = torch.utils.data.DataLoader(testDataset,
                                                 batch_size=args.batchsize,
                                                 shuffle=False)  # do not shuffle
    '''MODEL LOADING'''
    num_class = 40
    ###################### PointNetCls ######################
    if args.model_name == 'pointnet':
        classifier = PointNetCls(num_class, args.feature_transform).cuda()
    else:
        classifier = PointNet2ClsMsg().cuda()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('Please provide the pretrained model ***.pth')
        return

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.5)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'
    '''TESTING'''
    logger.info('Start testing...')

    scheduler.step()

    acc = test(classifier.eval(), testDataLoader)

    print('\r Test %s: %f' % (blue('Accuracy'), acc))
    logger.info('Test Accuracy: %f', acc)

    logger.info('End of testing...')
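
The test(...) call above is assumed to return plain instance accuracy. A minimal sketch of such an evaluation loop (the project's own test() may additionally track per-class accuracy):

import torch

def test(model, loader):
    # instance accuracy over batches of (points, target) from the test loader
    total_correct, total_seen = 0, 0
    with torch.no_grad():
        for points, target in loader:
            target = target[:, 0]
            points = points.transpose(2, 1).cuda()
            target = target.cuda()
            pred = model(points)
            if isinstance(pred, tuple):  # PointNetCls also returns trans_feat
                pred = pred[0]
            pred_choice = pred.data.max(1)[1]
            total_correct += pred_choice.eq(target.long()).cpu().sum().item()
            total_seen += points.size(0)
    return total_correct / total_seen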
Example #18
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    if not args.reduced_computation:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
        num_workers = 8
    else:
        num_workers = 0


    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path(args.experiment_dir)
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('classification')
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    tensorboard_dir = experiment_dir.joinpath('tensorboard/')
    tensorboard_dir.mkdir(exist_ok=True)

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)

    '''TENSORBOARD'''
    train_writer = SummaryWriter(tensorboard_dir.joinpath("train"))
    val_writer = SummaryWriter(tensorboard_dir.joinpath("validation"))

    '''DATA LOADING'''
    log_string('Load dataset ...')
    DATA_PATH = args.data_path

    class_in_filename = args.data_extension != ".npy"

    TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, split_name=args.split_name, extension=args.data_extension, npoint=args.num_point, split='train',
                                       normal_channel=args.normal, class_in_filename=class_in_filename, uniform=args.uniform, voxel_size=args.reduced_resolution_voxel_size)
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, split_name=args.split_name, extension=args.data_extension, npoint=args.num_point, split='validation',
                                      normal_channel=args.normal, class_in_filename=class_in_filename, uniform=args.uniform, voxel_size=args.reduced_resolution_voxel_size)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=num_workers)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=num_workers)

    '''MODEL LOADING'''
    num_class = args.num_classes
    MODEL = importlib.import_module(args.model)
    shutil.copy('./models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('./models/pointnet_util.py', str(experiment_dir))

    classifier = MODEL.get_model(num_class, normal_channel=args.normal)
    if not args.reduced_computation:
        classifier = classifier.cuda()
    criterion = MODEL.get_loss()
    if not args.reduced_computation:
        criterion = criterion.cuda()

    try:
        checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrain model')
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0


    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    else:
        optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)

    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.9, patience=10, min_lr=0.000001)
    global_epoch = 0
    global_step = 0
    best_instance_acc = 0.0
    best_class_acc = 0.0
    mean_correct = []

    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))

        mean_correct = []
        batch_tqdm = tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9)
        total_loss = 0
        predictions_likelihood_tot = torch.zeros([len(trainDataLoader.dataset), num_class])

        for batch_id, data in batch_tqdm:
            points, target = data
            points = points.data.numpy()
            if args.augment:
                points = provider.random_point_dropout(points)
                points[:,:, 0:3] = provider.random_scale_point_cloud(points[:,:, 0:3])
                points[:,:, 0:3] = provider.shift_point_cloud(points[:,:, 0:3])
            points = torch.Tensor(points)
            target = target[:, 0]

            points = points.transpose(2, 1)
            if not args.reduced_computation:
                points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()

            classifier = classifier.train()
            pred, trans_feat = classifier(points)
            loss = criterion(pred, target.long(), trans_feat)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            mean_correct.append(correct.item() / float(points.size()[0]))
            loss.backward()
            optimizer.step()
            global_step += 1
            total_loss += loss.item()  # accumulate a float so the autograd graph is freed each batch
            mean_loss = total_loss / (batch_id + 1)
            batch_tqdm.set_description(f"loss {mean_loss}, batch ({batch_id}/{len(trainDataLoader)})")
            # the model is assumed to emit log-probabilities, so exp() recovers class likelihoods
            preds_likelihood = torch.exp(pred).detach().cpu()
            predictions_likelihood_tot[batch_id*trainDataLoader.batch_size:(batch_id+1)*trainDataLoader.batch_size] = preds_likelihood

        train_instance_acc = np.mean(mean_correct)
        log_string('Train Instance Accuracy: %f' % train_instance_acc)
        train_writer.add_scalar('Loss', mean_loss, epoch)
        train_writer.add_scalar('Accuracy', train_instance_acc, epoch)
        for cls in range(num_class):
            train_writer.add_histogram(f"class_{cls}", predictions_likelihood_tot[:, cls], epoch)

        with torch.no_grad():
            instance_acc, class_acc, val_loss = test(classifier.eval(), testDataLoader, criterion, num_class=num_class)
            scheduler.step(val_loss)
            val_writer.add_scalar('Loss', val_loss, epoch)
            val_writer.add_scalar('Accuracy', instance_acc, epoch)
            val_writer.add_scalar('Class_Accuracy', class_acc, epoch)

            if (instance_acc >= best_instance_acc):
                best_instance_acc = instance_acc
                best_epoch = epoch + 1

            if (class_acc >= best_class_acc):
                best_class_acc = class_acc
            log_string('Test Instance Accuracy: %f, Class Accuracy: %f'% (instance_acc, class_acc))
            log_string('Best Instance Accuracy: %f, Class Accuracy: %f'% (best_instance_acc, best_class_acc))

            if (instance_acc >= best_instance_acc):
                logger.info('Save model...')
                savepath = str(checkpoints_dir) + '/best_model.pth'
                log_string('Saving at %s'% savepath)
                state = {
                    'epoch': best_epoch,
                    'instance_acc': instance_acc,
                    'class_acc': class_acc,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, savepath)
            global_epoch += 1

    logger.info('End of training...')
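
Unlike the fixed-step schedulers in the other examples, the ReduceLROnPlateau scheduler above must be stepped with the monitored metric rather than bare. A minimal self-contained usage pattern for reference:

import torch

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='min', factor=0.9, patience=10, min_lr=1e-6)

for epoch in range(30):
    val_loss = 1.0  # stand-in for a validation loss that has plateaued
    scheduler.step(val_loss)  # pass the metric; the LR drops after 'patience' flat epochs
    print(epoch, optimizer.param_groups[0]['lr'])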
Example #19
def main(args):
    """HYPER PARAMETER"""
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    datapath = './data/ModelNet/'

    '''CREATE DIR'''
    experiment_dir = Path('./eval_experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(str(experiment_dir) + '/%sModelNet40-' % args.model_name + str(
        datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    os.system('cp %s %s' % (args.checkpoint, checkpoints_dir))
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(str(log_dir) + '/eval_%s_cls.txt' % args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------EVAL---------------------------------------------------')
    logger.info('PARAMETER ...')
    logger.info(args)

    '''DATA LOADING'''
    logger.info('Load dataset ...')
    train_data, train_label, test_data, test_label = load_data(datapath, classification=True)
    logger.info("The number of training data is: %d", train_data.shape[0])
    logger.info("The number of test data is: %d", test_data.shape[0])
    test_dataset = ModelNetDataLoader(test_data, test_label)
    test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batchsize, shuffle=False)

    '''MODEL LOADING'''
    num_class = 40
    classifier = PointConvClsSsg(num_class).cuda()
    if args.checkpoint is not None:
        print('Load CheckPoint...')
        logger.info('Load CheckPoint')
        checkpoint = torch.load(args.checkpoint)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('Please load Checkpoint to eval...')
        start_epoch = 0
        sys.exit(0)

    blue = lambda x: '\033[94m' + x + '\033[0m'

    '''EVAL'''
    logger.info('Start evaluating...')
    print('Start evaluating...')

    total_correct = 0
    total_seen = 0
    for batch_id, data in tqdm(enumerate(test_data_loader, 0), total=len(test_data_loader), smoothing=0.9):
        pointcloud, target = data
        target = target[:, 0]
        pred_view = torch.zeros(pointcloud.shape[0], num_class).cuda()

        for _ in range(args.num_view):
            pointcloud = generate_new_view(pointcloud)
            points = pointcloud.permute(0, 2, 1)
            points, target = points.cuda(), target.cuda()
            classifier = classifier.eval()
            with torch.no_grad():
                pred = classifier(points)
            pred_view += pred
        pred_choice = pred_view.data.max(1)[1]
        correct = pred_choice.eq(target.long().data).cpu().sum()
        total_correct += correct.item()
        total_seen += float(points.size()[0])

    accuracy = total_correct / total_seen
    print('Total Accuracy: %f' % accuracy)

    logger.info('Total Accuracy: %f' % accuracy)
    logger.info('End of evaluation...')
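The voting loop above relies on generate_new_view, which is not shown. A hedged sketch follows, under the assumption that each "view" is the batch re-posed by a random rotation about the up (y) axis; the rotation choice is an assumption, not confirmed by the source.

import math
import torch

def generate_new_view(pointcloud):
    # pointcloud: (B, N, C) tensor with xyz in channels 0:3
    angle = torch.rand(1).item() * 2.0 * math.pi
    c, s = math.cos(angle), math.sin(angle)
    rot = torch.tensor([[c, 0.0, s],
                        [0.0, 1.0, 0.0],
                        [-s, 0.0, c]],
                       dtype=pointcloud.dtype, device=pointcloud.device)
    out = pointcloud.clone()
    out[:, :, 0:3] = out[:, :, 0:3] @ rot.T
    return out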
Example #20
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    dataset_name = args.dataset_name
    experiment_dir = experiment_dir.joinpath(
        'classification_{}'.format(dataset_name))
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    '''TENSORBOARD LOG'''
    writer = SummaryWriter()
    '''DATA LOADING'''
    log_string('Load dataset ...')

    DATA_PATH = os.path.join(ROOT_DIR, 'data', dataset_name)

    print("loading dataset from {}".format(dataset_name))
    if 'modelnet' in dataset_name:
        TRAIN_DATASET = ModelNetDataLoader(DATA_PATH,
                                           split='train',
                                           normal_channel=args.normal)
        TEST_DATASET = ModelNetDataLoader(DATA_PATH,
                                          split='test',
                                          normal_channel=args.normal)
        num_class = 40
    else:
        print(DATA_PATH)
        TRAIN_DATASET = ReplicaDataLoader(DATA_PATH,
                                          split='train',
                                          uniform=True,
                                          normal_channel=False,
                                          rot_transform=True)
        TEST_DATASET = ReplicaDataLoader(DATA_PATH,
                                         split='test',
                                         uniform=True,
                                         normal_channel=False,
                                         rot_transform=False)
        num_class = 31

    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=6)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=6)
    '''MODEL LOADING'''
    print("Number of classes are {:d}".format(num_class))
    MODEL = importlib.import_module(args.model)
    shutil.copy('./models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('./models/pointnet_util.py', str(experiment_dir))

    print("Obtain GPU device ")
    train_GPU = True
    device = torch.device("cuda" if (
        torch.cuda.is_available() and train_GPU) else "cpu")
    print(device)
    print("Load the network to the device ")
    classifier = MODEL.get_model(num_class,
                                 normal_channel=args.normal).to(device)
    print("Load the loss to the device ")
    criterion = MODEL.get_loss().to(device)

    if os.path.exists(str(experiment_dir) + '/checkpoints/best_model.pth'):
        # resume from an existing checkpoint in this experiment directory
        checkpoint = torch.load(
            str(experiment_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        start_epoch = 0
        if dataset_name == 'replica':
            log_string('Use pretrained ModelNet model')
            # load the ModelNet40-pretrained checkpoint, then swap the
            # classification head for the Replica classes
            checkpoint = torch.load(
                str(experiment_dir).replace("replica",
                                            'modelnet40_normal_resampled') +
                '/checkpoints/best_model.pth')
            classifier = MODEL.get_model(40,
                                         normal_channel=args.normal).to(device)
            classifier.load_state_dict(checkpoint['model_state_dict'])
            classifier.fc3 = nn.Linear(256, num_class).to(device)
            print(classifier)
        else:
            log_string('No existing model, starting training from scratch...')

    if args.optimizer == 'Adam':
        print("Using Adam opimizer ")
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.7)
    global_epoch = 0
    global_step = 0
    best_instance_acc = 0.0
    best_class_acc = 0.0
    mean_correct = []
    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        loss_array = np.zeros((len(trainDataLoader), 1))
        mean_correct = []  # reset each epoch so the mean is per-epoch
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))
        classifier.train()  # set the model to train mode
        print("Clear GPU cache ...")
        torch.cuda.empty_cache()
        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target = data
            points = points.data.numpy()
            points = provider.random_point_dropout(points)
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :,
                                                                         0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            points = torch.Tensor(points)
            target = target[:, 0]

            points = points.transpose(2, 1)
            points, target = points.to(device), target.to(device)

            optimizer.zero_grad()

            pred, trans_feat = classifier(points)

            loss = criterion(pred, target.long(), trans_feat)
            loss_array[batch_id] = loss.cpu().detach().numpy()

            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            mean_correct.append(correct.item() / float(points.size()[0]))
            loss.backward()
            optimizer.step()
            global_step += 1

        train_instance_acc = np.mean(mean_correct)
        log_string('Train Instance Accuracy: %f' % train_instance_acc)
        avg_loss = np.mean(loss_array[:])
        writer.add_scalar("Loss/train", avg_loss, epoch)

        ## This is for validation
        with torch.no_grad():
            instance_acc, class_acc = test(classifier.eval(), testDataLoader,
                                           device, num_class)

            writer.add_scalar("ClassAccuracy/test", class_acc, epoch)
            writer.add_scalar("InstanceAccuracy/test", instance_acc, epoch)

            if (instance_acc >= best_instance_acc):
                best_instance_acc = instance_acc
                best_epoch = epoch + 1

            if (class_acc >= best_class_acc):
                best_class_acc = class_acc
            log_string('Test Instance Accuracy: %f, Class Accuracy: %f' %
                       (instance_acc, class_acc))
            log_string('Best Instance Accuracy: %f, Class Accuracy: %f' %
                       (best_instance_acc, best_class_acc))

            if (instance_acc >= best_instance_acc):
                logger.info('Save model...')
                savepath = str(checkpoints_dir) + '/best_model.pth'
                log_string('Saving at %s' % savepath)
                state = {
                    'epoch': best_epoch,
                    'instance_acc': instance_acc,
                    'class_acc': class_acc,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, savepath)
            global_epoch += 1

    logger.info('End of training...')
    writer.flush()
    writer.close()
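Several of these examples augment training batches with provider.random_point_dropout, provider.random_scale_point_cloud, and provider.shift_point_cloud. The sketches below are consistent with the provider.py commonly bundled with PyTorch PointNet++ repositories; the default ranges here are assumptions.

import numpy as np

def random_point_dropout(batch_pc, max_dropout_ratio=0.875):
    # batch_pc: (B, N, C); dropped points are replaced by the first point
    for b in range(batch_pc.shape[0]):
        dropout_ratio = np.random.random() * max_dropout_ratio
        drop_idx = np.where(np.random.random(batch_pc.shape[1]) <= dropout_ratio)[0]
        if len(drop_idx) > 0:
            batch_pc[b, drop_idx, :] = batch_pc[b, 0, :]
    return batch_pc

def random_scale_point_cloud(batch_xyz, scale_low=0.8, scale_high=1.25):
    # scale each cloud in the batch by a single random factor
    scales = np.random.uniform(scale_low, scale_high, batch_xyz.shape[0])
    return batch_xyz * scales[:, None, None]

def shift_point_cloud(batch_xyz, shift_range=0.1):
    # translate each cloud by a random offset per axis
    shifts = np.random.uniform(-shift_range, shift_range, (batch_xyz.shape[0], 3))
    return batch_xyz + shifts[:, None, :]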
Example #21
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    experiment_dir = osp.join(args.log_dir, 'ModelNet40-eval')
    experiment_dir = experiment_dir + '_' + str(args.num_point)
    if args.sqrt:
        experiment_dir = experiment_dir + '_do-sqrt'
    if args.do_sa3:
        experiment_dir = experiment_dir + '_sa3-feats'
    if args.svm_jitter:
        experiment_dir = experiment_dir + '_svm-jitter'
        args.batch_size = (args.batch_size // 8)  # 8x augmentation
    if args.random_feats:
        experiment_dir = experiment_dir + '_random-feats'
    if args.ckpt is not None:
        experiment_dir = experiment_dir + '_' + osp.splitext(args.ckpt)[0]

    os.makedirs(experiment_dir, exist_ok=True)
    '''LOG'''
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/eval.txt' % experiment_dir)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    log_string('Experiment dir: %s' % experiment_dir)
    '''DATA LOADING'''
    log_string('Load dataset ...')
    DATA_PATH = 'data/modelnet40_normal_resampled/'
    TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                       npoint=args.num_point,
                                       split='train',
                                       normal_channel=args.normal)
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                      npoint=args.num_point,
                                      split='test',
                                      normal_channel=args.normal)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                                  batch_size=args.batch_size,
                                                  shuffle=False,
                                                  num_workers=4)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=4)

    if DEBUG:
        # ShapeNet training data
        shapenet_root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'
        SHAPENET_DATASET = PartNormalDataset(root=shapenet_root,
                                             npoints=args.num_point,
                                             split='trainval',
                                             normal_channel=args.normal)
        shapenetDataLoader = torch.utils.data.DataLoader(
            SHAPENET_DATASET,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=4)
        ACD_ROOT = '/srv/data2/mgadelha/ShapeNetACD/'
        SELFSUP_DATASET = ACDSelfSupDataset(root=ACD_ROOT,
                                            npoints=args.num_point,
                                            normal_channel=args.normal)
        selfsupDataLoader = torch.utils.data.DataLoader(
            SELFSUP_DATASET,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=4)
    '''MODEL LOADING'''
    shapenet_num_class = 50  # part labels in the ShapeNet part-segmentation benchmark
    model_name = args.model
    MODEL = importlib.import_module(model_name)
    model = MODEL.get_model(shapenet_num_class, normal_channel=False).cuda()
    if not args.random_feats:
        log_string('Load ACD pre-trained model: %s' % args.log_dir)
        if args.ckpt is None:
            checkpoint = torch.load(
                str(args.log_dir) + '/checkpoints/best_model.pth')
        else:
            checkpoint = torch.load(
                str(args.log_dir) + '/checkpoints/' + args.ckpt)

        try:
            DATA_PARALLEL = False
            model.load_state_dict(checkpoint['model_state_dict'])
        except RuntimeError:
            # checkpoint was saved from an nn.DataParallel wrapper:
            # wrap, load, then unwrap back to the bare module
            DATA_PARALLEL = True
            model = nn.DataParallel(model)
            model.load_state_dict(checkpoint['model_state_dict'])
            model = model.module
    else:
        log_string('Using randomly initialized %s as feature extractor' %
                   model_name)

    # Extract features and save
    if not osp.exists(osp.join(experiment_dir, 'train-feats.npy')) or \
        not osp.exists(osp.join(experiment_dir, 'train-labels.txt')):

        log_string('Extract features ...')
        if args.model == 'pointnet_part_seg':
            feat_train, label_train = extract_feats_pointnet(
                model,
                trainDataLoader,
                do_sqrt=args.sqrt,
                do_global=args.do_sa3)
            feat_test, label_test = extract_feats_pointnet(
                model,
                testDataLoader,
                do_sqrt=args.sqrt,
                do_global=args.do_sa3)

        elif args.model == 'pointnet2_part_seg_msg':
            feat_train, label_train = extract_feats(
                model,
                trainDataLoader,
                do_sqrt=args.sqrt,
                do_sa3=args.do_sa3,
                do_svm_jitter=args.svm_jitter)
            feat_test, label_test = extract_feats(
                model,
                testDataLoader,
                do_sqrt=args.sqrt,
                do_sa3=args.do_sa3,
                do_svm_jitter=args.svm_jitter)

        elif args.model == 'dgcnn':
            # feature extraction for the classification DGCNN is not wired up
            # here; fail fast instead of hitting a NameError below
            raise NotImplementedError(
                'dgcnn feature extraction is not implemented')
        elif args.model == 'dgcnn_seg':
            feat_train, label_train = extract_feats_dgcnn(model,
                                                          trainDataLoader,
                                                          do_sqrt=args.sqrt)
            feat_test, label_test = extract_feats_dgcnn(model,
                                                        testDataLoader,
                                                        do_sqrt=args.sqrt)
        else:
            raise ValueError

        np.save(osp.join(experiment_dir, 'train-feats.npy'), feat_train)
        np.savetxt(osp.join(experiment_dir, 'train-labels.txt'), label_train)
        np.save(osp.join(experiment_dir, 'test-feats.npy'), feat_test)
        np.savetxt(osp.join(experiment_dir, 'test-labels.txt'), label_test)

    else:
        log_string('Loading pre-trained features')
        feat_train = np.load(osp.join(experiment_dir, 'train-feats.npy'))
        label_train = np.loadtxt(osp.join(experiment_dir, 'train-labels.txt'))
        feat_test = np.load(osp.join(experiment_dir, 'test-feats.npy'))
        label_test = np.loadtxt(osp.join(experiment_dir, 'test-labels.txt'))

    # Train linear SVM (one-vs-rest) on features

    # Train+test SVM on validation *or* test set
    log_string('Training linear SVM ...')
    if args.val_svm:
        log_string('Total data: %d samples, %d features' % feat_train.shape)
        val_acc, _, _ = train_val_svm(feat_train,
                                      label_train,
                                      svm_c=args.svm_c)
        log_string('Validation Accuracy: %f' % val_acc)
    else:
        # SVM training on *all* training data
        log_string('Training data: %d samples, %d features' % feat_train.shape)
        t_0 = time.time()
        if args.cross_val_svm:
            classifier, best_C, best_score = cross_val_svm(
                feat_train, label_train)
        else:
            classifier = LinearSVC(random_state=123,
                                   multi_class='ovr',
                                   C=args.svm_c,
                                   dual=False)
            classifier.fit(feat_train, label_train)
        train_acc = classifier.score(feat_train, label_train)
        log_string('Train Accuracy: %f' % train_acc)
        t_1 = time.time()
        log_string('Time elapsed: %f' % (t_1 - t_0))
        # test performance
        test_acc = classifier.score(feat_test, label_test)
        log_string('Test Accuracy: %f' % test_acc)
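train_val_svm and cross_val_svm are defined outside this excerpt. A hedged sketch of the cross-validation path: sweep the SVM cost C on a held-out split of the training features, then refit the best setting on all of them. The grid, split ratio, and seed here are assumptions.

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC

def cross_val_svm(feats, labels, c_grid=(0.01, 0.1, 1.0, 10.0), seed=123):
    tr_x, va_x, tr_y, va_y = train_test_split(
        feats, labels, test_size=0.2, random_state=seed)
    best_c, best_score, best_clf = None, -np.inf, None
    for c in c_grid:
        clf = LinearSVC(random_state=seed, C=c, dual=False)
        clf.fit(tr_x, tr_y)
        score = clf.score(va_x, va_y)
        if score > best_score:
            best_c, best_score, best_clf = c, score, clf
    best_clf.fit(feats, labels)  # refit on the full training set
    return best_clf, best_c, best_score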
Example #22
def get_acc():
    batch_size = 16

    config = yaml.load(open("/content/PointNet-BYOL/config/config.yaml", "r"),
                       Loader=yaml.FullLoader)
    # normal_channel must be set to True here, otherwise the input has 3 channels and cannot match the expected 6
    TRAIN_DATASET = ModelNetDataLoader(
        root='data/modelnet40_normal_resampled/',
        npoint=1024,
        split='train',
        normal_channel=True)
    TEST_DATASET = ModelNetDataLoader(root='data/modelnet40_normal_resampled/',
                                      npoint=1024,
                                      split='test',
                                      normal_channel=True)

    print("Input shape:", len(TRAIN_DATASET))

    train_loader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=12)
    test_loader = torch.utils.data.DataLoader(TEST_DATASET,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=12)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    encoder = get_model(num_class=40, normal_channel=True)

    # load pre-trained parameters
    load_params = torch.load(
        os.path.join('/content/PointNet-BYOL/checkpoints/model.pth'),
        map_location=torch.device(torch.device(device)))

    if 'online_network_state_dict' in load_params:
        encoder.load_state_dict(load_params['online_network_state_dict'])
        print("Parameters successfully loaded.")

    # prepare the pre-trained encoder for feature extraction
    encoder = encoder.to(device)
    encoder.eval()

    criterion = pointnet2_cls_msg_concat.get_loss().cuda()
    classifier = pointnet2_cls_msg_concat.get_model(
        num_class=40, normal_channel=True).cuda()
    optimizer = torch.optim.Adam(classifier.parameters(),
                                 lr=0.001,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=1e-4)
    eval_every_n_epochs = 1
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.7)
    best_instance_acc = 0.0
    best_class_acc = 0.0
    mean_correct = []
    try:
        checkpoint = torch.load(
            '/content/PointNet-BYOL/checkpoints/best_model.pth')
        classifier.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        print('Use pretrain model')
    except (FileNotFoundError, KeyError, RuntimeError):
        print('No existing model, starting training from scratch...')

    for epoch in range(20):
        print('Epoch %d ' % (epoch + 1))
        mean_correct = []  # reset so train accuracy is per-epoch

        scheduler.step()
        for batch_id, data in tqdm(enumerate(train_loader, 0),
                                   total=len(train_loader),
                                   smoothing=0.9):
            points, target = data
            points = points.data.numpy()
            points = provider.random_point_dropout(points)
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :,
                                                                         0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            points = torch.Tensor(points)
            target = target[:, 0]
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            classifier = classifier.train()
            feature_vector, _ = encoder(points)
            pred, _ = classifier(points, feature_vector)
            # note: target is passed where trans_feat is expected;
            # pointnet2-style losses typically ignore that argument
            loss = criterion(pred, target.long(), target)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            mean_correct.append(correct.item() / float(points.size()[0]))
            loss.backward()
            optimizer.step()
        train_instance_acc = np.mean(mean_correct)
        print('Train Instance Accuracy: %f' % train_instance_acc)
        with torch.no_grad():
            instance_acc, class_acc = test(classifier.eval(), encoder.eval(),
                                           test_loader)

            if (instance_acc >= best_instance_acc):
                best_instance_acc = instance_acc
                best_epoch = epoch + 1

            if (class_acc >= best_class_acc):
                best_class_acc = class_acc
            print('Test Instance Accuracy: %f, Class Accuracy: %f' %
                  (instance_acc, class_acc))
            print('Best Instance Accuracy: %f, Class Accuracy: %f' %
                  (best_instance_acc, best_class_acc))

            if (instance_acc >= best_instance_acc):
                print('Save model...')
                savepath = 'checkpoints' + '/best_model.pth'
                print('Saving at %s' % savepath)
                state = {
                    'epoch': best_epoch,
                    'instance_acc': instance_acc,
                    'class_acc': class_acc,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, savepath)

    print('End of training...')

    return train_instance_acc
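One caveat in the training loop above: encoder.eval() only switches batch norm and dropout to inference mode; gradients still propagate through the BYOL encoder (they are discarded, since the optimizer only holds classifier.parameters(), but the activations are stored anyway). A minimal sketch of truly freezing the feature extractor, as an optional memory saving:

import torch

def frozen_features(encoder, points):
    # eval() alone does not stop gradient tracking; no_grad also skips
    # storing activations for a backward pass through the encoder
    encoder.eval()
    with torch.no_grad():
        feature_vector, _ = encoder(points)
    return feature_vector

Inside the loop, feature_vector = frozen_features(encoder, points) would stand in for the direct encoder(points) call.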
Example #23
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(str(experiment_dir) + '/%s_ModelNet40-' % args.model_name + str(
        datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(str(log_dir) + '/train_%s_cls.txt' % args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------TRAINING---------------------------------------------------')
    logger.info('PARAMETER ...')
    logger.info(args)

    '''DATA LOADING'''
    logger.info('Load dataset ...')
    DATA_PATH = './data/modelnet40_normal_resampled/'

    TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='train', normal_channel=args.normal)
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test', normal_channel=args.normal)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batchsize, shuffle=True,
                                                  num_workers=args.num_workers)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batchsize, shuffle=False,
                                                 num_workers=args.num_workers)

    logger.info("The number of training data is: %d", len(TRAIN_DATASET))
    logger.info("The number of test data is: %d", len(TEST_DATASET))

    seed = 3
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    '''MODEL LOADING'''
    num_class = 40
    classifier = PointConvClsSsg(num_class).cuda()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.7)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'

    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        logger.info('Epoch %d (%d/%s):', global_epoch + 1, epoch + 1, args.epoch)
        mean_correct = []

        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
            points, target = data
            points = points.data.numpy()
            # Data augmentation: randomly scale and shift the point cloud, and randomly drop some points
            jittered_data = provider.random_scale_point_cloud(points[:, :, 0:3], scale_low=2.0 / 3, scale_high=3 / 2.0)
            jittered_data = provider.shift_point_cloud(jittered_data, shift_range=0.2)
            points[:, :, 0:3] = jittered_data
            points = provider.random_point_dropout_v2(points)
            points = provider.shuffle_points(points)  # shuffle_points returns the permuted array
            points = torch.Tensor(points)
            target = target[:, 0]

            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()

            classifier = classifier.train()
            # pred = classifier(points[:, :3, :], points[:, 3:, :])
            pred = classifier(points[:, :3, :], None)
            loss = F.nll_loss(pred, target.long())
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            mean_correct.append(correct.item() / float(points.size()[0]))
            loss.backward()
            optimizer.step()
            global_step += 1

        train_acc = np.mean(mean_correct)
        print('Train Accuracy: %f' % train_acc)
        logger.info('Train Accuracy: %f' % train_acc)

        acc = test(classifier, testDataLoader)

        if (acc >= best_tst_accuracy) and epoch > 5:
            best_tst_accuracy = acc
            logger.info('Save model...')
            save_checkpoint(
                global_epoch + 1,
                train_acc,
                acc,
                classifier,
                optimizer,
                str(checkpoints_dir),
                args.model_name)
            print('Saving model....')

        print('\r Loss: %f' % loss.item())
        logger.info('Loss: %.2f', loss.item())
        print('\r Test %s: %f   ***  %s: %f' % (blue('Accuracy'), acc, blue('Best Accuracy'), best_tst_accuracy))
        logger.info('Test Accuracy: %f  *** Best Test Accuracy: %f', acc, best_tst_accuracy)

        global_epoch += 1
    print('Best Accuracy: %f' % best_tst_accuracy)

    logger.info('End of training...')
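save_checkpoint is not defined in this excerpt. A plausible sketch matching the call site above and the state dictionaries saved by the neighboring examples; the exact field names and file name are assumptions.

import os
import torch

def save_checkpoint(epoch, train_accuracy, test_accuracy, model, optimizer,
                    checkpoints_dir, model_name):
    # mirror the checkpoint layout used elsewhere in these examples
    savepath = os.path.join(checkpoints_dir, '%s.pth' % model_name)
    state = {
        'epoch': epoch,
        'train_accuracy': train_accuracy,
        'test_accuracy': test_accuracy,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    torch.save(state, savepath)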
Example #24
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    '''CREATE DIR'''
    experiment_dir = Path('./eval_experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(str(experiment_dir) + '/%s_ModelNet40-'%args.model_name + str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    os.system('cp %s %s' % (args.checkpoint, checkpoints_dir))
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(str(log_dir) + '/eval_%s_cls.txt' % args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info('---------------------------------------------------EVAL---------------------------------------------------')
    logger.info('PARAMETER ...')
    logger.info(args)

    '''DATA LOADING'''
    logger.info('Load dataset ...')
    DATA_PATH = './data/modelnet40_normal_resampled/'

    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test', normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batchsize, shuffle=False, num_workers=args.num_workers)
    logger.info("The number of test data is: %d", len(TEST_DATASET))

    seed = 3
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    '''MODEL LOADING'''
    num_class = 40
    classifier = PointConvClsSsg(num_class).cuda()
    if args.checkpoint is not None:
        print('Load CheckPoint...')
        logger.info('Load CheckPoint')
        checkpoint = torch.load(args.checkpoint)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('Please load Checkpoint to eval...')
        sys.exit(0)

    blue = lambda x: '\033[94m' + x + '\033[0m'

    '''EVAL'''
    logger.info('Start evaluating...')
    print('Start evaluating...')

    classifier = classifier.eval()
    mean_correct = []
    for batch_id, data in tqdm(enumerate(testDataLoader, 0), total=len(testDataLoader), smoothing=0.9):
        pointcloud, target = data
        target = target[:, 0]

        points = pointcloud.permute(0, 2, 1)
        points, target = points.cuda(), target.cuda()
        with torch.no_grad():
            pred = classifier(points[:, :3, :], points[:, 3:, :])
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.long().data).cpu().sum()

        mean_correct.append(correct.item()/float(points.size()[0]))

    accuracy = np.mean(mean_correct)
    print('Total Accuracy: %f'%accuracy)

    logger.info('Total Accuracy: %f'%accuracy)
    logger.info('End of evaluation...')
Example #25
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('classification')
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)

    '''DATA LOADING'''
    log_string('Load dataset ...')
    DATA_PATH = 'data/modelnet40_normal_resampled/'

    TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='train',
                                                     normal_channel=args.normal)
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test',
                                                    normal_channel=args.normal)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4)

    '''MODEL LOADING'''
    num_class = 40
    MODEL = importlib.import_module(args.model)
    shutil.copy('./models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('./models/pointnet_util_psn.py', str(experiment_dir))

    classifier = MODEL.get_model(num_class,normal_channel=args.normal).cuda()
    criterion = MODEL.get_loss().cuda()

    try:
        checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrain model')
    except (FileNotFoundError, KeyError, RuntimeError):
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0


    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    else:
        optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7)
    global_epoch = 0
    global_step = 0
    best_instance_acc = 0.0
    best_class_acc = 0.0
    mean_correct = []
    best_epoch = 0

    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        mean_correct = []  # reset so train accuracy is per-epoch

        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
            points, target = data
            points = points.data.numpy()
            points = provider.random_point_dropout(points)
            points[:,:, 0:3] = provider.random_scale_point_cloud(points[:,:, 0:3])
            points[:,:, 0:3] = provider.shift_point_cloud(points[:,:, 0:3])
            points = torch.Tensor(points)
            target = target[:, 0]

            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()

            classifier = classifier.train()
            pred, trans_feat = classifier(points, False)
            loss = criterion(pred, target.long(), trans_feat)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            mean_correct.append(correct.item() / float(points.size()[0]))
            loss.backward()
            optimizer.step()
            global_step += 1

        train_instance_acc = np.mean(mean_correct)
        log_string('Train Instance Accuracy: %f' % train_instance_acc)


        with torch.no_grad():
            instance_acc, class_acc = test(classifier.eval(), testDataLoader)

            if (instance_acc >= best_instance_acc):
                best_instance_acc = instance_acc
                best_epoch = epoch + 1

            if (class_acc >= best_class_acc):
                best_class_acc = class_acc
            log_string('Test Instance Accuracy: %f, Class Accuracy: %f'% (instance_acc, class_acc))
            log_string('Best Instance Accuracy: %f, Class Accuracy: %f'% (best_instance_acc, best_class_acc))

            if (instance_acc >= best_instance_acc):
                logger.info('Save model...')
                savepath = str(checkpoints_dir) + '/best_model.pth'
                log_string('Saving at %s'% savepath)
                state = {
                    'epoch': best_epoch,
                    'instance_acc': instance_acc,
                    'class_acc': class_acc,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                }
                torch.save(state, savepath)
            global_epoch += 1

    logger.info('End of training...')
Example #26
        print('=====> Building new model...')
        torch.manual_seed(args.seed)
        print("Random Seed: ", args.seed)

        START_EPOCH = 0
        acc_list = [0]

        print('Successfully built!')

    ########################################
    ## Load data
    ########################################
    print('======> Loading data')

    TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                       npoint=args.num_points,
                                       split='train',
                                       normal_channel=args.normal)
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                      npoint=args.num_points,
                                      split='test',
                                      normal_channel=args.normal)
    train_loader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=4,
                                               drop_last=True)
    test_loader = torch.utils.data.DataLoader(TEST_DATASET,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=4,
                                              drop_last=False)