Example #1
def evaluate(args):
    cache = _load(root)
    norm = args.model_name == 'pointnet'
    test_ds = PartNormalDataset(root, cache, npoints=2048, split='test')
    testdataloader = DataLoader(test_ds,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=int(args.workers))
    log.info("The number of test data is:", len(test_ds))

    log.info('Building Model', args.model_name)
    num_classes = 16
    num_part = 50
    if args.model_name == 'pointnet2':
        model = PointNet2PartSegMsg_one_hot(num_part)
    else:
        model = PointNetDenseCls(cat_num=num_classes, part_num=num_part)

    torch.backends.cudnn.benchmark = True
    model = torch.nn.DataParallel(model).cuda()
    log.debug('Using gpu:', args.gpu)

    if args.pretrain is None:
        log.err('No pretrain model')
        return

    log.debug('Loading pretrain model...')
    state_dict = torch.load(args.pretrain)
    model.load_state_dict(state_dict)

    log.info('Testing pretrain model...')

    test_metrics, test_hist_acc, cat_mean_iou = test_partseg(
        model.eval(),
        testdataloader,
        label_id_to_name,
        args.model_name,
        num_part,
    )

    log.info('test_hist_acc', len(test_hist_acc))
    log.info(cat_mean_iou)
    log.info('Test Accuracy', '%.5f' % test_metrics['accuracy'])
    log.info('Class avg mIOU:', '%.5f' % test_metrics['class_avg_iou'])
    log.info('Instance avg mIOU:', '%.5f' % test_metrics['inctance_avg_iou'])
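
Several of the examples on this page call a to_categorical(label, num_classes) helper that is not shown. A minimal sketch, assuming it simply one-hot encodes the per-shape category ids on the same device as the input (the exact implementation in the source repository may differ):

import torch

def to_categorical(y, num_classes):
    # One-hot encode an integer label tensor (sketch of the assumed helper).
    one_hot = torch.eye(num_classes, device=y.device)[y.view(-1)]
    # Preserve the leading dimensions of y, e.g. (B, 1) -> (B, 1, num_classes).
    return one_hot.view(*y.shape, num_classes)
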
Example #2
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('part_seg')
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)

    root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'

    TRAIN_DATASET = PartNormalDataset(root=root,
                                      npoints=args.npoint,
                                      split='trainval',
                                      normal_channel=args.normal)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=4)
    TEST_DATASET = PartNormalDataset(root=root,
                                     npoints=args.npoint,
                                     split='test',
                                     normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=4)
    log_string("The number of training data is: %d" % len(TRAIN_DATASET))
    log_string("The number of test data is: %d" % len(TEST_DATASET))
    num_classes = 16
    num_part = 50
    '''MODEL LOADING'''
    MODEL = importlib.import_module(args.model)
    shutil.copy('models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('models/pointnet_util.py', str(experiment_dir))

    classifier = MODEL.get_model(num_part, normal_channel=args.normal).cuda()
    criterion = MODEL.get_loss().cuda()

    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)
        elif classname.find('Linear') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)

    try:
        checkpoint = torch.load(
            str(experiment_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrain model')
    except:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0
        classifier = classifier.apply(weights_init)

    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=args.learning_rate,
                                    momentum=0.9)

    def bn_momentum_adjust(m, momentum):
        if isinstance(m, torch.nn.BatchNorm2d) or isinstance(
                m, torch.nn.BatchNorm1d):
            m.momentum = momentum

    LEARNING_RATE_CLIP = 1e-5
    MOMENTUM_ORIGINAL = 0.1
    MOMENTUM_DECCAY = 0.5
    MOMENTUM_DECCAY_STEP = args.step_size

    best_acc = 0
    global_epoch = 0
    best_class_avg_iou = 0
    best_inctance_avg_iou = 0

    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))
        '''Adjust learning rate and BN momentum'''
        lr = max(
            args.learning_rate * (args.lr_decay**(epoch // args.step_size)),
            LEARNING_RATE_CLIP)
        log_string('Learning rate:%f' % lr)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        mean_correct = []
        momentum = MOMENTUM_ORIGINAL * (MOMENTUM_DECCAY
                                        **(epoch // MOMENTUM_DECCAY_STEP))
        if momentum < 0.01:
            momentum = 0.01
        print('BN momentum updated to: %f' % momentum)
        classifier = classifier.apply(
            lambda x: bn_momentum_adjust(x, momentum))
        '''learning one epoch'''
        for i, data in tqdm(enumerate(trainDataLoader),
                            total=len(trainDataLoader),
                            smoothing=0.9):
            points, label, target = data
            points = points.data.numpy()
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :,
                                                                         0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            points = torch.Tensor(points)
            points, label, target = points.float().cuda(), label.long().cuda(
            ), target.long().cuda()
            points = points.transpose(2, 1)
            optimizer.zero_grad()
            classifier = classifier.train()
            seg_pred, trans_feat = classifier(
                points, to_categorical(label, num_classes))
            seg_pred = seg_pred.contiguous().view(-1, num_part)
            target = target.view(-1, 1)[:, 0]
            pred_choice = seg_pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            mean_correct.append(correct.item() /
                                (args.batch_size * args.npoint))
            loss = criterion(seg_pred, target, trans_feat)
            loss.backward()
            optimizer.step()
        train_instance_acc = np.mean(mean_correct)
        log_string('Train accuracy is: %.5f' % train_instance_acc)

        with torch.no_grad():
            test_metrics = {}
            total_correct = 0
            total_seen = 0
            total_seen_class = [0 for _ in range(num_part)]
            total_correct_class = [0 for _ in range(num_part)]
            shape_ious = {cat: [] for cat in seg_classes.keys()}
            seg_label_to_cat = {}  # {0:Airplane, 1:Airplane, ...49:Table}
            for cat in seg_classes.keys():
                for label in seg_classes[cat]:
                    seg_label_to_cat[label] = cat

            for batch_id, (points, label,
                           target) in tqdm(enumerate(testDataLoader),
                                           total=len(testDataLoader),
                                           smoothing=0.9):
                cur_batch_size, NUM_POINT, _ = points.size()
                points, label, target = points.float().cuda(), label.long(
                ).cuda(), target.long().cuda()
                points = points.transpose(2, 1)
                classifier = classifier.eval()
                seg_pred, _ = classifier(points,
                                         to_categorical(label, num_classes))
                cur_pred_val = seg_pred.cpu().data.numpy()
                cur_pred_val_logits = cur_pred_val
                cur_pred_val = np.zeros(
                    (cur_batch_size, NUM_POINT)).astype(np.int32)
                target = target.cpu().data.numpy()
                for i in range(cur_batch_size):
                    cat = seg_label_to_cat[target[i, 0]]
                    logits = cur_pred_val_logits[i, :, :]
                    cur_pred_val[i, :] = np.argmax(logits[:, seg_classes[cat]],
                                                   1) + seg_classes[cat][0]
                correct = np.sum(cur_pred_val == target)
                total_correct += correct
                total_seen += (cur_batch_size * NUM_POINT)

                for l in range(num_part):
                    total_seen_class[l] += np.sum(target == l)
                    total_correct_class[l] += (np.sum((cur_pred_val == l)
                                                      & (target == l)))

                for i in range(cur_batch_size):
                    segp = cur_pred_val[i, :]
                    segl = target[i, :]
                    cat = seg_label_to_cat[segl[0]]
                    part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
                    for l in seg_classes[cat]:
                        if (np.sum(segl == l) == 0) and (
                                np.sum(segp == l) == 0
                        ):  # part is not present, no prediction as well
                            part_ious[l - seg_classes[cat][0]] = 1.0
                        else:
                            part_ious[l - seg_classes[cat][0]] = np.sum(
                                (segl == l) & (segp == l)) / float(
                                    np.sum((segl == l) | (segp == l)))
                    shape_ious[cat].append(np.mean(part_ious))

            all_shape_ious = []
            for cat in shape_ious.keys():
                for iou in shape_ious[cat]:
                    all_shape_ious.append(iou)
                shape_ious[cat] = np.mean(shape_ious[cat])
            mean_shape_ious = np.mean(list(shape_ious.values()))
            test_metrics['accuracy'] = total_correct / float(total_seen)
            test_metrics['class_avg_accuracy'] = np.mean(
                np.array(total_correct_class) /
                np.array(total_seen_class, dtype=np.float64))
            for cat in sorted(shape_ious.keys()):
                log_string('eval mIoU of %s %f' %
                           (cat + ' ' * (14 - len(cat)), shape_ious[cat]))
            test_metrics['class_avg_iou'] = mean_shape_ious
            test_metrics['inctance_avg_iou'] = np.mean(all_shape_ious)

        log_string(
            'Epoch %d test Accuracy: %f  Class avg mIOU: %f   Instance avg mIOU: %f'
            %
            (epoch + 1, test_metrics['accuracy'],
             test_metrics['class_avg_iou'], test_metrics['inctance_avg_iou']))
        if (test_metrics['inctance_avg_iou'] >= best_inctance_avg_iou):
            logger.info('Save model...')
            savepath = str(checkpoints_dir) + '/best_model.pth'
            log_string('Saving at %s' % savepath)
            state = {
                'epoch': epoch,
                'train_acc': train_instance_acc,
                'test_acc': test_metrics['accuracy'],
                'class_avg_iou': test_metrics['class_avg_iou'],
                'inctance_avg_iou': test_metrics['inctance_avg_iou'],
                'model_state_dict': classifier.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, savepath)
            log_string('Saving model....')

        if test_metrics['accuracy'] > best_acc:
            best_acc = test_metrics['accuracy']
        if test_metrics['class_avg_iou'] > best_class_avg_iou:
            best_class_avg_iou = test_metrics['class_avg_iou']
        if test_metrics['inctance_avg_iou'] > best_inctance_avg_iou:
            best_inctance_avg_iou = test_metrics['inctance_avg_iou']
        log_string('Best accuracy is: %.5f' % best_acc)
        log_string('Best class avg mIOU is: %.5f' % best_class_avg_iou)
        log_string('Best instance avg mIOU is: %.5f' % best_inctance_avg_iou)
        global_epoch += 1
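
The training script above reads its command-line flags through parse_args(), which is not included. A hedged sketch of an argparse setup covering the flags this code actually uses (the flag names come from the code; defaults and help strings are assumptions):

import argparse

def parse_args():
    parser = argparse.ArgumentParser('Model')
    parser.add_argument('--model', type=str, default='pointnet2_part_seg_msg', help='model module under models/')
    parser.add_argument('--batch_size', type=int, default=16, help='batch size during training')
    parser.add_argument('--epoch', type=int, default=251, help='number of training epochs')
    parser.add_argument('--learning_rate', type=float, default=0.001, help='initial learning rate')
    parser.add_argument('--gpu', type=str, default='0', help='GPU device id(s)')
    parser.add_argument('--optimizer', type=str, default='Adam', help='Adam or SGD')
    parser.add_argument('--log_dir', type=str, default=None, help='experiment log directory name')
    parser.add_argument('--decay_rate', type=float, default=1e-4, help='weight decay')
    parser.add_argument('--npoint', type=int, default=2048, help='points sampled per shape')
    parser.add_argument('--normal', action='store_true', default=False, help='use surface normals')
    parser.add_argument('--step_size', type=int, default=20, help='decay step for lr and BN momentum')
    parser.add_argument('--lr_decay', type=float, default=0.5, help='learning-rate decay factor')
    return parser.parse_args()
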
Example #3
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    experiment_dir = 'log/part_seg/' + args.log_dir
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/eval.txt' % experiment_dir)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)

    root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'

    TEST_DATASET = PartNormalDataset(root=root,
                                     npoints=args.num_point,
                                     split='test',
                                     normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=4)
    log_string("The number of test data is: %d" % len(TEST_DATASET))
    num_classes = 16
    num_part = 50
    '''MODEL LOADING'''
    model_name = os.listdir(experiment_dir + '/logs')[0].split('.')[0]
    MODEL = importlib.import_module(model_name)
    classifier = MODEL.get_model(num_part, normal_channel=args.normal).cuda()
    checkpoint = torch.load(
        str(experiment_dir) + '/checkpoints/best_model.pth')
    classifier.load_state_dict(checkpoint['model_state_dict'])

    with torch.no_grad():
        test_metrics = {}
        total_correct = 0
        total_seen = 0
        total_seen_class = [0 for _ in range(num_part)]
        total_correct_class = [0 for _ in range(num_part)]
        shape_ious = {cat: [] for cat in seg_classes.keys()}
        seg_label_to_cat = {}  # {0:Airplane, 1:Airplane, ...49:Table}
        for cat in seg_classes.keys():
            for label in seg_classes[cat]:
                seg_label_to_cat[label] = cat

        for batch_id, (points, label,
                       target) in tqdm(enumerate(testDataLoader),
                                       total=len(testDataLoader),
                                       smoothing=0.9):
            cur_batch_size, NUM_POINT, _ = points.size()
            points, label, target = points.float().cuda(), label.long().cuda(
            ), target.long().cuda()
            points = points.transpose(2, 1)
            classifier = classifier.eval()
            vote_pool = torch.zeros(target.size()[0],
                                    target.size()[1], num_part).cuda()
            for _ in range(args.num_votes):
                seg_pred, _ = classifier(points,
                                         to_categorical(label, num_classes),
                                         False)
                vote_pool += seg_pred
            seg_pred = vote_pool / args.num_votes
            cur_pred_val = seg_pred.cpu().data.numpy()
            cur_pred_val_logits = cur_pred_val
            cur_pred_val = np.zeros(
                (cur_batch_size, NUM_POINT)).astype(np.int32)
            target = target.cpu().data.numpy()
            for i in range(cur_batch_size):
                cat = seg_label_to_cat[target[i, 0]]
                logits = cur_pred_val_logits[i, :, :]
                cur_pred_val[i, :] = np.argmax(logits[:, seg_classes[cat]],
                                               1) + seg_classes[cat][0]
            correct = np.sum(cur_pred_val == target)
            total_correct += correct
            total_seen += (cur_batch_size * NUM_POINT)

            for l in range(num_part):
                total_seen_class[l] += np.sum(target == l)
                total_correct_class[l] += (np.sum((cur_pred_val == l)
                                                  & (target == l)))

            for i in range(cur_batch_size):
                segp = cur_pred_val[i, :]
                segl = target[i, :]
                cat = seg_label_to_cat[segl[0]]
                part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
                for l in seg_classes[cat]:
                    if (np.sum(segl == l) == 0) and (
                            np.sum(segp == l) == 0
                    ):  # part is not present, no prediction as well
                        part_ious[l - seg_classes[cat][0]] = 1.0
                    else:
                        part_ious[l - seg_classes[cat][0]] = np.sum(
                            (segl == l) & (segp == l)) / float(
                                np.sum((segl == l) | (segp == l)))
                shape_ious[cat].append(np.mean(part_ious))

        all_shape_ious = []
        for cat in shape_ious.keys():
            for iou in shape_ious[cat]:
                all_shape_ious.append(iou)
            shape_ious[cat] = np.mean(shape_ious[cat])
        mean_shape_ious = np.mean(list(shape_ious.values()))
        test_metrics['accuracy'] = total_correct / float(total_seen)
        test_metrics['class_avg_accuracy'] = np.mean(
            np.array(total_correct_class) /
            np.array(total_seen_class, dtype=np.float64))
        for cat in sorted(shape_ious.keys()):
            log_string('eval mIoU of %s %f' %
                       (cat + ' ' * (14 - len(cat)), shape_ious[cat]))
        test_metrics['class_avg_iou'] = mean_shape_ious
        test_metrics['inctance_avg_iou'] = np.mean(all_shape_ious)

    log_string('Accuracy is: %.5f' % test_metrics['accuracy'])
    log_string('Class avg accuracy is: %.5f' %
               test_metrics['class_avg_accuracy'])
    log_string('Class avg mIOU is: %.5f' % test_metrics['class_avg_iou'])
    log_string('Instance avg mIOU is: %.5f' % test_metrics['inctance_avg_iou'])
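
The evaluation loops above assume a module-level seg_classes dictionary that maps each of the 16 ShapeNet categories to the contiguous range of global part labels it owns (50 part labels in total); seg_label_to_cat is derived from it in the code. A sketch of the expected structure, with only the first and last entries written out; the concrete label ranges follow the standard ShapeNet part benchmark and should be checked against the dataset actually used:

seg_classes = {
    'Airplane': [0, 1, 2, 3],
    'Bag': [4, 5],
    # ... remaining categories, covering part labels 6..46 ...
    'Table': [47, 48, 49],
}
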
Example #4
def main(args):
    def log_string(str):
        logger.info(str)
        # print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu  # e.g. GPU 0
    '''CREATE DIR'''  # create the directories and files for logs under ./log/
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('part_seg')
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(
        args
    )  # Namespace(batch_size=4, decay_rate=0.0001, epoch=251, gpu='0', learning_rate=0.001, log_dir='pointnet2_part_seg_msg', lr_decay=0.5, model='pointnet2_part_seg_msg', normal=True, npoint=2048, optimizer='Adam', step_size=20)

    root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'

    # load the dataset
    # returns 2048 points per shape, normalized; the train/test split is predefined
    TRAIN_DATASET = PartNormalDataset(root=root,
                                      npoints=args.npoint,
                                      split='trainval',
                                      normal_channel=args.normal)

    # assemble the data into batches of batch_size
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=4)
    # the test data is processed the same way
    TEST_DATASET = PartNormalDataset(root=root,
                                     npoints=args.npoint,
                                     split='test',
                                     normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=4)

    log_string("The number of training data is: %d" %
               len(TRAIN_DATASET))  # 13998 training samples
    log_string("The number of test data is: %d" %
               len(TEST_DATASET))  # 2874 test samples
    num_classes = 16
    num_part = 50
    '''MODEL LOADING'''
    MODEL = importlib.import_module(args.model)
    # copy the model definition and utility module into the experiment directory
    shutil.copy('models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('models/pointnet_util.py', str(experiment_dir))
    # segmentation network: predicts one of 50 part classes for every one of the 2048 points
    classifier = MODEL.get_model(num_part, normal_channel=args.normal).cuda()
    criterion = MODEL.get_loss().cuda()  # loss function

    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)
        elif classname.find('Linear') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)

    try:  # load a pretrained model if one exists
        checkpoint = torch.load(
            str(experiment_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrain model')
    except:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0
        classifier = classifier.apply(weights_init)

    if args.optimizer == 'Adam':  # TODO: study these hyperparameters
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=args.learning_rate,
                                    momentum=0.9)
    # helper to adjust BatchNorm momentum
    def bn_momentum_adjust(m, momentum):
        if isinstance(m, torch.nn.BatchNorm2d) or isinstance(
                m, torch.nn.BatchNorm1d):
            m.momentum = momentum

    LEARNING_RATE_CLIP = 1e-5
    MOMENTUM_ORIGINAL = 0.1
    MOMENTUM_DECCAY = 0.5
    MOMENTUM_DECCAY_STEP = args.step_size

    best_acc = 0
    global_epoch = 0
    best_class_avg_iou = 0
    best_inctance_avg_iou = 0

    # training loop
    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))
        '''Adjust learning rate and BN momentum'''
        lr = max(
            args.learning_rate * (args.lr_decay**(epoch // args.step_size)),
            LEARNING_RATE_CLIP)
        log_string('Learning rate:%f' % lr)
        # optimizer.param_groups is a list of dicts; set each group's 'lr' entry to the new learning rate
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        mean_correct = []
        # 0.1 * (0.5 ** (epoch // 20)): halve the BN momentum every step_size (20) epochs
        momentum = MOMENTUM_ORIGINAL * (MOMENTUM_DECCAY
                                        **(epoch // MOMENTUM_DECCAY_STEP))
        if momentum < 0.01:
            momentum = 0.01
        print('BN momentum updated to: %f' % momentum)  # 0.100000
        classifier = classifier.apply(
            lambda x: bn_momentum_adjust(x, momentum))
        '''learning one epoch'''
        for i, data in tqdm(enumerate(trainDataLoader),
                            total=len(trainDataLoader),
                            smoothing=0.9):
            points, label, target = data
            # print(points.shape) # (4,2048,6)
            # data augmentation: apply small random perturbations
            points = points.data.numpy()
            # print(points.shape)  # (4,2048,6)
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :,
                                                                         0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            points = torch.Tensor(points)
            points, label, target = points.float().cuda(), label.long().cuda(
            ), target.long().cuda()
            # print(points.shape)  # torch.Size([4, 2048, 6])
            # print(label.shape)  # torch.Size([4, 1])   object-category label per sample
            # print(target.shape)  # torch.Size([4, 2048])   part label per point
            points = points.transpose(2, 1)
            # print(points.shape)  # torch.Size([4, 6, 2048])
            optimizer.zero_grad()
            classifier = classifier.train()
            seg_pred, trans_feat = classifier(
                points, to_categorical(label, num_classes)
            )  # seg_pred  torch.Size([4, 2048, 50])   trans_feat:torch.Size([4, 1024, 1]) ???
            seg_pred = seg_pred.contiguous().view(
                -1, num_part)  # torch.Size([8192, 50])
            target = target.view(-1, 1)[:, 0]  # 8192
            pred_choice = seg_pred.data.max(1)[1]  # 8192   predicted part label per point
            correct = pred_choice.eq(
                target.data).cpu().sum()  # e.g. tensor(249): only 249 points correct
            mean_correct.append(
                correct.item() /
                (args.batch_size * args.npoint))  # mean accuracy, e.g. 0.0303955078125
            loss = criterion(seg_pred, target, trans_feat)
            loss.backward()
            optimizer.step()
        train_instance_acc = np.mean(
            mean_correct
        )  # accuracy for one epoch; mean_correct holds ~3500 values (13998 samples / batch of 4 = 3500 steps)
        log_string('Train accuracy is: %.5f' %
                   train_instance_acc)  # instance-segmentation accuracy, e.g. 0.8502310616629464
        # evaluation
        with torch.no_grad(
        ):  # not training: the following tensor ops need no computation graph (no backprop); evaluation only
            test_metrics = {}
            total_correct = 0
            total_seen = 0
            total_seen_class = [0
                                for _ in range(num_part)]  # list of num_part zeros
            total_correct_class = [0 for _ in range(num_part)]
            shape_ious = {cat: [] for cat in seg_classes.keys()}
            seg_label_to_cat = {}  # {0:Airplane, 1:Airplane, ...49:Table}
            for cat in seg_classes.keys():
                for label in seg_classes[cat]:  # part labels belonging to this category
                    seg_label_to_cat[label] = cat

            for batch_id, (points, label,
                           target) in tqdm(enumerate(testDataLoader),
                                           total=len(testDataLoader),
                                           smoothing=0.9):
                cur_batch_size, NUM_POINT, _ = points.size(
                )  # torch.Size([4, 2048, 6])
                points, label, target = points.float().cuda(), label.long(
                ).cuda(), target.long().cuda()
                points = points.transpose(2, 1)  # torch.Size([4, 6, 2048])
                classifier = classifier.eval()
                seg_pred, _ = classifier(
                    points,
                    to_categorical(label,
                                   num_classes))  # torch.Size([4, 2048, 50])
                cur_pred_val = seg_pred.cpu().data.numpy()  # (4, 2048, 50)
                cur_pred_val_logits = cur_pred_val
                cur_pred_val = np.zeros(
                    (cur_batch_size, NUM_POINT)).astype(np.int32)  # (4, 2048)
                target = target.cpu().data.numpy(
                )  # part labels of every point in the batch  (4, 2048)
                for i in range(cur_batch_size):  # for each sample in the batch
                    cat = seg_label_to_cat[target[i, 0]]  # look up the object category from any of its part labels
                    logits = cur_pred_val_logits[i, :, :]  # (2048, 50)
                    cur_pred_val[i, :] = np.argmax(
                        logits[:, seg_classes[cat]], 1
                    ) + seg_classes[cat][
                        0]  # argmax over this category's part logits, shifted back to global part indices
                correct = np.sum(cur_pred_val == target)  # 7200
                total_correct += correct  # running total of correctly predicted points
                total_seen += (cur_batch_size * NUM_POINT)  # running total of evaluated points
                # per-part statistics
                for l in range(num_part):
                    total_seen_class[l] += np.sum(
                        target == l)  # total points whose ground truth is part l
                    total_correct_class[l] += (np.sum((cur_pred_val == l)
                                                      & (target == l))
                                               )  # correctly predicted points of part l

                for i in range(cur_batch_size):
                    segp = cur_pred_val[i, :]  # (2048,)
                    segl = target[i, :]
                    cat = seg_label_to_cat[segl[0]]  # any one part label determines the object category
                    part_ious = [0.0 for _ in range(len(seg_classes[cat]))
                                 ]  # one IoU slot for each part of category cat
                    for l in seg_classes[cat]:
                        if (np.sum(segl == l) == 0) and (
                                np.sum(segp == l) == 0
                        ):  # part is not present, no prediction as well
                            part_ious[l - seg_classes[cat][0]] = 1.0
                        else:
                            part_ious[l - seg_classes[cat][0]] = np.sum(
                                (segl == l) & (segp == l)) / float(
                                    np.sum((segl == l) | (segp == l)))
                    shape_ious[cat].append(np.mean(part_ious))  # mean part IoU of this shape

            all_shape_ious = []
            for cat in shape_ious.keys():  # collect per-shape IoUs over all categories
                for iou in shape_ious[cat]:
                    all_shape_ious.append(iou)
                shape_ious[cat] = np.mean(shape_ious[cat])  # mean shape IoU for this category
            mean_shape_ious = np.mean(list(shape_ious.values()))
            test_metrics['accuracy'] = total_correct / float(total_seen)
            test_metrics['class_avg_accuracy'] = np.mean(
                np.array(total_correct_class) /
                np.array(total_seen_class, dtype=np.float64))
            for cat in sorted(shape_ious.keys()):
                log_string('eval mIoU of %s %f' %
                           (cat + ' ' * (14 - len(cat)), shape_ious[cat]))
            test_metrics['class_avg_iou'] = mean_shape_ious
            test_metrics['inctance_avg_iou'] = np.mean(all_shape_ious)

        log_string(
            'Epoch %d test Accuracy: %f  Class avg mIOU: %f   Instance avg mIOU: %f'
            %
            (epoch + 1, test_metrics['accuracy'],
             test_metrics['class_avg_iou'], test_metrics['inctance_avg_iou']))
        if (test_metrics['inctance_avg_iou'] >= best_inctance_avg_iou):
            logger.info('Save model...')
            savepath = str(checkpoints_dir) + '/best_model.pth'
            log_string('Saving at %s' % savepath)
            state = {
                'epoch': epoch,
                'train_acc': train_instance_acc,
                'test_acc': test_metrics['accuracy'],
                'class_avg_iou': test_metrics['class_avg_iou'],
                'inctance_avg_iou': test_metrics['inctance_avg_iou'],
                'model_state_dict': classifier.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, savepath)
            log_string('Saving model....')

        if test_metrics['accuracy'] > best_acc:
            best_acc = test_metrics['accuracy']
        if test_metrics['class_avg_iou'] > best_class_avg_iou:
            best_class_avg_iou = test_metrics['class_avg_iou']
        if test_metrics['inctance_avg_iou'] > best_inctance_avg_iou:
            best_inctance_avg_iou = test_metrics['inctance_avg_iou']
        log_string('Best accuracy is: %.5f' % best_acc)
        log_string('Best class avg mIOU is: %.5f' % best_class_avg_iou)
        log_string('Best instance avg mIOU is: %.5f' % best_inctance_avg_iou)
        global_epoch += 1
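
The training loop above augments the xyz coordinates with provider.random_scale_point_cloud and provider.shift_point_cloud before each step. A minimal sketch of what such helpers typically do on a (B, N, 3) numpy array (a common provider.py-style implementation; the exact ranges used here are assumptions):

import numpy as np

def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25):
    # Scale each point cloud in the batch by a random per-shape factor.
    B, N, C = batch_data.shape
    scales = np.random.uniform(scale_low, scale_high, B)
    for b in range(B):
        batch_data[b, :, :] *= scales[b]
    return batch_data

def shift_point_cloud(batch_data, shift_range=0.1):
    # Translate each point cloud by a random per-shape offset.
    B, N, C = batch_data.shape
    shifts = np.random.uniform(-shift_range, shift_range, (B, 3))
    for b in range(B):
        batch_data[b, :, :] += shifts[b, :]
    return batch_data
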
Example #5
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    experiment_dir = osp.join(args.log_dir, 'ModelNet40-eval')
    experiment_dir = experiment_dir + '_' + str(args.num_point)
    if args.sqrt:
        experiment_dir = experiment_dir + '_do-sqrt'
    if args.do_sa3:
        experiment_dir = experiment_dir + '_sa3-feats'
    if args.svm_jitter:
        experiment_dir = experiment_dir + '_svm-jitter'
        args.batch_size = (args.batch_size // 8)  # 8x augmentation
    if args.random_feats:
        experiment_dir = experiment_dir + '_random-feats'
    if args.ckpt is not None:
        experiment_dir = experiment_dir + '_' + osp.splitext(args.ckpt)[0]

    os.makedirs(experiment_dir, exist_ok=True)
    '''LOG'''
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/eval.txt' % experiment_dir)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    log_string('Experiment dir: %s' % experiment_dir)
    '''DATA LOADING'''
    log_string('Load dataset ...')
    DATA_PATH = 'data/modelnet40_normal_resampled/'
    TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                       npoint=args.num_point,
                                       split='train',
                                       normal_channel=args.normal)
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                      npoint=args.num_point,
                                      split='test',
                                      normal_channel=args.normal)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                                  batch_size=args.batch_size,
                                                  shuffle=False,
                                                  num_workers=4)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=4)

    if DEBUG:
        # ShapeNet training data
        shapenet_root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'
        SHAPENET_DATASET = PartNormalDataset(root=shapenet_root,
                                             npoints=args.num_point,
                                             split='trainval',
                                             normal_channel=args.normal)
        shapenetDataLoader = torch.utils.data.DataLoader(
            SHAPENET_DATASET,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=4)
        ACD_ROOT = '/srv/data2/mgadelha/ShapeNetACD/'
        SELFSUP_DATASET = ACDSelfSupDataset(root=ACD_ROOT,
                                            npoints=args.num_point,
                                            normal_channel=args.normal)
        selfsupDataLoader = torch.utils.data.DataLoader(
            SELFSUP_DATASET,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=4)
    '''MODEL LOADING'''
    shapenet_num_class = 50  #
    model_name = args.model
    MODEL = importlib.import_module(model_name)
    model = MODEL.get_model(shapenet_num_class, normal_channel=False).cuda()
    if not args.random_feats:
        log_string('Load ACD pre-trained model: %s' % args.log_dir)
        if args.ckpt is None:
            checkpoint = torch.load(
                str(args.log_dir) + '/checkpoints/best_model.pth')
        else:
            checkpoint = torch.load(
                str(args.log_dir) + '/checkpoints/' + args.ckpt)

        try:
            DATA_PARALLEL = False
            model.load_state_dict(checkpoint['model_state_dict'])
        except:
            DATA_PARALLEL = True
            model = nn.DataParallel(model)
            model.load_state_dict(checkpoint['model_state_dict'])
            model = model.module
    else:
        log_string('Using randomly initialized %s as feature extractor' %
                   model_name)

    # Extract features and save
    if not osp.exists(osp.join(experiment_dir, 'train-feats.npy')) or \
        not osp.exists(osp.join(experiment_dir, 'train-labels.txt')):

        log_string('Extract features ...')
        if args.model == 'pointnet_part_seg':
            feat_train, label_train = extract_feats_pointnet(
                model,
                trainDataLoader,
                do_sqrt=args.sqrt,
                do_global=args.do_sa3)
            feat_test, label_test = extract_feats_pointnet(
                model,
                testDataLoader,
                do_sqrt=args.sqrt,
                do_global=args.do_sa3)

        elif args.model == 'pointnet2_part_seg_msg':
            feat_train, label_train = extract_feats(
                model,
                trainDataLoader,
                do_sqrt=args.sqrt,
                do_sa3=args.do_sa3,
                do_svm_jitter=args.svm_jitter)
            feat_test, label_test = extract_feats(
                model,
                testDataLoader,
                do_sqrt=args.sqrt,
                do_sa3=args.do_sa3,
                do_svm_jitter=args.svm_jitter)

        elif args.model == 'dgcnn':
            pass
            # feat_train, label_train = extract_feats_dgcnn(model, trainDataLoader,
            #                                         do_sqrt=args.sqrt)
            # feat_test, label_test = extract_feats_dgcnn(model, testDataLoader,
            #                                         do_sqrt=args.sqrt)
        elif args.model == 'dgcnn_seg':
            feat_train, label_train = extract_feats_dgcnn(model,
                                                          trainDataLoader,
                                                          do_sqrt=args.sqrt)
            feat_test, label_test = extract_feats_dgcnn(model,
                                                        testDataLoader,
                                                        do_sqrt=args.sqrt)
        else:
            raise ValueError

        np.save(osp.join(experiment_dir, 'train-feats.npy'), feat_train)
        np.savetxt(osp.join(experiment_dir, 'train-labels.txt'), label_train)
        np.save(osp.join(experiment_dir, 'test-feats.npy'), feat_test)
        np.savetxt(osp.join(experiment_dir, 'test-labels.txt'), label_test)

    else:
        log_string('Loading pre-trained features')
        feat_train = np.load(osp.join(experiment_dir, 'train-feats.npy'))
        label_train = np.loadtxt(osp.join(experiment_dir, 'train-labels.txt'))
        feat_test = np.load(osp.join(experiment_dir, 'test-feats.npy'))
        label_test = np.loadtxt(osp.join(experiment_dir, 'test-labels.txt'))

    # Train linear SVM (one-vs-rest) on features

    # Train+test SVM on validation *or* test set
    log_string('Training linear SVM ...')
    if args.val_svm:
        log_string('Total data: %d samples, %d features' % feat_train.shape)
        val_acc, _, _ = train_val_svm(feat_train,
                                      label_train,
                                      svm_c=args.svm_c)
        log_string('Validation Accuracy: %f' % val_acc)
    else:
        # SVM training on *all* training data
        log_string('Training data: %d samples, %d features' % feat_train.shape)
        t_0 = time.time()
        if args.cross_val_svm:
            classifier, best_C, best_score = cross_val_svm(
                feat_train, label_train)
        else:
            classifier = LinearSVC(random_state=123,
                                   multi_class='ovr',
                                   C=args.svm_c,
                                   dual=False)
            classifier.fit(feat_train, label_train)
        train_acc = classifier.score(feat_train, label_train)
        log_string('Train Accuracy: %f' % train_acc)
        t_1 = time.time()
        log_string('Time elapsed: %f' % (t_1 - t_0))
        # test performance
        test_acc = classifier.score(feat_test, label_test)
        log_string('Test Accuracy: %f' % test_acc)
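
When --cross_val_svm is set, the script above delegates the choice of C to a cross_val_svm helper that is not shown. A hedged sketch that matches the (classifier, best_C, best_score) return signature used above; the C grid and fold count are assumptions:

import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.svm import LinearSVC

def cross_val_svm(feats, labels, c_grid=(1e-3, 1e-2, 1e-1, 1.0, 10.0), n_folds=5):
    # Sweep C with k-fold cross-validation, then refit the best model on all data.
    best_c, best_score = None, -np.inf
    for c in c_grid:
        clf = LinearSVC(random_state=123, multi_class='ovr', C=c, dual=False)
        score = cross_val_score(clf, feats, labels, cv=n_folds).mean()
        if score > best_score:
            best_c, best_score = c, score
    classifier = LinearSVC(random_state=123, multi_class='ovr', C=best_c, dual=False)
    classifier.fit(feats, labels)
    return classifier, best_c, best_score
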
Example #6
def main():

    # =================
    # load the ShapeNet data
    # ==================
    TEST_DATASET = PartNormalDataset(npoints=2048,
                                     split='test',
                                     normalize=False,
                                     jitter=False)

    l_point = []
    l_label = []
    l_part = []
    i = 0
    for point, label, part, _ in TEST_DATASET:
        l_point.append(point)
        l_label.append(label)
        l_part.append(part)
        print(label, end=',')
    l_point = np.array(l_point)
    l_label = np.array(l_label)  # the labels are essentially left untouched here
    l_part = np.array(l_part)

    # ====================
    # load the ModelNet data
    # ==========================
    #     datapath = './data/ModelNet/'
    #     train_data, train_label, test_data, test_label = load_data(datapath, classification=True)
    #     l_point = np.array(test_data)
    #     l_label = np.array(test_label)

    ch = 1
    ch_all = 1
    # --------------------------
    # test the different data combinations:
    # 1=org-clf             out:feature+label
    # 2=org-part-clf            feature+label+part_sta
    # 3=org-partseg-clf         feature+label+part_sta
    if ch == 1:
        print('Processing original point clouds...')
        testDataset = toDataset(l_point)
        fts = ex_clf.main(testDataset)  # expects (point, label) pairs as input
        print('Done.')

        temp_dict = {}  # feature, label, part_sta
        temp_dict['feature'] = fts
        temp_dict['label'] = l_label.reshape(1, -1)
        savemat('org_shapeNet_test.mat', temp_dict)
        print('org_clf.mat has been saved!')


#        print('Saving source files...')
#        for i,(j,k) in tqdm(enumerate(zip( l_point, l_label), 0), total=len(l_label)):
#            fp = os.path.join('./result/shapeNet/', '%04.d'%i+'_'+'%02.d'%k+'.txt')
#            fo = open(fp, 'w')
#            np.savetxt(fo, np.array(j).astype(np.float32), fmt='%.6f')
#            fo.close()
#        print('Source files saved')

    elif ch == 2:
        print('Processing parts...')
        part_all, part_sta = separate(l_point, l_part)
        temp_part = part_all
        part_all = point_2048(part_all)
        part_all = toDataset(part_all)

        aa = ex_clf.main(part_all)  # expects (point, label) pairs as input
        print('Done!')
        temp_dict = {}  # feature, label, part_sta
        temp_dict['feature'] = aa
        temp_dict['label'] = l_label.reshape(1, -1)
        temp_dict['part_sta'] = part_sta
        # savemat('part_clf_shape.mat',temp_dict)
        savemat('part_m.mat', temp_dict)
        print('part_clf.mat has been saved!')

        print('Saving part files...')
        index = 0
        for it1 in range(len(part_sta)):
            for it2 in range(part_sta[it1]):
                # it1, label[it1], it2 == index, class label, part index

                fp = os.path.join(
                    './result/part', '%04.d' % it1 + '_' +
                    '%02.d' % l_label[it1, 0] + '_' + '%01.d' % it2 + '.txt')
                fo = open(fp, 'w')
                np.savetxt(fo,
                           np.array(temp_part[index]).astype(np.float32),
                           fmt='%.6f')
                fo.close()
                index += 1
        print('Part files saved')

    elif ch == 3 or ch_all == 1:
        print('Processing part segmentation...')
        part_predict = ex_partseg.main(TEST_DATASET)
        part_all, part_sta = separate(l_point, part_predict)

        part_all = point_2048(part_all)
        part_all = toDataset(part_all)
        aa = ex_clf.main(part_all)  # expects (point, label) pairs as input
        print('Done!')

        temp_dict = {}  # feature, label, part_sta
        temp_dict['feature'] = aa
        temp_dict['label'] = l_label.reshape(1, -1)
        temp_dict['part_sta'] = part_sta

        # savemat('partseg_clf_shape.mat',temp_dict)
        savemat('partseg_m.mat', temp_dict)
        print('partseg_clf.mat has been saved!')
    else:
        print('No matching data!')
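
This example relies on separate() and point_2048() helpers that are not shown. A sketch of behavior consistent with how their outputs are consumed above (a flat list of per-part point sets plus a per-shape part count, and resampling every part to 2048 points); the actual helpers may well differ:

import numpy as np

def separate(points, parts):
    # Split each shape into its parts; return all parts plus the number of parts per shape.
    part_all, part_sta = [], []
    for pts, lbls in zip(points, parts):
        labels = np.unique(lbls)
        part_sta.append(len(labels))
        for l in labels:
            part_all.append(pts[lbls == l])
    return part_all, part_sta

def point_2048(part_all):
    # Resample every part to exactly 2048 points (with replacement when a part is smaller).
    out = []
    for pts in part_all:
        idx = np.random.choice(len(pts), 2048, replace=True)
        out.append(pts[idx])
    return np.array(out)
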
Example #7
def main(args):
    os.environ[
        "CUDA_VISIBLE_DEVICES"] = args.gpu if args.multi_gpu is None else '0,1,2,3'
    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(
        str(experiment_dir) + '/' +
        str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    args = parse_args()
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(
        str(log_dir) + '/train_%s_partseg.txt' % args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------TRANING---------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)

    TRAIN_DATASET = PartNormalDataset(npoints=2048, split='trainval')
    dataloader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                             batch_size=args.batchsize,
                                             shuffle=True,
                                             num_workers=int(args.workers))
    TEST_DATASET = PartNormalDataset(npoints=2048, split='test')
    testdataloader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=10,
                                                 shuffle=True,
                                                 num_workers=int(args.workers))
    print("The number of training data is:", len(TRAIN_DATASET))
    logger.info("The number of training data is:%d", len(TRAIN_DATASET))
    print("The number of test data is:", len(TEST_DATASET))
    logger.info("The number of test data is:%d", len(TEST_DATASET))
    num_classes = 16
    num_part = 50
    blue = lambda x: '\033[94m' + x + '\033[0m'
    model = PointNet2PartSeg_msg_one_hot(
        num_part) if args.model_name == 'pointnet2' else PointNetDenseCls(
            cat_num=num_classes, part_num=num_part)

    if args.pretrain is not None:
        model.load_state_dict(torch.load(args.pretrain))
        print('load model %s' % args.pretrain)
        logger.info('load model %s' % args.pretrain)
    else:
        print('Training from scratch')
        logger.info('Training from scratch')
    pretrain = args.pretrain
    init_epoch = int(pretrain[-14:-11]) if args.pretrain is not None else 0

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.5)
    '''GPU selection and multi-GPU'''
    if args.multi_gpu is not None:
        device_ids = [int(x) for x in args.multi_gpu.split(',')]
        torch.backends.cudnn.benchmark = True
        model.cuda(device_ids[0])
        model = torch.nn.DataParallel(model, device_ids=device_ids)
    else:
        model.cuda()
    criterion = PointNetLoss()
    LEARNING_RATE_CLIP = 1e-5

    history = defaultdict(lambda: list())
    best_acc = 0
    best_class_avg_iou = 0
    best_inctance_avg_iou = 0

    for epoch in range(init_epoch, args.epoch):
        scheduler.step()
        lr = max(optimizer.param_groups[0]['lr'], LEARNING_RATE_CLIP)
        print('Learning rate:%f' % lr)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        for i, data in tqdm(enumerate(dataloader, 0),
                            total=len(dataloader),
                            smoothing=0.9):
            points, label, target, norm_plt = data
            points, label, target = Variable(points.float()), Variable(
                label.long()), Variable(target.long())
            points = points.transpose(2, 1)
            norm_plt = norm_plt.transpose(2, 1)
            points, label, target, norm_plt = points.cuda(), label.squeeze(
            ).cuda(), target.cuda(), norm_plt.cuda()
            optimizer.zero_grad()
            model = model.train()
            if args.model_name == 'pointnet':
                labels_pred, seg_pred, trans_feat = model(
                    points, to_categorical(label, 16))
                seg_pred = seg_pred.contiguous().view(-1, num_part)
                target = target.view(-1, 1)[:, 0]
                loss, seg_loss, label_loss = criterion(labels_pred, label,
                                                       seg_pred, target,
                                                       trans_feat)
            else:
                seg_pred = model(points, norm_plt, to_categorical(label, 16))
                seg_pred = seg_pred.contiguous().view(-1, num_part)
                target = target.view(-1, 1)[:, 0]
                loss = F.nll_loss(seg_pred, target)

            history['loss'].append(loss.cpu().data.numpy())
            loss.backward()
            optimizer.step()

        forpointnet2 = args.model_name == 'pointnet2'
        test_metrics, test_hist_acc, cat_mean_iou = test_partseg(
            model, testdataloader, seg_label_to_cat, 50, forpointnet2)

        print(
            'Epoch %d %s accuracy: %f  Class avg mIOU: %f   Instance avg mIOU: %f'
            %
            (epoch, blue('test'), test_metrics['accuracy'],
             test_metrics['class_avg_iou'], test_metrics['inctance_avg_iou']))

        logger.info(
            'Epoch %d %s Accuracy: %f  Class avg mIOU: %f   Instance avg mIOU: %f'
            %
            (epoch, blue('test'), test_metrics['accuracy'],
             test_metrics['class_avg_iou'], test_metrics['inctance_avg_iou']))
        if test_metrics['accuracy'] > best_acc:
            best_acc = test_metrics['accuracy']
            torch.save(
                model.state_dict(), '%s/%s_%.3d_%.4f.pth' %
                (checkpoints_dir, args.model_name, epoch, best_acc))
            logger.info(cat_mean_iou)
            logger.info('Save model..')
            print('Save model..')
            print(cat_mean_iou)
        if test_metrics['class_avg_iou'] > best_class_avg_iou:
            best_class_avg_iou = test_metrics['class_avg_iou']
        if test_metrics['inctance_avg_iou'] > best_inctance_avg_iou:
            best_inctance_avg_iou = test_metrics['inctance_avg_iou']
        print('Best accuracy is: %.5f' % best_acc)
        logger.info('Best accuracy is: %.5f' % best_acc)
        print('Best class avg mIOU is: %.5f' % best_class_avg_iou)
        logger.info('Best class avg mIOU is: %.5f' % best_class_avg_iou)
        print('Best instance avg mIOU is: %.5f' % best_inctance_avg_iou)
        logger.info('Best instance avg mIOU is: %.5f' % best_inctance_avg_iou)
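
The PointNet branch above expects criterion = PointNetLoss() to accept (labels_pred, label, seg_pred, target, trans_feat) and return (loss, seg_loss, label_loss). A hedged sketch of such a combined loss, assuming trans_feat is the (B, K, K) feature-transform matrix: per-point segmentation NLL, per-shape classification NLL, and an orthogonality regularizer; the weighting factors are assumptions:

import torch
import torch.nn.functional as F

class PointNetLoss(torch.nn.Module):
    def __init__(self, label_weight=1.0, mat_diff_scale=0.001):
        super().__init__()
        self.label_weight = label_weight
        self.mat_diff_scale = mat_diff_scale

    def forward(self, labels_pred, label, seg_pred, target, trans_feat):
        seg_loss = F.nll_loss(seg_pred, target)        # per-point part classification
        label_loss = F.nll_loss(labels_pred, label)    # per-shape category classification
        # Encourage the predicted feature transform to stay close to orthogonal.
        d = trans_feat.size(1)
        identity = torch.eye(d, device=trans_feat.device).unsqueeze(0)
        mat_diff = torch.bmm(trans_feat, trans_feat.transpose(2, 1)) - identity
        mat_diff_loss = torch.mean(torch.norm(mat_diff, dim=(1, 2)))
        loss = seg_loss + self.label_weight * label_loss + self.mat_diff_scale * mat_diff_loss
        return loss, seg_loss, label_loss
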
Example #8
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''CUDA ENV SETTINGS'''
    if args.gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    if args.cudnn_off:
        torch.backends.cudnn.enabled = False  # needed on gypsum!

    # --------------------------------------------------------------------------
    '''CREATE DIR'''
    # --------------------------------------------------------------------------
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('pretrain_part_seg')
    experiment_dir.mkdir(exist_ok=True)
    dir_name = args.model + '_ShapeNet' + \
                '_k-%d_seed-%d_lr-%.6f_lr-step-%d_lr-decay-%.2f_wt-decay-%.6f_l2norm-%d' \
                % ( args.k_shot, args.seed, args.learning_rate,
                    args.step_size, args.lr_decay, args.decay_rate,
                    int(args.l2_norm) )
    if args.normal:
        dir_name = dir_name + '_normals'
    if args.selfsup:
        dir_name = dir_name + 'selfsup-%s_selfsup_margin-%.2f_lambda-%.2f' \
                    % (args.ss_dataset, args.margin, args.lmbda)
    if args.rotation_z:
        dir_name = dir_name + '_rotation-z'

    if args.rotation_z_45:
        dir_name = dir_name + '_rotation-z-45'

    if args.random_anisotropic_scale:
        dir_name = dir_name + '_aniso-scale'

    experiment_dir = experiment_dir.joinpath(dir_name)

    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    # --------------------------------------------------------------------------
    '''LOG'''
    # --------------------------------------------------------------------------
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    configure(log_dir)  # tensorboard logdir
    log_string('OUTPUT DIR: %s' % experiment_dir)

    # --------------------------------------------------------------------------
    '''DATA LOADERS'''
    # --------------------------------------------------------------------------
    root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'

    TRAIN_DATASET = PartNormalDataset(root=root,
                                      npoints=args.npoint,
                                      split='trainval',
                                      normal_channel=args.normal,
                                      k_shot=args.k_shot)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=4)
    trainDataIterator = iter(trainDataLoader)

    TEST_DATASET = PartNormalDataset(root=root,
                                     npoints=args.npoint,
                                     split='test',
                                     normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=4)
    log_string("The number of training data is: %d" % len(TRAIN_DATASET))
    log_string("The number of test data is: %d" % len(TEST_DATASET))
    num_classes = 16
    num_part = 50
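    # ShapeNet-Part: 16 shape categories with 50 part labels in total.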

    if args.selfsup:
        log_string('Use self-supervision - alternate batches')
        if not args.retain_overlaps:
            log_string(
                '\tRemove overlaps between labeled and self-sup datasets')
            labeled_fns = list(itertools.chain(*TEST_DATASET.meta.values())) \
                            + list(itertools.chain(*TRAIN_DATASET.meta.values()))
        else:
            log_string('\tUse all files in self-sup dataset')
            labeled_fns = []

        if args.ss_dataset == 'dummy':
            log_string(
                'Using "dummy" self-supervision dataset (rest of labeled ShapeNetSeg)'
            )
            SELFSUP_DATASET = SelfSupPartNormalDataset(
                root=root,
                npoints=args.npoint,
                split='trainval',
                normal_channel=args.normal,
                k_shot=args.n_cls_selfsup,
                labeled_fns=labeled_fns)
        elif args.ss_dataset == 'acd':
            log_string('Using "ACD" self-supervision dataset (ShapeNet Seg)')
            ACD_ROOT = args.ss_path
            SELFSUP_DATASET = ACDSelfSupDataset(root=ACD_ROOT,
                                                npoints=args.npoint,
                                                normal_channel=args.normal,
                                                k_shot=args.n_cls_selfsup,
                                                exclude_fns=labeled_fns,
                                                use_val=True)
            log_string('\t %d samples' % len(SELFSUP_DATASET))
            selfsup_train_fns = list(
                itertools.chain(*SELFSUP_DATASET.meta.values()))
            log_string('Val dataset for self-sup')
            SELFSUP_VAL = ACDSelfSupDataset(root=ACD_ROOT,
                                            npoints=args.npoint,
                                            normal_channel=args.normal,
                                            class_choice='Airplane',
                                            k_shot=args.n_cls_selfsup,
                                            use_val=False,
                                            exclude_fns=selfsup_train_fns +
                                            labeled_fns)
            log_string('\t %d samples' % len(SELFSUP_VAL))

        selfsupDataLoader = torch.utils.data.DataLoader(
            SELFSUP_DATASET,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=4)
        selfsupIterator = iter(selfsupDataLoader)
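        # NOTE: SELFSUP_VAL is only constructed in the 'acd' branch above, so the
        # validation loader below assumes --ss_dataset is 'acd'.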
        selfsupValLoader = torch.utils.data.DataLoader(
            SELFSUP_VAL,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=4)

    log_string('Load ModelNet dataset for validation')
    DATA_PATH = 'data/modelnet40_normal_resampled/'
    MN_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                    npoint=args.npoint,
                                    split='train',
                                    normal_channel=args.normal)
    modelnetLoader = torch.utils.data.DataLoader(MN_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 num_workers=4)

    # --------------------------------------------------------------------------
    '''MODEL LOADING'''
    # --------------------------------------------------------------------------
    MODEL = importlib.import_module(args.model)
    shutil.copy('models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('models/pointnet_util.py', str(experiment_dir))

    if args.model == 'dgcnn':
        classifier = MODEL.get_model(num_part,
                                     normal_channel=args.normal,
                                     k=args.dgcnn_k).cuda()
    else:
        classifier = MODEL.get_model(num_part,
                                     normal_channel=args.normal).cuda()

    criterion = MODEL.get_loss().cuda()

    if args.selfsup:
        selfsupCriterion = MODEL.get_selfsup_loss(margin=args.margin).cuda()
        log_string("The number of self-sup data is: %d" % len(SELFSUP_DATASET))

    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)
        elif classname.find('Linear') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)

    try:
        checkpoint = torch.load(
            str(experiment_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrain model')
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0
        classifier = classifier.apply(weights_init)

    # --------------------------------------------------------------------------
    '''OPTIMIZER SETTINGS'''
    # --------------------------------------------------------------------------
    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=args.learning_rate,
                                    momentum=0.9)

    def bn_momentum_adjust(m, momentum):
        if isinstance(m, torch.nn.BatchNorm2d) or isinstance(
                m, torch.nn.BatchNorm1d):
            m.momentum = momentum

    # LEARNING_RATE_CLIP = 1e-5
    LEARNING_RATE_CLIP = args.lr_clip
    MOMENTUM_ORIGINAL = 0.1
    MOMENTUM_DECAY = 0.5
    MOMENTUM_DECAY_STEP = args.step_size
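    # BN momentum schedule: starts at MOMENTUM_ORIGINAL (0.1) and is halved
    # every step_size epochs, with a floor of 0.01 applied in the loop below.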

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        classifier = nn.DataParallel(classifier)

    # --------------------------------------------------------------------------
    '''TRAINING LOOP'''
    # --------------------------------------------------------------------------
    best_val_loss = 99999
    global_epoch = 0

    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))
        '''Adjust learning rate and BN momentum'''
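        # Exponential LR decay: lr = learning_rate * lr_decay ** (epoch // step_size),
        # clipped from below at LEARNING_RATE_CLIP.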
        lr = max(
            args.learning_rate * (args.lr_decay**(epoch // args.step_size)),
            LEARNING_RATE_CLIP)
        log_string('Learning rate:%f' % lr)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        mean_loss = []
        momentum = MOMENTUM_ORIGINAL * (MOMENTUM_DECAY
                                        **(epoch // MOMENTUM_DECAY_STEP))
        if momentum < 0.01:
            momentum = 0.01
        print('BN momentum updated to: %f' % momentum)
        classifier = classifier.apply(
            lambda x: bn_momentum_adjust(x, momentum))
        '''learning one epoch'''
        num_iters = len(
            selfsupDataLoader)  # calc an epoch based on self-sup dataset

        for i in tqdm(list(range(num_iters)), total=num_iters, smoothing=0.9):
            '''applying self-supervised contrastive (pairwise) loss'''
            try:
                data_ss = next(selfsupIterator)
            except StopIteration:
                # reached end of this dataloader
                selfsupIterator = iter(selfsupDataLoader)
                data_ss = next(selfsupIterator)

            # DEBUG
            if DEBUG and i > 10:
                break

            points, label, target = data_ss  # (points: bs x 3 x n_pts, label: bs x 1, target: bs x n_pts)
            points = points.data.numpy()
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :,
                                                                         0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])

            if args.random_anisotropic_scale:
                points[:, :,
                       0:3] = provider.random_anisotropic_scale_point_cloud(
                           points[:, :, 0:3], scale_low=0.8, scale_high=1.25)

            # pts = torch.Tensor(points)
            # pts = pts.transpose(2,1)
            # np.save(osp.join(experiment_dir, 'pts.npy'), pts.cpu().numpy())

            if args.rotation_z:
                points[:, :, 0:3] = provider.rotate_point_cloud_y(points[:, :,
                                                                         0:3])

            if args.rotation_z_45:
                points[:, :,
                       0:3] = provider.rotate_point_cloud_y_pi4(points[:, :,
                                                                       0:3])

            points = torch.Tensor(points)
            points, label, target = points.float().cuda(), label.long().cuda(
            ), target.long().cuda()
            points = points.transpose(2, 1)
            # np.save(osp.join(experiment_dir, 'pts_z-rot.npy'), points.cpu().numpy())
            # np.save(osp.join(experiment_dir, 'target.npy'), target.cpu().numpy())

            # for self-sup category label is always unknown, so always zeros:
            category_label = torch.zeros([label.shape[0], 1,
                                          num_classes]).cuda()

            optimizer.zero_grad()
            classifier = classifier.train()

            _, _, feat = classifier(points,
                                    category_label)  # feat: [bs x ndim x npts]

            ss_loss = selfsupCriterion(feat, target) * args.lmbda
            ss_loss.backward()
            optimizer.step()
            mean_loss.append(ss_loss.item())
            log_value('selfsup_loss_iter', ss_loss.data,
                      epoch * num_iters + i + 1)

        train_loss_epoch = np.mean(mean_loss)
        log_string('Self-sup loss is: %.5f' % train_loss_epoch)
        log_value('selfsup_loss_epoch', train_loss_epoch, epoch)

        # # # DEBUG:
        # with torch.no_grad():
        #     sa3_wt = classifier.sa3.mlp_convs[2].weight.mean()
        #     log_string('SA3 avg wt is: %.5f' % sa3_wt.item())
        #     log_value('sa3_conv2_wt', sa3_wt.item(), epoch)
        '''validation after one epoch'''
        log_string('Validation: ACD on ShapeNet')
        with torch.no_grad():
            total_val_loss = 0
            for batch_id, (points, label,
                           target) in tqdm(enumerate(selfsupValLoader),
                                           total=len(selfsupValLoader),
                                           smoothing=0.9):
                if DEBUG and batch_id > 10:
                    break
                cur_batch_size, NUM_POINT, _ = points.size()
                points, label, target = points.float().cuda(), label.long(
                ).cuda(), target.long().cuda()
                points = points.transpose(2, 1)
                category_label = torch.zeros([label.shape[0], 1,
                                              num_classes]).cuda()
                classifier = classifier.eval()
                _, _, feat = classifier(points, category_label)
                val_loss = selfsupCriterion(feat, target)
                total_val_loss += val_loss.data.cpu().item()
            avg_val_loss = total_val_loss / len(selfsupValLoader)
        log_value('selfsup_loss_val', avg_val_loss, epoch)
        '''(optional) validation on ModelNet40'''
        if args.modelnet_val:
            log_string('Validation: SVM on ModelNet40')
            with torch.no_grad():
                log_string('Extract features on ModelNet40')
                if args.model == 'pointnet_part_seg':
                    feat_train, label_train = extract_feats_pointnet(
                        classifier, modelnetLoader, subset=0.5)
                elif args.model == 'pointnet2_part_seg_msg':
                    feat_train, label_train = extract_feats(classifier,
                                                            modelnetLoader,
                                                            subset=0.5)
                else:
                    raise ValueError
                log_string('Training data: %d samples, %d features' %
                           feat_train.shape)
                start_time = time.time()
                log_string('Training SVM on ModelNet40')
                svm, best_C, best_score = cross_val_svm(feat_train,
                                                        label_train,
                                                        c_min=100,
                                                        c_max=501,
                                                        c_step=20,
                                                        verbose=False)
                elapsed_time = time.time() - start_time
            log_string('ModelNet val Accuracy: %f (elapsed: %f seconds)' %
                       (best_score, elapsed_time))
            log_value('modelnet_val', best_score, epoch)

        # save a checkpoint every 5 epochs
        if epoch % 5 == 0:
            savepath = str(checkpoints_dir) + ('/model_%03d.pth' % epoch)
            log_string('Saving model at %s' % savepath)
            state = {
                'epoch': epoch,
                'selfsup_loss': ss_loss.data,
                'val_loss': avg_val_loss,
                'model_state_dict': classifier.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, savepath)
            log_string('Saved model.')

        # save best model
        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            savepath = str(checkpoints_dir) + '/best_model.pth'
            log_string('Saving best model at %s' % savepath)
            state = {
                'epoch': epoch,
                'selfsup_loss': ss_loss.data,
                'val_loss': avg_val_loss,
                'model_state_dict': classifier.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, savepath)
            log_string('Saved model.')

        log_value('train_lr', lr, epoch)
        log_value('train_bn_momentum', momentum, epoch)

        log_string('Epoch %d Self-sup train loss: %f  Val loss: %f ' %
                   (epoch + 1, train_loss_epoch, avg_val_loss))

        global_epoch += 1
Example No. 9
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    '''CUDA ENV SETTINGS'''
    if args.gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    if args.cudnn_off:
        torch.backends.cudnn.enabled = False  # needed on gypsum!

    # --------------------------------------------------------------------------
    '''CREATE DIR'''
    # --------------------------------------------------------------------------
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('part_seg_shapenet')
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        # if args.k_shot > 0:
        dir_name = args.model + '_ShapeNet' + \
                    '_k-%d_seed-%d_lr-%.6f_lr-step-%d_lr-decay-%.2f_wt-decay-%.6f_l2norm-%d' \
                    % ( args.k_shot, args.seed, args.learning_rate,
                        args.step_size, args.lr_decay, args.decay_rate,
                        int(args.l2_norm) )
        if args.normal:
            dir_name = dir_name + '_normals'
        if args.category:
            dir_name = dir_name + '_category-label'
        if args.selfsup:
            dir_name = dir_name + '_selfsup-%s_margin-%.2f_lambda-%.2f' \
                        % (args.ss_dataset, args.margin, args.lmbda)
        if args.anneal_lambda:
            dir_name = dir_name + '_anneal-lambda_step-%d_rate-%.2f' \
                        % (args.anneal_step, args.anneal_rate)

        experiment_dir = experiment_dir.joinpath(dir_name)
        # else:
        #     experiment_dir = experiment_dir.joinpath(args.log_dir)

    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    # --------------------------------------------------------------------------
    '''LOG'''
    # --------------------------------------------------------------------------
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETERS ...')
    log_string(args)
    configure(log_dir)  # tensorboard logdir

    # --------------------------------------------------------------------------
    '''DATA LOADERS'''
    # --------------------------------------------------------------------------
    root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'

    TRAIN_DATASET = PartNormalDataset(root=root,
                                      npoints=args.npoint,
                                      split='trainval',
                                      normal_channel=args.normal,
                                      k_shot=args.k_shot)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=4)
    trainDataIterator = iter(trainDataLoader)

    TEST_DATASET = PartNormalDataset(root=root,
                                     npoints=args.npoint,
                                     split='test',
                                     normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=4)

    log_string("The number of training data is: %d" % len(TRAIN_DATASET))
    log_string("The number of test data is: %d" % len(TEST_DATASET))
    num_classes = args.num_classes
    num_part = args.num_parts

    if args.selfsup:
        log_string('Use self-supervision - alternate batches')
        if not args.retain_overlaps:
            log_string(
                '\tRemove overlaps between labeled and self-sup datasets')
            labeled_fns = list(itertools.chain(*TEST_DATASET.meta.values())) \
                            + list(itertools.chain(*TRAIN_DATASET.meta.values()))
        else:
            log_string('\tUse all files in self-sup dataset')
            labeled_fns = []
        if args.ss_dataset == 'dummy':
            log_string(
                'Using "dummy" self-supervision dataset (rest of labeled ShapeNetSeg)'
            )
            SELFSUP_DATASET = SelfSupPartNormalDataset(
                root=root,
                npoints=args.npoint,
                split='trainval',
                normal_channel=args.normal,
                k_shot=args.n_cls_selfsup,
                labeled_fns=labeled_fns)
        elif args.ss_dataset == 'acd':
            log_string('Using "ACD" self-supervision dataset (ShapeNet Seg)')
            ACD_ROOT = args.ss_path
            SELFSUP_DATASET = ACDSelfSupDataset(root=ACD_ROOT,
                                                npoints=args.npoint,
                                                normal_channel=args.normal,
                                                k_shot=args.n_cls_selfsup,
                                                exclude_fns=labeled_fns)

        selfsupDataLoader = torch.utils.data.DataLoader(
            SELFSUP_DATASET,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=4)
        selfsupIterator = iter(selfsupDataLoader)

    # --------------------------------------------------------------------------
    '''MODEL LOADING'''
    # --------------------------------------------------------------------------
    MODEL = importlib.import_module(args.model)
    shutil.copy('models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('models/pointnet_util.py', str(experiment_dir))

    if 'dgcnn' in args.model:
        print('DGCNN params')
        classifier = MODEL.get_model(num_part,
                                     normal_channel=args.normal,
                                     k=args.dgcnn_k).cuda()
    else:
        classifier = MODEL.get_model(num_part,
                                     normal_channel=args.normal).cuda()

    criterion = MODEL.get_loss().cuda()

    if args.selfsup:
        selfsupCriterion = MODEL.get_selfsup_loss(margin=args.margin).cuda()
        log_string("The number of self-sup data is: %d" % len(SELFSUP_DATASET))

    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)
        elif classname.find('Linear') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)

    if args.pretrained is None:
        # Default: load saved checkpoint from experiment_dir or start from scratch
        try:
            checkpoint = torch.load(
                str(experiment_dir) + '/checkpoints/best_model.pth')
            start_epoch = checkpoint['epoch']
            classifier.load_state_dict(checkpoint['model_state_dict'])
            log_string('Use pretrained model from checkpoints')
        except Exception:
            log_string('No existing model, starting training from scratch...')
            start_epoch = 0
            classifier = classifier.apply(weights_init)
    else:
        # Path to a pre-trained model is provided (self-sup)
        log_string('Loading pretrained model from %s' % args.pretrained)
        start_epoch = 0
        ckpt = torch.load(args.pretrained)
        classifier.load_state_dict(ckpt['model_state_dict'])

    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=args.learning_rate,
                                    momentum=0.9)

    def bn_momentum_adjust(m, momentum):
        if isinstance(m, torch.nn.BatchNorm2d) or isinstance(
                m, torch.nn.BatchNorm1d):
            m.momentum = momentum

    LEARNING_RATE_CLIP = 1e-5
    MOMENTUM_ORIGINAL = 0.1
    MOMENTUM_DECAY = 0.5
    MOMENTUM_DECAY_STEP = args.step_size

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        classifier = nn.DataParallel(classifier)

    # --------------------------------------------------------------------------
    ''' MODEL TRAINING '''
    # --------------------------------------------------------------------------
    best_acc = 0
    global_epoch = 0

    if args.pretrained is not None:
        if args.init_cls:
            # Initialize the last layer of loaded model using logistic regression
            classifier = train_init_class(classifier, criterion,
                                          trainDataLoader, num_classes,
                                          num_part)

    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))
        '''Adjust learning rate and BN momentum'''
        lr = max(
            args.learning_rate * (args.lr_decay**(epoch // args.step_size)),
            LEARNING_RATE_CLIP)
        log_string('Learning rate:%f' % lr)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        mean_correct = []
        momentum = MOMENTUM_ORIGINAL * (MOMENTUM_DECAY
                                        **(epoch // MOMENTUM_DECAY_STEP))
        if momentum < 0.01:
            momentum = 0.01
        print('BN momentum updated to: %f' % momentum)
        classifier = classifier.apply(
            lambda x: bn_momentum_adjust(x, momentum))
        ''' Adjust (anneal) self-sup lambda '''
        if args.anneal_lambda:
            lmbda = args.lmbda * (args.anneal_rate
                                  **(epoch // args.anneal_step))
        else:
            lmbda = args.lmbda
        '''learning one epoch'''
        num_iters = len(trainDataLoader)  # num iters in an epoch
        if args.selfsup:
            num_iters = len(
                selfsupDataLoader)  # calc an epoch based on self-sup dataset

        for i in tqdm(list(range(num_iters)),
                      total=num_iters,
                      smoothing=0.9,
                      desc='Training'):
            # ------------------------------------------------------------------
            #   SUPERVISED LOSS
            # ------------------------------------------------------------------
            try:
                data = next(trainDataIterator)
            except StopIteration:
                # reached end of this dataloader
                trainDataIterator = iter(trainDataLoader)
                data = next(trainDataIterator)

            points, label, target = data
            cur_batch_size, NUM_POINT, _ = points.size()
            points = points.data.numpy()
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :,
                                                                         0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            points = torch.Tensor(points)
            points, label, target = points.float().cuda(), label.long().cuda(
            ), target.long().cuda()
            points = points.transpose(2, 1)

            if args.category:
                category_label = to_categorical(label,
                                                num_classes).contiguous()
            else:
                category_label = torch.zeros([label.shape[0], 1,
                                              num_classes]).cuda()
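            # category_label above is the shape-category conditioning input:
            # one-hot when --category is set, an all-zeros placeholder otherwise.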

            optimizer.zero_grad()
            classifier = classifier.train()
            '''applying supervised cross-entropy loss'''
            seg_pred, trans_feat, feat = classifier(points.contiguous(),
                                                    category_label)
            seg_pred = seg_pred.contiguous().view(-1, num_part)
            target = target.view(-1, 1)[:, 0]
            pred_choice = seg_pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()

            mean_correct.append(correct.item() / (cur_batch_size * NUM_POINT))
            loss = criterion(seg_pred, target, trans_feat)

            loss.backward()
            optimizer.step()

            # ------------------------------------------------------------------
            #   SELF-SUPERVISED LOSS
            # ------------------------------------------------------------------
            if args.selfsup:
                try:
                    data_ss = next(selfsupIterator)
                except StopIteration:
                    # reached end of this dataloader
                    selfsupIterator = iter(selfsupDataLoader)
                    data_ss = next(selfsupIterator)

                points, label, target = data_ss
                points = points.data.numpy()
                points[:, :,
                       0:3] = provider.random_scale_point_cloud(points[:, :,
                                                                       0:3])
                points[:, :, 0:3] = provider.shift_point_cloud(points[:, :,
                                                                      0:3])
                points = torch.Tensor(points)
                points, label, target = points.float().cuda(), label.long(
                ).cuda(), target.long().cuda()
                points = points.transpose(2, 1)
                # for self-sup category label is always unknown, so always zeros:
                category_label = torch.zeros([label.shape[0], 1,
                                              num_classes]).cuda()
                if args.normal:
                    # put dummy cols of zeros for normals in self-sup data
                    cur_batch_size, _, NUM_POINT = points.size()
                    points = points[:, 0:3, :]
                    points = torch.cat([
                        points,
                        torch.zeros([cur_batch_size, 3, NUM_POINT]).cuda()
                    ], 1)

                optimizer.zero_grad()
                classifier = classifier.train()
                '''applying self-supervised contrastive (pairwise) loss'''
                _, _, feat = classifier(points, category_label)
                ss_loss = selfsupCriterion(feat, target) * lmbda
                ss_loss.backward()
                optimizer.step()

        # ----------------------------------------------------------------------
        #   Logging metrics after one epoch
        # ----------------------------------------------------------------------
        train_instance_acc = np.mean(mean_correct)
        log_string('Train accuracy is: %.5f' % train_instance_acc)
        log_string('Supervised loss is: %.5f' % loss.data)
        log_value('train_loss', loss.data, epoch)

        if args.selfsup:
            log_string('Self-sup loss is: %.5f' % ss_loss.data)
            log_value('selfsup_loss', ss_loss.data, epoch)

        # save every epoch
        savepath = str(checkpoints_dir) + ('/model_%03d.pth' % epoch)
        log_string('Saving model at %s' % savepath)
        state = {
            'epoch': epoch,
            'train_acc': train_instance_acc,
            'model_state_dict': classifier.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
        }
        torch.save(state, savepath)
        # log_string('Saved model.')
        log_value('train_acc', train_instance_acc, epoch)
        log_value('train_lr', lr, epoch)
        log_value('train_bn_momentum', momentum, epoch)
        log_value('selfsup_lambda', lmbda, epoch)

        global_epoch += 1

    # ----------------------------------------------------------------------
    #   Evaluation on test-set after completing training epochs
    # ----------------------------------------------------------------------
    with torch.no_grad():
        test_metrics = {}
        total_correct = 0
        total_seen = 0
        total_seen_class = [0 for _ in range(num_part)]
        total_correct_class = [0 for _ in range(num_part)]
        shape_ious = {cat: [] for cat in seg_classes.keys()}
        seg_label_to_cat = {}  # {0:Airplane, 1:Airplane, ...49:Table}
        for cat in seg_classes.keys():
            for label in seg_classes[cat]:
                seg_label_to_cat[label] = cat

        for batch_id, (points, label,
                       target) in tqdm(enumerate(testDataLoader),
                                       total=len(testDataLoader),
                                       smoothing=0.9,
                                       desc='Evaluation'):
            cur_batch_size, NUM_POINT, _ = points.size()
            points, label, target = points.float().cuda(), label.long().cuda(
            ), target.long().cuda()
            points = points.transpose(2, 1)

            if args.category:
                category_label = to_categorical(label,
                                                num_classes).contiguous()
            else:
                category_label = torch.zeros([label.shape[0], 1,
                                              num_classes]).cuda()

            classifier = classifier.eval()
            seg_pred, _, _ = classifier(points, category_label)
            cur_pred_val = seg_pred.cpu().data.numpy()
            cur_pred_val_logits = cur_pred_val
            cur_pred_val = np.zeros(
                (cur_batch_size, NUM_POINT)).astype(np.int32)
            target = target.cpu().data.numpy()
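            # Restrict the argmax to the parts of each shape's ground-truth category
            # (the standard ShapeNet-Part evaluation protocol).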
            for i in range(cur_batch_size):
                cat = seg_label_to_cat[target[i, 0]]
                logits = cur_pred_val_logits[i, :, :]
                cur_pred_val[i, :] = np.argmax(logits[:, seg_classes[cat]],
                                               1) + seg_classes[cat][0]
            correct = np.sum(cur_pred_val == target)

            total_correct += correct
            total_seen += (cur_batch_size * NUM_POINT)

            for l in range(num_part):
                total_seen_class[l] += np.sum(target == l)
                total_correct_class[l] += (np.sum((cur_pred_val == l)
                                                  & (target == l)))

            for i in range(cur_batch_size):
                segp = cur_pred_val[i, :]
                segl = target[i, :]
                cat = seg_label_to_cat[segl[0]]
                part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
                for l in seg_classes[cat]:
                    if (np.sum(segl == l) == 0) and (
                            np.sum(segp == l) == 0
                    ):  # part is not present, no prediction as well
                        part_ious[l - seg_classes[cat][0]] = 1.0
                    else:
                        part_ious[l - seg_classes[cat][0]] = np.sum(
                            (segl == l) & (segp == l)) / float(
                                np.sum((segl == l) | (segp == l)))
                shape_ious[cat].append(np.mean(part_ious))
                # print('\nTest IOUS: %f' % np.mean(part_ious))

        all_shape_ious = []
        for cat in shape_ious.keys():
            for iou in shape_ious[cat]:
                all_shape_ious.append(iou)
            shape_ious[cat] = np.mean(shape_ious[cat])
        mean_shape_ious = np.mean(list(shape_ious.values()))
        test_metrics['accuracy'] = total_correct / float(total_seen)
        test_metrics['class_avg_accuracy'] = np.mean(
            np.array(total_correct_class) /
            np.array(total_seen_class, dtype=np.float64))
        for cat in sorted(shape_ious.keys()):
            log_string('eval mIoU of %s %f' %
                       (cat + ' ' * (14 - len(cat)), shape_ious[cat]))
        test_metrics['class_avg_iou'] = mean_shape_ious
        test_metrics['instance_avg_iou'] = np.mean(all_shape_ious)

    log_string(
        'Epoch %d test Accuracy: %f  Class avg mIOU: %f   Instance avg mIOU: %f'
        % (epoch + 1, test_metrics['accuracy'], test_metrics['class_avg_iou'],
           test_metrics['instance_avg_iou']))
Example No. 10
def vis(args):
    cache = _load(root)
    norm = True if args.model_name == 'pointnet' else False
    test_ds = PartNormalDataset(root, cache, npoints=2048, split='test')
    testdataloader = DataLoader(test_ds,
                                batch_size=args.batch_size,
                                shuffle=True,
                                num_workers=int(args.workers))
    log.info("The number of test data is:", len(test_ds))

    log.info('Building Model', args.model_name)
    num_classes = 16
    num_part = 50
    if args.model_name == 'pointnet':
        model = PointNetDenseCls(cat_num=num_classes, part_num=num_part)
    else:
        model = PointNet2PartSegMsg_one_hot(num_part)

    torch.backends.cudnn.benchmark = True
    model = torch.nn.DataParallel(model)
    model.cuda()
    log.debug('Using multi GPU:', args.gpu)

    if args.pretrain is None:
        log.err('No pretrain model')
        return

    log.info('Loading pretrain model...')
    checkpoint = torch.load(args.pretrain)
    model.load_state_dict(checkpoint)

    log.info('Press space to exit, press Q for next frame')
    for batch_id, (points, label, target,
                   norm_plt) in enumerate(testdataloader):
        batchsize, num_point, _ = points.size()
        points, label, target, norm_plt = points.float(), label.long(
        ), target.long(), norm_plt.float()
        points = points.transpose(2, 1)
        norm_plt = norm_plt.transpose(2, 1)
        points, label, target, norm_plt = points.cuda(), label.squeeze().cuda(
        ), target.cuda(), norm_plt.cuda()
        if args.model_name == 'pointnet':
            labels_pred, seg_pred, _ = model(points, to_categorical(label, 16))
        else:
            seg_pred = model(points, norm_plt, to_categorical(label, 16))
        pred_choice = seg_pred.max(-1)[1]
        log.info(seg_pred=seg_pred.shape, pred_choice=pred_choice.shape)

        cmap_plt = plt.cm.get_cmap("hsv", num_part)
        cmap_list = [cmap_plt(i)[:3] for i in range(num_part)]
        np.random.shuffle(cmap_list)
        cmap = np.array(cmap_list)
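        # cmap holds one randomly shuffled HSV colour per part id; it is used
        # below to colourize the predicted part labels.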

        #log.info('points',points.shape,'label',label.shape,'target',target.shape,'norm_plt',norm_plt.shape)
        for idx in range(batchsize):
            pt, gt, pred = points[idx].transpose(
                1, 0), target[idx], pred_choice[idx].transpose(-1, 0)
            # log.info('pt',pt.size(),'gt',gt.size(),'pred',pred.shape)

            gt_color = cmap[gt.cpu().numpy() - 1, :]
            pred_color = cmap[pred.cpu().numpy() - 1, :]

            point_cloud = open3d.geometry.PointCloud()
            point_cloud.points = open3d.utility.Vector3dVector(
                pt.cpu().numpy())
            point_cloud.colors = open3d.utility.Vector3dVector(pred_color)

            vis = open3d.visualization.VisualizerWithKeyCallback()
            vis.create_window()
            vis.get_render_option().background_color = np.asarray([0, 0, 0])
            vis.add_geometry(point_cloud)
            vis.register_key_callback(32, lambda vis: exit())
            vis.run()
            vis.destroy_window()
Example No. 11
def train(args):
    experiment_dir = mkdir('./experiment/')
    checkpoints_dir = mkdir('./experiment/partseg/%s/' % (args.model_name))
    cache = _load(root)

    norm = True if args.model_name == 'pointnet' else False
    npoints = 2048
    train_ds = PartNormalDataset(root,
                                 cache,
                                 npoints=npoints,
                                 split='trainval',
                                 data_augmentation=args.augment)
    dataloader = DataLoader(train_ds,
                            batch_size=args.batch_size,
                            shuffle=True,
                            num_workers=int(args.workers))

    test_ds = PartNormalDataset(root, cache, npoints=npoints, split='test')
    testdataloader = DataLoader(test_ds,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=int(args.workers))

    num_classes = 16
    num_part = 50
    log.info(len_training=len(train_ds), len_testing=len(test_ds))
    log.info(num_classes=num_classes, num_part=num_part)

    if args.model_name == 'pointnet':
        model = PointNetDenseCls(cat_num=num_classes, part_num=num_part)
    else:
        model = PointNet2PartSegMsg_one_hot(num_part)

    torch.backends.cudnn.benchmark = True
    model = torch.nn.DataParallel(model).cuda()
    log.debug('Using gpu:', args.gpu)

    if args.pretrain is not None and args.pretrain != 'None':
        log.debug('Use pretrain model...')
        model.load_state_dict(torch.load(args.pretrain))
        init_epoch = int(args.pretrain[:-4].split('-')[-1])
        log.debug('start epoch from', init_epoch)
    else:
        log.debug('Training from scratch')
        init_epoch = 0

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.5)
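    # StepLR halves the learning rate every 20 epochs; the epoch loop below also
    # clips it from below at LEARNING_RATE_CLIP.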

    history = {'loss': []}
    best_acc = 0
    best_class_avg_iou = 0
    best_inctance_avg_iou = 0
    LEARNING_RATE_CLIP = 1e-5

    # criterion = PointNetLoss()
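    # Feature-transform regularizer (PointNet): penalizes ||A A^T - I|| so the
    # predicted feature transform A stays close to an orthogonal matrix.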
    def feature_transform_regularizer(trans):
        d = trans.size()[1]
        I = torch.eye(d)[None, :, :]
        if trans.is_cuda:
            I = I.cuda()
        loss = torch.mean(
            torch.norm(torch.bmm(trans, trans.transpose(2, 1)) - I,
                       dim=(1, 2)))
        return loss

    def PointNet_Loss(labels_pred, label, seg_pred, seg, trans_feat):
        mat_diff_loss_scale = 0.001
        weight = 1
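        # With weight = 1 the (1 - weight) * label_loss term vanishes, so only the
        # segmentation loss and the feature-transform regularizer contribute.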
        seg_loss = F.nll_loss(seg_pred, seg)
        mat_diff_loss = feature_transform_regularizer(trans_feat)
        label_loss = F.nll_loss(labels_pred, label)
        loss = weight * seg_loss + (
            1 - weight) * label_loss + mat_diff_loss * mat_diff_loss_scale
        return loss, seg_loss, label_loss

    for epoch in range(init_epoch, args.epoch):
        scheduler.step()
        lr = max(optimizer.param_groups[0]['lr'], LEARNING_RATE_CLIP)
        log.info(job='partseg',
                 model=args.model_name,
                 gpu=args.gpu,
                 epoch='%d/%s' % (epoch, args.epoch),
                 lr=lr)

        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        for i, data in tqdm(enumerate(dataloader, 0),
                            total=len(dataloader),
                            smoothing=0.9):
            points, label, target, norm_plt = data
            points, label, target = points.float(), label.long(), target.long()
            points = points.transpose(2, 1)
            norm_plt = norm_plt.transpose(2, 1)
            points, label, target, norm_plt = points.cuda(), label.squeeze(
            ).cuda(), target.cuda(), norm_plt.cuda()
            optimizer.zero_grad()
            model = model.train()

            if args.model_name == 'pointnet':
                labels_pred, seg_pred, trans_feat = model(
                    points, to_categorical(label, 16))
                seg_pred = seg_pred.contiguous().view(-1, num_part)
                target = target.view(-1, 1)[:, 0]
                # loss, seg_loss, label_loss = criterion(labels_pred, label, seg_pred, target, trans_feat)
                loss, seg_loss, label_loss = PointNet_Loss(
                    labels_pred, label, seg_pred, target, trans_feat)
            else:
                seg_pred = model(points, norm_plt, to_categorical(label, 16))
                seg_pred = seg_pred.contiguous().view(-1, num_part)
                target = target.view(-1, 1)[:, 0]
                loss = F.nll_loss(seg_pred, target)

            history['loss'].append(loss.cpu().data.numpy())
            loss.backward()
            optimizer.step()

        log.debug('clear cuda cache')
        torch.cuda.empty_cache()

        test_metrics, test_hist_acc, cat_mean_iou = test_partseg(
            model.eval(),
            testdataloader,
            label_id_to_name,
            args.model_name,
            num_part,
        )

        save_model = False
        if test_metrics['accuracy'] > best_acc:
            best_acc = test_metrics['accuracy']

        if test_metrics['class_avg_iou'] > best_class_avg_iou:
            best_class_avg_iou = test_metrics['class_avg_iou']

        if test_metrics['inctance_avg_iou'] > best_inctance_avg_iou:
            best_inctance_avg_iou = test_metrics['inctance_avg_iou']
            save_model = True

        if save_model:
            fn_pth = 'partseg-%s-%.5f-%04d.pth' % (
                args.model_name, best_inctance_avg_iou, epoch)
            log.info('Save model...', fn=fn_pth)
            torch.save(model.state_dict(),
                       os.path.join(checkpoints_dir, fn_pth))
            log.info(cat_mean_iou)
        else:
            log.info('No need to save model')

        log.warn('Curr',
                 accuracy=test_metrics['accuracy'],
                 class_avg_mIOU=test_metrics['class_avg_iou'],
                 instance_avg_mIOU=test_metrics['inctance_avg_iou'])

        log.warn('Best',
                 accuracy=best_acc,
                 class_avg_mIOU=best_class_avg_iou,
                 instance_avg_mIOU=best_inctance_avg_iou)