def _augment_batch_data(self, batch_data):
    rotated_data = provider.rotate_point_cloud(batch_data)
    rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)
    jittered_data = provider.random_scale_point_cloud(rotated_data[:, :, 0:3])
    jittered_data = provider.shift_point_cloud(jittered_data)
    jittered_data = provider.jitter_point_cloud(jittered_data)
    rotated_data[:, :, 0:3] = jittered_data
    return provider.shuffle_points(rotated_data)
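Every snippet on this page relies on the provider module from the PointNet/PointNet++ codebase. For reference, here is a minimal sketch of the helpers called above, assuming the signatures used in these examples; the default parameter values follow the original provider.py but may differ between repositories.

import numpy as np


def rotate_point_cloud(batch_data):
    # Randomly rotate each (N, 3) cloud in the batch around the up (Y) axis.
    rotated = np.zeros_like(batch_data)
    for k in range(batch_data.shape[0]):
        angle = np.random.uniform() * 2 * np.pi
        c, s = np.cos(angle), np.sin(angle)
        rotation = np.array([[c, 0.0, s], [0.0, 1.0, 0.0], [-s, 0.0, c]])
        rotated[k] = batch_data[k] @ rotation
    return rotated


def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25):
    # Scale each cloud by one random factor drawn per cloud.
    scales = np.random.uniform(scale_low, scale_high, batch_data.shape[0])
    return batch_data * scales[:, None, None]


def shift_point_cloud(batch_data, shift_range=0.1):
    # Translate each cloud by a random per-cloud offset.
    shifts = np.random.uniform(-shift_range, shift_range, (batch_data.shape[0], 3))
    return batch_data + shifts[:, None, :]


def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
    # Add clipped Gaussian noise to every point.
    noise = np.clip(sigma * np.random.randn(*batch_data.shape), -clip, clip)
    return batch_data + noise


def shuffle_points(batch_data):
    # Shuffle point order within each cloud; labels are per-cloud, so this is safe.
    idx = np.arange(batch_data.shape[1])
    np.random.shuffle(idx)
    return batch_data[:, idx, :]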
Example 2
def _augment_batch_data(self, batch_data):
    if self.normal_channel:
        rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
        rotated_data = provider.rotate_perturbation_point_cloud_with_normal(rotated_data)
    else:
        rotated_data = provider.rotate_point_cloud(batch_data)
        rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)
    jittered_data = provider.random_scale_point_cloud(rotated_data[:, :, 0:3])
    jittered_data = provider.shift_point_cloud(jittered_data)
    jittered_data = provider.jitter_point_cloud(jittered_data)
    rotated_data[:, :, 0:3] = jittered_data
    return provider.shuffle_points(rotated_data)
Example 3
def _augment_batch_data(self, batch_data):
    if self.normal_channel:
        rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
        rotated_data = provider.rotate_perturbation_point_cloud_with_normal(rotated_data)
    else:
        rotated_data = provider.rotate_point_cloud(batch_data)
        rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)

    jittered_data = provider.random_scale_point_cloud(rotated_data[:, :, 0:3])
    jittered_data = provider.shift_point_cloud(jittered_data)
    jittered_data = provider.jitter_point_cloud(jittered_data)
    rotated_data[:, :, 0:3] = jittered_data
    return provider.shuffle_points(rotated_data)
Example 4
def _augment_batch_data(self, batch_data, augment, rotate=0):
    if augment:
        # augment points: random scale, shift and jitter on xyz
        jittered_data = provider.random_scale_point_cloud(batch_data[:, :, 0:3])
        jittered_data = provider.shift_point_cloud(jittered_data)
        jittered_data = provider.jitter_point_cloud(jittered_data)
        batch_data[:, :, 0:3] = jittered_data
    if rotate == 2:
        # rotate points and normals together
        batch_data = provider.rotate_point_cloud_with_normal(batch_data)
    elif rotate == 3:
        # small random perturbation rotation of points and normals
        batch_data = provider.rotate_perturbation_point_cloud_with_normal(batch_data)
    return provider.shuffle_points(batch_data)
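The rotate flag selects the rotation mode: 0 (and any unhandled value) leaves orientation unchanged, 2 applies a full rotation to points and normals, and 3 applies a small perturbation rotation. A hypothetical call:

# 'loader' is a hypothetical instance of the class defining this method
batch = loader._augment_batch_data(batch, augment=True, rotate=2)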
Example 5
def augment_batch_data_MODELNET(batch_data, is_include_normal):
    '''
    is_include_normal=False: columns are x y z
    is_include_normal=True: columns are x y z nx ny nz
    '''
    if is_include_normal:
        rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
        rotated_data = provider.rotate_perturbation_point_cloud_with_normal(
            rotated_data)
    else:
        rotated_data = provider.rotate_point_cloud(batch_data)
        rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)

    jittered_data = provider.random_scale_point_cloud(rotated_data[:, :, 0:3])
    jittered_data = provider.shift_point_cloud(jittered_data)
    jittered_data = provider.jitter_point_cloud(jittered_data)
    rotated_data[:, :, 0:3] = jittered_data
    return provider.shuffle_points(rotated_data)
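A hypothetical usage, assuming a (batch, points, 6) float array of xyz coordinates followed by normals:

import numpy as np

batch = np.random.randn(8, 1024, 6).astype(np.float32)  # 8 clouds, 1024 points each
augmented = augment_batch_data_MODELNET(batch, is_include_normal=True)
assert augmented.shape == (8, 1024, 6)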
Example 6
def __data_generation(self, batch_idx):
    x = np.zeros((self.batch_size, self.npoints, 3))
    y = np.zeros((self.batch_size, ))
    for i, idx in enumerate(batch_idx, 0):
        # take the first n points. TODO: random choice
        x[i] = self.datas[idx, 0:self.npoints, :]
        y[i] = self.labels[idx]
    if self.augment and np.random.rand() > 0.5:
        # apply data augmentation to the whole batch
        rotated_x = provider.rotate_point_cloud(x)  # rotate around the up axis
        rotated_x = provider.rotate_perturbation_point_cloud(rotated_x)  # slightly rotate around every axis
        jittered_x = provider.random_scale_point_cloud(rotated_x)  # random scale a little bit
        jittered_x = provider.shift_point_cloud(jittered_x)  # shift a little
        jittered_x = provider.jitter_point_cloud(jittered_x)  # add random noise (jitter)
        jittered_x = provider.shuffle_points(jittered_x)  # shuffle the point order, e.g. for FPS
        x = jittered_x
    return x, keras.utils.to_categorical(y, num_classes=len(self.cat))
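Assuming the enclosing class is a keras.utils.Sequence whose __getitem__ calls __data_generation, it plugs into training directly; the class name and constructor arguments below are hypothetical:

# hypothetical Sequence subclass wrapping __data_generation above
train_gen = PointCloudSequence(datas, labels, batch_size=32, npoints=1024, augment=True)
model.fit(train_gen, epochs=50)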
Example 7
    def _augment_batch_data(self, batch_data):
        if self.normal_channel:
            rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
            rotated_data = provider.rotate_perturbation_point_cloud_with_normal(
                rotated_data)
        else:
            rotated_data = provider.rotate_point_cloud(batch_data)
            rotated_data = provider.rotate_perturbation_point_cloud(
                rotated_data)

        jittered_data = provider.random_scale_point_cloud(
            rotated_data[:, :, 0:3],
            scale_low=self.scale_low,
            scale_high=self.scale_high)
        jittered_data = provider.shift_point_cloud(
            jittered_data, shift_range=self.shift_range)
        jittered_data = provider.jitter_point_cloud(jittered_data,
                                                    sigma=self.jitter_sigma,
                                                    clip=0.1)
        rotated_data[:, :, 0:3] = jittered_data
        if self.shuffle_points:
            return provider.shuffle_points(rotated_data)
        else:
            return rotated_data
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = experiment_dir.joinpath('%s_ModelNet40-%s' % (
        args.model_name, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(str(log_dir) + '/train_%s_cls.txt' % args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------TRAINING---------------------------------------------------')
    logger.info('PARAMETER ...')
    logger.info(args)

    '''DATA LOADING'''
    logger.info('Load dataset ...')
    DATA_PATH = './data/modelnet40_normal_resampled/'

    TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='train', normal_channel=args.normal)
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test', normal_channel=args.normal)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batchsize, shuffle=True,
                                                  num_workers=args.num_workers)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batchsize, shuffle=False,
                                                 num_workers=args.num_workers)

    logger.info("The number of training data is: %d", len(TRAIN_DATASET))
    logger.info("The number of test data is: %d", len(TEST_DATASET))

    seed = 3
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    '''MODEL LOADING'''
    num_class = 40
    classifier = PointConvClsSsg(num_class).cuda()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.7)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'

    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        logger.info('Epoch %d (%d/%s):', global_epoch + 1, epoch + 1, args.epoch)
        mean_correct = []

        # note: recent PyTorch expects scheduler.step() after optimizer.step(), at the end of the epoch
        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
        # for batch_id, data in enumerate(trainDataLoader, 0):
            points, target = data
            points = points.data.numpy()
            # Augment the data: randomly scale and shift the point cloud, and randomly drop some points
            jittered_data = provider.random_scale_point_cloud(points[:, :, 0:3], scale_low=2.0 / 3, scale_high=3 / 2.0)
            jittered_data = provider.shift_point_cloud(jittered_data, shift_range=0.2)
            points[:, :, 0:3] = jittered_data
            points = provider.random_point_dropout_v2(points)
            points = provider.shuffle_points(points)  # shuffle_points returns a new array; keep the result
            points = torch.Tensor(points)
            target = target[:, 0]

            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()

            classifier = classifier.train()
            # pred = classifier(points[:, :3, :], points[:, 3:, :])
            pred = classifier(points[:, :3, :], None)
            loss = F.nll_loss(pred, target.long())
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            mean_correct.append(correct.item() / float(points.size()[0]))
            loss.backward()
            optimizer.step()
            global_step += 1

        train_acc = np.mean(mean_correct)
        print('Train Accuracy: %f' % train_acc)
        logger.info('Train Accuracy: %f' % train_acc)

        acc = test(classifier, testDataLoader)

        if (acc >= best_tst_accuracy) and epoch > 5:
            best_tst_accuracy = acc
            logger.info('Save model...')
            save_checkpoint(
                global_epoch + 1,
                train_acc,
                acc,
                classifier,
                optimizer,
                str(checkpoints_dir),
                args.model_name)
            print('Saving model....')

        print('Loss (last batch): %f' % loss.item())
        logger.info('Loss: %.2f', loss.item())
        print('Test %s: %f   ***  %s: %f' % (blue('Accuracy'), acc, blue('Best Accuracy'), best_tst_accuracy))
        logger.info('Test Accuracy: %f  *** Best Test Accuracy: %f', acc, best_tst_accuracy)

        global_epoch += 1
    print('Best Accuracy: %f' % best_tst_accuracy)

    logger.info('End of training...')
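This script assumes a save_checkpoint helper with the positional signature used in the loop above. A minimal sketch; only the 'epoch' and 'model_state_dict' keys are required by the resume logic at the top of main, and the file-name pattern is an assumption:

def save_checkpoint(epoch, train_accuracy, test_accuracy, model, optimizer, path, modelname):
    # keep the 'epoch' and 'model_state_dict' keys compatible with the resume code in main()
    savepath = '%s/%s-%04d.pth' % (path, modelname, epoch)
    state = {
        'epoch': epoch,
        'train_accuracy': train_accuracy,
        'test_accuracy': test_accuracy,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    torch.save(state, savepath)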
Example 9
def augment_batch_data(batch_data):
    augmented_data = provider.scale_point_cloud_1(batch_data)
    augmented_data = provider.translate_point_cloud_1(augmented_data)
    augmented_data = provider.jitter_point_cloud_1(augmented_data)

    return provider.shuffle_points(augmented_data)
Example 10
def _augment_batch_data(self, batch_data):
    rotated_data = provider.rotate_point_cloud(batch_data)
    return provider.shuffle_points(rotated_data)
def main(args):
    def log_string(msg):
        logger.info(msg)
        print(msg)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('cls')
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    '''DATA LOADING'''
    log_string('Load dataset ...')
    # DATA_PATH = 'data/modelnet40_normal_resampled/'
    DATA_PATH = args.data_dir

    # if args.model == 'pointcnn_cls':
    #     trainDataLoader = PyGDataloader(TRAIN_DATASET, args.batch_size, shuffle=True)
    #     testDataLoader = PyGDataloader(TEST_DATASET, args.batch_size, shuffle=False)
    # else:
    TRAIN_DATASET = ClsDataLoader(root=DATA_PATH,
                                  dataset_name=args.dataset_name,
                                  npoint=args.num_point,
                                  split='train',
                                  normal_channel=args.normal)
    TEST_DATASET = ClsDataLoader(root=DATA_PATH,
                                 dataset_name=args.dataset_name,
                                 npoint=args.num_point,
                                 split='test',
                                 normal_channel=args.normal)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET,
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=args.num_worker,
                                                  drop_last=True)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=args.num_worker,
                                                 drop_last=True)
    '''MODEL LOADING'''
    num_class = args.num_class
    MODEL = importlib.import_module(args.model)
    shutil.copy('./models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('./models/pointnet_util.py', str(experiment_dir))

    classifier = MODEL.get_model(num_class, normal_channel=args.normal).cuda()
    criterion = MODEL.get_loss().cuda()

    # try:
    #     checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
    #     start_epoch = checkpoint['epoch']
    #     classifier.load_state_dict(checkpoint['model_state_dict'])
    #     log_string('Use pretrain model')
    # except:
    #     log_string('No existing model, starting training from scratch...')
    #     start_epoch = 0

    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    else:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=0.01,
                                    momentum=0.9)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.7)

    try:
        checkpoint = torch.load(
            str(experiment_dir) + '/checkpoints/last_model.pth')
        start_epoch = checkpoint['epoch'] + 1
        classifier.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        best_instance_acc = checkpoint['instance_acc']
        best_class_acc = checkpoint['class_acc']
        log_string('Use pretrain model')
    except Exception:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0
        best_instance_acc = 0.0
        best_class_acc = 0.0

    global_epoch = 0
    global_step = 0
    mean_correct = []
    '''TRAINING'''
    logger.info('Start training...')
    writer_loss = SummaryWriter(os.path.join(str(log_dir), 'loss'))
    writer_train_instance_accuracy = SummaryWriter(
        os.path.join(str(log_dir), 'train_instance_accuracy'))
    writer_test_instance_accuracy = SummaryWriter(
        os.path.join(str(log_dir), 'test_instance_accuracy'))
    writer_test_class_accuracy = SummaryWriter(
        os.path.join(str(log_dir), 'test_class_accuracy'))
    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' %
                   (global_epoch + 1, epoch + 1, args.epoch))

        # note: recent PyTorch expects scheduler.step() after optimizer.step(), at the end of the epoch
        scheduler.step()
        log_string('lr: %f' % optimizer.param_groups[0]['lr'])
        running_loss = 0.0
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            points, target = data
            points = points.data.numpy()
            points = provider.random_point_dropout(points)
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :,
                                                                         0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            if args.model == 'pointcnn_cls' and args.pointcnn_data_aug:
                points = provider.shuffle_points(points)
                points[:, :, 0:3] = provider.rotate_point_cloud(points[:, :,
                                                                       0:3])
                points[:, :, 0:3] = provider.jitter_point_cloud(points[:, :,
                                                                       0:3])
            points = torch.Tensor(points)
            target = target[:, 0]

            points = points.transpose(2, 1)

            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()

            classifier = classifier.train()
            if args.model == 'pointcnn_cls':
                points = points.transpose(2, 1)
                if args.dataset_name == 'cifar':
                    pos = points.reshape((-1, 6))
                    # normalise rgb
                    pos[:, 3:6] = pos[:, 3:6] / 255.0
                else:
                    pos = points.reshape((-1, 3))
                x = np.arange(0, args.batch_size)
                batch = torch.from_numpy(np.repeat(x, args.num_point)).cuda()
                pred, trans_feat = classifier(pos, batch)
            else:
                pred, trans_feat = classifier(points)

            loss = criterion(pred, target.long(), trans_feat)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            mean_correct.append(correct.item() / float(points.size()[0]))
            loss.backward()
            optimizer.step()
            global_step += 1

            running_loss += loss.item()
            if batch_id % 10 == 9:  # log the loss to TensorBoard every 10 batches
                niter = epoch * len(trainDataLoader) + batch_id
                writer_loss.add_scalar('Train/loss', loss.item(), niter)

        log_string('Loss: %f' % (running_loss / len(trainDataLoader)))
        train_instance_acc = np.mean(mean_correct)
        log_string('Train Instance Accuracy: %f' % train_instance_acc)
        writer_train_instance_accuracy.add_scalar('Train/instance_accuracy',
                                                  train_instance_acc.item(),
                                                  epoch)

        with torch.no_grad():
            instance_acc, class_acc = test(classifier.eval(), testDataLoader,
                                           num_class)
            writer_test_instance_accuracy.add_scalar('Test/instance_accuracy',
                                                     instance_acc.item(),
                                                     epoch)
            writer_test_class_accuracy.add_scalar('Test/class_accuracy',
                                                  class_acc.item(), epoch)

            if instance_acc >= best_instance_acc:
                best_instance_acc = instance_acc
                best_epoch = epoch + 1

            if class_acc >= best_class_acc:
                best_class_acc = class_acc
            log_string('Test Instance Accuracy: %f, Class Accuracy: %f' %
                       (instance_acc, class_acc))
            log_string('Best Instance Accuracy: %f, Class Accuracy: %f' %
                       (best_instance_acc, best_class_acc))

            logger.info('Save the last model...')
            savepath_last = str(checkpoints_dir) + '/last_model.pth'
            log_string('Saving at %s' % savepath_last)
            state_last = {
                'epoch': epoch,
                'instance_acc': instance_acc,
                'class_acc': class_acc,
                'model_state_dict': classifier.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
            }
            torch.save(state_last, savepath_last)

            if instance_acc >= best_instance_acc:
                logger.info('Save model...')
                savepath = str(checkpoints_dir) + '/best_model.pth'
                log_string('Saving at %s' % savepath)
                state = {
                    'epoch': best_epoch,
                    'instance_acc': instance_acc,
                    'class_acc': class_acc,
                    'model_state_dict': classifier.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'scheduler_state_dict': scheduler.state_dict(),
                }
                torch.save(state, savepath)
            global_epoch += 1

    logger.info('End of training...')
    writer_loss.close()
    writer_train_instance_accuracy.close()
    writer_test_instance_accuracy.close()
    writer_test_class_accuracy.close()
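Both training loops above assume a test() helper that returns (instance_acc, class_acc). A sketch modeled on the common Pointnet_Pointnet2_pytorch evaluation loop; the exact bookkeeping is an assumption:

def test(model, loader, num_class=40):
    mean_correct = []
    # per class: [summed per-batch accuracy, number of batches seen, mean]
    class_acc = np.zeros((num_class, 3))
    for points, target in loader:
        target = target[:, 0]
        points = points.transpose(2, 1).cuda()
        target = target.cuda()
        pred, _ = model(points)
        pred_choice = pred.data.max(1)[1]
        for cat in np.unique(target.cpu()).astype(int):
            mask = target == cat
            acc = pred_choice[mask].eq(target[mask].long()).cpu().sum()
            class_acc[cat, 0] += acc.item() / float(points[mask].size()[0])
            class_acc[cat, 1] += 1
        correct = pred_choice.eq(target.long()).cpu().sum()
        mean_correct.append(correct.item() / float(points.size()[0]))
    class_acc[:, 2] = class_acc[:, 0] / np.maximum(class_acc[:, 1], 1)
    return np.mean(mean_correct), np.mean(class_acc[:, 2])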