Example #1
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(
        str(experiment_dir) + '/%s_ModelNet40-' % args.model_name +
        str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(
        str(log_dir) + '/train_%s_cls.txt' % args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------TRAINING---------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)
    '''DATA LOADING'''
    logger.info('Load dataset ...')
    DATA_PATH = './data/point_model/'

    TRAIN_DATASET = ReductionDataLoader(root=DATA_PATH,
                                        npoint=args.num_point,
                                        split='train',
                                        normal_channel=args.normal)
    # TEST_DATASET = ReductionDataLoader(root=DATA_PATH, npoint=args.num_point, split='test',
    #                                    normal_channel=args.normal)

    logger.info("The number of training data is: %d", len(TRAIN_DATASET))
    # logger.info("The number of test data is: %d", len(TEST_DATASET))

    seed = 3
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    '''MODEL LOADING'''
    embs_len = 3
    reductioner = TD_Reduction(embs_len).cuda()
    criterion = ChamferLoss()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        reductioner.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(reductioner.parameters(),
                                    lr=0.01,
                                    momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(reductioner.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=20,
                                                gamma=0.5)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    train_steps = 4800
    # test_steps = 20
    # blue = lambda x: '\033[94m' + x + '\033[0m'
    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        train_idxs = np.random.choice(len(TRAIN_DATASET), train_steps)
        # test_idxs = np.random.choice(TEST_DATASET.__len__(), test_steps)
        # RandomSampler(train_idxs) would draw positions 0..len(train_idxs)-1 rather
        # than the chosen dataset indices; SubsetRandomSampler uses the indices themselves.
        train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_idxs)
        # test_sampler = torch.utils.data.sampler.SequentialSampler(test_idxs)
        trainDataLoader = torch.utils.data.DataLoader(
            TRAIN_DATASET,
            batch_size=args.batchsize,
            sampler=train_sampler,
            num_workers=args.num_workers)
        # testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batchsize, sampler=test_sampler,
        #                                              num_workers=args.num_workers)
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        logger.info('Epoch %d (%d/%s):', global_epoch + 1, epoch + 1,
                    args.epoch)
        # mean_correct = []

        # Since PyTorch 1.1, scheduler.step() is meant to follow optimizer.step();
        # it is kept at the start of the epoch here to preserve the original schedule.
        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            # for batch_id, data in enumerate(trainDataLoader, 0):
            points_set = data
            points_set = points_set.data.numpy()
            # Data augmentation: randomly scale and shift the point cloud, and randomly drop some points
            jittered_data, j_scale = provider.random_scale_point_cloud(
                points_set[:, :, 0:3], scale_low=2.0 / 3, scale_high=3 / 2.0)
            jittered_data, j_shift = provider.shift_point_cloud(
                jittered_data, shift_range=0.2)
            points_set[:, :, 0:3] = jittered_data
            points_set[:, :1024, :] = provider.random_point_dropout_v2(
                points_set[:, :1024, :])
            # Inference
            # points = torch.Tensor(points_set[:, :1024, :])
            target = torch.Tensor(points_set[:, 1024:2048, :])

            points = target.transpose(2, 1)  # autoencoding: the target doubles as the input
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            reductioner.train()
            _, pred = reductioner(points[:, :3, :], None)

            loss, _, _ = criterion(pred, target)

            # import open3d as o3d
            # vis_target = target[0, :, :].data.cpu().numpy()
            # vis_pred = pred[0, :, :].data.cpu().numpy()
            # vis_target_cloud = o3d.PointCloud()
            # vis_target_cloud.points = o3d.Vector3dVector(vis_target)
            # vis_target_cloud.paint_uniform_color([0, 0, 0])
            # vis_pred_cloud = o3d.PointCloud()
            # vis_pred_cloud.points = o3d.Vector3dVector(vis_pred)
            # vis_pred_cloud.paint_uniform_color([0, 0, 1])
            # o3d.draw_geometries([vis_target_cloud, vis_pred_cloud])

            loss.backward()
            optimizer.step()
            global_step += 1

        if epoch > 20:
            logger.info('Save model...')
            save_checkpoint(global_epoch + 1, 0., 0., reductioner, optimizer,
                            str(checkpoints_dir), args.model_name)
            print('Saving model....')

        print('\r Loss: %f' % loss.item())
        logger.info('Loss: %.2f', loss.item())

        global_epoch += 1

    print('Best Accuracy: %f' % best_tst_accuracy)

    logger.info('End of training...')
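
Example #1 calls two helpers that are not shown here: a ChamferLoss criterion and save_checkpoint. The sketches below are assumptions, not the authors' code: a symmetric Chamfer distance that also returns the two per-direction terms (to match the loss, _, _ = criterion(pred, target) unpacking above) and a plain state-dict checkpoint whose 'epoch' and 'model_state_dict' keys are the ones the pretrain-loading branch reads.

import torch
import torch.nn as nn

class ChamferLoss(nn.Module):
    # Symmetric Chamfer distance between point sets pred (B, N, 3) and target (B, M, 3).
    def forward(self, pred, target):
        diff = pred.unsqueeze(2) - target.unsqueeze(1)   # (B, N, M, 3)
        dist = torch.sum(diff * diff, dim=-1)            # squared pairwise distances (B, N, M)
        dist_pred = torch.min(dist, dim=2)[0]            # nearest target point per prediction
        dist_target = torch.min(dist, dim=1)[0]          # nearest prediction per target point
        loss = torch.mean(dist_pred) + torch.mean(dist_target)
        return loss, dist_pred, dist_target

def save_checkpoint(epoch, train_accuracy, test_accuracy, model, optimizer, path, modelname):
    # Hypothetical checkpoint layout matching the save_checkpoint(...) call sites above.
    savepath = '%s/%s-%04d.pth' % (path, modelname, epoch)
    torch.save({
        'epoch': epoch,
        'train_accuracy': train_accuracy,
        'test_accuracy': test_accuracy,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }, savepath)
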
Example #2
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(str(experiment_dir) + '/%s_ModelNet40-' % args.model_name + str(
        datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(str(log_dir) + '/train_%s_cls.txt' % args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------TRAINING---------------------------------------------------')
    logger.info('PARAMETER ...')
    logger.info(args)

    '''DATA LOADING'''
    logger.info('Load dataset ...')
    DATA_PATH = './data/modelnet40_normal_resampled/'

    TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='train', normal_channel=args.normal)
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test', normal_channel=args.normal)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batchsize, shuffle=True,
                                                  num_workers=args.num_workers)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batchsize, shuffle=False,
                                                 num_workers=args.num_workers)

    logger.info("The number of training data is: %d", len(TRAIN_DATASET))
    logger.info("The number of test data is: %d", len(TEST_DATASET))

    seed = 3
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    '''MODEL LOADING'''
    num_class = 40
    classifier = PointConvClsSsg(num_class).cuda()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.7)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    blue = lambda x: '\033[94m' + x + '\033[0m'

    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        logger.info('Epoch %d (%d/%s):', global_epoch + 1, epoch + 1, args.epoch)
        mean_correct = []

        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
        # for batch_id, data in enumerate(trainDataLoader, 0):
            points, target = data
            points = points.data.numpy()
            # Data augmentation: randomly scale and shift the point cloud, and randomly drop some points
            jittered_data = provider.random_scale_point_cloud(points[:, :, 0:3], scale_low=2.0 / 3, scale_high=3 / 2.0)
            jittered_data = provider.shift_point_cloud(jittered_data, shift_range=0.2)
            points[:, :, 0:3] = jittered_data
            points = provider.random_point_dropout_v2(points)
            # shuffle_points returns a shuffled copy rather than mutating in place
            # (assuming the standard PointNet provider), so the result must be kept
            points = provider.shuffle_points(points)
            points = torch.Tensor(points)
            target = target[:, 0]

            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()

            classifier.train()
            # pred = classifier(points[:, :3, :], points[:, 3:, :])
            pred = classifier(points[:, :3, :], None)
            loss = F.nll_loss(pred, target.long())
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            mean_correct.append(correct.item() / float(points.size()[0]))
            loss.backward()
            optimizer.step()
            global_step += 1

        train_acc = np.mean(mean_correct)
        print('Train Accuracy: %f' % train_acc)
        logger.info('Train Accuracy: %f' % train_acc)

        acc = test(classifier, testDataLoader)

        if (acc >= best_tst_accuracy) and epoch > 5:
            best_tst_accuracy = acc
            logger.info('Save model...')
            save_checkpoint(
                global_epoch + 1,
                train_acc,
                acc,
                classifier,
                optimizer,
                str(checkpoints_dir),
                args.model_name)
            print('Saving model....')

        print('\r Loss: %f' % loss.item())
        logger.info('Loss: %.2f', loss.item())
        print('\r Test %s: %f   ***  %s: %f' % (blue('Accuracy'), acc, blue('Best Accuracy'), best_tst_accuracy))
        logger.info('Test Accuracy: %f  *** Best Test Accuracy: %f', acc, best_tst_accuracy)

        global_epoch += 1
    print('Best Accuracy: %f' % best_tst_accuracy)

    logger.info('End of training...')
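
Example #2 evaluates with a test(classifier, testDataLoader) helper that is not shown. A minimal sketch, assuming it mirrors the training loop's conventions (class label in column 0 of the target, xyz channels only, accuracy averaged over batches):

def test(model, loader):
    mean_correct = []
    model.eval()
    with torch.no_grad():
        for points, target in loader:
            target = target[:, 0]
            points = points.transpose(2, 1).cuda()
            target = target.cuda()
            pred = model(points[:, :3, :], None)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            mean_correct.append(correct.item() / float(points.size()[0]))
    return np.mean(mean_correct)
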
Example #3
def main(args):
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''CREATE DIR'''
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = Path(
        str(experiment_dir) + '/%s_ModelNet40-' % args.model_name +
        str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
    file_dir.mkdir(exist_ok=True)
    checkpoints_dir = file_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = file_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    logger = logging.getLogger(args.model_name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(
        str(log_dir) + '/train_%s_cls.txt' % args.model_name)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info(
        '---------------------------------------------------TRAINING---------------------------------------------------'
    )
    logger.info('PARAMETER ...')
    logger.info(args)
    '''DATA LOADING'''
    logger.info('Load dataset ...')
    DATA_PATH = './data/point_data/'

    TRAIN_DATASET = TranslationDataLoader(root=DATA_PATH,
                                          npoint=args.num_point,
                                          split='train',
                                          normal_channel=args.normal)
    TEST_DATASET = TranslationDataLoader(root=DATA_PATH,
                                         npoint=args.num_point,
                                         split='test',
                                         normal_channel=args.normal)

    logger.info("The number of training data is: %d", len(TRAIN_DATASET))
    logger.info("The number of test data is: %d", len(TEST_DATASET))

    seed = 3
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    '''MODEL LOADING'''
    embs_len = 3
    estimator = PointConvTrans(embs_len).cuda()
    if args.pretrain is not None:
        print('Use pretrain model...')
        logger.info('Use pretrain model')
        checkpoint = torch.load(args.pretrain)
        start_epoch = checkpoint['epoch']
        estimator.load_state_dict(checkpoint['model_state_dict'])
    else:
        print('No existing model, starting training from scratch...')
        start_epoch = 0

    if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(estimator.parameters(),
                                    lr=0.01,
                                    momentum=0.9)
    elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(estimator.parameters(),
                                     lr=args.learning_rate,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=args.decay_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=30,
                                                gamma=0.7)
    global_epoch = 0
    global_step = 0
    best_tst_accuracy = 0.0
    train_steps = 4800
    test_steps = 1600
    blue = lambda x: '\033[94m' + x + '\033[0m'
    '''TRAINING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        train_idxs = np.random.choice(len(TRAIN_DATASET), train_steps)
        test_idxs = np.random.choice(len(TEST_DATASET), test_steps)
        # RandomSampler/SequentialSampler over an index array would iterate positions
        # 0..len-1, not the chosen indices; use SubsetRandomSampler for training and
        # pass the index list itself for testing (DataLoader accepts any iterable of
        # indices as a sampler).
        train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_idxs)
        test_sampler = test_idxs.tolist()
        trainDataLoader = torch.utils.data.DataLoader(
            TRAIN_DATASET,
            batch_size=args.batchsize,
            sampler=train_sampler,
            num_workers=args.num_workers)
        testDataLoader = torch.utils.data.DataLoader(
            TEST_DATASET,
            batch_size=args.batchsize,
            sampler=test_sampler,
            num_workers=args.num_workers)
        print('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        logger.info('Epoch %d (%d/%s):', global_epoch + 1, epoch + 1,
                    args.epoch)
        mean_correct = []

        scheduler.step()
        for batch_id, data in tqdm(enumerate(trainDataLoader, 0),
                                   total=len(trainDataLoader),
                                   smoothing=0.9):
            # for batch_id, data in enumerate(trainDataLoader, 0):
            points_set, n_cent, n_size = data
            points_set = points_set.data.numpy()
            n_cent = n_cent.cuda()
            n_size = n_size.cuda()
            # Data augmentation: randomly scale and shift the point cloud, and randomly drop some points
            jittered_data, j_scale = provider.random_scale_point_cloud(
                points_set[:, :, 0:3], scale_low=2.0 / 3, scale_high=3 / 2.0)
            jittered_data, j_shift = provider.shift_point_cloud(
                jittered_data, shift_range=0.2)
            points_set[:, :, 0:3] = jittered_data
            points_set[:, :1024, :] = provider.random_point_dropout_v2(
                points_set[:, :1024, :])
            # Inference
            points = torch.Tensor(points_set[:, :1024, :])
            target = torch.Tensor(points_set[:, 1024, :])
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            estimator.train()
            pred = estimator(points[:, :3, :], None)

            # Undo the augmentation: invert the shift/scale jitter, then restore the
            # cloud's original size and centroid
            j_scale = torch.Tensor(np.tile(j_scale[:, np.newaxis],
                                           (1, 3))).cuda()
            j_shift = torch.Tensor(j_shift).cuda()
            n_size = torch.unsqueeze(n_size, dim=1).repeat(1, 3)
            real_t = (((target - j_shift) / j_scale) * n_size) + n_cent
            real_p = (((pred - j_shift) / j_scale) * n_size) + n_cent
            loss = F.mse_loss(real_p, real_t)

            # import open3d as o3d
            # vis_point = points[0, :, :].data.cpu().numpy().T
            # vis_target = target[:1, :].data.cpu().numpy()
            # vis_pred = pred[:1, :].data.cpu().numpy()
            # vis_cent = np.array([[0., 0., 0.]])
            # vis_point_cloud = o3d.PointCloud()
            # vis_point_cloud.points = o3d.Vector3dVector(vis_point)
            # vis_point_cloud.paint_uniform_color([1, 0, 0])
            # vis_target_cloud = o3d.PointCloud()
            # vis_target_cloud.points = o3d.Vector3dVector(vis_target)
            # vis_target_cloud.paint_uniform_color([0, 0, 0])
            # vis_pred_cloud = o3d.PointCloud()
            # vis_pred_cloud.points = o3d.Vector3dVector(vis_pred)
            # vis_pred_cloud.paint_uniform_color([0, 0, 1])
            # vis_cent_cloud = o3d.PointCloud()
            # vis_cent_cloud.points = o3d.Vector3dVector(vis_cent)
            # vis_cent_cloud.paint_uniform_color([0, 1, 0])
            # vis_diff = vis_target - vis_pred
            # vis_dist = np.linalg.norm(vis_diff)
            # print(vis_dist)
            # o3d.draw_geometries([vis_point_cloud, vis_target_cloud, vis_pred_cloud, vis_cent_cloud])

            diff = real_p - real_t
            distance = torch.norm(diff, dim=1)
            correct = torch.sum(distance < 0.05)
            mean_correct.append(correct.item() / float(points.size()[0]))
            loss.backward()
            optimizer.step()
            global_step += 1

        train_acc = np.mean(mean_correct)
        print('Train Accuracy: %f' % train_acc)
        logger.info('Train Accuracy: %f' % train_acc)

        acc = trans_test(estimator, testDataLoader)

        if (acc >= best_tst_accuracy) and epoch > 5:
            best_tst_accuracy = acc
            logger.info('Save model...')
            save_checkpoint(global_epoch + 1, train_acc, acc, estimator,
                            optimizer, str(checkpoints_dir), args.model_name)
            print('Saving model....')

        print('\r Loss: %f' % loss.item())
        logger.info('Loss: %.2f', loss.item())
        print(
            '\r Test %s: %f   ***  %s: %f' %
            (blue('Accuracy'), acc, blue('Best Accuracy'), best_tst_accuracy))
        logger.info('Test Accuracy: %f  *** Best Test Accuracy: %f', acc,
                    best_tst_accuracy)

        global_epoch += 1
    print('Best Accuracy: %f' % best_tst_accuracy)

    logger.info('End of training...')
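
Example #3 relies on a trans_test helper, and all three examples assume a shared parse_args() plus an entry point, none of which are shown. The reconstructions below are hedged guesses: trans_test mirrors the in-loop accuracy rule above (a prediction counts as correct within 0.05 of the target after de-normalization, here without the training-time jitter), and parse_args covers only the flags the code above actually reads, with made-up defaults.

import argparse

def trans_test(model, loader):
    mean_correct = []
    model.eval()
    with torch.no_grad():
        for points_set, n_cent, n_size in loader:
            n_cent, n_size = n_cent.cuda(), n_size.cuda()
            points = points_set[:, :1024, :].transpose(2, 1).cuda()
            target = points_set[:, 1024, :].cuda()
            pred = model(points[:, :3, :], None)
            # Restore original size and centroid (no scale/shift jitter at test time).
            n_size = torch.unsqueeze(n_size, dim=1).repeat(1, 3)
            real_t = target * n_size + n_cent
            real_p = pred * n_size + n_cent
            distance = torch.norm(real_p - real_t, dim=1)
            correct = torch.sum(distance < 0.05)
            mean_correct.append(correct.item() / float(points.size()[0]))
    return np.mean(mean_correct)

def parse_args():
    parser = argparse.ArgumentParser('training')
    parser.add_argument('--gpu', type=str, default='0', help='GPU id(s) for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--model_name', type=str, default='pointconv', help='tag used in paths and logs')
    parser.add_argument('--num_point', type=int, default=1024, help='points sampled per shape')
    parser.add_argument('--normal', action='store_true', help='use normal channels')
    parser.add_argument('--batchsize', type=int, default=32, help='batch size')
    parser.add_argument('--num_workers', type=int, default=4, help='DataLoader worker processes')
    parser.add_argument('--pretrain', type=str, default=None, help='checkpoint to resume from')
    parser.add_argument('--optimizer', type=str, default='Adam', choices=['Adam', 'SGD'])
    parser.add_argument('--learning_rate', type=float, default=0.001, help='initial Adam learning rate')
    parser.add_argument('--decay_rate', type=float, default=1e-4, help='Adam weight decay')
    parser.add_argument('--epoch', type=int, default=200, help='number of training epochs')
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())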