Example #1
def main():
    opt = OptInit().initialize()

    print('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.train_path,
                                  5,     # hold out Area 5 as the test area
                                  True,  # train split
                                  pre_transform=T.NormalizeScale())
    if opt.multi_gpus:
        train_loader = DataListLoader(train_dataset,
                                      batch_size=opt.batch_size,
                                      shuffle=True,
                                      num_workers=4)
    else:
        train_loader = DataLoader(train_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    opt.n_classes = train_loader.dataset.num_classes

    print('===> Loading the network ...')
    opt.model = getattr(models, opt.model_name)(opt).to(opt.device)
    if opt.multi_gpus:
        opt.model = DataParallel(getattr(models,
                                         opt.model_name)(opt)).to(opt.device)
    print('===> loading pre-trained ...')
    load_pretrained_models(opt)

    print('===> Init the optimizer ...')
    opt.criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    opt.valid_metric = miou
    opt.optimizer = torch.optim.Adam(opt.model.parameters(), lr=opt.lr)
    opt.scheduler = torch.optim.lr_scheduler.StepLR(opt.optimizer,
                                                    opt.lr_adjust_freq, 0.5)
    load_pretrained_optimizer(opt)

    print('===> start training ...')
    for _ in range(opt.total_epochs):
        opt.epoch += 1
        train(train_loader, opt)
        # valid(train_loader, opt)
        opt.scheduler.step()
    print('Saving the final model. Finished!')
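
Example #1 keeps the model, criterion, optimizer and device on `opt` and delegates the per-epoch work to `train(train_loader, opt)`, which is not shown here. A minimal sketch of such a loop for the single-GPU `DataLoader` path, under those assumptions (the repository's actual helper, and its handling of the multi-GPU `DataListLoader` case, may differ):

def train(train_loader, opt):
    # One pass over the training set using the objects attached to opt.
    # Assumes the model consumes a PyG batch and returns per-point logits.
    opt.model.train()
    for data in train_loader:
        data = data.to(opt.device)
        opt.optimizer.zero_grad()
        out = opt.model(data)              # per-point class logits
        loss = opt.criterion(out, data.y)  # data.y: ground-truth point labels
        loss.backward()
        opt.optimizer.step()
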
Example #2
def main():
    opt = OptInit().initialize()
    opt.printer.info('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.train_path,
                                  5,
                                  True,
                                  pre_transform=T.NormalizeScale())
    if opt.multi_gpus:
        train_loader = DataListLoader(train_dataset,
                                      batch_size=opt.batch_size,
                                      shuffle=True,
                                      num_workers=4)
    else:
        train_loader = DataLoader(train_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    opt.n_classes = train_loader.dataset.num_classes

    opt.printer.info('===> Loading the network ...')
    model = SparseDeepGCN(opt).to(opt.device)
    if opt.multi_gpus:
        model = DataParallel(SparseDeepGCN(opt)).to(opt.device)
    opt.printer.info('===> loading pre-trained ...')
    model, opt.best_value, opt.epoch = load_pretrained_models(
        model, opt.pretrained_model, opt.phase)

    opt.printer.info('===> Init the optimizer ...')
    criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    if opt.optim.lower() == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    elif opt.optim.lower() == 'radam':
        optimizer = optim.RAdam(model.parameters(), lr=opt.lr)
    else:
        raise NotImplementedError('opt.optim is not supported')
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq,
                                                opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(
        opt.pretrained_model, optimizer, scheduler, opt.lr)

    opt.printer.info('===> Init Metric ...')
    opt.losses = AverageMeter()
    # opt.test_metric = miou
    # opt.test_values = AverageMeter()
    opt.test_value = 0.

    opt.printer.info('===> start training ...')
    for _ in range(opt.total_epochs):
        opt.epoch += 1
        train(model, train_loader, optimizer, scheduler, criterion, opt)
        # test_value = test(model, test_loader, test_metric, opt)
        scheduler.step()
    opt.printer.info('Saving the final model. Finished!')
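
`opt.losses = AverageMeter()` keeps a running mean of the batch losses over an epoch. The class itself is not shown in these examples; the standard pattern behind that name looks roughly like this (a sketch, not necessarily the repository's version):

class AverageMeter(object):
    """Keeps a running sum/count so .avg always reflects the epoch so far."""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val, self.sum, self.count, self.avg = 0., 0., 0, 0.

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
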
Example #3
def main():
    opt = OptInit().get_args()
    logging.info('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.data_dir,
                                  opt.area,
                                  True,
                                  pre_transform=T.NormalizeScale())
    train_loader = DenseDataLoader(train_dataset,
                                   batch_size=opt.batch_size,
                                   shuffle=True,
                                   num_workers=4)
    test_dataset = GeoData.S3DIS(opt.data_dir,
                                 opt.area,
                                 train=False,
                                 pre_transform=T.NormalizeScale())
    test_loader = DenseDataLoader(test_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=False,
                                  num_workers=0)
    opt.n_classes = train_loader.dataset.num_classes

    logging.info('===> Loading the network ...')
    model = DenseDeepGCN(opt).to(opt.device)
    if opt.multi_gpus:
        model = DataParallel(DenseDeepGCN(opt)).to(opt.device)
    logging.info('===> loading pre-trained ...')
    model, opt.best_value, opt.epoch = load_pretrained_models(
        model, opt.pretrained_model, opt.phase)
    logging.info(model)

    logging.info('===> Init the optimizer ...')
    criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq,
                                                opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(
        opt.pretrained_model, optimizer, scheduler, opt.lr)

    logging.info('===> Init Metric ...')
    opt.losses = AverageMeter()
    opt.test_value = 0.

    logging.info('===> start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        logging.info('Epoch:{}'.format(opt.epoch))
        train(model, train_loader, optimizer, scheduler, criterion, opt)
        if opt.epoch % opt.eval_freq == 0 and opt.eval_freq != -1:
            test(model, test_loader, opt)
        scheduler.step()
    logging.info('Saving the final model. Finished!')
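
`load_pretrained_optimizer` restores the optimizer and scheduler state from the same checkpoint file used for the model weights and returns the (possibly updated) learning rate. A hedged sketch of that behaviour, assuming the checkpoint keys written by `save_checkpoint` in Example #8:

import os
import torch

def load_pretrained_optimizer(pretrained_model, optimizer, scheduler, lr):
    # Illustrative only; the repository's helper may handle more cases
    # (e.g. moving optimizer state tensors to the right device).
    if pretrained_model and os.path.isfile(pretrained_model):
        ckpt = torch.load(pretrained_model, map_location='cpu')
        if 'optimizer_state_dict' in ckpt:
            optimizer.load_state_dict(ckpt['optimizer_state_dict'])
            lr = optimizer.param_groups[0]['lr']
        if 'scheduler_state_dict' in ckpt:
            scheduler.load_state_dict(ckpt['scheduler_state_dict'])
    return optimizer, scheduler, lr
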
Example #4
def main():
    opt = OptInit().initialize()

    print('===> Creating dataloader...')
    test_dataset = GeoData.S3DIS(opt.test_path,
                                 5,      # Area 5 is the held-out test area
                                 False,  # test split
                                 pre_transform=T.NormalizeScale())
    test_loader = DataLoader(test_dataset,
                             batch_size=opt.batch_size,
                             shuffle=False,
                             num_workers=0)
    opt.n_classes = test_loader.dataset.num_classes
    if opt.no_clutter:
        opt.n_classes -= 1

    print('===> Loading the network ...')
    opt.model = getattr(models, opt.model_name)(opt).to(opt.device)
    load_pretrained_models(opt)

    print('===> Start Evaluation ...')
    test(test_loader, opt.model, opt)
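
Most of the later examples call `load_pretrained_models(model, pretrained_model, phase)` and unpack `(model, best_value, epoch)` from it (Examples #1 and #4 use an `opt`-based variant instead). A minimal sketch of the three-argument form, assuming the checkpoint layout written by `save_checkpoint` in Example #8:

import torch

def load_pretrained_models(model, pretrained_model, phase):
    # Illustrative sketch; phase ('train'/'test') is unused here.
    best_value, epoch = 0., -1
    if pretrained_model:
        ckpt = torch.load(pretrained_model, map_location='cpu')
        model.load_state_dict(ckpt['state_dict'])
        best_value = ckpt.get('best_value', best_value)
        epoch = ckpt.get('epoch', epoch)
    return model, best_value, epoch
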
Example #5
def main():
    opt = OptInit().initialize()
    opt.batch_size = 1
    os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpuNum

    print('===> Creating dataloader...')
    # def __init__(self,
    #              root,
    #              is_train=True,
    #              is_validation=False,
    #              is_test=False,
    #              num_channel=5,
    #              pre_transform=None,
    #              pre_filter=None)
    test_dataset = BigredDataset(root=opt.test_path,
                                 is_train=False,
                                 is_validation=False,
                                 is_test=True,
                                 num_channel=5,
                                 pre_transform=T.NormalizeScale())
    test_loader = DenseDataLoader(test_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=False,
                                  num_workers=32)
    opt.n_classes = 2

    print('len(test_loader):', len(test_loader))
    print('phase: ', opt.phase)
    print('batch_size: ', opt.batch_size)
    print('use_cpu: ', opt.use_cpu)
    print('gpuNum: ', opt.gpuNum)
    print('multi_gpus: ', opt.multi_gpus)
    print('test_path: ', opt.test_path)
    print('in_channels: ', opt.in_channels)
    print('device: ', opt.device)

    print('===> Loading the network ...')
    model = DenseDeepGCN(opt).to(opt.device)
    load_package = torch.load(opt.pretrained_model)
    model, opt.best_value, opt.epoch = load_pretrained_models(
        model, opt.pretrained_model, opt.phase)
    pdb.set_trace()  # drops into the debugger to inspect the loaded checkpoint
    for item in load_package.keys():
        if (item != 'optimizer_state_dict' and item != 'state_dict'
                and item != 'scheduler_state_dict'):
            print(str(item), load_package[item])

    print('===> Start Evaluation ...')
    test(model, test_loader, opt)
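
The loop at the end of Example #5 prints every checkpoint entry except the large state dicts. Judging from the `save_checkpoint` call in Example #8, the loaded `load_package` is expected to have roughly this shape (the concrete values below are placeholders):

load_package = {
    'epoch': 100,                     # last completed epoch (placeholder)
    'state_dict': {...},              # model weights, skipped by the loop
    'optimizer_state_dict': {...},    # skipped by the loop
    'scheduler_state_dict': {...},    # skipped by the loop
    'best_value': 0.5,                # best validation metric (placeholder)
}
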
Example #6
def main():
    opt = OptInit().get_args()

    logging.info('===> Creating dataloader...')
    test_dataset = GeoData.S3DIS(opt.data_dir,
                                 opt.area,
                                 train=False,
                                 pre_transform=T.NormalizeScale())
    test_loader = DenseDataLoader(test_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=False,
                                  num_workers=0)
    opt.n_classes = test_loader.dataset.num_classes
    if opt.no_clutter:
        opt.n_classes -= 1

    logging.info('===> Loading the network ...')
    model = DenseDeepGCN(opt).to(opt.device)
    model, opt.best_value, opt.epoch = load_pretrained_models(
        model, opt.pretrained_model, opt.phase)

    logging.info('===> Start Evaluation ...')
    test(model, test_loader, opt)
Example #7
                    cur_shape_iou_tot += I/U
                    cur_shape_iou_cnt += 1.

            if cur_shape_iou_cnt > 0:
                cur_shape_miou = cur_shape_iou_tot / cur_shape_iou_cnt
                shape_iou_tot += cur_shape_miou
                shape_iou_cnt += 1.

    shape_mIoU = shape_iou_tot / shape_iou_cnt
    part_iou = np.divide(part_intersect[1:], part_union[1:])
    mean_part_iou = np.mean(part_iou)
    opt.printer.info("===> Category {}-{}, Part mIOU is{:.4f} \t ".format(
                      opt.category_no, opt.category, mean_part_iou))


if __name__ == '__main__':
    opt = OptInit().initialize()
    opt.printer.info('===> Creating dataloader ...')
    test_dataset = PartNet(opt.data_dir, opt.dataset, opt.category, opt.level, 'val')
    test_loader = DenseDataLoader(test_dataset, batch_size=1, shuffle=True, num_workers=1)
    opt.n_classes = test_loader.dataset.num_classes

    opt.printer.info('===> Loading the network ...')
    model = DenseDeepGCN(opt).to(opt.device)
    opt.printer.info('===> loading pre-trained ...')
    model, opt.best_value, opt.epoch = load_pretrained_models(model, opt.pretrained_model, opt.phase)

    test(model, test_loader, opt)
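
Example #7 begins partway through the evaluation function, so the per-part intersection `I` and union `U` are computed before the excerpt starts. A hedged sketch of the usual pattern behind those names, consistent with the accumulators used above (`part_intersect`, `part_union` and the per-shape counters); an illustration, not the repository's exact code:

import numpy as np

def accumulate_shape_iou(pred, target, n_parts, part_intersect, part_union):
    # pred / target: integer part labels for every point of a single shape.
    cur_shape_iou_tot, cur_shape_iou_cnt = 0., 0.
    for part in range(1, n_parts):        # part 0 is treated as unlabelled
        pred_mask = (pred == part)
        gt_mask = (target == part)
        I = np.sum(np.logical_and(pred_mask, gt_mask))
        U = np.sum(np.logical_or(pred_mask, gt_mask))
        part_intersect[part] += I
        part_union[part] += U
        if U > 0:
            cur_shape_iou_tot += I / U
            cur_shape_iou_cnt += 1.
    return cur_shape_iou_tot, cur_shape_iou_cnt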


Example #8
def main():
    opt = OptInit().get_args()
    logging.info('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.data_dir,
                                  opt.area,
                                  True,
                                  pre_transform=T.NormalizeScale())
    train_loader = DenseDataLoader(train_dataset,
                                   batch_size=opt.batch_size,
                                   shuffle=True,
                                   num_workers=4)
    test_dataset = GeoData.S3DIS(opt.data_dir,
                                 opt.area,
                                 train=False,
                                 pre_transform=T.NormalizeScale())
    test_loader = DenseDataLoader(test_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=False,
                                  num_workers=0)
    opt.n_classes = train_loader.dataset.num_classes

    logging.info('===> Loading the network ...')
    model = DenseDeepGCN(opt).to(opt.device)
    if opt.multi_gpus:
        model = DataParallel(DenseDeepGCN(opt)).to(opt.device)

    logging.info('===> loading pre-trained ...')
    model, opt.best_value, opt.epoch = load_pretrained_models(
        model, opt.pretrained_model, opt.phase)
    logging.info(model)

    logging.info('===> Init the optimizer ...')
    criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq,
                                                opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(
        opt.pretrained_model, optimizer, scheduler, opt.lr)

    logging.info('===> Init Metric ...')
    opt.losses = AverageMeter()
    opt.test_value = 0.

    logging.info('===> start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        logging.info('Epoch:{}'.format(opt.epoch))
        train(model, train_loader, optimizer, criterion, opt)
        if opt.epoch % opt.eval_freq == 0 and opt.eval_freq != -1:
            test(model, test_loader, opt)
        scheduler.step()

        # ------------------ save checkpoints
        # the metric (test mIoU) is higher-is-better, hence > here and max below
        is_best = (opt.test_value > opt.best_value)
        opt.best_value = max(opt.test_value, opt.best_value)
        model_cpu = {k: v.cpu() for k, v in model.state_dict().items()}
        save_checkpoint(
            {
                'epoch': opt.epoch,
                'state_dict': model_cpu,
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
                'best_value': opt.best_value,
            }, is_best, opt.ckpt_dir, opt.exp_name)

        # ------------------ tensorboard log
        info = {
            'loss': opt.losses.avg,
            'test_value': opt.test_value,
            'lr': scheduler.get_lr()[0]
        }
        opt.writer.add_scalars('epoch', info, opt.iter)

    logging.info('Saving the final model. Finished!')
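
`save_checkpoint` receives the state dictionary built above together with `is_best`, the checkpoint directory and the experiment name. A common implementation of this pattern, sketched here under those assumptions (file names are illustrative), writes the latest checkpoint and keeps a copy whenever the metric improves:

import os
import shutil
import torch

def save_checkpoint(state, is_best, ckpt_dir, exp_name):
    # Always keep the most recent checkpoint; copy it aside when it is the best so far.
    filename = os.path.join(ckpt_dir, '{}_checkpoint.pth'.format(exp_name))
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename,
                        os.path.join(ckpt_dir, '{}_best.pth'.format(exp_name)))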