Example #1
def train(model, train_loader, test_loader, opt):
    opt.printer.info('===> Init the optimizer ...')
    criterion = nn.NLLLoss().to(opt.device)
    # criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq,
                                                opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(
        opt.pretrained_model, optimizer, scheduler, opt.lr, opt.use_ckpt_lr)
    opt.printer.info('===> Init Metric ...')
    opt.losses = AverageMeter()
    opt.test_value = 0.

    opt.printer.info('===> start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        train_step(model, train_loader, optimizer, scheduler, criterion, opt)
        if opt.epoch % opt.test_freq == 0:
            test(model, test_loader, opt)
        save_ckpt(model, optimizer, scheduler, opt)
        scheduler.step()
    opt.printer.info(
        'Saving the final model. Finished! Category {}-{}. Best part mIoU is {}. Best shape mIoU is {}.'
        .format(opt.category_no, opt.category, opt.best_value,
                opt.best_shapeMiou))
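Note the commented-out CrossEntropyLoss alternative above: nn.NLLLoss expects log-probabilities, so it only matches nn.CrossEntropyLoss when the model ends in log_softmax. A minimal standalone sketch of the equivalence (not tied to this repo):

import torch
import torch.nn as nn
import torch.nn.functional as F

logits = torch.randn(4, 10)            # raw outputs for 4 samples, 10 classes
targets = torch.randint(0, 10, (4,))

# CrossEntropyLoss consumes raw logits ...
ce = nn.CrossEntropyLoss()(logits, targets)
# ... while NLLLoss needs log-probabilities, i.e. log_softmax applied first
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)

assert torch.allclose(ce, nll)         # identical up to floating-point noise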
Example #2
def train(model, train_loader, val_loader, test_loader, opt):
    logging.info('===> Init the optimizer ...')
    criterion = nn.NLLLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)  # weight_decay=1e-4
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq, opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(opt.pretrained_model, optimizer, scheduler, opt.lr)
    logging.info('===> Init Metric ...')
    opt.losses = AverageMeter()

    best_val_part_miou = 0.
    best_test_part_miou = 0.
    test_part_miou_val_best = 0.

    logging.info('===> start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        # reset tracker
        opt.losses.reset()

        train_epoch(model, train_loader, optimizer, criterion, opt)
        val_part_iou, val_shape_mIoU = test(model, val_loader, opt)
        test_part_iou, test_shape_mIoU = test(model, test_loader, opt)

        scheduler.step()

        # ------------------  save ckpt
        if val_part_iou > best_val_part_miou:
            best_val_part_miou = val_part_iou
            test_part_miou_val_best = test_part_iou
            logging.info("Got a new best model on Validation with Part iou {:.4f}".format(best_val_part_miou))
            save_ckpt(model, optimizer, scheduler, opt, 'val_best')
        if test_part_iou > best_test_part_miou:
            best_test_part_miou = test_part_iou
            logging.info("Got a new best model on Test with Part iou {:.4f}".format(best_test_part_miou))
            save_ckpt(model, optimizer, scheduler, opt, 'test_best')

        # ------------------ show information
        logging.info(
            "===> Epoch {} Category {}-{}, Train Loss {:.4f}, mIoU on val {:.4f}, mIoU on test {:.4f}, "
            "Best val mIoU {:.4f}, its test mIoU {:.4f}. Best test mIoU {:.4f}".format(
                opt.epoch, opt.category_no, opt.category, opt.losses.avg, val_part_iou, test_part_iou,
                best_val_part_miou, test_part_miou_val_best, best_test_part_miou))

        info = {
            'loss': opt.losses.avg,
            'val_part_miou': val_part_iou,
            'test_part_miou': test_part_iou,
            'lr': scheduler.get_last_lr()[0]  # get_last_lr() replaces the deprecated get_lr() call
        }
        for tag, value in info.items():
            opt.logger.scalar_summary(tag, value, opt.step)

    save_ckpt(model, optimizer, scheduler, opt, 'last')
    logging.info(
        'Saving the final model. Finished! Category {}-{}. Best val part mIoU is {:.4f}. Its test mIoU is {:.4f}. '
        'Best test part mIoU is {:.4f}. Last test mIoU {:.4f} \n\n\n'.format(
            opt.category_no, opt.category, best_val_part_miou, test_part_miou_val_best,
            best_test_part_miou, test_part_iou))
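AverageMeter is imported from the repo's utilities rather than defined here. The usual implementation (e.g., the one in the official PyTorch ImageNet example, which these scripts most likely mirror) is a small running-average tracker:

class AverageMeter(object):
    """Tracks the latest value, running sum, count, and mean of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.
        self.sum = 0.
        self.count = 0
        self.avg = 0.

    def update(self, val, n=1):
        # n is typically the batch size, so avg is a per-sample mean
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count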
Example #3
def main():
    opt = OptInit().initialize()

    print('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.train_path,
                                  5,
                                  True,
                                  pre_transform=T.NormalizeScale())
    if opt.multi_gpus:
        train_loader = DataListLoader(train_dataset,
                                      batch_size=opt.batch_size,
                                      shuffle=True,
                                      num_workers=4)
    else:
        train_loader = DataLoader(train_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    opt.n_classes = train_loader.dataset.num_classes

    print('===> Loading the network ...')
    opt.model = getattr(models, opt.model_name)(opt).to(opt.device)
    if opt.multi_gpus:
        # wrap the model that was just built instead of constructing a second one
        opt.model = DataParallel(opt.model).to(opt.device)
    print('===> loading pre-trained ...')
    load_pretrained_models(opt)

    print('===> Init the optimizer ...')
    opt.criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    opt.valid_metric = miou
    opt.optimizer = torch.optim.Adam(opt.model.parameters(), lr=opt.lr)
    opt.scheduler = torch.optim.lr_scheduler.StepLR(opt.optimizer,
                                                    opt.lr_adjust_freq, 0.5)
    load_pretrained_optimizer(opt)

    print('===> start training ...')
    for _ in range(opt.total_epochs):
        opt.epoch += 1
        train(train_loader, opt)
        # valid(train_loader, opt)
        opt.scheduler.step()
    print('Saving the final model. Finished!')
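This example resolves the architecture dynamically with getattr(models, opt.model_name), so a model can be chosen from the command line without editing main(). A minimal standalone version of the pattern (the module and class names here are illustrative, not from the repo):

import sys
import torch.nn as nn

class TinyNet(nn.Module):  # stand-in for a real architecture defined in models.py
    def __init__(self, n_classes):
        super().__init__()
        self.head = nn.Linear(8, n_classes)

    def forward(self, x):
        return self.head(x)

models = sys.modules[__name__]          # here the current module plays the role of 'models'
model = getattr(models, 'TinyNet')(13)  # same lookup as getattr(models, opt.model_name)(opt)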
Example #4
def main():
    opt = OptInit().initialize()
    opt.printer.info('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.train_path,
                                  5,
                                  True,
                                  pre_transform=T.NormalizeScale())
    if opt.multi_gpus:
        train_loader = DataListLoader(train_dataset,
                                      batch_size=opt.batch_size,
                                      shuffle=True,
                                      num_workers=4)
    else:
        train_loader = DataLoader(train_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  num_workers=4)
    opt.n_classes = train_loader.dataset.num_classes

    opt.printer.info('===> Loading the network ...')
    model = SparseDeepGCN(opt).to(opt.device)
    if opt.multi_gpus:
        # wrap the model that was just built instead of constructing a second one
        model = DataParallel(model).to(opt.device)
    opt.printer.info('===> loading pre-trained ...')
    model, opt.best_value, opt.epoch = load_pretrained_models(
        model, opt.pretrained_model, opt.phase)

    opt.printer.info('===> Init the optimizer ...')
    criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    if opt.optim.lower() == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    elif opt.optim.lower() == 'radam':
        optimizer = optim.RAdam(model.parameters(), lr=opt.lr)
    else:
        raise NotImplementedError('opt.optim is not supported')
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq,
                                                opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(
        opt.pretrained_model, optimizer, scheduler, opt.lr)

    opt.printer.info('===> Init Metric ...')
    opt.losses = AverageMeter()
    # opt.test_metric = miou
    # opt.test_values = AverageMeter()
    opt.test_value = 0.

    opt.printer.info('===> start training ...')
    for _ in range(opt.total_epochs):
        opt.epoch += 1
        train(model, train_loader, optimizer, scheduler, criterion, opt)
        # test_value = test(model, test_loader, test_metric, opt)
        scheduler.step()
    opt.printer.info('Saving the final model. Finished!')
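load_pretrained_optimizer is another repo utility that is only called, never shown. Based on the four-argument call used here (checkpoint path in, optimizer/scheduler/lr out), a plausible reconstruction follows; the checkpoint keys are assumptions, not the repo's actual format:

import torch

def load_pretrained_optimizer(pretrained_model, optimizer, scheduler, lr):
    # hypothetical sketch: restore optimizer/scheduler state from a checkpoint
    # file when one is given, otherwise return the inputs unchanged
    if pretrained_model:
        ckpt = torch.load(pretrained_model, map_location='cpu')
        if 'optimizer_state_dict' in ckpt:
            optimizer.load_state_dict(ckpt['optimizer_state_dict'])
        if 'scheduler_state_dict' in ckpt:
            scheduler.load_state_dict(ckpt['scheduler_state_dict'])
            lr = scheduler.get_last_lr()[0]
    return optimizer, scheduler, lr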
Example #5
def main():
    opt = OptInit().get_args()
    logging.info('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.data_dir,
                                  opt.area,
                                  True,
                                  pre_transform=T.NormalizeScale())
    train_loader = DenseDataLoader(train_dataset,
                                   batch_size=opt.batch_size,
                                   shuffle=True,
                                   num_workers=4)
    test_dataset = GeoData.S3DIS(opt.data_dir,
                                 opt.area,
                                 train=False,
                                 pre_transform=T.NormalizeScale())
    test_loader = DenseDataLoader(test_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=False,
                                  num_workers=0)
    opt.n_classes = train_loader.dataset.num_classes

    logging.info('===> Loading the network ...')
    model = DenseDeepGCN(opt).to(opt.device)
    if opt.multi_gpus:
        # wrap the model that was just built instead of constructing a second one
        model = DataParallel(model).to(opt.device)
    logging.info('===> loading pre-trained ...')
    model, opt.best_value, opt.epoch = load_pretrained_models(
        model, opt.pretrained_model, opt.phase)
    logging.info(model)

    logging.info('===> Init the optimizer ...')
    criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq,
                                                opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(
        opt.pretrained_model, optimizer, scheduler, opt.lr)

    logging.info('===> Init Metric ...')
    opt.losses = AverageMeter()
    opt.test_value = 0.

    logging.info('===> start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        logging.info('Epoch:{}'.format(opt.epoch))
        train(model, train_loader, optimizer, scheduler, criterion, opt)
        if opt.epoch % opt.eval_freq == 0 and opt.eval_freq != -1:
            test(model, test_loader, opt)
        scheduler.step()
    logging.info('Saving the final model. Finished!')
Example #6
def train():
    info_format = 'Epoch: [{}]\t loss: {:.6f}\t train mF1: {:.6f}\t val mF1: {:.6f}\t test mF1: {:.6f}\t ' \
                  'best val mF1: {:.6f}\t best test mF1: {:.6f}'
    opt.printer.info('===> Init the optimizer ...')
    criterion = torch.nn.BCEWithLogitsLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    scheduler = ReduceLROnPlateau(optimizer, "min", patience=opt.lr_patience, verbose=True, factor=0.5, cooldown=30,
                                  min_lr=opt.lr/100)
    opt.scheduler = 'ReduceLROnPlateau'

    optimizer, scheduler, opt.lr = load_pretrained_optimizer(opt.pretrained_model, optimizer, scheduler, opt.lr)

    opt.printer.info('===> Init Metric ...')
    opt.losses = AverageMeter()

    best_val_value = 0.
    best_test_value = 0.

    opt.printer.info('===> Start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        loss, train_value = train_step(model, train_loader, optimizer, criterion, opt)
        val_value = test(model, valid_loader, opt)
        test_value = test(model, test_loader, opt)

        if val_value > best_val_value:
            best_val_value = val_value
            save_ckpt(model, optimizer, scheduler, opt.epoch, opt.save_path, opt.post, name_post='val_best')
        if test_value > best_test_value:
            best_test_value = test_value
            save_ckpt(model, optimizer, scheduler, opt.epoch, opt.save_path, opt.post, name_post='test_best')

        opt.printer.info(info_format.format(opt.epoch, loss, train_value, val_value, test_value, best_val_value,
                                            best_test_value))

        if opt.scheduler == 'ReduceLROnPlateau':
            scheduler.step(opt.losses.avg)
        else:
            scheduler.step()

    opt.printer.info('Saving the final model. Finished!')
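Unlike the StepLR schedules in the other examples, ReduceLROnPlateau has no fixed decay points: step() must be fed the monitored metric, which is why this example branches on opt.scheduler before stepping. A minimal standalone illustration:

import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau

params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.Adam(params, lr=1e-3)
scheduler = ReduceLROnPlateau(optimizer, 'min', patience=2, factor=0.5)

for epoch in range(10):
    val_loss = 1.0              # a validation loss that has plateaued
    scheduler.step(val_loss)    # pass the metric; after `patience` flat epochs the lr is halved

print(optimizer.param_groups[0]['lr'])  # smaller than 1e-3 once the plateau is detected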
Example #7
def main():
    opt = OptInit().get_args()
    logging.info('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.data_dir,
                                  opt.area,
                                  True,
                                  pre_transform=T.NormalizeScale())
    train_loader = DenseDataLoader(train_dataset,
                                   batch_size=opt.batch_size,
                                   shuffle=True,
                                   num_workers=4)
    test_dataset = GeoData.S3DIS(opt.data_dir,
                                 opt.area,
                                 train=False,
                                 pre_transform=T.NormalizeScale())
    test_loader = DenseDataLoader(test_dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=False,
                                  num_workers=0)
    opt.n_classes = train_loader.dataset.num_classes

    logging.info('===> Loading the network ...')
    model = DenseDeepGCN(opt).to(opt.device)
    if opt.multi_gpus:
        # wrap the model that was just built instead of constructing a second one
        model = DataParallel(model).to(opt.device)

    logging.info('===> loading pre-trained ...')
    model, opt.best_value, opt.epoch = load_pretrained_models(
        model, opt.pretrained_model, opt.phase)
    logging.info(model)

    logging.info('===> Init the optimizer ...')
    criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq,
                                                opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(
        opt.pretrained_model, optimizer, scheduler, opt.lr)

    logging.info('===> Init Metric ...')
    opt.losses = AverageMeter()
    opt.test_value = 0.

    logging.info('===> start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        logging.info('Epoch:{}'.format(opt.epoch))
        train(model, train_loader, optimizer, criterion, opt)
        if opt.epoch % opt.eval_freq == 0 and opt.eval_freq != -1:
            test(model, test_loader, opt)
        scheduler.step()

        # ------------------ save checkpoints
        # mIoU is a max-is-better metric, so the new value is best when it exceeds the previous best
        is_best = (opt.test_value > opt.best_value)
        opt.best_value = max(opt.test_value, opt.best_value)
        model_cpu = {k: v.cpu() for k, v in model.state_dict().items()}
        save_checkpoint(
            {
                'epoch': opt.epoch,
                'state_dict': model_cpu,
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
                'best_value': opt.best_value,
            }, is_best, opt.ckpt_dir, opt.exp_name)

        # ------------------ tensorboard log
        info = {
            'loss': opt.losses.avg,
            'test_value': opt.test_value,
            'lr': scheduler.get_last_lr()[0]  # get_last_lr() replaces the deprecated get_lr() call
        }
        opt.writer.add_scalars('epoch', info, opt.iter)

    logging.info('Saving the final model. Finished!')
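save_checkpoint is also supplied by the repo's utilities. Given the call signature above (state dict, is_best flag, directory, experiment name), the classic pattern is to write the latest checkpoint and copy it to a best-model file when is_best is set. A sketch under that assumption (the file names are hypothetical):

import os
import shutil
import torch

def save_checkpoint(state, is_best, ckpt_dir, exp_name):
    # hypothetical reconstruction of the helper used above
    filename = os.path.join(ckpt_dir, '{}_ckpt_latest.pth'.format(exp_name))
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename,
                        os.path.join(ckpt_dir, '{}_ckpt_best.pth'.format(exp_name)))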
Example #8
def train(model, train_loader, test_loader, opt):
    logging.info('===> Init the optimizer ...')
    criterion = SmoothCrossEntropy()
    if opt.use_sgd:
        logging.info("===> Use SGD")
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=opt.lr * 100,
                                    momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        logging.info("===> Use Adam")
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    # eta_min is set to the Adam base lr, so the cosine schedule mainly matters
    # for the SGD branch, which starts at opt.lr * 100 and anneals down to opt.lr
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                           opt.epochs,
                                                           eta_min=opt.lr)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(
        opt.pretrained_model, optimizer, scheduler, opt.lr)

    logging.info('===> Init Metric ...')
    opt.train_losses = AverageMeter()
    opt.test_losses = AverageMeter()
    best_test_overall_acc = 0.
    avg_acc_when_best = 0.

    logging.info('===> start training ...')
    for _ in range(opt.epoch, opt.epochs):
        opt.epoch += 1
        # reset tracker
        opt.train_losses.reset()
        opt.test_losses.reset()

        train_overall_acc, train_class_acc, opt = train_step(
            model, train_loader, optimizer, criterion, opt)
        test_overall_acc, test_class_acc, opt = infer(model, test_loader,
                                                      criterion, opt)

        scheduler.step()

        # ------------------  save ckpt
        if test_overall_acc > best_test_overall_acc:
            best_test_overall_acc = test_overall_acc
            avg_acc_when_best = test_class_acc
            logging.info(
                "Got a new best model on Test with Overall ACC {:.4f}. "
                "Its avg acc is {:.4f}".format(best_test_overall_acc,
                                               avg_acc_when_best))
            save_ckpt(model, optimizer, scheduler, opt, 'best')

        # ------------------ show information
        logging.info(
            "===> Epoch {}/{}, Train Loss {:.4f}, Test Overall Acc {:.4f}, Test Avg Acc {:.4f}, "
            "Best Test Overall Acc {:.4f}, its test avg acc {:.4f}.".format(
                opt.epoch, opt.epochs, opt.train_losses.avg, test_overall_acc,
                test_class_acc, best_test_overall_acc, avg_acc_when_best))

        info = {
            'train_loss': opt.train_losses.avg,
            'train_OA': train_overall_acc,
            'train_avg_acc': train_class_acc,
            'test_loss': opt.test_losses.avg,
            'test_OA': test_overall_acc,
            'test_avg_acc': test_class_acc,
            'lr': scheduler.get_last_lr()[0]  # get_last_lr() replaces the deprecated get_lr() call
        }
        for tag, value in info.items():
            opt.writer.scalar_summary(tag, value, opt.step)

    save_ckpt(model, optimizer, scheduler, opt, 'last')
    logging.info(
        'Saving the final model. Finished! Best Test Overall Acc {:.4f}, its test avg acc {:.4f}. '
        'Last Test Overall Acc {:.4f}, its test avg acc {:.4f}.'.format(
            best_test_overall_acc, avg_acc_when_best, test_overall_acc,
            test_class_acc))
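SmoothCrossEntropy is not defined in these snippets. The standard label-smoothing cross entropy it presumably implements (common in point-cloud classification code) spreads a little probability mass over the wrong classes; the smoothing factor of 0.2 below is an assumed default, not taken from the repo:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SmoothCrossEntropy(nn.Module):
    """Cross entropy against a smoothed target: 1 - eps on the true class,
    eps / (K - 1) spread over the remaining K - 1 classes."""

    def __init__(self, eps=0.2):  # eps=0.2 is an assumption
        super().__init__()
        self.eps = eps

    def forward(self, logits, target):
        n_class = logits.size(1)
        one_hot = F.one_hot(target, n_class).float()
        smoothed = one_hot * (1 - self.eps) + (1 - one_hot) * self.eps / (n_class - 1)
        log_prob = F.log_softmax(logits, dim=1)
        return -(smoothed * log_prob).sum(dim=1).mean()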