Code Example #1
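# Project-specific imports (argument_parser, constants, dataset_base,
# IndoorScenes, DeepLab, Trainer, Saver, TensorboardSummary, get_active_dataset,
# get_active_selector, calculate_weights_labels) are omitted in this excerpt.
import os

import torch
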
def main():

    args = argument_parser.parse_args()
    print(args)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = True
    # hardcoding scannet

    # get handle to lmdb dataset
    lmdb_handle = dataset_base.LMDBHandle(os.path.join(constants.HDD_DATASET_ROOT, args.dataset, "dataset.lmdb"), args.memory_hog)
    
    # create train val and test sets
    train_set = get_active_dataset(args.active_selection_mode)(args.dataset, lmdb_handle, args.superpixel_dir, args.base_size, 'seedset_0')
    val_set = IndoorScenes(args.dataset, lmdb_handle, args.base_size, 'val')
    test_set = IndoorScenes(args.dataset, lmdb_handle, args.base_size, 'test')

    class_weights = None
    if args.use_balanced_weights:
        class_weights = calculate_weights_labels(get_active_dataset(args.active_selection_mode)(args.dataset, lmdb_handle, args.superpixel_dir, args.base_size, 'train'))

    saver = Saver(args)
    saver.save_experiment_config()
    summary = TensorboardSummary(saver.experiment_dir)
    writer = summary.create_summary()

    # get active selection method
    active_selector = get_active_selector(args, lmdb_handle, train_set)


    # for each active selection iteration
    for selection_iter in range(args.max_iterations):

        fraction_of_data_labeled = int(round(train_set.get_fraction_of_labeled_data() * 100))
        
        run_dir = os.path.join(constants.RUNS, args.dataset, args.checkname, f'runs_{fraction_of_data_labeled:03d}')
        if os.path.exists(os.path.join(run_dir, "selections")):
            # resume: reload selections saved as a directory by a previous run (region-based modes)
            train_set.load_selections(os.path.join(run_dir, "selections"))
        elif os.path.exists(os.path.join(run_dir, "selections.txt")):
            # resume: reload selections saved as a text file by a previous run (full-image modes)
            train_set.load_selections(os.path.join(run_dir, "selections.txt"))
        else:
            # active selection iteration

            train_set.make_dataset_multiple_of_batchsize(args.batch_size)
            # create model from scratch
            model = DeepLab(num_classes=train_set.num_classes, backbone=args.backbone, output_stride=args.out_stride, sync_bn=args.sync_bn,
                            mc_dropout=(args.active_selection_mode.startswith('viewmc')
                                        or args.active_selection_mode.startswith('vote')
                                        or args.view_entropy_mode == 'mc_dropout'))
            model = model.cuda()
            # create trainer
            trainer = Trainer(args, model, train_set, val_set, test_set, class_weights, Saver(args, suffix=f'runs_{fraction_of_data_labeled:03d}'))
            
            # train for args.epochs epochs
            lr_scheduler = trainer.lr_scheduler
            for epoch in range(args.epochs):
                trainer.training(epoch)
                if epoch % args.eval_interval == (args.eval_interval - 1):
                    trainer.validation(epoch)
                if lr_scheduler:
                    lr_scheduler.step()

            train_set.reset_dataset()
            epoch = trainer.load_best_checkpoint()

            # get best val miou / metrics
            _, best_mIoU, best_mIoU_20, best_Acc, best_Acc_class, best_FWIoU = trainer.validation(epoch, test=True)

            trainer.evaluator.dump_matrix(os.path.join(trainer.saver.experiment_dir, "confusion_matrix.npy"))

            writer.add_scalar('active_loop/mIoU', best_mIoU, train_set.get_fraction_of_labeled_data() * 100)
            writer.add_scalar('active_loop/mIoU_20', best_mIoU_20, train_set.get_fraction_of_labeled_data() * 100)
            writer.add_scalar('active_loop/Acc', best_Acc, train_set.get_fraction_of_labeled_data() * 100)
            writer.add_scalar('active_loop/Acc_class', best_Acc_class, train_set.get_fraction_of_labeled_data() * 100)
            writer.add_scalar('active_loop/fwIoU', best_FWIoU, train_set.get_fraction_of_labeled_data() * 100)

            # make active selection
            active_selector.select_next_batch(model, train_set, args.active_selection_size)
            # save selections
            trainer.saver.save_active_selections(train_set.get_selections(), args.active_selection_mode.endswith("_region"))
            trainer.train_writer.close()
            trainer.val_writer.close()

        print(selection_iter, " / Train-set length: ", len(train_set))
        
    writer.close()
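
For reference: the active_selector used above only needs to expose select_next_batch(model, train_set, selection_size), as called near the end of the loop. Below is a minimal random-selection baseline sketching that interface; get_unlabeled_paths and add_to_labeled are hypothetical accessors standing in for whatever the project's dataset class actually provides.

import random

class RandomSelector:
    """Minimal random baseline for the select_next_batch interface used above."""

    def select_next_batch(self, model, train_set, selection_size):
        # get_unlabeled_paths / add_to_labeled are hypothetical stand-ins
        # for the dataset's real accessors.
        unlabeled = train_set.get_unlabeled_paths()
        chosen = random.sample(unlabeled, min(selection_size, len(unlabeled)))
        train_set.add_to_labeled(chosen)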
Code Example #2
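# Project-specific imports (build_datasets, BiSeNet, Trainer, Saver,
# SummaryWriter, AccCaches, get_curtime) are omitted in this excerpt;
# args appears to be parsed at module level.
import numpy as np
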
def main():
    # dataset
    trainset, valset, testset = build_datasets(args.dataset, args.base_size,
                                               args.crop_size)

    model = BiSeNet(trainset.num_classes, args.context_path, args.in_planes)

    class_weights = None
    if args.use_balanced_weights:  # default false
        class_weights = np.array([  # med_freq (median-frequency) weights, 37 classes
            0.382900, 0.452448, 0.637584, 0.377464, 0.585595,  # 1-5
            0.479574, 0.781544, 0.982534, 1.017466, 0.624581,  # 6-10
            2.589096, 0.980794, 0.920340, 0.667984, 1.172291,  # 11-15
            0.862240, 0.921714, 2.154782, 1.187832, 1.178115,  # 16-20
            1.848545, 1.428922, 2.849658, 0.771605, 1.656668,  # 21-25
            4.483506, 2.209922, 1.120280, 2.790182, 0.706519,  # 26-30
            3.994768, 2.220004, 0.972934, 1.481525, 5.342475,  # 31-35
            0.750738, 4.040773                                 # 36-37
        ])
        # class_weights = np.load('/datasets/rgbd_dataset/SUNRGBD/train/class_weights.npy')
        # class_weights = cal_class_weights(trainset, trainset.num_classes)

    saver = Saver(args, timestamp=get_curtime())
    writer = SummaryWriter(saver.experiment_dir)
    trainer = Trainer(args, model, trainset, valset, testset, class_weights,
                      saver, writer)

    start_epoch = 0

    miou_caches = AccCaches(patience=5)  # miou
    for epoch in range(start_epoch, args.epochs):
        trainer.training(epoch)
        if epoch % args.eval_interval == (args.eval_interval - 1):
            miou, pixelAcc = trainer.validation(epoch)
            miou_caches.add(epoch, miou)
            if miou_caches.full():
                print('acc caches:', miou_caches.accs)
                print('best epoch:', trainer.best_epoch, 'best miou:',
                      trainer.best_mIoU)
                _, max_miou = miou_caches.max_cache_acc()
                if max_miou < trainer.best_mIoU:
                    print('end training')
                    break

    print('valid')
    print('best mIoU:', trainer.best_mIoU, 'pixelAcc:', trainer.best_pixelAcc)

    # test
    epoch = trainer.load_best_checkpoint()
    test_mIoU, test_pixelAcc = trainer.validation(epoch, test=True)
    print('test')
    print('best mIoU:', test_mIoU, 'pixelAcc:', test_pixelAcc)

    writer.flush()
    writer.close()
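
AccCaches is a project helper; its usage above (add, full, accs, max_cache_acc) suggests a sliding window over the last `patience` validation scores. The sketch below is a minimal implementation consistent with that usage, not necessarily the project's own:

from collections import deque

class AccCaches:
    """Sliding window of (epoch, acc) pairs for patience-style early stopping."""

    def __init__(self, patience):
        self.patience = patience
        self._cache = deque(maxlen=patience)

    def add(self, epoch, acc):
        self._cache.append((epoch, acc))

    def full(self):
        return len(self._cache) == self.patience

    @property
    def accs(self):
        return [acc for _, acc in self._cache]

    def max_cache_acc(self):
        # (epoch, acc) of the best score currently in the window
        return max(self._cache, key=lambda pair: pair[1])

With this reading, training stops once the best mIoU seen in the last patience evaluations no longer improves on trainer.best_mIoU.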
Code Example #3
File: train.py  Project: zhouleiSJTU/ViewAL
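# Project-specific imports are omitted in this excerpt, as in Example #1.
import os

import torch
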
def main():

    # script for training a model using 100% train set

    args = argument_parser.parse_args()
    print(args)
    torch.manual_seed(args.seed)

    lmdb_handle = dataset_base.LMDBHandle(
        os.path.join(constants.HDD_DATASET_ROOT, args.dataset, "dataset.lmdb"),
        args.memory_hog)
    train_set = IndoorScenes(args.dataset, lmdb_handle, args.base_size,
                             'train')
    val_set = IndoorScenes(args.dataset, lmdb_handle, args.base_size, 'val')
    test_set = IndoorScenes(args.dataset, lmdb_handle, args.base_size, 'test')
    train_set.make_dataset_multiple_of_batchsize(args.batch_size)

    model = DeepLab(num_classes=train_set.num_classes,
                    backbone=args.backbone,
                    output_stride=args.out_stride,
                    sync_bn=args.sync_bn)
    model = model.cuda()

    class_weights = None
    if args.use_balanced_weights:
        class_weights = calculate_weights_labels(train_set)

    saver = Saver(args)
    trainer = Trainer(args, model, train_set, val_set, test_set, class_weights,
                      Saver(args))
    summary = TensorboardSummary(saver.experiment_dir)
    writer = summary.create_summary()

    start_epoch = 0
    if args.resume:
        args.resume = os.path.join(constants.RUNS, args.dataset, args.resume,
                                   'checkpoint.pth.tar')
        if not os.path.isfile(args.resume):
            raise RuntimeError(f"=> no checkpoint found at {args.resume}")
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint['epoch']
        trainer.model.load_state_dict(checkpoint['state_dict'])
        trainer.optimizer.load_state_dict(checkpoint['optimizer'])
        trainer.best_pred = checkpoint['best_pred']
        print(
            f'=> loaded checkpoint {args.resume} (epoch {checkpoint["epoch"]})'
        )

    lr_scheduler = trainer.lr_scheduler

    for epoch in range(start_epoch, args.epochs):
        trainer.training(epoch)
        if epoch % args.eval_interval == (args.eval_interval - 1):
            trainer.validation(epoch)
        if lr_scheduler:
            lr_scheduler.step()

    epoch = trainer.load_best_checkpoint()
    _, best_mIoU, best_mIoU_20, best_Acc, best_Acc_class, best_FWIoU = trainer.validation(
        epoch, test=True)

    writer.add_scalar('test/mIoU', best_mIoU, epoch)
    writer.add_scalar('test/mIoU_20', best_mIoU_20, epoch)
    writer.add_scalar('test/Acc', best_Acc, epoch)
    writer.add_scalar('test/Acc_class', best_Acc_class, epoch)
    writer.add_scalar('test/fwIoU', best_FWIoU, epoch)

    trainer.train_writer.close()
    trainer.val_writer.close()
    writer.close()  # also close the summary writer opened above
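
The resume block in this example expects a checkpoint dict with the keys epoch, state_dict, optimizer, and best_pred. For reference, a matching save call would look like the sketch below; run_dir and best_pred are placeholders, and the project's Trainer presumably writes something equivalent.

torch.save({
    'epoch': epoch + 1,                  # epoch to resume from
    'state_dict': model.state_dict(),
    'optimizer': optimizer.state_dict(),
    'best_pred': best_pred,              # best validation metric so far
}, os.path.join(run_dir, 'checkpoint.pth.tar'))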