Example #1
import argparse
import json
from pathlib import Path

from torch import nn
from torch.optim import Adam
from torch.utils.data import DataLoader

# Project-local imports (utils, UNet11, Loss, CarvanaDataset, validation)
# are assumed to come from the surrounding repository.


def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--dice-weight', type=float)
    arg('--nll-weights', action='store_true')
    arg('--device-ids', type=str, help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--size',
        type=str,
        default='1280x1920',
        help='Input size, for example 288x384. Must be multiples of 32')
    utils.add_args(parser)
    args = parser.parse_args()

    model_name = 'unet_11'

    args.root = str(utils.MODEL_PATH / model_name)

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    model = UNet11()

    # --device-ids has no default, so guard against None before splitting;
    # device_ids=None lets nn.DataParallel use all visible GPUs.
    if args.device_ids:
        device_ids = list(map(int, args.device_ids.split(',')))
    else:
        device_ids = None
    model = nn.DataParallel(model, device_ids=device_ids).cuda()

    loss = Loss()

    def make_loader(ds_root: Path, to_augment=False, shuffle=False):
        return DataLoader(dataset=CarvanaDataset(ds_root,
                                                 to_augment=to_augment),
                          shuffle=shuffle,
                          num_workers=args.workers,
                          batch_size=args.batch_size,
                          pin_memory=True)

    train_root = utils.DATA_ROOT / str(args.fold) / 'train'
    valid_root = utils.DATA_ROOT / str(args.fold) / 'val'

    valid_loader = make_loader(valid_root)
    train_loader = make_loader(train_root, to_augment=True, shuffle=True)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    utils.train(init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
                args=args,
                model=model,
                criterion=loss,
                train_loader=train_loader,
                valid_loader=valid_loader,
                validation=validation,
                fold=args.fold)
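
Every example calls utils.add_args(parser) and later reads attributes such as args.root, args.workers, args.batch_size, and args.clean that are never registered locally. A minimal sketch of what that helper might register is below; the flag names follow the attributes the snippets actually access, but the defaults are illustrative assumptions, not the repository's real values.

def add_args(parser):
    """Sketch of the shared flags utils.add_args might register.

    Names mirror the attributes the examples read (args.root, args.workers,
    args.batch_size, args.clean, plus the lr that utils.train feeds to
    init_optimizer); defaults are illustrative only.
    """
    arg = parser.add_argument
    arg('--root', type=str, default='runs/debug', help='checkpoint directory')
    arg('--batch-size', type=int, default=4)
    arg('--workers', type=int, default=4)
    arg('--lr', type=float, default=1e-4)
    arg('--clean', action='store_true',
        help='remove an existing run directory before training')
    return parser

Returning the parser keeps both call styles used below valid: Example #1 ignores the return value, while Example #2 reassigns it.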
Example #2
                    f.write('\tloss_gen_CE_t : {0:0.4f}\n'.format(
                        loss_gen_CE_t.item()))
                    f.write('\tloss_gen_CE_s : {0:0.4f}\n'.format(
                        loss_gen_CE_s.item()))
                    f.write('\tloss_dis_s1 : {0:0.4f}\n'.format(
                        loss_dis_s1.item()))
                    f.write('\tloss_dis_t1 : {0:0.4f}\n'.format(
                        loss_dis_t1.item()))
                    f.write('\tloss_dis_s2 : {0:0.4f}\n'.format(
                        loss_dis_s2.item()))
                    f.write('\tloss_dis_t2 : {0:0.4f}\n'.format(
                        loss_dis_t2.item()))
                    f.write(
                        '\tavgaccuracy_tgt : {0:0.2f}\n'.format(avgaccuracy1))
                    f.write(
                        '\tavgaccuracy_src : {0:0.2f}\n'.format(avgaccuracy2))
                    f.write('\tagreement : {0}\n'.format(nagree))
                    f.close()

            if epoch >= opt.num_epochs:
                os.rename(run_dir, run_dir[:-8])
                break


if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser()
    arg_parser = utils.add_args(arg_parser)
    opt_ = arg_parser.parse_args()

    main(opt_)
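
The run of near-identical f.write calls in this snippet could be collapsed into a small data-driven helper. A sketch only: write_scalar_logs is a hypothetical name, and the dummy dict stands in for the .item() values the snippet computes from live loss tensors.

import sys


def write_scalar_logs(f, scalars, fmt='\t{} : {:0.4f}\n'):
    # Write one tab-indented 'name : value' line per scalar, matching the
    # format of the repeated f.write calls above.
    for name, value in scalars.items():
        f.write(fmt.format(name, value))


# Usage with dummy values standing in for loss_gen_CE_t.item() etc.:
write_scalar_logs(sys.stdout, {'loss_gen_CE_t': 0.6931, 'loss_dis_s1': 0.25})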
Example #3
import argparse
import json
import shutil
from pathlib import Path

import numpy as np
import torch
import tqdm
from torch import nn
from torch.optim import Adam
from torch.utils.data import DataLoader

# Project-local imports (utils, dataset, unet_models, UNet, StreetDataset,
# Loss, validation, save_predictions, predict) are assumed to come from the
# surrounding repository.


def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--mode', choices=['train', 'valid', 'predict_valid', 'predict_test'],
        default='train')
    arg('--limit', type=int, help='use only N images for valid/train')
    arg('--dice-weight', type=float, default=0.0)
    arg('--nll-weights', action='store_true')
    arg('--device-ids', type=str, help='For example 0,1 to run on two GPUs')
    arg('--size', type=str, default='768x512',
        help='Input size, for example 768x512. Must be multiples of 32')
    arg('--model')
    utils.add_args(parser)
    args = parser.parse_args()

    root = Path(args.root)
    if args.model:
        model = getattr(unet_models, args.model)()
    else:
        model = UNet()

    w, h = map(int, args.size.split('x'))
    if not (w % 32 == 0 and h % 32 == 0):
        parser.error('Wrong --size: both dimensions should be multiples of 32')
    size = (w, h)
    out_size = (w // model.output_downscaled, h // model.output_downscaled)

    if utils.cuda_is_available:
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    if args.nll_weights:
        # Weight each class by sqrt(1 / frequency), then normalize to sum to 1.
        class_weights = np.sqrt(np.array(
            [1 / ratio for ratio in dataset.CLS_RATIOS.values()]))
        class_weights /= class_weights.sum()
    else:
        class_weights = None
    loss = Loss(dice_weight=args.dice_weight, class_weights=class_weights)

    if args.limit:
        limit = args.limit
        valid_limit = limit // 5
    else:
        limit = valid_limit = None

    def make_loader(ds_root: Path, limit_: int):
        return DataLoader(
            dataset=StreetDataset(ds_root, size, out_size=out_size, limit=limit_),
            shuffle=True,
            num_workers=args.workers,
            batch_size=args.batch_size,
        )
    valid_root = utils.DATA_ROOT / 'validation'

    if args.mode == 'train':
        train_loader = make_loader(utils.DATA_ROOT / 'training', limit)
        valid_loader = make_loader(valid_root, valid_limit)
        if root.exists() and args.clean:
            shutil.rmtree(str(root))
        root.mkdir(exist_ok=True)
        root.joinpath('params.json').write_text(
            json.dumps(vars(args), indent=True, sort_keys=True))
        utils.train(
            init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
            args=args,
            model=model,
            criterion=loss,
            train_loader=train_loader,
            valid_loader=valid_loader,
            validation=validation,
            save_predictions=save_predictions,
            patience=2,
        )

    elif args.mode == 'valid':
        valid_loader = make_loader(valid_root, valid_limit)
        state = torch.load(str(Path(args.root) / 'model.pt'))
        model.load_state_dict(state['model'])
        validation(model, loss, tqdm.tqdm(valid_loader, desc='Validation'))

    elif args.mode == 'predict_valid':
        utils.load_best_model(model, root)
        predict(model, valid_root, out_path=root / 'validation',
                size=size, batch_size=args.batch_size)

    elif args.mode == 'predict_test':
        utils.load_best_model(model, root)
        test_root = utils.DATA_ROOT / 'testing'
        predict(model, test_root, out_path=root / 'testing',
                size=size, batch_size=args.batch_size)
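
The --nll-weights branch above computes inverse-frequency class weights: each class gets sqrt(1 / ratio), and the vector is then normalized to sum to one. A self-contained sketch with made-up class ratios (the real ones live in dataset.CLS_RATIOS):

import numpy as np

# Hypothetical class frequencies; the snippet reads the real ones from
# dataset.CLS_RATIOS.
cls_ratios = {'road': 0.5, 'building': 0.3, 'car': 0.15, 'pedestrian': 0.05}

# sqrt damps the inverse-frequency weighting so very rare classes do not
# dominate the NLL term outright.
weights = np.sqrt(np.array([1 / r for r in cls_ratios.values()]))
weights /= weights.sum()

for name, w in zip(cls_ratios, weights):
    print('{}: {:.3f}'.format(name, w))
# road: 0.137, building: 0.177, car: 0.251, pedestrian: 0.434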
Example #4
            'best-model_{fold}.pt'.format(fold=fold)))

    model.load_state_dict(state['model'])
    model.eval()

    return model


if __name__ == '__main__':
    img_rows, img_cols = 1280, 1918

    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--fold', type=int)

    utils.add_args(parser)
    args = parser.parse_args()

    model_path = utils.MODEL_PATH
    data_path = utils.DATA_ROOT

    model_name = 'unet_11'

    pred_path = Path(model_name)
    pred_path.mkdir(exist_ok=True, parents=True)

    fold_path = pred_path / str(args.fold)

    val_path = fold_path / 'val'
    val_path.mkdir(exist_ok=True, parents=True)
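
The fragment above is the tail of a checkpoint-loading helper. A self-contained sketch of the whole pattern, with a hypothetical name (load_model) and the same {'model': state_dict} checkpoint layout that the torch.load call in Example #3 uses:

import torch


def load_model(model, model_path, fold):
    """Hypothetical helper completing the fragment above: load the best
    checkpoint for a fold and switch the network to inference mode."""
    state = torch.load(
        str(model_path / 'best-model_{fold}.pt'.format(fold=fold)),
        map_location='cpu')
    model.load_state_dict(state['model'])
    model.eval()  # freeze dropout / batchnorm statistics for prediction
    return model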
Example #5
import argparse
import json
import shutil
from pathlib import Path

from torch import nn
from torch.optim import Adam
from torch.utils.data import DataLoader

# Project-local imports (utils, UNet11, Loss, StreetDataset, validation,
# predict) are assumed to come from the surrounding repository.


def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--mode',
        choices=['train', 'valid', 'predict_valid', 'predict_test'],
        default='predict_test')
    arg('--limit', type=int, help='use only N images for valid/train')
    arg('--dice-weight', type=float, default=0.0)
    arg('--device-ids', type=str, help='For example 0,1 to run on two GPUs')
    arg('--size',
        type=str,
        default='768x512',
        help='Input size, for example 768x512. Must be multiples of 32')
    utils.add_args(parser)
    args = parser.parse_args()

    root = Path(args.root)
    model = UNet11()
    if args.device_ids:
        device_ids = list(map(int, args.device_ids.split(',')))
    else:
        device_ids = None

    model = nn.DataParallel(model, device_ids=device_ids).cuda()
    loss = Loss(dice_weight=args.dice_weight)

    w, h = map(int, args.size.split('x'))
    if not (w % 32 == 0 and h % 32 == 0):
        parser.error('Wrong --size: both dimensions should be multiples of 32')
    size = (w, h)

    if args.limit:
        limit = args.limit
        valid_limit = limit // 5
    else:
        limit = valid_limit = None

    def make_loader(ds_root: Path, limit_: int, augmentation=False):
        return DataLoader(dataset=StreetDataset(ds_root,
                                                size,
                                                limit=limit_,
                                                augmentation=augmentation),
                          shuffle=True,
                          num_workers=args.workers,
                          batch_size=args.batch_size,
                          pin_memory=True)

    valid_root = utils.DATA_ROOT / 'validation'

    if args.mode == 'train':
        train_loader = make_loader(utils.DATA_ROOT / 'training',
                                   limit,
                                   augmentation=True)
        valid_loader = make_loader(valid_root, valid_limit)
        if root.exists() and args.clean:
            shutil.rmtree(str(root))
        root.mkdir(exist_ok=True)
        root.joinpath('params.json').write_text(
            json.dumps(vars(args), indent=True, sort_keys=True))

        utils.train(
            init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
            args=args,
            model=model,
            criterion=loss,
            train_loader=train_loader,
            valid_loader=valid_loader,
            validation=validation,
            patience=2,
        )

    # elif args.mode == 'valid':
    #     valid_loader = make_loader(valid_root, valid_limit)
    #     state = torch.load(str(Path(args.root) / 'model.pt'))
    #     model.load_state_dict(state['model'])
    #     validation(model, loss, tqdm.tqdm(valid_loader, desc='Validation'))
    #
    # elif args.mode == 'predict_valid':
    #     utils.load_best_model(model, root)
    #     predict(model, valid_root, out_path=root / 'validation',
    #             size=size, batch_size=args.batch_size)
    #
    elif args.mode == 'predict_test':
        utils.load_best_model(model, root)
        test_root = utils.DATA_ROOT / 'testing'
        predict(model,
                test_root,
                out_path=root / 'testing',
                size=size,
                batch_size=args.batch_size,
                workers=args.workers)
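
Examples #1, #3, and #5 all repeat the same --device-ids parsing before wrapping the model in nn.DataParallel. A small helper, sketched here with an assumed name (parse_device_ids is mine, not the repository's):

from typing import List, Optional


def parse_device_ids(spec: Optional[str]) -> Optional[List[int]]:
    """Turn a --device-ids string such as '0,1' into [0, 1].

    Returns None when the flag is omitted, which makes nn.DataParallel
    fall back to all visible GPUs.
    """
    return list(map(int, spec.split(','))) if spec else None


# Usage, mirroring the examples above:
#   device_ids = parse_device_ids(args.device_ids)
#   model = nn.DataParallel(model, device_ids=device_ids).cuda()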