示例#1
0
def settings():
    """Configure reproducibility and hardware-dependent hyperparameters.

    Detects CUDA availability, picks a batch size based on the GPU model
    (doubled when FP16 is enabled), seeds numpy and the global RNGs, and
    configures cuDNN.

    Returns:
        tuple: ``(num_workers, batch_size)`` where ``num_workers`` is the
        CPU core count minus two (reserved for the main process).
    """
    GPU_TRAIN = torch.cuda.is_available()
    SEED = 2020
    FP16 = True
    NUM_CORES = multiprocessing.cpu_count()
    BS = 8  # conservative CPU fallback
    if GPU_TRAIN:
        CUDA_NAME = torch.cuda.get_device_name()
        # Older/smaller cards get a smaller batch size.
        if CUDA_NAME in ['Tesla K80', 'Tesla P4']:
            BS = 32
        else:
            BS = 64
        if FP16:
            # Half precision roughly halves activation memory.
            BS = int(BS * 2)
        print(f'GPU: {CUDA_NAME}')

    np.random.seed(SEED)
    set_global_seed(SEED)
    prepare_cudnn()
    device = torch.device("cuda:0" if GPU_TRAIN else "cpu")

    print(f'Number of cores CPU: {NUM_CORES}')
    print(f'Batch size: {BS}')
    if GPU_TRAIN:
        # get_device_properties is only valid for CUDA devices; calling it
        # with a CPU device raises, so guard it (bug in the original).
        print(torch.cuda.get_device_properties(device).total_memory / 2**20)

    return NUM_CORES - 2, BS
示例#2
0
def main(args, unknown_args):
    """Entry point for an RL trainer run.

    Builds the environment, algorithm, database server and trainer from
    the parsed config, optionally restores model weights from a
    checkpoint, then runs the trainer loop.
    """
    args, config = parse_args_uargs(args, unknown_args)
    set_global_seed(args.seed)
    prepare_cudnn(args.deterministic, args.benchmark)

    if args.logdir is not None:
        os.makedirs(args.logdir, exist_ok=True)
        dump_environment(config, args.logdir, args.configs)

    if args.expdir is not None:
        # Importing the experiment dir registers user modules as a side effect.
        module = import_module(expdir=args.expdir)  # noqa: F841
        if args.logdir is not None:
            dump_code(args.expdir, args.logdir)

    env = ENVIRONMENTS.get_from_params(**config["environment"])

    # The algorithm name selects between the off-policy and on-policy
    # registries; on-policy training synchronizes epochs with samplers.
    algorithm_name = config["algorithm"].pop("algorithm")
    if algorithm_name in OFFPOLICY_ALGORITHMS_NAMES:
        ALGORITHMS = OFFPOLICY_ALGORITHMS
        trainer_fn = OffpolicyTrainer
        sync_epoch = False
    elif algorithm_name in ONPOLICY_ALGORITHMS_NAMES:
        ALGORITHMS = ONPOLICY_ALGORITHMS
        trainer_fn = OnpolicyTrainer
        sync_epoch = True
    else:
        # @TODO: add registry for algorithms, trainers, samplers
        raise NotImplementedError()

    db_server = DATABASES.get_from_params(
        **config.get("db", {}), sync_epoch=sync_epoch
    )

    algorithm_fn = ALGORITHMS.get(algorithm_name)
    algorithm = algorithm_fn.prepare_for_trainer(env_spec=env, config=config)

    if args.resume is not None:
        # Restore weights only; optimizer state is intentionally skipped.
        checkpoint = utils.load_checkpoint(filepath=args.resume)
        checkpoint = utils.any2device(checkpoint, utils.get_device())
        algorithm.unpack_checkpoint(
            checkpoint=checkpoint,
            with_optimizer=False
        )

    monitoring_params = config.get("monitoring_params", None)

    trainer = trainer_fn(
        algorithm=algorithm,
        env_spec=env,
        db_server=db_server,
        logdir=args.logdir,
        monitoring_params=monitoring_params,
        **config["trainer"],
    )

    trainer.run()
示例#3
0
def main(args, unknown_args):
    """Seed the environment, build the experiment, and run it."""
    args, config = parse_args_uargs(args, unknown_args)
    set_global_seed(args.seed)
    prepare_cudnn(args.deterministic, args.benchmark)

    experiment_cls, runner_cls = import_experiment_and_runner(Path(args.expdir))

    experiment = experiment_cls(config)
    runner = runner_cls()

    logdir = experiment.logdir
    if logdir is not None:
        # Persist the effective config and the experiment code for later audit.
        dump_config(config, logdir, args.configs)
        dump_code(args.expdir, logdir)

    runner.run_experiment(experiment, check=args.check)
示例#4
0
def main(args, unknown_args):
    """Run the ``catalyst-dl run`` script"""
    args, config = parse_args_uargs(args, unknown_args)
    set_global_seed(args.seed)
    prepare_cudnn(args.deterministic, args.benchmark)

    experiment_cls, runner_cls = import_experiment_and_runner(Path(args.expdir))

    # ``runner_params`` may be absent or explicitly null in the config.
    runner_kwargs = config.pop("runner_params", {}) or {}
    experiment = experiment_cls(config)
    runner = runner_cls(**runner_kwargs)

    logdir = experiment.logdir
    if logdir is not None:
        # Persist the environment snapshot and the experiment code.
        dump_environment(config, logdir, args.configs)
        dump_code(args.expdir, logdir)

    is_check = safitty.get(config, "args", "check", default=False)
    runner.run_experiment(experiment, check=is_check)
示例#5
0
def main(args, _=None):
    """Run the ``catalyst-data image2embeddings`` script."""
    global IMG_SIZE

    utils.set_global_seed(args.seed)
    utils.prepare_cudnn(args.deterministic, args.benchmark)

    IMG_SIZE = (args.img_size, args.img_size)  # noqa: WPS442

    if args.traced_model is not None:
        # A TorchScript model is loaded directly onto the target device.
        device = utils.get_device()
        model = torch.jit.load(str(args.traced_model), map_location=device)
    else:
        model = ResnetEncoder(arch=args.arch, pooling=args.pooling)
        model = model.eval()
        model, _, _, _, device = utils.process_components(model=model)

    # Rows of the CSV become a list of dicts consumed by the reader.
    samples = pd.read_csv(args.in_csv)
    samples = samples.reset_index().drop("index", axis=1)
    samples = list(samples.to_dict("index").values())

    open_fn = ImageReader(
        input_key=args.img_col, output_key="image", rootpath=args.rootpath
    )

    loader = utils.get_loader(
        samples,
        open_fn,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        dict_transform=dict_transformer,
    )
    if args.verbose:
        loader = tqdm(loader)

    chunks = []
    with torch.no_grad():
        for batch in loader:
            embeddings = model(batch["image"].to(device))
            chunks.append(embeddings.cpu().detach().numpy())

    np.save(args.out_npy, np.concatenate(chunks, axis=0))
示例#6
0
def setup_runtime(cfg_env: DictConfig):
    """
    Setup runtime environment.
    Runtime options:
        ["cuda", "cuda:0", "cuda:<index>", "cpu", ""]
    Args:
        cfg_env (DictConfig): Configuration with ``runtime`` and ``seed``

    Returns:
        torch.device: the device resolved by catalyst's ``get_device``
        (the original docstring incorrectly said ``None``)
    """
    runtime: str = cfg_env.runtime

    # "cuda:1" -> ("cuda", "1"); plain "cuda"/"cpu"/"" -> (name, "")
    runtime_name, runtime_devices = \
        runtime.split(":") if ":" in runtime else [runtime, ""]

    if runtime_name == "cuda" and runtime_devices:
        os.environ["CUDA_VISIBLE_DEVICES"] = f"{runtime_devices}"
        logger.info(f"[Environment] Configuration: CUDA_VISIBLE_DEVICES="
                    f"{os.environ['CUDA_VISIBLE_DEVICES']}")
    elif runtime_name == "cpu":
        # Hide all GPUs so torch falls back to CPU.
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        logger.info(f"[Environment] Configuration: CUDA_VISIBLE_DEVICES="
                    f"{os.environ['CUDA_VISIBLE_DEVICES']}")

    # Imported lazily so CUDA_VISIBLE_DEVICES is set before torch/catalyst
    # can initialize the CUDA context.
    from catalyst.utils import set_global_seed, prepare_cudnn, get_device

    seed: int = cfg_env.seed

    set_global_seed(seed)
    logger.info(f"[Environment] Configuration. Seed: {seed}")

    prepare_cudnn(deterministic=True, benchmark=False)
    # Plain strings: no interpolation needed (were pointless f-strings).
    logger.info("[Environment] Configuration. CUDNN: "
                "deterministic=True, benchmark=False")

    device = get_device()

    logger.info(f"[Environment] Runtime: {device}")

    return device
示例#7
0
def make_classifier():
    """Build the BERT classifier together with its training components.

    Seeds all RNGs for reproducibility, instantiates the model on the
    global ``device``, and returns a dict with criterion, optimizer,
    scheduler and model.
    """
    # Fix every RNG so runs are reproducible.
    set_global_seed(SEED)
    prepare_cudnn(deterministic=True)
    np.random.seed(SEED)
    torch.manual_seed(SEED)

    net = BertForSequenceClassification(PRETRAINED_MODEL_NAME, NUM_LABELS)
    net.to(device)
    print(f'Loaded model: {PRETRAINED_MODEL_NAME}')
    print(f'Trainable parameters: {net.n_trainable()}')

    loss_fn = net.configure_loss()
    opt = net.configure_optimizers(1)
    sched = net.configure_scheduler(opt)

    return {
        'criterion': loss_fn,
        'optimizer': opt,
        'scheduler': sched,
        'model': net,
    }
示例#8
0
    test_filenames)}, columns=['ImageFileName'])

# Test-time loader: normalization only (no augmentation); batch_size=1 and
# shuffle=False keep per-image ordering stable for prediction.
test_dataset = DataLoader(
    ALASKATestData(test_df, augmentations=albu.Compose([
        # albu.CenterCrop()
        albu.Normalize(),
        ToTensorV2()
    ])
    ),
    batch_size=1, shuffle=False, num_workers=args.nw)
print(len(train_data))
print(len(val_data))

# Reproducibility: fixed seed + deterministic cuDNN kernels.
SEED = 2020
utils.set_global_seed(SEED)
utils.prepare_cudnn(deterministic=True)
loaders = {'train': train_data,
           'valid': val_data}
criterion = nn.CrossEntropyLoss()

# EfficientNet-B0 backbone; RAdam wrapped in Lookahead for the optimizer.
model = ENet('efficientnet-b0')
print(model)
optimizer = Lookahead(RAdam(
    model.parameters(), lr=args.lr, weight_decay=args.wd))
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, factor=0.25, patience=3)
num_epochs = args.e
logdir = "./logs/effnet-b0"
fp16_params = None  # dict(opt_level="O1")
runner = SupervisedRunner(device='cuda')
示例#9
0
def main(args, unknown_args):
    """Spawn RL sampler processes (vis/infer/valid/train), or run one inline.

    Resolves the environment and algorithm from the config, then launches
    ``args.vis`` + ``args.infer`` + ``args.valid`` + ``args.train`` sampler
    processes with consecutive sampler ids.  With ``--check`` a single
    sampler is executed synchronously in this process instead.
    """
    args, config = parse_args_uargs(args, unknown_args)
    set_global_seed(args.seed)
    prepare_cudnn(args.deterministic, args.benchmark)

    # Normalize optional counts (None -> 0).
    args.vis = args.vis or 0
    args.infer = args.infer or 0
    args.valid = args.valid or 0
    args.train = args.train or 0

    if args.expdir is not None:
        # Importing the experiment dir registers user modules as a side effect.
        module = import_module(expdir=args.expdir)  # noqa: F841

    environment_name = config["environment"].pop("environment")
    environment_fn = ENVIRONMENTS.get(environment_name)

    algorithm_name = config["algorithm"].pop("algorithm")

    if algorithm_name in OFFPOLICY_ALGORITHMS_NAMES:
        ALGORITHMS = OFFPOLICY_ALGORITHMS
        sync_epoch = False
    elif algorithm_name in ONPOLICY_ALGORITHMS_NAMES:
        ALGORITHMS = ONPOLICY_ALGORITHMS
        sync_epoch = True
    else:
        raise NotImplementedError()

    algorithm_fn = ALGORITHMS.get(algorithm_name)

    processes = []
    sampler_id = args.sampler_id

    def on_exit():
        # Make sure child samplers die with the parent process.
        for p in processes:
            p.terminate()

    atexit.register(on_exit)

    params = dict(
        seed=args.seed,
        logdir=args.logdir,
        algorithm_fn=algorithm_fn,
        environment_fn=environment_fn,
        config=config,
        resume=args.resume,
        db=args.db,
        sync_epoch=sync_epoch
    )

    def spawn_sampler(**sampler_params):
        # Start one sampler process and stagger launches by run_delay.
        p = mp.Process(
            target=run_sampler,
            kwargs=dict(**params, **sampler_params),
            daemon=args.daemon,
        )
        p.start()
        processes.append(p)
        time.sleep(args.run_delay)

    if args.check:
        # Dry-run: execute a single sampler synchronously; mode priority is
        # infer > valid > train, visualization if any vis samplers requested.
        mode = "train"
        mode = "valid" if (args.valid is not None and args.valid > 0) else mode
        mode = "infer" if (args.infer is not None and args.infer > 0) else mode
        params_ = dict(
            visualize=(args.vis is not None and args.vis > 0),
            mode=mode,
            id=sampler_id
        )
        run_sampler(**params, **params_)
        return

    for _ in range(args.vis):
        spawn_sampler(
            visualize=True, mode="infer", id=sampler_id,
            exploration_power=0.0,
        )
        sampler_id += 1

    for _ in range(args.infer):
        spawn_sampler(
            visualize=False, mode="infer", id=sampler_id,
            exploration_power=0.0,
        )
        sampler_id += 1

    for _ in range(args.valid):
        spawn_sampler(
            visualize=False, mode="valid", id=sampler_id,
            exploration_power=0.0,
        )
        sampler_id += 1

    for i in range(1, args.train + 1):
        # Later train samplers explore more aggressively (power ramps to 1).
        spawn_sampler(
            visualize=False, mode="train", id=sampler_id,
            exploration_power=i / args.train,
        )
        sampler_id += 1

    for p in processes:
        p.join()
示例#10
0
    parser.add_argument("--train", help="train", type=bool, default=False)
    parser.add_argument("--make_prediction", help="to make prediction", type=bool, default=False)
    parser.add_argument("--preload", help="save processed data", type=bool, default=False)
    parser.add_argument("--separate_decoder", help="number of epochs", type=bool, default=False)
    parser.add_argument("--multigpu", help="use multi-gpu", type=bool, default=False)
    parser.add_argument("--lookahead", help="use lookahead", type=bool, default=False)

    args, unknown = parser.parse_known_args()
    # args.train = False
    args.optimize_postprocess = False
    print(args)
    if args.task == 'classification':
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    set_global_seed(args.seed)
    prepare_cudnn(deterministic=True)

    sub_name = f'Model_{args.task}_{args.model_type}_{args.encoder}_bs_{args.bs}_{str(datetime.datetime.now().date())}'
    logdir = f"./logs/{sub_name}" if args.logdir is None else args.logdir

    preprocessing_fn = smp.encoders.get_preprocessing_fn(args.encoder, args.encoder_weights)
    loaders = prepare_loaders(path=args.path, bs=args.bs,
                              num_workers=args.num_workers, preprocessing_fn=preprocessing_fn, preload=args.preload,
                              image_size=(args.height, args.width), augmentation=args.augmentation, task=args.task)
    test_loader = loaders['test']
    del loaders['test']

    model = get_model(model_type=args.segm_type, encoder=args.encoder, encoder_weights=args.encoder_weights,
                      activation=None, task=args.task)

    optimizer = get_optimizer(optimizer=args.optimizer, lookahead=args.lookahead, model=model,
示例#11
0
def main():
    """Train a U-Net segmentation model from a YAML config and predict on test.

    Loads paths from CLI args, hyperparameters from ``config/igvc.yaml``,
    trains with Catalyst's ``SupervisedRunner``, then runs inference on the
    test set and saves the predictions.
    """
    # Enable argument parsing for file paths
    args = vars(get_args())

    train_images_path = args["train_images"]
    train_masks_path = args["train_masks"]
    test_images_path = args["test_images"]
    test_masks_path = args["test_masks"]

    # print out yaml file configuration
    dir_path = os.path.dirname(os.path.realpath(__file__))
    yaml_path = os.path.join(dir_path, "config/igvc.yaml")
    ARCH = yaml.safe_load(open(yaml_path, "r"))

    # Set a seed for reproducibility
    utils.set_global_seed(ARCH["train"]["seed"])
    utils.prepare_cudnn(deterministic=ARCH["train"]["cudnn"])

    # Set up U-Net with pretrained EfficientNet backbone
    model = smp.Unet(
        encoder_name=ARCH["encoder"]["name"],
        encoder_weights=ARCH["encoder"]["weight"],
        classes=ARCH["train"]["classes"],
        activation=ARCH["encoder"]["activation"],
    )

    # Get Torch loaders
    loaders = get_loaders(
        images=np.load(train_images_path),
        masks=np.load(train_masks_path),
        image_arr_path=train_images_path,
        mask_arr_path=train_masks_path,
        random_state=ARCH["train"]["random_state"],
        valid_size=ARCH["train"]["valid_size"],
        batch_size=ARCH["train"]["batch_size"],
        num_workers=ARCH["train"]["num_workers"],
    )

    # Optimize for cross entropy using Adam
    criterion = {
        "CE": CrossentropyND(),
    }

    optimizer = AdamW(
        model.parameters(),
        lr=ARCH["train"]["lr"],
        betas=(ARCH["train"]["betas_min"], ARCH["train"]["betas_max"]),
        eps=float(ARCH["train"]["eps"]),
        weight_decay=ARCH["train"]["w_decay"],
        amsgrad=ARCH["train"]["amsgrad"],
    )

    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        factor=ARCH["train"]["optim_factor"],
        patience=ARCH["train"]["optim_patience"],
    )

    device = utils.get_device()
    print("Using device: {}".format(device))
    print(f"torch: {torch.__version__}, catalyst: {catalyst.__version__}")

    runner = SupervisedRunner(device=device,
                              input_key="image",
                              input_target_key="mask")

    # Use Catalyst callbacks for metric calculations during training
    callbacks = [
        CriterionCallback(input_key="mask", prefix="loss", criterion_key="CE"),
        MulticlassDiceMetricCallback(input_key="mask"),
    ]

    # Train and print model training logs
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=loaders,
        callbacks=callbacks,
        logdir=ARCH["train"]["logdir"],
        num_epochs=ARCH["train"]["epochs"],
        main_metric="loss",
        minimize_metric=ARCH["train"]["minimize_metric"],
        fp16=ARCH["train"]["fp16"],
        verbose=ARCH["train"]["verbose"],
    )

    # Test model on test dataset
    test_data = SegmentationDataset(test_images_path, test_masks_path)
    infer_loader = DataLoader(
        test_data,
        batch_size=ARCH["test"]["batch_size"],
        shuffle=ARCH["test"]["shuffle"],
        num_workers=ARCH["test"]["num_workers"],
    )

    # Get model predictions on test dataset
    # NOTE(review): hard-coded checkpoint path — confirm it matches the
    # configured logdir before relying on these predictions.
    predictions = np.vstack(
        list(
            map(
                lambda x: x["logits"].cpu().numpy(),
                runner.predict_loader(
                    loader=infer_loader,
                    resume=f"content/full_model2/checkpoints/best.pth",
                ),
            )))

    save_result(predictions, test_data)
示例#12
0
from torch import nn
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau

from utils.dataset import get_train_val_dataloaders
from utils.callbacks import DiceCallback as MyDiceCallbak, IouCallback as MyIouCallback
from utils.coord_conv import CoordConv

from catalyst.dl import SupervisedRunner, DiceCallback, IouCallback
from catalyst.utils import set_global_seed, prepare_cudnn

import segmentation_models_pytorch as smp

# Module-level reproducibility setup: cuDNN deterministic + benchmark modes
# enabled, and a fixed global seed of 0.
prepare_cudnn(True, True)
set_global_seed(0)


class Model(nn.Module):
    """Segmentation model: an smp U-Net plus CoordConv layers.

    NOTE(review): only ``__init__`` is visible in this chunk; the CoordConv
    layers are presumably applied around the U-Net in ``forward`` defined
    elsewhere — confirm against the full file.
    """

    def __init__(self, encoder):
        super().__init__()

        # Appends pixel-coordinate channels (with_r=True) before the encoder;
        # input/output stay at 3 channels.
        self.coord_conv = CoordConv(3, 3, True, kernel_size=3, padding=1)
        # Same idea on the 16-channel decoder features.
        self.coord_conv_decoder = CoordConv(16,
                                            16,
                                            True,
                                            kernel_size=3,
                                            padding=1)
        # 4-class U-Net with ImageNet-pretrained encoder; raw logits out.
        self.model = smp.Unet(encoder,
                              encoder_weights='imagenet',
                              classes=4,
                              activation=None)
示例#13
0
def seed_all(SEED):
    """Make runs reproducible: deterministic cuDNN and a fixed global seed."""
    prepare_cudnn(deterministic=True)
    set_global_seed(SEED)
示例#14
0
def main(args, _=None):
    """Run the ``catalyst-data text2embeddings`` script."""
    batch_size = args.batch_size
    num_workers = args.num_workers
    max_length = args.max_length
    pooling_groups = args.pooling.split(",")
    bert_level = args.bert_level

    if bert_level is not None:
        # Selecting a single hidden layer only makes sense when the model
        # is configured to output all hidden states.
        assert (args.output_hidden_states
                ), "You need hidden states output for level specification"

    utils.set_global_seed(args.seed)
    utils.prepare_cudnn(args.deterministic, args.benchmark)

    # Model/tokenizer come either from a HuggingFace hub name or from
    # explicit config/vocab files.
    if getattr(args, "in_huggingface", False):
        model_config = BertConfig.from_pretrained(args.in_huggingface)
        model_config.output_hidden_states = args.output_hidden_states
        model = BertModel.from_pretrained(args.in_huggingface,
                                          config=model_config)
        tokenizer = BertTokenizer.from_pretrained(args.in_huggingface)
    else:
        model_config = BertConfig.from_pretrained(args.in_config)
        model_config.output_hidden_states = args.output_hidden_states
        model = BertModel(config=model_config)
        tokenizer = BertTokenizer.from_pretrained(args.in_vocab)
    if getattr(args, "in_model", None) is not None:
        # Optionally load fine-tuned weights over the base model.
        checkpoint = utils.load_checkpoint(args.in_model)
        checkpoint = {"model_state_dict": checkpoint}
        utils.unpack_checkpoint(checkpoint=checkpoint, model=model)

    model = model.eval()
    model, _, _, _, device = utils.process_components(model=model)

    # Rows with missing text are dropped; the filtered frame is saved so
    # embedding rows can be matched back to their source rows.
    df = pd.read_csv(args.in_csv)
    df = df.dropna(subset=[args.txt_col])
    df.to_csv(f"{args.out_prefix}.df.csv", index=False)
    df = df.reset_index().drop("index", axis=1)
    df = list(df.to_dict("index").values())
    num_samples = len(df)

    open_fn = LambdaReader(
        input_key=args.txt_col,
        output_key=None,
        lambda_fn=partial(
            tokenize_text,
            strip=args.strip,
            lowercase=args.lowercase,
            remove_punctuation=args.remove_punctuation,
        ),
        tokenizer=tokenizer,
        max_length=max_length,
    )

    dataloader = utils.get_loader(
        df,
        open_fn,
        batch_size=batch_size,
        num_workers=num_workers,
    )

    # layer name -> np.memmap of shape (num_samples, embedding_size)
    features = {}
    dataloader = tqdm(dataloader) if args.verbose else dataloader
    with torch.no_grad():
        for idx, batch_input in enumerate(dataloader):
            batch_input = utils.any2device(batch_input, device)
            batch_output = model(**batch_input)
            mask = (batch_input["attention_mask"].unsqueeze(-1)
                    if args.mask_for_max_length else None)

            if utils.check_ddp_wrapped(model):
                # using several gpu
                hidden_size = model.module.config.hidden_size
                hidden_states = model.module.config.output_hidden_states

            else:
                # using cpu or one gpu
                hidden_size = model.config.hidden_size
                hidden_states = model.config.output_hidden_states

            batch_features = process_bert_output(
                bert_output=batch_output,
                hidden_size=hidden_size,
                output_hidden_states=hidden_states,
                pooling_groups=pooling_groups,
                mask=mask,
            )

            # create storage based on network output
            if idx == 0:
                # Allocate one disk-backed memmap per (selected) layer,
                # sized from the first batch's embedding width.
                for layer_name, layer_value in batch_features.items():
                    if bert_level is not None and bert_level != layer_name:
                        continue
                    layer_name = (layer_name if isinstance(layer_name, str)
                                  else f"{layer_name:02d}")
                    _, embedding_size = layer_value.shape
                    features[layer_name] = np.memmap(
                        f"{args.out_prefix}.{layer_name}.npy",
                        dtype=np.float32,
                        mode="w+",
                        shape=(num_samples, embedding_size),
                    )

            # Row range of this batch; min() guards the short last batch.
            indices = np.arange(idx * batch_size,
                                min((idx + 1) * batch_size, num_samples))
            for layer_name2, layer_value2 in batch_features.items():
                if bert_level is not None and bert_level != layer_name2:
                    continue
                layer_name2 = (layer_name2 if isinstance(layer_name2, str) else
                               f"{layer_name2:02d}")
                features[layer_name2][indices] = _detach(layer_value2)

    if args.force_save:
        # Flush memmaps and additionally persist plain .npy copies.
        for key, mmap in features.items():
            mmap.flush()
            np.save(f"{args.out_prefix}.{key}.force.npy",
                    mmap,
                    allow_pickle=False)