Example #1
import os

import numpy as np
import pandas as pd
import torch
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import DataLoader

# Project-local helpers assumed to be importable from this repository:
# get_parser, Classifier, dataset, get_transform, make_loaders, train, val.


def main():
    args = get_parser().parse_args()
    # Arguments set manually
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    args.target_name = "LST_status"

    table = pd.read_csv(args.table_data)
    list_wsi = os.listdir(args.wsi)
    list_lst = [
        table[table['ID'] == x][args.target_name].item() for x in list_wsi
    ]
    list_dataset = []

    ## Initialize the model
    model = Classifier(args=args)

    ## Create one dataset per WSI (args.wsi is restored to its root afterwards)
    wsi_root = args.wsi
    for path in list_wsi:
        args.wsi = os.path.join(wsi_root, path)
        list_dataset.append(dataset(args))
    args.wsi = wsi_root
    # Object-dtype array so the datasets can be selected with fold index arrays
    list_dataset = np.array(list_dataset, dtype=object)

    ## K-fold cross-validation (labels are passed as both X and y; only y
    ## drives the stratification)
    splitter = StratifiedKFold(n_splits=3)
    for r_eval, (id_train, id_val) in enumerate(
            splitter.split(list_lst, list_lst)):

        model.name = 'repeat_val_{}'.format(r_eval)
        dataset_train = list_dataset[id_train]
        dataset_val = list_dataset[id_val]
        for db in dataset_train:
            db.transform = get_transform(train=True)
        for db in dataset_val:
            db.transform = get_transform(train=False)
        dataset_train = torch.utils.data.ConcatDataset(dataset_train)
        dataset_val = torch.utils.data.ConcatDataset(dataset_val)
        dataloader_train = DataLoader(dataset=dataset_train,
                                      batch_size=args.batch_size,
                                      num_workers=24)
        dataloader_val = DataLoader(dataset=dataset_val,
                                    batch_size=args.batch_size,
                                    num_workers=24)

        # NOTE: calling make_loaders(args) here would rebuild generic loaders
        # and discard the fold-specific ones created above, so it is disabled.
        # Careful: loading everything into memory would require a single
        # dataset instead of one per WSI.
        # dataloader_train, dataloader_val = make_loaders(args=args)

        while model.counter['epochs'] < args.epochs:
            print("Begin training")
            train(model=model, dataloader=dataloader_train)
            val(model=model, dataloader=dataloader_val)
            if model.early_stopping.early_stop:
                break
        model.writer.close()
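
A note on the indexing trick in Example #1: list_dataset is wrapped in an object-dtype numpy array so the integer index arrays returned by StratifiedKFold.split can select whole dataset objects directly. A minimal self-contained sketch of that pattern (the string items stand in for dataset objects):

import numpy as np
from sklearn.model_selection import StratifiedKFold

items = np.array(['ds_{}'.format(i) for i in range(6)], dtype=object)
labels = [0, 0, 0, 1, 1, 1]
for fold, (id_train, id_val) in enumerate(
        StratifiedKFold(n_splits=3).split(labels, labels)):
    # fancy indexing works on the object array just like on numeric arrays
    print(fold, items[id_train], items[id_val])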
Example #2
def pretrain(args):
    tf = get_transform(args, 'none')
    ds = get_dataset(args, tf, 'none')

    args, model, ckpt_available = get_model_ckpt(args)

    if ckpt_available:
        print("loaded checkpoint {} in pretraining stage".format(args.ckpt_name))
        loss_fn = get_loss(args)
        sub_optimizer = get_sub_optimizer(args, model)
        optimizer = get_optimizer(args, sub_optimizer)
        scheduler = get_scheduler(args, optimizer)

        # setup nvidia/apex amp
        # model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level, num_losses=1)
        # model = idist.auto_model(model)

        trainer = get_trainer(args, model, loss_fn, optimizer, scheduler)
Example #3
def pretrain(args):
    tf = get_transform(args, 'none')
    ds = get_dataset(args, tf, 'none')

    args, model, ckpt_available = get_model_ckpt(args)

    if ckpt_available:
        print("loaded checkpoint {} in pretraining stage".format(
            args.ckpt_name))
    loss_fn = get_loss(args)
    sub_optimizer = get_sub_optimizer(args, model)
    optimizer = get_optimizer(args, sub_optimizer)
    scheduler = get_scheduler(args, optimizer)

    # setup nvidia/apex amp
    # model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level, num_losses=1)
    # model = idist.auto_model(model)

    trainer = get_trainer(args, model, loss_fn, optimizer, scheduler)

    metrics = get_metrics(args)
    logger = get_logger(args)

    @trainer.on(Events.STARTED)
    def on_training_started(engine):
        print("Begin Pretraining")

    # batch-wise
    @trainer.on(Events.ITERATION_COMPLETED)
    def log_iter_results(engine):
        log_results(logger, 'pretrain/iter', engine.state,
                    engine.state.iteration)

    # epoch-wise (ckpt)
    @trainer.on(Events.EPOCH_COMPLETED)
    def save_epoch(engine):
        log_results(logger, 'pretrain/epoch', engine.state, engine.state.epoch)
        log_results_cmd(logger, 'pretrain/epoch', engine.state,
                        engine.state.epoch)
        save_ckpt(args, engine.state.epoch, engine.state.metrics['loss'],
                  model)

    trainer.run(ds, max_epochs=args.epoch)
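
The decorated handlers above use pytorch-ignite's event system: get_trainer returns an ignite Engine, and @trainer.on(...) registers callbacks for specific events. A minimal standalone sketch of the same pattern, with a dummy update step in place of the repository's training logic:

from ignite.engine import Engine, Events

def update(engine, batch):
    # stand-in for a real forward/backward pass
    return {'loss': float(batch)}

trainer = Engine(update)

@trainer.on(Events.EPOCH_COMPLETED)
def log_epoch(engine):
    print('epoch', engine.state.epoch, 'last output', engine.state.output)

trainer.run([1, 2, 3], max_epochs=2)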
Example #4
def eval_linear(pretrain_args, args):
    # get pretrained model
    pt_args, pt_model, ckpt_available = get_model_ckpt(pretrain_args)
    
    tf = get_transform(args, 'train')
    ds = get_dataset(args, tf, 'train')

    if ckpt_available:
        print("loaded pretrained model {} in eval linear".format(args.ckpt_name))

    model = get_linear(args, pt_model, args.num_classes)
    loss_fn = get_loss(args)
    optimizer = get_sub_optimizer(args, model)
    scheduler = get_scheduler(args, optimizer)

    trainer = get_trainer(args, model, loss_fn, optimizer, scheduler)
    evaluator = get_evaluator(args, model, loss_fn)

    # metrics = get_metrics(args)
    logger = get_logger(args)
    trainer.run(ds, max_epochs=args.epoch)
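
get_linear is project-local and not shown on this page; a typical linear-evaluation wrapper freezes the pretrained backbone and trains only a fresh classification head on top of it. A hypothetical sketch of that idea (LinearProbe, feat_dim, and the backbone's call signature are assumptions, not this repository's API):

import torch
import torch.nn as nn

class LinearProbe(nn.Module):
    # Hypothetical linear-evaluation head: frozen backbone + trainable fc.
    def __init__(self, backbone, feat_dim, num_classes):
        super().__init__()
        self.backbone = backbone
        for p in self.backbone.parameters():
            p.requires_grad = False  # keep pretrained weights fixed
        self.fc = nn.Linear(feat_dim, num_classes)

    def forward(self, x):
        with torch.no_grad():
            feats = self.backbone(x)
        return self.fc(feats)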
Example #5
'''
get_transform(args, eval_stage)
get_dataset(args, transform, eval_stage)
In the pretraining stage, eval_stage is set to 'none'.
'''
from dataloader import get_transform
from dataloader import get_dataset
from ckpt import get_model_ckpt, save_ckpt
from model import get_model
from loss import get_loss
from optimizer import get_optimizer, get_sub_optimizer, get_scheduler

from utils import prepare_batch
from logger import get_logger, log_results, log_results_cmd

from ignite.engine.engine import Engine, State, Events
from ignite.metrics import Loss
from metric import get_metrics
from metric.stat_metric import StatMetric
import numpy as np
# from apex import amp
import ignite.distributed as idist
from ignite.contrib.engines import common

def get_trainer(args, model, loss_fn, optimizer, scheduler):
    def update_model(trainer, batch):
        model.train()
        optimizer.zero_grad()

        # to gpu
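
The listing is cut off at this point. For context, an ignite update closure of this shape typically moves the batch to the device, runs the forward and backward passes, steps the optimizer, and returns values for logging. prepare_batch's exact signature in this repository is not shown, so the sketch below is a hypothetical, self-contained version:

import torch

def make_update_fn(model, loss_fn, optimizer, device):
    def update_model(trainer, batch):
        model.train()
        optimizer.zero_grad()
        # move the batch to the target device (hypothetical batch layout)
        inputs, targets = (t.to(device) for t in batch)
        loss = loss_fn(model(inputs), targets)
        loss.backward()
        optimizer.step()
        return {'loss': loss.item()}
    return update_model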