Example #1
def test_callbacks():
    import torch
    import torch.nn as nn
    from torch.utils.data import DataLoader

    import torchelie.callbacks as tcb
    from torchelie.recipes import Recipe

    def train(b):
        # Dummy training step: returns random predictions, just enough to
        # exercise the callbacks below.
        x, y = b
        return {'pred': torch.randn(y.shape[0])}

    m = nn.Linear(2, 2)

    # FakeImg: a fake image dataset used as a stand-in for real data
    # (defined elsewhere in the test suite).
    r = Recipe(train, DataLoader(FakeImg(), 4))
    r.callbacks.add_callbacks([
        tcb.Counter(),
        tcb.AccAvg(),
        tcb.Checkpoint('/tmp/m', m),
        tcb.ClassificationInspector(1, ['1', '2']),
        tcb.ConfusionMatrix(['one', 'two']),
        tcb.ImageGradientVis(),
        tcb.MetricsTable(),
    ])
    r.run(1)
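The test above returns random predictions, which is enough to exercise the callbacks but trains nothing. Purely as an illustration (not part of the original test), a more realistic training closure would compute a real loss and return both the loss and the logits. The model, input size and optimizer below are placeholders, and the optimizer is stepped inside the closure, whereas the recipes in the later examples keep only loss.backward() in the closure and attach the optimizer through a tcb.Optimizer callback.

import torch
import torch.nn as nn
import torch.nn.functional as F

in_features = 3 * 32 * 32  # flattened input size; adjust to the actual data
model = nn.Sequential(nn.Flatten(), nn.Linear(in_features, 2))
opt = torch.optim.SGD(model.parameters(), lr=1e-2)

def train(batch):
    x, y = batch
    opt.zero_grad()
    pred = model(x)
    loss = F.cross_entropy(pred, y)
    loss.backward()
    opt.step()
    # 'pred' is the key the callbacks in the test above consume; 'loss' mirrors
    # the train_step functions of the recipes in the later examples.
    return {'loss': loss, 'pred': pred}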
Example #2
def MixupClassification(model,
                        train_loader,
                        test_loader,
                        classes,
                        *,
                        lr=3e-3,
                        beta1=0.9,
                        wd=1e-2,
                        visdom_env='main',
                        test_every=1000,
                        log_every=100):
    """
    A Classification recipe with a default forward training / testing pass
    using cross entropy and mixup, extended with RAdamW and
    ReduceLROnPlateau.

    Args:
        model (nn.Module): a model learnable with cross entropy
        train_loader (DataLoader): Training set dataloader. Must have soft
            targets. Should be a DataLoader loading a MixupDataset or
            compatible.
        test_loader (DataLoader): Testing set dataloader. Dataset must have
            categorical targets.
        classes (list of str): class names, in order
        lr (float): the learning rate
        beta1 (float): RAdamW's beta1
        wd (float): weight decay
        visdom_env (str): name of the visdom environment to use, or None for
            not using Visdom (default: 'main')
        test_every (int): testing frequency, in number of iterations (default:
            1000)
        log_every (int): logging frequency, in number of iterations (default:
            100)
    """

    from torchelie.loss import continuous_cross_entropy

    def train_step(batch):
        x, y = batch
        pred = model(x)
        loss = continuous_cross_entropy(pred, y)
        loss.backward()
        return {'loss': loss}

    def validation_step(batch):
        x, y = batch
        pred = model(x)
        loss = torch.nn.functional.cross_entropy(pred, y)
        return {'loss': loss, 'pred': pred}

    loop = TrainAndTest(model,
                        train_step,
                        validation_step,
                        train_loader,
                        test_loader,
                        visdom_env=visdom_env,
                        test_every=test_every,
                        log_every=log_every)

    loop.callbacks.add_callbacks([
        tcb.WindowedMetricAvg('loss'),
    ])
    loop.register('classes', classes)

    loop.test_loop.callbacks.add_callbacks([
        tcb.AccAvg(post_each_batch=False),
        tcb.WindowedMetricAvg('loss', False),
    ])

    if visdom_env is not None:
        loop.callbacks.add_epilogues(
            [tcb.ImageGradientVis(),
             tcb.MetricsTable()])

    if len(classes) <= 25:
        loop.test_loop.callbacks.add_callbacks([
            tcb.ConfusionMatrix(classes),
        ])

    loop.test_loop.callbacks.add_callbacks([
        tcb.ClassificationInspector(30, classes, False),
        tcb.MetricsTable(False)
    ])

    opt = RAdamW(model.parameters(),
                 lr=lr,
                 betas=(beta1, 0.999),
                 weight_decay=wd)
    loop.callbacks.add_callbacks([
        tcb.Optimizer(opt, log_lr=True),
        tcb.LRSched(torch.optim.lr_scheduler.ReduceLROnPlateau(opt))
    ])
    return loop
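A minimal usage sketch for the recipe above, using stand-in random tensors instead of a real dataset: the training targets are soft class-probability rows and the test targets are categorical, as the docstring requires (in practice the soft targets would come from wrapping the training set in a MixupDataset or compatible dataset). The import path, the tiny model and the small test_every/log_every values are assumptions made for the sketch.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

# Import path is an assumption; MixupClassification is the function defined above.
from torchelie.recipes.classification import MixupClassification

classes = ['cat', 'dog']

# Stand-in data: 3x32x32 "images"; train targets are soft, test targets categorical.
x_train = torch.randn(256, 3, 32, 32)
y_train = torch.softmax(torch.randn(256, len(classes)), dim=1)
x_test = torch.randn(64, 3, 32, 32)
y_test = torch.randint(0, len(classes), (64,))

train_loader = DataLoader(TensorDataset(x_train, y_train), batch_size=32, shuffle=True)
test_loader = DataLoader(TensorDataset(x_test, y_test), batch_size=32)

model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, len(classes)))

loop = MixupClassification(model, train_loader, test_loader, classes,
                           lr=1e-3, visdom_env=None,
                           test_every=10, log_every=5)  # small values so the tiny data triggers them
loop.run(1)  # same .run(num_epochs) call as the Recipe in Example #1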
Example #3
def Classification(model,
                   train_fun,
                   test_fun,
                   train_loader,
                   test_loader,
                   classes,
                   visdom_env=None,
                   test_every=1000,
                   log_every=100):
    """
    Classification training and testing loop. Displays loss, accuracy, a
    gradient-based visualization of features, a classification report on the
    worst, best and most confusing samples, a confusion matrix, and a metrics
    table, logged through VisdomLogger and StdLogger.

    Args:
        model (nn.Module): a model
        train_fun (Callable): a function that takes a batch as a single
            argument, performs a training forward pass and returns a dict of
            values to populate the recipe's state. It must return the logits
            predictions under key "preds" and this batch's loss under key
            "loss".
        test_fun (Callable): a function that takes a batch as a single
            argument, performs an evaluation forward pass and returns a dict
            to populate the state. It must return the logits predictions
            under key "preds" and this batch's loss under key "loss".
        train_loader (DataLoader): Training set dataloader
        test_loader (DataLoader): Testing set dataloader
        classes (list of str): class names, in order
        visdom_env (str): name of the visdom environment to use, or None for
            not using Visdom (default: None)
        test_every (int): testing frequency, in number of iterations (default:
            1000)
        log_every (int): logging frequency, in number of iterations (default:
            100)
    """

    loop = TrainAndTest(model,
                        train_fun,
                        test_fun,
                        train_loader,
                        test_loader,
                        visdom_env=visdom_env,
                        test_every=test_every,
                        log_every=log_every)
    loop.callbacks.add_callbacks([
        tcb.AccAvg(),
        tcb.WindowedMetricAvg('loss'),
    ])

    loop.test_loop.callbacks.add_callbacks([
        tcb.AccAvg(post_each_batch=False),
        tcb.WindowedMetricAvg('loss', False),
    ])

    if visdom_env is not None:
        if len(classes) <= 25:
            loop.callbacks.add_epilogues([
                tcb.ConfusionMatrix(classes),
                tcb.ImageGradientVis(),
            ])
        loop.callbacks.add_epilogues([
            tcb.ClassificationInspector(30, classes),
            tcb.MetricsTable()
        ])

    if len(classes) <= 25:
        loop.test_loop.callbacks.add_callbacks([
            tcb.ConfusionMatrix(classes),
        ])

    loop.test_loop.callbacks.add_callbacks([
        tcb.ClassificationInspector(30, classes, False),
        tcb.MetricsTable(False)
    ])
    return loop
Example #4
def Classification(model,
                   train_fun,
                   test_fun,
                   train_loader,
                   test_loader,
                   classes,
                   *,
                   visdom_env=None,
                   test_every=1000,
                   log_every=100):
    """
    Classification training and testing loop, extending :code:`TrainAndTest`.
    Both :code:`train_fun` and :code:`test_fun` must
    :code:`return {'loss': batch_loss, 'preds': logits_predictions}`. The model
    is automatically registered and checkpointed as :code:`checkpoint['model']`,
    and put in eval mode when testing. The list of classes is checkpointed as
    well, under :code:`checkpoint['classes']`.


    Training callbacks:

    - AccAvg for displaying accuracy
    - WindowedMetricAvg for displaying loss
    - ConfusionMatrix if len(classes) <= 25
    - ClassificationInspector
    - MetricsTable
    - ImageGradientVis

    Inherited training callbacks:

    - Counter for counting iterations, connected to the testing loop as well
    - VisdomLogger
    - StdoutLogger

    Testing:

    Testing loop is in :code:`.test_loop`.

    Testing callbacks:

    - AccAvg
    - WindowedMetricAvg
    - ConfusionMatrix if :code:`len(classes) <= 25`
    - ClassificationInspector
    - MetricsTable

    Inherited testing callbacks:

    - VisdomLogger
    - StdoutLogger
    - Checkpoint saving the best testing loss


    Args:
        model (nn.Module): a model
        train_fun (Callable): a function that takes a batch as a single
            argument, performs a training forward pass and returns a dict of
            values to populate the recipe's state. It must return the logits
            predictions under key "preds" and this batch's loss under key
            "loss".
        test_fun (Callable): a function that takes a batch as a single
            argument, performs an evaluation forward pass and returns a dict
            to populate the state. It must return the logits predictions
            under key "preds" and this batch's loss under key "loss".
        train_loader (DataLoader): Training set dataloader
        test_loader (DataLoader): Testing set dataloader
        classes (list of str): class names, in order
        visdom_env (str): name of the visdom environment to use, or None for
            not using Visdom (default: None)
        test_every (int): testing frequency, in number of iterations (default:
            1000)
        log_every (int): logging frequency, in number of iterations (default:
            100)
    """

    key_best = (lambda state: -state['test_loop']['callbacks']['state'][
        'metrics']['loss'])

    loop = TrainAndTest(model,
                        train_fun,
                        test_fun,
                        train_loader,
                        test_loader,
                        visdom_env=visdom_env,
                        test_every=test_every,
                        log_every=log_every,
                        key_best=key_best)

    loop.callbacks.add_callbacks([
        tcb.AccAvg(),
        tcb.WindowedMetricAvg('loss'),
    ])
    loop.register('classes', classes)

    loop.test_loop.callbacks.add_callbacks([
        tcb.AccAvg(post_each_batch=False),
        tcb.WindowedMetricAvg('loss', False),
    ])

    if visdom_env is not None:
        if len(classes) <= 25:
            loop.callbacks.add_epilogues([
                tcb.ConfusionMatrix(classes),
                tcb.ImageGradientVis(),
            ])
        loop.callbacks.add_epilogues(
            [tcb.ClassificationInspector(30, classes),
             tcb.MetricsTable()])

    if len(classes) <= 25:
        loop.test_loop.callbacks.add_callbacks([
            tcb.ConfusionMatrix(classes),
        ])

    loop.test_loop.callbacks.add_callbacks([
        tcb.ClassificationInspector(30, classes, False),
        tcb.MetricsTable(False)
    ])
    return loop
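To round off, a hedged end-to-end sketch of wiring this recipe up: the two closures follow the return contract described in the docstring above, the optimizer and scheduler are attached through callbacks exactly as MixupClassification does in Example #2, and the data is stand-in random tensors. Import paths, the model and the hyperparameters are assumptions made for the sketch.

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset

# Import paths are assumptions; Classification is the function defined above.
from torchelie.recipes.classification import Classification
import torchelie.callbacks as tcb

classes = ['one', 'two', 'three']
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, len(classes)))

def make_loader(n):
    # Stand-in random data shaped like small RGB images with categorical labels.
    x = torch.randn(n, 3, 32, 32)
    y = torch.randint(0, len(classes), (n,))
    return DataLoader(TensorDataset(x, y), batch_size=32, shuffle=True)

def train_fun(batch):
    x, y = batch
    preds = model(x)
    loss = F.cross_entropy(preds, y)
    loss.backward()
    # The docstring above names the prediction key 'preds', while the library
    # code in Example #2 returns 'pred'; returning both keeps the sketch
    # compatible with either convention.
    return {'loss': loss, 'pred': preds, 'preds': preds}

def test_fun(batch):
    x, y = batch
    preds = model(x)
    loss = F.cross_entropy(preds, y)
    return {'loss': loss, 'pred': preds, 'preds': preds}

loop = Classification(model, train_fun, test_fun,
                      make_loader(256), make_loader(64), classes,
                      visdom_env=None, test_every=10, log_every=5)

# The recipe does not create an optimizer itself; attach one (and optionally a
# scheduler) through callbacks, mirroring MixupClassification in Example #2.
opt = torch.optim.AdamW(model.parameters(), lr=3e-4, weight_decay=1e-2)
loop.callbacks.add_callbacks([
    tcb.Optimizer(opt, log_lr=True),
    tcb.LRSched(torch.optim.lr_scheduler.ReduceLROnPlateau(opt)),
])
loop.run(1)  # same .run(num_epochs) call as the Recipe in Example #1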