Esempio n. 1
0
def fit(epochs, model, loss_fn, opt, data, callbacks=None, metrics=None):
    """Train ``model`` for ``epochs`` passes over ``data.train_dl``,
    running an inline validation pass over ``data.valid_dl`` (when present)
    at the end of each epoch.

    Parameters
    ----------
    epochs : int
        Number of full passes over the training loader.
    model : torch.nn.Module
        Model to optimize; toggled between ``train()`` and ``eval()``.
    loss_fn : callable
        Loss function forwarded to ``loss_batch``.
    opt : optimizer
        Optimizer forwarded to ``loss_batch`` for the training step.
    data : object
        Must expose ``train_dl``; may expose ``valid_dl`` for validation.
    callbacks : list, optional
        Callbacks wrapped in a ``CallbackHandler``.
    metrics : list, optional
        Metric callables evaluated during the validation pass.

    Any exception raised mid-training is recorded and re-raised;
    ``on_train_end`` always runs, receiving ``False`` or the exception.
    """
    cb_handler = CallbackHandler(callbacks)
    pbar = master_bar(range(epochs))
    cb_handler.on_train_begin(epochs, pbar=pbar, metrics=metrics)

    exception = False
    try:
        for epoch in pbar:
            model.train()
            cb_handler.on_epoch_begin()

            # Training phase: one optimizer step per batch via loss_batch;
            # a callback may request an early stop for the epoch.
            for xb, yb in progress_bar(data.train_dl, parent=pbar):
                xb, yb = cb_handler.on_batch_begin(xb, yb)
                loss, _ = loss_batch(model, xb, yb, loss_fn, opt, cb_handler)
                if cb_handler.on_batch_end(loss):
                    break

            # Validation phase — skipped when no validation loader exists.
            if getattr(data, 'valid_dl', None) is not None:
                model.eval()
                with torch.no_grad():
                    # Transpose per-batch results: metric columns first,
                    # per-batch sample counts last.
                    *val_metrics, nums = zip(*[
                        loss_batch(model,
                                   xb,
                                   yb,
                                   loss_fn,
                                   cb_handler=cb_handler,
                                   metrics=metrics)
                        for xb, yb in progress_bar(data.valid_dl, parent=pbar)
                    ])
                # Weighted average of each metric by batch sample count.
                val_metrics = [
                    np.sum(np.multiply(val, nums)) / np.sum(nums)
                    for val in val_metrics
                ]
            else:
                val_metrics = None
            if cb_handler.on_epoch_end(val_metrics):
                break
    except Exception as e:
        exception = e
        raise  # bare raise: idiomatic re-raise, keeps original traceback
    finally:
        cb_handler.on_train_end(exception)
Esempio n. 2
0
def validate(model,
             dl,
             loss_fn=None,
             metrics=None,
             cb_handler=None,
             pbar=None):
    """Evaluate ``model`` over every batch of ``dl`` with gradients disabled.

    Returns the transposed per-batch outputs of ``loss_batch``: one tuple
    per output position (loss/metric values first, batch sizes last).
    """
    model.eval()
    with torch.no_grad():
        # Materialize one loss_batch result per validation batch.
        batch_results = [
            loss_batch(
                model, xb, yb, loss_fn, cb_handler=cb_handler, metrics=metrics)
            for xb, yb in progress_bar(dl, parent=pbar)
        ]
    # Transpose rows-of-batches into columns-of-metrics.
    return zip(*batch_results)
Esempio n. 3
0
def fit(epochs, model, loss_fn, opt, data, callbacks=None, metrics=None):
    """Train ``model`` for ``epochs`` passes over ``data.train_dl``,
    delegating the per-epoch validation pass to ``validate`` when
    ``data.valid_dl`` is available.

    Parameters
    ----------
    epochs : int
        Number of full passes over the training loader.
    model : torch.nn.Module
        Model to optimize; set to ``train()`` each epoch (``validate``
        handles ``eval()``).
    loss_fn : callable
        Loss function forwarded to ``loss_batch`` and ``validate``.
    opt : optimizer
        Optimizer forwarded to ``loss_batch`` for the training step.
    data : object
        Must expose ``train_dl``; may expose ``valid_dl`` for validation.
    callbacks : list, optional
        Callbacks wrapped in a ``CallbackHandler``.
    metrics : list, optional
        Metric callables evaluated during validation.

    Any exception raised mid-training is recorded and re-raised;
    ``on_train_end`` always runs, receiving ``False`` or the exception.
    """
    cb_handler = CallbackHandler(callbacks)
    pbar = master_bar(range(epochs))
    cb_handler.on_train_begin(epochs, pbar=pbar, metrics=metrics)

    exception = False
    try:
        for epoch in pbar:
            model.train()
            cb_handler.on_epoch_begin()

            # Training phase: one optimizer step per batch via loss_batch;
            # a callback may request an early stop for the epoch.
            for xb, yb in progress_bar(data.train_dl, parent=pbar):
                xb, yb = cb_handler.on_batch_begin(xb, yb)
                loss, _ = loss_batch(model, xb, yb, loss_fn, opt, cb_handler)
                if cb_handler.on_batch_end(loss):
                    break

            # Validation phase — skipped when no validation loader exists.
            if getattr(data, 'valid_dl', None) is not None:
                # validate() returns metric columns first, batch sizes last.
                *val_metrics, nums = validate(model,
                                              data.valid_dl,
                                              loss_fn=loss_fn,
                                              cb_handler=cb_handler,
                                              metrics=metrics,
                                              pbar=pbar)
                nums = np.array(nums, dtype=np.float32)
                # NOTE(review): assumes each metric value is a stackable
                # torch tensor — confirm against loss_batch's return type.
                # Weighted average of each metric by batch sample count.
                val_metrics = [
                    (torch.stack(val).cpu().numpy() * nums).sum() / nums.sum()
                    for val in val_metrics
                ]
            else:
                val_metrics = None
            if cb_handler.on_epoch_end(val_metrics):
                break
    except Exception as e:
        exception = e
        raise  # bare raise: idiomatic re-raise, keeps original traceback
    finally:
        cb_handler.on_train_end(exception)