Example #1

# Imports assumed by these snippets; `t2d`, `gap`, `zero_grad`, and the
# `DEBUG` constant are project-specific helpers (sketched after Example #1).
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from tqdm import tqdm
def valid_fn(
    model: nn.Module,
    loader: DataLoader,
    device: str,
    loss_fn: nn.Module,
    verbose: bool = True,
) -> dict:
    """Validation step.

    Args:
        model (nn.Module): model to evaluate
        loader (DataLoader): loader with data
        device (str): device to use for placing batches
        loss_fn (nn.Module): loss function, should be callable
        verbose (bool, optional): verbosity mode.
            Default is True.

    Returns:
        dict with metrics (mean loss, gap, and accuracy) computed during validation on the loader
    """
    model.eval()

    metrics = {
        "loss": [],
        "gap": [],
        "accuracy": [],
    }

    with torch.no_grad(), tqdm(
        total=len(loader), desc="valid", disable=not verbose
    ) as progress:
        for _idx, batch in enumerate(loader):
            inputs, targets = t2d(batch, device)

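            # The model's forward also receives targets; presumably an
            # ArcFace-style head that uses labels to build margin logits.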
            outputs = model(inputs, targets)
            loss = loss_fn(outputs, targets)

            _loss = loss.detach().item()
            metrics["loss"].append(_loss)

            classes = torch.argmax(outputs, 1)
            _acc = (classes == targets).float().mean().detach().item()
            metrics["accuracy"].append(_acc)

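            # torch.max returns (values, indices): the values act as
            # confidence scores for GAP, the indices duplicate `classes`.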
            confidences, predictions = torch.max(outputs, dim=1)
            _gap = gap(predictions, confidences, targets)
            metrics["gap"].append(_gap)

            progress.set_postfix_str(
                f"loss {_loss:.4f}, gap {_gap:.4f}, accuracy - {_acc}"
            )
            progress.update(1)

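            # `DEBUG` is a module-level constant (presumably `None` outside
            # debug runs); matching the batch index ends the epoch early.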
            if _idx == DEBUG:
                break

    metrics["loss"] = np.mean(metrics["loss"])
    metrics["gap"] = np.mean(metrics["gap"])
    metrics["accuracy"] = np.mean(metrics["accuracy"])
    return metrics
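
Both examples lean on a few repo-specific helpers that are not shown: `t2d`, `gap`, `zero_grad`, and the `DEBUG` constant. The sketches below are plausible reconstructions under stated assumptions, not the repo's actual code: `t2d` moves a batch to a device, `zero_grad` clears gradients, `DEBUG` gates the early break, and `gap` is taken here to be the Global Average Precision (micro-AP) metric from the Google Landmark challenges, which fits how it is called above.

import torch

DEBUG = None  # assumption: set to a batch index to cut epochs short while debugging

def t2d(batch, device):
    # Recursively move a tensor, or a tuple/list of tensors, to `device`.
    if isinstance(batch, (tuple, list)):
        return type(batch)(t2d(item, device) for item in batch)
    return batch.to(device)

def zero_grad(optimizer):
    # Reset the gradients of every parameter the optimizer manages.
    for group in optimizer.param_groups:
        for p in group["params"]:
            p.grad = None

def gap(predictions, confidences, targets):
    # Global Average Precision: rank predictions by confidence, then
    # average precision-at-rank over the correctly predicted ranks.
    order = torch.argsort(confidences, descending=True)
    correct = (predictions[order] == targets[order]).float()
    ranks = torch.arange(
        1, len(correct) + 1, dtype=torch.float32, device=correct.device
    )
    precision_at_rank = torch.cumsum(correct, dim=0) / ranks
    return ((precision_at_rank * correct).sum() / len(correct)).item()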
Example #2

# Uses the same imports and project helpers as Example #1.
def train_fn(
    model: nn.Module,
    loader: DataLoader,
    device: str,
    loss_fn: nn.Module,
    optimizer: optim.Optimizer,
    scheduler=None,
    accumulation_steps: int = 1,
    verbose: bool = True,
) -> dict:
    """Train step.

    Args:
        model (nn.Module): model to train
        loader (DataLoader): loader with data
        device (str): device to use for placing batches
        loss_fn (nn.Module): loss function, should be callable
        optimizer (optim.Optimizer): model parameters optimizer
        scheduler (optional): batch-level LR scheduler, stepped after each
            optimizer step. Default is `None`.
        accumulation_steps (int, optional): number of steps to accumulate gradients.
            Default is `1`.
        verbose (bool, optional): verbosity mode.
            Default is True.

    Returns:
        dict with metrics (mean loss, gap, and accuracy) computed during training on the loader
    """
    model.train()

    metrics = {
        "loss": [],
        "gap": [],
        "accuracy": [],
    }

    with tqdm(total=len(loader), desc="train",
              disable=not verbose) as progress:
        for _idx, batch in enumerate(loader):
            inputs, targets = t2d(batch, device)

            # Clear gradients only at the start of each accumulation window;
            # zeroing on every batch would discard the gradients being
            # accumulated.
            if _idx % accumulation_steps == 0:
                zero_grad(optimizer)

            outputs = model(inputs, targets)
            loss = loss_fn(outputs, targets)

            _loss = loss.detach().item()
            metrics["loss"].append(_loss)

            classes = torch.argmax(outputs, 1)
            _acc = (classes == targets).float().mean().detach().item()
            metrics["accuracy"].append(_acc)

            confidences, predictions = torch.max(outputs, dim=1)
            _gap = gap(predictions, confidences, targets)
            metrics["gap"].append(_gap)

            # Scale so that accumulated gradients average, rather than sum,
            # over the accumulation window.
            (loss / accumulation_steps).backward()

            progress.set_postfix_str(
                f"loss {_loss:.4f}, gap {_gap:.4f}, accuracy {_acc:.4f}")

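            # Apply the accumulated gradients once per accumulation window.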
            if (_idx + 1) % accumulation_steps == 0:
                optimizer.step()
                if scheduler is not None:
                    scheduler.step()

            progress.update(1)

            if _idx == DEBUG:
                break

    metrics["loss"] = np.mean(metrics["loss"])
    metrics["gap"] = np.mean(metrics["gap"])
    metrics["accuracy"] = np.mean(metrics["accuracy"])
    return metrics
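
For completeness, a minimal sketch of how the two functions might be wired into an epoch loop. Everything here is illustrative and assumes the helper sketches above: the dataset is random, and `StandInModel` is a placeholder whose forward accepts (and ignores) targets only to match the `model(inputs, targets)` call in both functions.

import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset

class StandInModel(nn.Module):
    # Placeholder classifier; real models in this setting (e.g. ArcFace
    # heads) actually consume the targets passed to forward.
    def __init__(self, in_features=128, num_classes=10):
        super().__init__()
        self.fc = nn.Linear(in_features, num_classes)

    def forward(self, inputs, targets=None):
        return self.fc(inputs)

device = "cuda:0" if torch.cuda.is_available() else "cpu"
dataset = TensorDataset(torch.randn(512, 128), torch.randint(0, 10, (512,)))
train_loader = DataLoader(dataset, batch_size=32, shuffle=True)
valid_loader = DataLoader(dataset, batch_size=32)

model = StandInModel().to(device)
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(3):
    train_metrics = train_fn(
        model, train_loader, device, loss_fn, optimizer, accumulation_steps=2
    )
    valid_metrics = valid_fn(model, valid_loader, device, loss_fn)
    print(f"epoch {epoch}: train {train_metrics}, valid {valid_metrics}")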