Example #1
def evaluate(model_path: Path, datasets: typing.List[Datasets], output_folder: Path, find_mistakes: bool = False, include_heading: bool = False) -> str:
    """Evaluate a model, returning the results as CSV.

    Args:
        model_path (Path): path to the model folder containing the YAML file and the saved weights
        datasets (typing.List[Datasets]): the datasets to evaluate on
        output_folder (Path): output folder for the mistake images (if applicable)
        find_mistakes (bool, optional): whether to output all mistakes as images to the output folder. Defaults to False.
        include_heading (bool, optional): whether to include a heading in the CSV output. Defaults to False.

    Raises:
        ValueError: if the YAML config file is missing

    Returns:
        str: the CSV string
    """
    model_name = model_path.stem
    config_file = model_path.parent / f"{model_name}.yaml"
    if not config_file.exists():
        raise ValueError(f"config file missing: {config_file}")
    cfg = CN.load_yaml_with_base(config_file)
    model = torch.load(model_path, map_location=DEVICE)
    model = device(model)
    model.eval()
    datasets = {mode: build_dataset(cfg, mode)
                for mode in datasets}
    classes = next(iter(datasets.values())).classes

    csv = []
    if include_heading:
        csv.append(_csv_heading(classes))
    for mode, dataset in datasets.items():
        # Load dataset
        loader = build_data_loader(cfg, dataset, mode)
        # Compute statistics over whole dataset
        agg = StatsAggregator(classes)
        for images, labels in device(loader):
            predictions = model(images)
            if find_mistakes:
                agg.add_batch(predictions, labels, inputs=images)
            else:
                agg.add_batch(predictions, labels)

        csv.append(_csv(model, agg, model_name, mode))
        if find_mistakes:
            groundtruth, mistakes = zip(*sorted(agg.mistakes,
                                                key=lambda x: x[0]))
            imgs = torch.tensor(mistakes).permute((0, 2, 3, 1))
            imgs = unnormalize(imgs).permute((0, 3, 1, 2))
            img = torchvision.utils.make_grid(imgs, pad_value=1, nrow=4)
            img = img.numpy().transpose((1, 2, 0)) * 255
            img = Image.fromarray(img.astype(np.uint8))
            mistakes_file = output_folder / \
                f"{model_name}_{mode.value}_mistakes.png"
            logger.info(f"Writing mistakes to {mistakes_file}")
            img.save(mistakes_file)
            groundtruth_file = output_folder / \
                f"{model_name}_{mode.value}_groundtruth.csv"
            with groundtruth_file.open("w") as f:
                f.write(",".join(map(str, groundtruth)))
    return "\n".join(csv)
Example #2
def aggregator() -> StatsAggregator:
    agg = StatsAggregator(["a", "b"])
    a_output = np.array([.9, .1, .8, .2, .9, .9, .9, .2])
    b_output = 1 - a_output
    outputs = torch.tensor(np.stack([a_output, b_output], axis=-1))
    labels = torch.tensor([0, 0, 0, 1, 0, 0, 1, 0])
    # predicted:          [0, 1, 0, 1, 0, 0, 0, 1]
    agg.add_batch(outputs, labels)
    return agg
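The fixture's "# predicted:" row comes from taking the argmax over the two class outputs. Recomputing the per-class counts that the tests below assert (illustrative only, independent of StatsAggregator):

import numpy as np

labels = np.array([0, 0, 0, 1, 0, 0, 1, 0])
predicted = np.array([0, 1, 0, 1, 0, 0, 0, 1])

for cls, name in [(0, "a"), (1, "b")]:
    tp = int(np.sum((predicted == cls) & (labels == cls)))
    fp = int(np.sum((predicted == cls) & (labels != cls)))
    fn = int(np.sum((predicted != cls) & (labels == cls)))
    print(name, f"precision={tp}/{tp + fp}", f"recall={tp}/{tp + fn}")
# a: precision=4/5, recall=4/6
# b: precision=1/3, recall=1/2
print("accuracy", np.mean(predicted == labels))  # 5 correct of 8 -> 5/8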
Example #3
def test_f1_score(aggregator: StatsAggregator):
    a_precision = 4 / 5
    a_recall = 4 / 6
    assert np.isclose(aggregator.f1_score("a"),
                      2 * a_precision * a_recall / (a_precision + a_recall))
    b_precision = 1 / 3
    b_recall = 1 / 2
    assert np.isclose(aggregator.f1_score("b"),
                      2 * b_precision * b_recall / (b_precision + b_recall))
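For reference, the closed-form values these assertions reduce to:

# f1("a") = 2 * (4/5) * (4/6) / (4/5 + 4/6) = (16/15) / (22/15) = 8/11
# f1("b") = 2 * (1/3) * (1/2) / (1/3 + 1/2) = (1/3) / (5/6) = 2/5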
Example #4
def _csv(model: torch.nn.Module, agg: StatsAggregator, model_name: str, mode: Datasets) -> str:
    params = sum(np.prod(p.size()) for p in model.parameters())
    return ",".join(map(str, [model_name,
                              mode.value,
                              params,
                              agg.accuracy(),
                              *map(agg.precision, agg.classes),
                              *map(agg.recall, agg.classes),
                              *map(agg.f1_score, agg.classes),
                              *agg.confusion_matrix.flatten()
                              ]))
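_csv_heading, used in Example #1, is not shown here; a sketch consistent with the column order _csv() emits might look like the following (the exact header names are assumptions):

def _csv_heading(classes: typing.List[str]) -> str:
    # Mirrors _csv(): identifiers, parameter count, accuracy, per-class
    # precision/recall/F1, then the flattened confusion matrix.
    confusion = (f"confusion_{actual}_{predicted}"
                 for actual in classes
                 for predicted in classes)
    return ",".join(["model", "dataset", "parameters", "accuracy",
                     *(f"precision_{c}" for c in classes),
                     *(f"recall_{c}" for c in classes),
                     *(f"f1_{c}" for c in classes),
                     *confusion])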
Example #5
def test_empty_batch():
    aggregator = StatsAggregator(["a", "b"])
    assert aggregator.accuracy() == 0
    assert aggregator.precision("a") == 0
    assert aggregator.precision("b") == 0
    assert aggregator.recall("a") == 0
    assert aggregator.recall("b") == 0
    assert aggregator.f1_score("a") == 0
    assert aggregator.f1_score("b") == 0
Example #6
def test_recall(aggregator: StatsAggregator):
    assert np.isclose(aggregator.recall("a"), 4 / 6)
    assert np.isclose(aggregator.recall("b"), 1 / 2)
Example #7
def test_precision(aggregator: StatsAggregator):
    assert np.isclose(aggregator.precision("a"), 4 / 5)
    assert np.isclose(aggregator.precision("b"), 1 / 3)
Example #8
def test_accuracy(aggregator: StatsAggregator):
    assert np.isclose(aggregator.accuracy(), 5 / 8)
Example #9
def train_model(cfg: CN,
                run_dir: Path,
                model: torch.nn.Module,
                is_inception: bool = False,
                model_name: typing.Optional[str] = None,
                eval_on_train: bool = False) -> nn.Module:
    """Train a model that has already been loaded.

    Args:
        cfg (CN): the configuration object describing the model, dataset, etc.
        run_dir (Path): where to write tensorboard files, the active YAML file, and the chosen weights
        model (torch.nn.Module): the loaded model
        is_inception (bool, optional): whether the model is InceptionV3. Defaults to False.
        model_name (str, optional): the name of the model (by default the last component of the run directory). Defaults to None.
        eval_on_train (bool, optional): whether to evaluate on the training set. Defaults to False.

    Returns:
        nn.Module: the trained model
    """
    logger.info(f"Starting training in {run_dir}")
    if not model_name:
        model_name = run_dir.name

    # Create folder
    if run_dir.exists():
        logger.warning(
            f"The folder {run_dir} already exists and will be overwritten by this run"
        )
        shutil.rmtree(run_dir, ignore_errors=True)
    run_dir.mkdir(parents=True, exist_ok=True)

    # Store config
    with (run_dir / f"{model_name}.yaml").open("w") as f:
        cfg.dump(stream=f)

    # Move model to device
    device(model)

    best_weights = copy.deepcopy(model.state_dict())
    best_accuracy = 0.
    best_step = 0

    criterion = nn.CrossEntropyLoss()

    modes = {Datasets.TRAIN, Datasets.VAL}
    if eval_on_train:
        dataset = build_dataset(cfg, Datasets.TRAIN)
        datasets = {mode: dataset for mode in modes}
    else:
        datasets = {mode: build_dataset(cfg, mode) for mode in modes}
    classes = datasets[Datasets.TRAIN].classes
    loader = {
        mode: build_data_loader(cfg, datasets[mode], mode)
        for mode in modes
    }
    writer = {mode: SummaryWriter(run_dir / mode.value) for mode in modes}
    aggregator = {mode: StatsAggregator(classes) for mode in modes}

    def log(step: int, loss: float, mode: Datasets):
        if mode == Datasets.TRAIN:
            logger.info(f"Step {step:5d}: loss {loss:.3f}")

        w, agg = (x[mode] for x in (writer, aggregator))

        w.add_scalar("Loss", loss, step)
        w.add_scalar("Accuracy", agg.accuracy(), step)
        for c in classes:
            w.add_scalar(f"Precision/{c}", agg.precision(c), step)
            w.add_scalar(f"Recall/{c}", agg.recall(c), step)
            w.add_scalar(f"F1 score/{c}", agg.f1_score(c), step)

    def perform_iteration(data: typing.Tuple[torch.Tensor, torch.Tensor],
                          mode: Datasets):
        inputs, labels = map(device, data)
        with torch.set_grad_enabled(mode == Datasets.TRAIN):
            # Reset gradients
            optimizer.zero_grad()

            # Forward pass and compute loss
            if is_inception and mode == Datasets.TRAIN:
                # Special case for inception models
                outputs, auxiliary_outputs = model(inputs)
                loss1 = criterion(outputs, labels)
                loss2 = criterion(auxiliary_outputs, labels)
                loss = loss1 + 0.4 * loss2
            else:
                outputs = model(inputs)
                loss = criterion(outputs, labels)

            if mode == Datasets.TRAIN:
                loss.backward()

        with torch.no_grad():
            aggregator[mode].add_batch(outputs, labels)

        # Perform optimisation
        if mode == Datasets.TRAIN:
            optimizer.step()

        # Return
        return loss.item()

    step = 0
    log_every_n = 100

    # Ensure we're in training mode
    model.train()

    # Loop over training phases
    for phase in cfg.TRAINING.PHASES:

        for p in model.parameters():
            p.requires_grad = False
        parameters = list(model.parameters()) if phase.PARAMS == "all" \
            else model.params[phase.PARAMS]
        for p in parameters:
            p.requires_grad = True
        optimizer = build_optimizer_from_config(phase.OPTIMIZER, parameters)

        # Loop over epochs (passes over the whole dataset)
        for epoch in range(phase.EPOCHS):
            aggregator[Datasets.TRAIN].reset()

            # Iterate the training set
            losses = []
            for i, data in enumerate(loader[Datasets.TRAIN]):

                # Perform training iteration
                losses.append(perform_iteration(data, mode=Datasets.TRAIN))

                if step % log_every_n == 0:
                    loss = np.mean(losses)
                    log(step, loss, Datasets.TRAIN)
                    aggregator[Datasets.TRAIN].reset()
                    losses = []

                    # Validate entire validation dataset
                    model.eval()
                    aggregator[Datasets.VAL].reset()

                    # Iterate entire val dataset
                    perform_val_iteration = functools.partial(
                        perform_iteration, mode=Datasets.VAL)
                    val_losses = map(perform_val_iteration,
                                     loader[Datasets.VAL])

                    # Gather losses and log
                    val_loss = np.mean(list(val_losses))
                    log(step, val_loss, Datasets.VAL)
                    model.train()

                # Save weights if we get a better performance
                accuracy = aggregator[Datasets.VAL].accuracy()
                if accuracy >= best_accuracy:
                    best_accuracy = accuracy
                    best_weights = copy.deepcopy(model.state_dict())
                    best_step = step

                # Get ready for next step
                step += 1

    # Clean up
    for w in writer.values():
        w.flush()
        w.close()

    logger.info("Finished training")

    logger.info(
        f"Restoring best weight state (step {best_step} with validation accuracy of {best_accuracy})"
    )
    model.load_state_dict(best_weights)
    torch.save(model, run_dir / f"{model_name}.pt")
    with (run_dir / f"{model_name}.txt").open("w") as f:
        f.write(f"exported at step: {best_step}")
    return model
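A minimal sketch of invoking train_model; the config path and the model constructor are illustrative assumptions (any nn.Module compatible with the config's dataset works):

from pathlib import Path

cfg = CN.load_yaml_with_base("config/example.yaml")  # illustrative path
model = torchvision.models.resnet18(num_classes=2)   # placeholder model
trained = train_model(cfg, run_dir=Path("runs/example"), model=model)
# The best weights (by validation accuracy) end up in runs/example/example.pt,
# since model_name defaults to the run directory's name.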