Example #1
def store_epoch_metrics(
        azure_and_tensorboard_logger: AzureAndTensorboardLogger,
        df_logger: DataframeLogger, epoch: int, metrics: MetricsDict,
        learning_rates: List[float], config: ModelConfigBase) -> None:
    """
    Writes the loss, Dice scores, and learning rates into a file for TensorBoard visualization,
    and into the AzureML run context.
    :param azure_and_tensorboard_logger: An instance of AzureAndTensorboardLogger.
    :param df_logger: An instance of DataframeLogger, for logging results to csv.
    :param epoch: The epoch corresponding to the results.
    :param metrics: The metrics of the specified epoch, averaged along its batches.
    :param learning_rates: The logged learning rates.
    :param config: The model configuration; must be either a segmentation or a scalar model config.
    """
    if config.is_segmentation_model:
        azure_and_tensorboard_logger.log_segmentation_epoch_metrics(
            metrics, learning_rates)
        logger_row = {
            LoggingColumns.Dice.value: metrics.get_single_metric(MetricType.DICE),
            LoggingColumns.Loss.value: metrics.get_single_metric(MetricType.LOSS),
            LoggingColumns.SecondsPerEpoch.value: metrics.get_single_metric(MetricType.SECONDS_PER_EPOCH)
        }

    elif config.is_scalar_model:
        assert isinstance(metrics, MetricsDict)
        azure_and_tensorboard_logger.log_classification_epoch_metrics(metrics)
        logger_row: Dict[str, float] = {}  # type: ignore
        for hue_name, metric_name, metric_value in metrics.enumerate_single_values():
            logging_column_name = get_column_name_for_logging(
                metric_name, hue_name=hue_name)
            logger_row[logging_column_name] = metric_value
    else:
        raise ValueError(
            "Model must be either a classification, regression, or segmentation model")

    logger_row.update({
        LoggingColumns.Epoch.value: epoch,
        LoggingColumns.CrossValidationSplitIndex.value: config.cross_validation_split_index
    })

    df_logger.add_record(logger_row)
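
For a segmentation model, the record handed to df_logger.add_record is a flat dictionary keyed by LoggingColumns enum values. A minimal sketch of its shape; the literal keys and numbers below are placeholders standing in for LoggingColumns.Dice.value, LoggingColumns.Loss.value, and so on:

# Shape of the record only; keys and values are placeholders for the
# LoggingColumns values used in store_epoch_metrics above.
logger_row = {
    "Dice": 0.84,                    # MetricType.DICE, averaged over the epoch
    "Loss": 0.12,                    # MetricType.LOSS
    "SecondsPerEpoch": 31.5,         # MetricType.SECONDS_PER_EPOCH
    "Epoch": 3,
    "CrossValidationSplitIndex": 0,
}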
Example #2
def store_epoch_stats_for_segmentation(
        outputs_dir: Path, epoch: int, learning_rates: List[float],
        training_results: MetricsDict,
        validation_results: MetricsDict) -> None:
    """
    Writes a dictionary of statistics for a segmentation training run to a file. Successive calls to the function
    append another line of metrics. The first line of the file contains the column headers (names of the metrics).
    :param training_results: A MetricsDict object with all metrics that were achieved on the training set in the
    current epoch.
    :param validation_results: A MetricsDict object with all metrics that were achieved on the validation set in the
    current epoch.
    :param learning_rates: The learning rates that were used in the current epoch.
    :param epoch: The number of the current training epoch.
    :param outputs_dir: The directory in which the statistics file should be created.
    :return:
    """
    epoch_stats = {
        "Epoch": str(epoch),
        "LearningRate": format_learning_rates(learning_rates),
        "TrainLoss": metrics_util.format_metric(training_results.get_single_metric(MetricType.LOSS)),
        "TrainDice": metrics_util.format_metric(training_results.get_single_metric(MetricType.DICE)),
        "ValLoss": metrics_util.format_metric(validation_results.get_single_metric(MetricType.LOSS)),
        "ValDice": metrics_util.format_metric(validation_results.get_single_metric(MetricType.DICE)),
    }
    # Write an explicit "\n" rather than os.linesep: in text mode Python already translates "\n" into the
    # platform line ending, so os.linesep would insert extra line-end characters on Windows. A fixed "\n"
    # keeps the file consistent on both Windows and Linux.
    line_sep = "\n"
    tab = "\t"
    full_file = outputs_dir / TRAIN_STATS_FILE
    if not full_file.exists():
        header = tab.join(epoch_stats.keys())
        full_file.write_text(header + line_sep)
    line = tab.join(epoch_stats.values())
    with full_file.open("a") as f:
        f.write(line + line_sep)
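
Because the header row is written only when the file does not yet exist, each later call appends a single tab-separated line under the same header. After two epochs the stats file could look roughly like this (all numbers and the learning-rate formatting are placeholders):

Epoch	LearningRate	TrainLoss	TrainDice	ValLoss	ValDice
1	0.001	0.412	0.551	0.398	0.562
2	0.0009	0.307	0.634	0.311	0.640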
Example #3
def test_add_foreground_dice() -> None:
    g1 = "Liver"
    g2 = "Lung"
    ground_truth_ids = [BACKGROUND_CLASS_NAME, g1, g2]
    dice = [0.85, 0.75, 0.55]
    m = MetricsDict(hues=ground_truth_ids)
    for j, ground_truth_id in enumerate(ground_truth_ids):
        m.add_metric(MetricType.DICE, dice[j], hue=ground_truth_id)
    metrics.add_average_foreground_dice(m)
    assert m.get_single_metric(MetricType.DICE) == 0.5 * (dice[1] + dice[2])
Example #4
def add_average_foreground_dice(metrics: MetricsDict) -> None:
    """
    If the given metrics dictionary contains an entry for Dice score, and only one value for the Dice score per class,
    then add an average Dice score for all foreground classes to the metrics dictionary (modified in place).
    :param metrics: The object that holds metrics. The average Dice score will be written back into this object.
    """
    all_dice = []
    for structure_name in metrics.get_hue_names(include_default=False):
        if structure_name != BACKGROUND_CLASS_NAME:
            all_dice.append(metrics.get_single_metric(MetricType.DICE, hue=structure_name))
    metrics.add_metric(MetricType.DICE, np.nanmean(all_dice).item())
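
The averaging itself is a NaN-aware mean over the per-class Dice values with the background class left out. A minimal numpy illustration of that step, reusing the foreground values from test_add_foreground_dice above:

import numpy as np

# Dice scores for the foreground classes only; background is excluded.
foreground_dice = [0.75, 0.55]
average_dice = np.nanmean(foreground_dice).item()  # 0.65, as asserted in the test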
Example #5
    def log_segmentation_epoch_metrics(self, metrics: MetricsDict,
                                       learning_rates: List[float]) -> None:
        """
        Logs segmentation metrics (e.g. loss, dice scores, learning rates) to an event file for TensorBoard
        visualization and to the AzureML run context.
        :param learning_rates: The logged learning rates.
        :param metrics: The metrics of the specified epoch, averaged along its batches.
        """
        logging_fn = self.log_to_azure_and_tensorboard
        logging_fn(MetricType.LOSS.value,
                   metrics.get_single_metric(MetricType.LOSS))
        logging_fn("Dice/AverageExceptBackground",
                   metrics.get_single_metric(MetricType.DICE))
        logging_fn(
            "Voxels/ProportionForeground",
            metrics.get_single_metric(MetricType.PROPORTION_FOREGROUND_VOXELS))
        logging_fn("TimePerEpoch_Seconds",
                   metrics.get_single_metric(MetricType.SECONDS_PER_EPOCH))

        if learning_rates is not None:
            for i, lr in enumerate(learning_rates):
                logging_fn("LearningRate/Index_{}".format(i), lr)

        for class_name in metrics.get_hue_names(include_default=False):
            # TensorBoard groups metrics by the part of the name before the slash.
            # With metrics Dice/Foo and Dice/Bar, it creates a "Dice" section
            # that contains graphs for Foo and Bar.
            get_label = lambda x, y: "{}/{}".format(x, y)
            logging_fn(
                get_label("Dice", class_name),
                metrics.get_single_metric(MetricType.DICE, hue=class_name))
            logging_fn(
                get_label("Voxels", class_name),
                metrics.get_single_metric(
                    MetricType.PROPORTION_FOREGROUND_VOXELS, hue=class_name))
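
The per-class loop above emits names such as "Dice/Liver" and "Voxels/Liver"; since TensorBoard groups charts by the prefix before the slash, all class-level Dice curves end up in a single "Dice" section. A small sketch of the names generated for two illustrative class names:

# Illustrative class names only; real names come from metrics.get_hue_names().
for class_name in ["Liver", "Lung"]:
    print("{}/{}".format("Dice", class_name))    # Dice/Liver, Dice/Lung
    print("{}/{}".format("Voxels", class_name))  # Voxels/Liver, Voxels/Lung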
def test_get_single_metric() -> None:
    h1 = "a"
    m = MetricsDict(hues=[h1])
    m1, v1 = ("foo", 1.0)
    m2, v2 = (MetricType.LOSS, 2.0)
    m.add_metric(m1, v1, hue=h1)
    m.add_metric(m2, v2)
    assert m.get_single_metric(m1, h1) == v1
    assert m.get_single_metric(m2) == v2
    with pytest.raises(KeyError) as ex1:
        m.get_single_metric(m1, "no such hue")
    assert "no such hue" in str(ex1)
    with pytest.raises(KeyError) as ex2:
        m.get_single_metric("no such metric", h1)
    assert "no such metric" in str(ex2)
    m.add_metric(m2, v2)
    with pytest.raises(ValueError) as ex3:
        m.get_single_metric(m2)
    assert "Expected a single entry" in str(ex3)