import pymia.evaluation.evaluator as eval_
import pymia.evaluation.metric as metric


def init_evaluator() -> eval_.Evaluator:
    """Initializes an evaluator with a set of segmentation metrics.

    Returns:
        eval_.Evaluator: An evaluator.
    """
    # select the metrics to calculate during the evaluation
    # (further metrics such as metric.Precision(), metric.Sensitivity(), metric.Specificity(),
    # or metric.HausdorffDistance(95) are available as well)
    evaluation_metrics = [metric.DiceCoefficient(), metric.JaccardCoefficient(), metric.SurfaceDiceOverlap(),
                          metric.Accuracy(), metric.FMeasure(), metric.CohenKappaCoefficient(),
                          metric.VolumeSimilarity(), metric.MutualInformation(), metric.AreaUnderCurve(),
                          metric.HausdorffDistance()]

    # SegmentationEvaluator expects the labels to evaluate as a dict of label value to label name
    labels = {1: 'WhiteMatter',
              2: 'GreyMatter',
              3: 'Hippocampus',
              4: 'Amygdala',
              5: 'Thalamus'}

    evaluator = eval_.SegmentationEvaluator(evaluation_metrics, labels)

    return evaluator
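
A minimal usage sketch for the evaluator above, following the new-style pymia API also used in Example #3 below (the image paths and subject ID are placeholders, not part of the original example):

import SimpleITK as sitk
import pymia.evaluation.writer as writer

evaluator = init_evaluator()
prediction = sitk.ReadImage('Subject_1_SEG.mha')   # hypothetical prediction path
ground_truth = sitk.ReadImage('Subject_1_GT.mha')  # hypothetical ground-truth path
evaluator.evaluate(prediction, ground_truth, 'Subject_1')
writer.ConsoleWriter().write(evaluator.results)    # report the results on the console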

# The following example uses an older pymia evaluator API, where metrics,
# writers, and labels are registered on the evaluator instance itself.
import os

import pymia.evaluation.evaluator as eval_
import pymia.evaluation.metric as metric


def init_evaluator(directory: str, result_file_name: str = 'results.csv') -> eval_.Evaluator:
    """Initializes an evaluator.

    Args:
        directory (str): The directory for the results file.
        result_file_name (str): The result file name (CSV file).

    Returns:
        eval_.Evaluator: An evaluator.
    """
    os.makedirs(directory, exist_ok=True)  # create the result directory if it does not exist

    evaluator = eval_.Evaluator(eval_.ConsoleEvaluatorWriter(5))
    evaluator.add_writer(eval_.CSVEvaluatorWriter(os.path.join(directory, result_file_name)))
    evaluator.add_label(1, "WhiteMatter")
    evaluator.add_label(2, "GreyMatter")
    evaluator.add_label(3, "Hippocampus")
    evaluator.add_label(4, "Amygdala")
    evaluator.add_label(5, "Thalamus")
    evaluator.metrics = [metric.DiceCoefficient(),
                         metric.AreaUnderCurve(),
                         metric.VolumeSimilarity(),
                         metric.Accuracy(),
                         metric.AverageDistance(),
                         metric.CohenKappaCoefficient(),
                         metric.FalseNegative(),
                         metric.FalsePositive(),
                         metric.Fallout(),
                         metric.GroundTruthArea(),
                         metric.GroundTruthVolume(),
                         metric.Specificity(),
                         metric.Sensitivity()
                         ]
    return evaluator
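
A minimal usage sketch for this older-API evaluator, assuming the older evaluate(prediction, ground_truth, id) signature; the registered console and CSV writers report the results when evaluate() is called (paths are placeholders):

import SimpleITK as sitk

evaluator = init_evaluator('./results')
prediction = sitk.ReadImage('Subject_1_SEG.mha')   # hypothetical prediction path
ground_truth = sitk.ReadImage('Subject_1_GT.mha')  # hypothetical ground-truth path
evaluator.evaluate(prediction, ground_truth, 'Subject_1')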
Example #3
import glob
import os

import numpy as np
import SimpleITK as sitk

import pymia.evaluation.evaluator as eval_
import pymia.evaluation.metric as metric
import pymia.evaluation.writer as writer


def main(data_dir: str, result_file: str, result_summary_file: str):
    # initialize metrics
    metrics = [
        metric.DiceCoefficient(),
        metric.HausdorffDistance(percentile=95, metric='HDRFDST95'),
        metric.VolumeSimilarity()
    ]

    # define the labels to evaluate
    labels = {1: 'WHITEMATTER', 2: 'GREYMATTER', 5: 'THALAMUS'}

    evaluator = eval_.SegmentationEvaluator(metrics, labels)

    # get subjects to evaluate
    subject_dirs = [
        subject for subject in glob.glob(os.path.join(data_dir, '*'))
        if os.path.isdir(subject)
        and os.path.basename(subject).startswith('Subject')
    ]

    for subject_dir in subject_dirs:
        subject_id = os.path.basename(subject_dir)
        print(f'Evaluating {subject_id}...')

        # load ground truth image and create artificial prediction by erosion
        ground_truth = sitk.ReadImage(
            os.path.join(subject_dir, f'{subject_id}_GT.mha'))
        prediction = ground_truth  # BinaryErode returns a new image, so the ground truth stays intact
        for label_val in labels.keys():
            # erode each label we are going to evaluate
            # (recent SimpleITK versions expect the kernel radius as a vector, not a scalar)
            prediction = sitk.BinaryErode(prediction, [1] * prediction.GetDimension(),
                                          sitk.sitkBall, 0, label_val)

        # evaluate the "prediction" against the ground truth
        evaluator.evaluate(prediction, ground_truth, subject_id)

    # use two writers to report the results
    writer.CSVWriter(result_file).write(evaluator.results)

    print('\nSubject-wise results...')
    writer.ConsoleWriter().write(evaluator.results)

    # report also mean and standard deviation among all subjects
    functions = {'MEAN': np.mean, 'STD': np.std}
    writer.CSVStatisticsWriter(result_summary_file,
                               functions=functions).write(evaluator.results)
    print('\nAggregated statistic results...')
    writer.ConsoleStatisticsWriter(functions=functions).write(
        evaluator.results)

    # clear results such that the evaluator is ready for the next evaluation
    evaluator.clear()
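
A hedged entry-point sketch for running main(); the argument names and defaults are assumptions for illustration, not part of the original example:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Segmentation evaluation example')
    parser.add_argument('--data_dir', type=str, default='./data',
                        help='directory containing the subject folders')
    parser.add_argument('--result_file', type=str, default='results.csv',
                        help='CSV file for the subject-wise results')
    parser.add_argument('--result_summary_file', type=str, default='results_summary.csv',
                        help='CSV file for the aggregated statistics')
    args = parser.parse_args()
    main(args.data_dir, args.result_file, args.result_summary_file)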
# The following example relies on project-specific helpers (EvaluatorAggregator and cfg)
# and an older pymia evaluator API; those helpers must come from the surrounding project.
import pymia.evaluation.evaluator as eval  # note: this alias shadows the Python builtin
import pymia.evaluation.metric as pymia_metric


def init_evaluator(write_to_console: bool = True,
                   csv_file: str = None,
                   calculate_distance_metrics: bool = False):
    """Initializes an evaluator that reports to the console and/or a CSV file."""
    evaluator = eval.Evaluator(EvaluatorAggregator())
    if write_to_console:
        evaluator.add_writer(eval.ConsoleEvaluatorWriter(5))
    if csv_file is not None:
        evaluator.add_writer(eval.CSVEvaluatorWriter(csv_file))
    if calculate_distance_metrics:
        evaluator.metrics = [
            pymia_metric.DiceCoefficient(),
            pymia_metric.HausdorffDistance(),
            pymia_metric.HausdorffDistance(percentile=95, metric='HDRFDST95'),
            pymia_metric.VolumeSimilarity()
        ]
    else:
        evaluator.metrics = [
            pymia_metric.DiceCoefficient(),
            pymia_metric.VolumeSimilarity()
        ]
    evaluator.add_label(1, cfg.FOREGROUND_NAME)
    return evaluator
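
A minimal usage sketch for the function above (the CSV path is a placeholder):

evaluator = init_evaluator(write_to_console=True,
                           csv_file='results.csv',
                           calculate_distance_metrics=True)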