def init_evaluator() -> eval_.Evaluator:
    """Initializes an evaluator.

    Returns:
        eval.Evaluator: An evaluator.
    """
    # metrics to evaluate; a more extensive alternative set is kept here for reference:
    # evaluation_metrics = [metric.DiceCoefficient(), metric.HausdorffDistance(95), metric.CohenKappaCoefficient(), metric.Accuracy(),
    #                       metric.JaccardCoefficient(), metric.MutualInformation(), metric.Precision(), metric.VolumeSimilarity(), metric.AreaUnderCurve(),
    #                       metric.FalseNegative(), metric.FalsePositive(), metric.TruePositive(), metric.TrueNegative(), metric.Sensitivity(), metric.Specificity()]
    evaluation_metrics = [metric.DiceCoefficient(), metric.JaccardCoefficient(), metric.SurfaceDiceOverlap(), metric.Accuracy(),
                          metric.FMeasure(), metric.CohenKappaCoefficient(), metric.VolumeSimilarity(), metric.MutualInformation(),
                          metric.AreaUnderCurve(), metric.HausdorffDistance()]

    # labels to evaluate
    labels = {1: 'WhiteMatter',
              2: 'GreyMatter',
              3: 'Hippocampus',
              4: 'Amygdala',
              5: 'Thalamus'}

    evaluator = eval_.SegmentationEvaluator(evaluation_metrics, labels)
    return evaluator
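A minimal usage sketch for this evaluator (for illustration only: the toy label volumes below are made up, and pymia's writer module is assumed to be imported as writer, as in Example #8 below):

import numpy as np

# toy reference volume containing labels 0..5 and a slightly corrupted "prediction"
ground_truth = np.random.randint(0, 6, size=(10, 32, 32)).astype(np.uint8)
prediction = ground_truth.copy()
prediction[:2] = 0  # zero out the first two slices to make the comparison non-trivial

evaluator = init_evaluator()
evaluator.evaluate(prediction, ground_truth, 'Subject_1')  # accumulates per-subject, per-label results
writer.ConsoleWriter().write(evaluator.results)            # report them on the console
evaluator.clear()                                          # reset before the next evaluation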
def init_evaluator(directory: str, result_file_name: str = 'results.csv') -> eval_.Evaluator:
    """Initializes an evaluator.

    Args:
        directory (str): The directory for the results file.
        result_file_name (str): The result file name (CSV file).

    Returns:
        eval.Evaluator: An evaluator.
    """
    os.makedirs(directory, exist_ok=True)  # generate result directory, if it does not exist

    evaluator = eval_.Evaluator(eval_.ConsoleEvaluatorWriter(5))
    evaluator.add_writer(eval_.CSVEvaluatorWriter(os.path.join(directory, result_file_name)))
    evaluator.add_label(1, "WhiteMatter")
    evaluator.add_label(2, "GreyMatter")
    evaluator.add_label(3, "Hippocampus")
    evaluator.add_label(4, "Amygdala")
    evaluator.add_label(5, "Thalamus")
    evaluator.metrics = [metric.DiceCoefficient(),
                         metric.AreaUnderCurve(),
                         metric.VolumeSimilarity(),
                         metric.Accuracy(),
                         metric.AverageDistance(),
                         metric.CohenKappaCoefficient(),
                         metric.FalseNegative(),
                         metric.FalsePositive(),
                         metric.Fallout(),
                         metric.GroundTruthArea(),
                         metric.GroundTruthVolume(),
                         metric.Specificity(),
                         metric.Sensitivity()
                         ]
    return evaluator
Example #3
def init_evaluator(directory: str,
                   result_file_name: str = 'results.csv') -> eval_.Evaluator:
    """Initializes an evaluator.

    Args:
        directory (str): The directory for the results file.
        result_file_name (str): The result file name (CSV file).

    Returns:
        eval.Evaluator: An evaluator.
    """
    os.makedirs(
        directory,
        exist_ok=True)  # generate result directory, if it does not exist

    evaluator = eval_.Evaluator(eval_.ConsoleEvaluatorWriter(5))
    evaluator.add_writer(
        eval_.CSVEvaluatorWriter(os.path.join(directory, result_file_name)))
    evaluator.add_label(1, "WhiteMatter")
    evaluator.add_label(2, "GreyMatter")
    evaluator.add_label(3, "Hippocampus")
    evaluator.add_label(4, "Amygdala")
    evaluator.add_label(5, "Thalamus")
    evaluator.metrics = [metric.DiceCoefficient()]
    return evaluator
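A usage sketch for this variant (the tiny synthetic images below are for illustration only; in practice the prediction and ground-truth label images would be loaded with SimpleITK):

import numpy as np
import SimpleITK as sitk

# build a tiny label volume containing one block per label 1..5
labels_array = np.zeros((16, 16, 16), dtype=np.uint8)
for label in range(1, 6):
    labels_array[label * 2:label * 2 + 2, 4:12, 4:12] = label
ground_truth = sitk.GetImageFromArray(labels_array)
prediction = sitk.GetImageFromArray(labels_array)  # identical "prediction", so all Dice values are 1.0

evaluator = init_evaluator('./mia-result')  # results are written to ./mia-result/results.csv
evaluator.evaluate(prediction, ground_truth, 'Subject_1')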
Example #4
def init_evaluator(directory: str,
                   result_file_name: str = 'results.csv') -> eval_.Evaluator:
    """Initializes an evaluator.

    Args:
        directory (str): The directory for the results file.
        result_file_name (str): The result file name (CSV file).

    Returns:
        eval.Evaluator: An evaluator.
    """
    os.makedirs(
        directory,
        exist_ok=True)  # generate result directory, if it does not exist

    evaluator = eval_.Evaluator(eval_.ConsoleEvaluatorWriter(5))
    evaluator.add_writer(
        eval_.CSVEvaluatorWriter(os.path.join(directory, result_file_name)))
    evaluator.add_label(1, 'WhiteMatter')
    evaluator.add_label(2, 'GreyMatter')
    evaluator.add_label(3, 'Hippocampus')
    evaluator.add_label(4, 'Amygdala')
    evaluator.add_label(5, 'Thalamus')
    evaluator.metrics = [
        metric.DiceCoefficient(),
        metric.HausdorffDistance(95)  # 95th percentile Hausdorff distance
    ]
    return evaluator
def dice(prediction, target):
    _check_ndarray(prediction)
    _check_ndarray(target)

    d = m.DiceCoefficient()
    d.confusion_matrix = m.ConfusionMatrix(prediction, target)
    return d.calculate()
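A small usage sketch for this helper (assuming m refers to pymia.evaluation.metric and _check_ndarray merely verifies that the inputs are numpy arrays):

import numpy as np

prediction = np.array([[0, 1, 1],
                       [0, 1, 0]])
target = np.array([[0, 1, 1],
                   [0, 0, 0]])

# 2 overlapping foreground voxels, 3 predicted and 2 reference foreground voxels:
# Dice = 2 * 2 / (3 + 2) = 0.8
print(dice(prediction, target))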
Example #6
def init_evaluator() -> pymia_eval.Evaluator:
    evaluator = pymia_eval.Evaluator(pymia_eval.ConsoleEvaluatorWriter(5))
    evaluator.add_label(1, 'Structure 1')
    evaluator.add_label(2, 'Structure 2')
    evaluator.add_label(3, 'Structure 3')
    evaluator.add_label(4, 'Structure 4')
    evaluator.metrics = [pymia_metric.DiceCoefficient()]
    return evaluator
Example #7
def init_evaluator(csv_file: str=None):
    evaluator = pymia_eval.Evaluator(pymia_eval.ConsoleEvaluatorWriter(5))
    if csv_file is not None:
        evaluator.add_writer(pymia_eval.CSVEvaluatorWriter(csv_file))
    evaluator.add_writer(EvaluatorAggregator())
    evaluator.metrics = [pymia_metric.DiceCoefficient()]
    evaluator.add_label(1, "WhiteMatter")
    evaluator.add_label(2, "GreyMatter")
    evaluator.add_label(3, "Hippocampus")
    evaluator.add_label(4, "Amygdala")
    evaluator.add_label(5, "Thalamus")
    return evaluator
Example #8
def main(data_dir: str, result_file: str, result_summary_file: str):
    # initialize metrics
    metrics = [
        metric.DiceCoefficient(),
        metric.HausdorffDistance(percentile=95, metric='HDRFDST95'),
        metric.VolumeSimilarity()
    ]

    # define the labels to evaluate
    labels = {1: 'WHITEMATTER', 2: 'GREYMATTER', 5: 'THALAMUS'}

    evaluator = eval_.SegmentationEvaluator(metrics, labels)

    # get subjects to evaluate
    subject_dirs = [
        subject for subject in glob.glob(os.path.join(data_dir, '*'))
        if os.path.isdir(subject)
        and os.path.basename(subject).startswith('Subject')
    ]

    for subject_dir in subject_dirs:
        subject_id = os.path.basename(subject_dir)
        print(f'Evaluating {subject_id}...')

        # load ground truth image and create artificial prediction by erosion
        ground_truth = sitk.ReadImage(
            os.path.join(subject_dir, f'{subject_id}_GT.mha'))
        prediction = ground_truth
        for label_val in labels.keys():
            # erode each label we are going to evaluate
            prediction = sitk.BinaryErode(prediction, 1, sitk.sitkBall, 0,
                                          label_val)

        # evaluate the "prediction" against the ground truth
        evaluator.evaluate(prediction, ground_truth, subject_id)

    # use two writers to report the results
    writer.CSVWriter(result_file).write(evaluator.results)

    print('\nSubject-wise results...')
    writer.ConsoleWriter().write(evaluator.results)

    # report also mean and standard deviation among all subjects
    functions = {'MEAN': np.mean, 'STD': np.std}
    writer.CSVStatisticsWriter(result_summary_file,
                               functions=functions).write(evaluator.results)
    print('\nAggregated statistic results...')
    writer.ConsoleStatisticsWriter(functions=functions).write(
        evaluator.results)

    # clear results such that the evaluator is ready for the next evaluation
    evaluator.clear()
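This main() presumably receives its paths from a command-line entry point that is not shown here; a minimal sketch of such an entry point (argument names and defaults are assumptions):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Evaluate segmentations against the ground truth.')
    parser.add_argument('--data_dir', type=str, default='./data', help='directory containing the Subject_* folders')
    parser.add_argument('--result_file', type=str, default='./results.csv', help='per-subject results (CSV)')
    parser.add_argument('--result_summary_file', type=str, default='./results_summary.csv', help='aggregated statistics (CSV)')
    args = parser.parse_args()
    main(args.data_dir, args.result_file, args.result_summary_file)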
def init_evaluator(write_to_console: bool = True,
                   csv_file: str = None,
                   calculate_distance_metrics: bool = False):
    evaluator = eval.Evaluator(EvaluatorAggregator())
    if write_to_console:
        evaluator.add_writer(eval.ConsoleEvaluatorWriter(5))
    if csv_file is not None:
        evaluator.add_writer(eval.CSVEvaluatorWriter(csv_file))
    if calculate_distance_metrics:
        evaluator.metrics = [
            pymia_metric.DiceCoefficient(),
            pymia_metric.HausdorffDistance(),
            pymia_metric.HausdorffDistance(percentile=95, metric='HDRFDST95'),
            pymia_metric.VolumeSimilarity()
        ]
    else:
        evaluator.metrics = [
            pymia_metric.DiceCoefficient(),
            pymia_metric.VolumeSimilarity()
        ]
    evaluator.add_label(1, cfg.FOREGROUND_NAME)
    return evaluator
Example #10
def init_evaluator() -> eval_.Evaluator:
    """Initializes an evaluator.

    Returns:
        eval.Evaluator: An evaluator.
    """

    # initialize metrics
    metrics = [metric.DiceCoefficient(), metric.HausdorffDistance(95.0)]

    # define the labels to evaluate
    labels = {
        1: 'WhiteMatter',
        2: 'GreyMatter',
        3: 'Hippocampus',
        4: 'Amygdala',
        5: 'Thalamus'
    }

    evaluator = eval_.SegmentationEvaluator(metrics, labels)
    return evaluator
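As in Example #8 above, the returned evaluator is driven by calling evaluate() per subject and then handing evaluator.results to the writers; a short sketch with toy data (and assuming pymia's writer module is imported as writer):

import numpy as np

evaluator = init_evaluator()

reference = np.random.randint(0, 6, size=(10, 32, 32)).astype(np.uint8)  # toy label volume
evaluator.evaluate(reference.copy(), reference, 'Subject_1')             # a perfect "prediction"

functions = {'MEAN': np.mean, 'STD': np.std}
writer.ConsoleStatisticsWriter(functions=functions).write(evaluator.results)  # mean/std per metric and label
evaluator.clear()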
Example #11
def init_evaluator() -> eval_.Evaluator:
    """Initializes an evaluator.

    Returns:
        eval.Evaluator: An evaluator.
    """

    # initialize metrics
    metrics = [metric.DiceCoefficient()]
    # todo: add hausdorff distance, 95th percentile (see metric.HausdorffDistance)
    warnings.warn('Initialized evaluation with the Dice coefficient. Do you know other suitable metrics?')

    # define the labels to evaluate
    labels = {1: 'WhiteMatter',
              2: 'GreyMatter',
              3: 'Hippocampus',
              4: 'Amygdala',
              5: 'Thalamus'
              }

    evaluator = eval_.SegmentationEvaluator(metrics, labels)
    return evaluator
def init_evaluator(directory: str, result_file_name: str = 'results.csv') -> eval_.Evaluator:
    """Initializes an evaluator.

    Args:
        directory (str): The directory for the results file.
        result_file_name (str): The result file name (CSV file).

    Returns:
        eval.Evaluator: An evaluator.
    """
    os.makedirs(directory, exist_ok=True)  # generate result directory, if it does not exist

    evaluator = eval_.Evaluator(eval_.ConsoleEvaluatorWriter(5))
    evaluator.add_writer(eval_.CSVEvaluatorWriter(os.path.join(directory, result_file_name)))
    evaluator.add_label(1, 'WhiteMatter')
    evaluator.add_label(2, 'GreyMatter')
    evaluator.add_label(3, 'Hippocampus')
    evaluator.add_label(4, 'Amygdala')
    evaluator.add_label(5, 'Thalamus')
    evaluator.metrics = [metric.DiceCoefficient(), metric.HausdorffDistance()]
    return evaluator
Example #13
def main(inputdir: str, csvoutputdir: str, segname: str):

    # ! THIS PARAMETER FOR THE DEFORMATION SIGMA HAS TO BE TUNED PER LABEL TO MATCH INTERRATER-VARIABILITY ! #
    sigmaarr = np.linspace(2, 8, 31)

    subjroot = '/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/MANAGE/data/robustness/preprocessed_segmented'
    csvoutputdir = os.path.join(
        '/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/MANAGE/data/robustness/segdeform/interrateroutput',
        segname)

    # make output directory if it does not already exist
    if not os.path.isdir(csvoutputdir):
        os.makedirs(csvoutputdir)

    patlist = os.listdir(subjroot)

    evaluator = pymia_eval.Evaluator(pymia_eval.ConsoleEvaluatorWriter(5))
    evaluator.add_label(1, segname)
    evaluator.add_metric(pymia_metric.DiceCoefficient())

    # for sigmaidx, sigma in enumerate(deformation_sigma):
    for sigmaval in sigmaarr:
        evaluator.add_writer(
            pymia_eval.CSVEvaluatorWriter(
                os.path.join(csvoutputdir,
                             'results_' + str(sigmaval) + '.csv')))
        for patidx, pat in enumerate(patlist):
            # read CET image
            img_orig = sitk.ReadImage(
                os.path.join(subjroot, pat, pat + '_' + segname + '.nii.gz'))
            for runidx_cet in range(0, 100):
                deformed = elasticdeform(img_orig, sigmaval)

                evaluator.evaluate(
                    img_orig, deformed,
                    pat + '_' + str(sigmaval) + '_' + str(runidx_cet))
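The call to elasticdeform() above refers to a helper that is not part of this snippet. A sketch of what such a helper could look like, using a random SimpleITK B-spline displacement (the control-point grid spacing and the interpretation of sigma as the displacement standard deviation in millimetres are assumptions):

import numpy as np
import SimpleITK as sitk


def elasticdeform(image: sitk.Image, sigma: float, grid_spacing_mm: float = 50.0) -> sitk.Image:
    """Randomly deforms a label image with a B-spline transform (illustrative sketch)."""
    mesh_size = [max(1, int(size * spacing / grid_spacing_mm))
                 for size, spacing in zip(image.GetSize(), image.GetSpacing())]
    transform = sitk.BSplineTransformInitializer(image, mesh_size)

    # draw random control-point displacements with standard deviation sigma
    params = np.random.normal(0.0, sigma, len(transform.GetParameters()))
    transform.SetParameters(params.tolist())

    # nearest-neighbour interpolation keeps the label values intact
    return sitk.Resample(image, image, transform, sitk.sitkNearestNeighbor, 0.0,
                         image.GetPixelIDValue())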
Example #14
def main(hdf_file, log_dir):
    # initialize the evaluator with the metrics and the labels to evaluate
    metrics = [metric.DiceCoefficient()]
    labels = {1: 'WHITEMATTER',
              2: 'GREYMATTER',
              3: 'HIPPOCAMPUS',
              4: 'AMYGDALA',
              5: 'THALAMUS'}
    evaluator = eval_.SegmentationEvaluator(metrics, labels)

    # we want to log the mean and standard deviation of the metrics among all subjects of the dataset
    functions = {'MEAN': np.mean, 'STD': np.std}
    statistics_aggregator = writer.StatisticsAggregator(functions=functions)
    console_writer = writer.ConsoleStatisticsWriter(functions=functions)

    # initialize TensorBoard writer
    tb = tensorboard.SummaryWriter(os.path.join(log_dir, 'logging-example-torch'))

    # setup the training datasource
    train_subjects, valid_subjects = ['Subject_1', 'Subject_2', 'Subject_3'], ['Subject_4']
    extractor = extr.DataExtractor(categories=(defs.KEY_IMAGES, defs.KEY_LABELS))
    indexing_strategy = extr.SliceIndexing()

    augmentation_transforms = [augm.RandomElasticDeformation(), augm.RandomMirror()]
    transforms = [tfm.Permute(permutation=(2, 0, 1)), tfm.Squeeze(entries=(defs.KEY_LABELS,))]
    train_transforms = tfm.ComposeTransform(augmentation_transforms + transforms)
    train_dataset = extr.PymiaDatasource(hdf_file, indexing_strategy, extractor, train_transforms,
                                         subject_subset=train_subjects)

    # setup the validation datasource
    valid_transforms = tfm.ComposeTransform([tfm.Permute(permutation=(2, 0, 1))])
    valid_dataset = extr.PymiaDatasource(hdf_file, indexing_strategy, extractor, valid_transforms,
                                         subject_subset=valid_subjects)
    direct_extractor = extr.ComposeExtractor(
        [extr.SubjectExtractor(),
         extr.ImagePropertiesExtractor(),
         extr.DataExtractor(categories=(defs.KEY_LABELS,))]
    )
    assembler = assm.SubjectAssembler(valid_dataset)

    # torch specific handling
    pytorch_train_dataset = pymia_torch.PytorchDatasetAdapter(train_dataset)
    train_loader = torch_data.dataloader.DataLoader(pytorch_train_dataset, batch_size=16, shuffle=True)

    pytorch_valid_dataset = pymia_torch.PytorchDatasetAdapter(valid_dataset)
    valid_loader = torch_data.dataloader.DataLoader(pytorch_valid_dataset, batch_size=16, shuffle=False)

    u_net = unet.UNetModel(ch_in=2, ch_out=6, n_channels=16, n_pooling=3).to(device)

    print(u_net)

    optimizer = optim.Adam(u_net.parameters(), lr=1e-3)
    train_batches = len(train_loader)

    # looping over the data in the dataset
    epochs = 100
    for epoch in range(epochs):
        u_net.train()
        print(f'Epoch {epoch + 1}/{epochs}')

        # training
        print('training')
        for i, batch in enumerate(train_loader):
            x, y = batch[defs.KEY_IMAGES].to(device), batch[defs.KEY_LABELS].to(device).long()
            logits = u_net(x)

            optimizer.zero_grad()
            loss = F.cross_entropy(logits, y)
            loss.backward()
            optimizer.step()

            tb.add_scalar('train/loss', loss.item(), epoch*train_batches + i)
            print(f'[{i + 1}/{train_batches}]\tloss: {loss.item()}')

        # validation
        print('validation')
        with torch.no_grad():
            u_net.eval()
            valid_batches = len(valid_loader)
            for i, batch in enumerate(valid_loader):
                x, sample_indices = batch[defs.KEY_IMAGES].to(device), batch[defs.KEY_SAMPLE_INDEX]

                logits = u_net(x)
                prediction = logits.argmax(dim=1, keepdim=True)

                numpy_prediction = prediction.cpu().numpy().transpose((0, 2, 3, 1))

                is_last = i == valid_batches - 1
                assembler.add_batch(numpy_prediction, sample_indices.numpy(), is_last)

                for subject_index in assembler.subjects_ready:
                    subject_prediction = assembler.get_assembled_subject(subject_index)

                    direct_sample = train_dataset.direct_extract(direct_extractor, subject_index)
                    target, image_properties = direct_sample[defs.KEY_LABELS],  direct_sample[defs.KEY_PROPERTIES]

                    # evaluate the prediction against the reference
                    evaluator.evaluate(subject_prediction[..., 0], target[..., 0], direct_sample[defs.KEY_SUBJECT])

            # calculate mean and standard deviation of each metric
            results = statistics_aggregator.calculate(evaluator.results)
            # log to TensorBoard into category train
            for result in results:
                tb.add_scalar(f'valid/{result.metric}-{result.id_}', result.value, epoch)

            console_writer.write(evaluator.results)

            # clear results such that the evaluator is ready for the next evaluation
            evaluator.clear()
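image_properties is extracted in the validation loop above but not used; pymia's conversion module can turn the assembled numpy prediction back into a SimpleITK image so it can be written to disk. A small helper as a sketch (the helper name and output file naming are assumptions):

import os

import numpy as np
import SimpleITK as sitk
import pymia.data.conversion as conversion


def save_prediction(prediction: np.ndarray, image_properties: conversion.ImageProperties,
                    subject_id: str, out_dir: str):
    """Writes an assembled (Z, Y, X) label prediction back to disk with the original image geometry."""
    os.makedirs(out_dir, exist_ok=True)
    image = conversion.NumpySimpleITKImageBridge.convert(prediction, image_properties)
    sitk.WriteImage(image, os.path.join(out_dir, f'{subject_id}_SEG.mha'), True)

Inside the validation loop this could be called as save_prediction(subject_prediction[..., 0], image_properties, direct_sample[defs.KEY_SUBJECT], log_dir).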
Example #15
def main(hdf_file, log_dir):
    # initialize the evaluator with the metrics and the labels to evaluate
    metrics = [metric.DiceCoefficient()]
    labels = {
        1: 'WHITEMATTER',
        2: 'GREYMATTER',
        3: 'HIPPOCAMPUS',
        4: 'AMYGDALA',
        5: 'THALAMUS'
    }
    evaluator = eval_.SegmentationEvaluator(metrics, labels)

    # we want to log the mean and standard deviation of the metrics among all subjects of the dataset
    functions = {'MEAN': np.mean, 'STD': np.std}
    statistics_aggregator = writer.StatisticsAggregator(functions=functions)
    console_writer = writer.ConsoleStatisticsWriter(functions=functions)

    # initialize TensorBoard writer
    summary_writer = tf.summary.create_file_writer(
        os.path.join(log_dir, 'logging-example-tensorflow'))

    # setup the training datasource
    train_subjects, valid_subjects = ['Subject_1', 'Subject_2',
                                      'Subject_3'], ['Subject_4']
    extractor = extr.DataExtractor(categories=(defs.KEY_IMAGES,
                                               defs.KEY_LABELS))
    indexing_strategy = extr.SliceIndexing()

    augmentation_transforms = [
        augm.RandomElasticDeformation(),
        augm.RandomMirror()
    ]
    transforms = [tfm.Squeeze(entries=(defs.KEY_LABELS, ))]
    train_transforms = tfm.ComposeTransform(augmentation_transforms +
                                            transforms)
    train_dataset = extr.PymiaDatasource(hdf_file,
                                         indexing_strategy,
                                         extractor,
                                         train_transforms,
                                         subject_subset=train_subjects)

    # setup the validation datasource
    batch_size = 16
    valid_transforms = tfm.ComposeTransform([])
    valid_dataset = extr.PymiaDatasource(hdf_file,
                                         indexing_strategy,
                                         extractor,
                                         valid_transforms,
                                         subject_subset=valid_subjects)
    direct_extractor = extr.ComposeExtractor([
        extr.SubjectExtractor(),
        extr.ImagePropertiesExtractor(),
        extr.DataExtractor(categories=(defs.KEY_LABELS, ))
    ])
    assembler = assm.SubjectAssembler(valid_dataset)

    # tensorflow specific handling
    train_gen_fn = pymia_tf.get_tf_generator(train_dataset)
    tf_train_dataset = tf.data.Dataset.from_generator(
        generator=train_gen_fn,
        output_types={
            defs.KEY_IMAGES: tf.float32,
            defs.KEY_LABELS: tf.int64,
            defs.KEY_SAMPLE_INDEX: tf.int64
        })
    tf_train_dataset = tf_train_dataset.batch(batch_size).shuffle(
        len(train_dataset))

    valid_gen_fn = pymia_tf.get_tf_generator(valid_dataset)
    tf_valid_dataset = tf.data.Dataset.from_generator(
        generator=valid_gen_fn,
        output_types={
            defs.KEY_IMAGES: tf.float32,
            defs.KEY_LABELS: tf.int64,
            defs.KEY_SAMPLE_INDEX: tf.int64
        })
    tf_valid_dataset = tf_valid_dataset.batch(batch_size)

    u_net = unet.build_model(channels=2,
                             num_classes=6,
                             layer_depth=3,
                             filters_root=16)

    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
    train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)

    train_batches = len(train_dataset) // batch_size

    # looping over the data in the dataset
    epochs = 100
    for epoch in range(epochs):
        print(f'Epoch {epoch + 1}/{epochs}')

        # training
        print('training')
        for i, batch in enumerate(tf_train_dataset):
            x, y = batch[defs.KEY_IMAGES], batch[defs.KEY_LABELS]

            with tf.GradientTape() as tape:
                logits = u_net(x, training=True)
                loss = tf.keras.losses.sparse_categorical_crossentropy(
                    y, logits, from_logits=True)

            grads = tape.gradient(loss, u_net.trainable_variables)
            optimizer.apply_gradients(zip(grads, u_net.trainable_variables))

            train_loss(loss)

            with summary_writer.as_default():
                tf.summary.scalar('train/loss',
                                  train_loss.result(),
                                  step=epoch * train_batches + i)
            print(
                f'[{i + 1}/{train_batches}]\tloss: {train_loss.result().numpy()}'
            )

        # validation
        print('validation')
        valid_batches = len(valid_dataset) // batch_size
        for i, batch in enumerate(tf_valid_dataset):
            x, sample_indices = batch[defs.KEY_IMAGES], batch[
                defs.KEY_SAMPLE_INDEX]

            logits = u_net(x)
            prediction = tf.expand_dims(tf.math.argmax(logits, -1), -1)

            numpy_prediction = prediction.numpy()

            is_last = i == valid_batches - 1
            assembler.add_batch(numpy_prediction, sample_indices.numpy(),
                                is_last)

            for subject_index in assembler.subjects_ready:
                subject_prediction = assembler.get_assembled_subject(
                    subject_index)

                direct_sample = train_dataset.direct_extract(
                    direct_extractor, subject_index)
                target, image_properties = direct_sample[
                    defs.KEY_LABELS], direct_sample[defs.KEY_PROPERTIES]

                # evaluate the prediction against the reference
                evaluator.evaluate(subject_prediction[..., 0], target[..., 0],
                                   direct_sample[defs.KEY_SUBJECT])

        # calculate mean and standard deviation of each metric
        results = statistics_aggregator.calculate(evaluator.results)
        # log to TensorBoard into category train
        with summary_writer.as_default():
            for result in results:
                tf.summary.scalar(f'valid/{result.metric}-{result.id_}',
                                  result.value, epoch)

        console_writer.write(evaluator.results)

        # clear results such that the evaluator is ready for the next evaluation
        evaluator.clear()
Example #16
def main(hdf_file: str, log_dir: str):
    # initialize the evaluator with the metrics and the labels to evaluate
    metrics = [metric.DiceCoefficient()]
    labels = {1: 'WHITEMATTER', 2: 'GREYMATTER', 5: 'THALAMUS'}
    evaluator = eval_.SegmentationEvaluator(metrics, labels)

    # we want to log the mean and standard deviation of the metrics among all subjects of the dataset
    functions = {'MEAN': np.mean, 'STD': np.std}
    statistics_aggregator = writer.StatisticsAggregator(functions=functions)

    # initialize TensorBoard writer
    # tb = tensorboard.SummaryWriter(os.path.join(log_dir, 'logging-example-torch'))
    tb = tf.summary.create_file_writer(
        os.path.join(log_dir, 'logging-example-tensorflow'))

    # initialize the data handling
    dataset = extr.PymiaDatasource(
        hdf_file, extr.SliceIndexing(),
        extr.DataExtractor(categories=(defs.KEY_IMAGES, )))
    gen_fn = pymia_tf.get_tf_generator(dataset)
    tf_dataset = tf.data.Dataset.from_generator(generator=gen_fn,
                                                output_types={
                                                    defs.KEY_IMAGES:
                                                    tf.float32,
                                                    defs.KEY_SAMPLE_INDEX:
                                                    tf.int64
                                                })
    loader = tf_dataset.batch(100)

    assembler = assm.SubjectAssembler(dataset)
    direct_extractor = extr.ComposeExtractor([
        extr.SubjectExtractor(),  # extraction of the subject name
        extr.ImagePropertiesExtractor(
        ),  # Extraction of image properties (origin, spacing, etc.) for storage
        extr.DataExtractor(
            categories=(defs.KEY_LABELS,
                        ))  # Extraction of "labels" entries for evaluation
    ])

    # initialize a dummy network, which returns a random prediction
    class DummyNetwork(tf.keras.Model):
        def call(self, inputs):
            return tf.random.uniform((*inputs.shape[:-1], 1),
                                     0,
                                     6,
                                     dtype=tf.int32)

    dummy_network = DummyNetwork()
    tf.random.set_seed(0)  # set seed for reproducibility

    nb_batches = len(dataset) // 2

    epochs = 10
    for epoch in range(epochs):
        print(f'Epoch {epoch + 1}/{epochs}')
        for i, batch in enumerate(loader):
            # get the data from batch and predict
            x, sample_indices = batch[defs.KEY_IMAGES], batch[
                defs.KEY_SAMPLE_INDEX]
            prediction = dummy_network(x)

            # translate the prediction to numpy
            numpy_prediction = prediction.numpy()

            # add the batch prediction to the assembler
            is_last = i == nb_batches - 1
            assembler.add_batch(numpy_prediction, sample_indices.numpy(),
                                is_last)

            # process the subjects/images that are fully assembled
            for subject_index in assembler.subjects_ready:
                subject_prediction = assembler.get_assembled_subject(
                    subject_index)

                # extract the target and image properties via direct extract
                direct_sample = dataset.direct_extract(direct_extractor,
                                                       subject_index)
                reference, image_properties = direct_sample[
                    defs.KEY_LABELS], direct_sample[defs.KEY_PROPERTIES]

                # evaluate the prediction against the reference
                evaluator.evaluate(subject_prediction[..., 0], reference[..., 0],
                                   direct_sample[defs.KEY_SUBJECT])

        # calculate mean and standard deviation of each metric
        results = statistics_aggregator.calculate(evaluator.results)
        # log to TensorBoard into category train
        for result in results:
            with tb.as_default():
                tf.summary.scalar(f'train/{result.metric}-{result.id_}',
                                  result.value, epoch)

        # clear results such that the evaluator is ready for the next evaluation
        evaluator.clear()
Example #17
File: main.py  Project: CGPiguet/MyMIALab
def atlas_creation():
    #Load the train labels_native with their transform
    wdpath = 'C:/Users/Admin/PycharmProjects/MyMIALab/data/train'
    results_labels_nii = []
    results_affine = []
    resample_labels = []

    for dirpath, subdirs, files in os.walk(wdpath):
        for x in files:
            if x.endswith("labels_native.nii.gz"):
                results_labels_nii.append(os.path.join(dirpath, x))
            if x.endswith("affine.txt"):
                results_affine.append(os.path.join(dirpath, x))

    #Resample the train labels_native with the transform
    for i in range(0, len(results_affine)):
        transform = sitk.ReadTransform(results_affine[i])
        labels_image = sitk.ReadImage(results_labels_nii[i])
        resample_image = sitk.Resample(labels_image, transform,
                                       sitk.sitkNearestNeighbor, 0,
                                       labels_image.GetPixelIDValue())
        resample_labels.append(resample_image)
        #without resample
        #resample_labels.append(labels_image)

    # Threshold the images to sort them into 5 categories
    white_matter_list = []
    grey_matter_list = []
    hippocampus_list = []
    amygdala_list = []
    thalamus_list = []
    for i in range(0, len(resample_labels)):
        white_matter_list.append(sitk.Threshold(resample_labels[i], 1, 1, 0))
        grey_matter_list.append(sitk.Threshold(resample_labels[i], 2, 2, 0))
        hippocampus_list.append(sitk.Threshold(resample_labels[i], 3, 3, 0))
        amygdala_list.append(sitk.Threshold(resample_labels[i], 4, 4, 0))
        thalamus_list.append(sitk.Threshold(resample_labels[i], 5, 5, 0))

    #sum them up and divide by their number of images to make a probability map
    white_matter_map = 0
    grey_matter_map = 0
    hippocampus_map = 0
    amygdala_map = 0
    thalamus_map = 0

    for i in range(0, len(resample_labels)):  # include the first subject in the sum as well
        white_matter_map = sitk.Add(white_matter_map, white_matter_list[i])
        grey_matter_map = sitk.Add(grey_matter_map, grey_matter_list[i])
        hippocampus_map = sitk.Add(hippocampus_map, hippocampus_list[i])
        amygdala_map = sitk.Add(amygdala_map, amygdala_list[i])
        thalamus_map = sitk.Add(thalamus_map, thalamus_list[i])

    white_matter_map = sitk.Divide(white_matter_map, len(white_matter_list))
    grey_matter_map = sitk.Divide(grey_matter_map, len(grey_matter_list))
    hippocampus_map = sitk.Divide(hippocampus_map, len(hippocampus_list))
    amygdala_map = sitk.Divide(amygdala_map, len(amygdala_list))
    thalamus_map = sitk.Divide(thalamus_map, len(thalamus_list))
    #atlas = sitk.Divide(sum_images, len(test_resample))
    #slice = sitk.GetArrayFromImage(atlas)[90,:,:]
    #plt.imshow(slice)

    sitk.WriteImage(
        hippocampus_map,
        'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/hippocampus_map_no_threshold.nii',
        False)
    sitk.WriteImage(
        white_matter_map,
        'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/white_matter_map_no_threshold.nii',
        False)
    sitk.WriteImage(
        grey_matter_map,
        'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/grey_matter_map_no_threshold.nii',
        False)
    sitk.WriteImage(
        amygdala_map,
        'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/amygdala_map_no_threshold.nii',
        False)
    sitk.WriteImage(
        thalamus_map,
        'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/thalamus_map_no_threshold.nii',
        False)

    # Threshold the 5 different maps to get a binary map
    white_matter_map = sitk.BinaryThreshold(white_matter_map, 0, 1, 1, 0)
    grey_matter_map = sitk.BinaryThreshold(grey_matter_map, 0, 2, 2, 0)
    hippocampus_map = sitk.BinaryThreshold(hippocampus_map, 0, 3, 3, 0)
    amygdala_map = sitk.BinaryThreshold(amygdala_map, 0, 4, 4, 0)
    thalamus_map = sitk.BinaryThreshold(thalamus_map, 0, 5, 5, 0)

    #Save the images
    sitk.WriteImage(
        grey_matter_map,
        'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/grey_matter_map.nii',
        False)
    sitk.WriteImage(
        white_matter_map,
        'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/white_matter_map.nii',
        False)
    sitk.WriteImage(
        hippocampus_map,
        'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/hippocampus_map.nii',
        False)
    sitk.WriteImage(
        amygdala_map,
        'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/amygdala_map.nii',
        False)
    sitk.WriteImage(
        thalamus_map,
        'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/thalamus_map.nii',
        False)

    #Load the test labels_native and their transform
    wdpath_test = 'C:/Users/Admin/PycharmProjects/MyMIALab/data/test'
    test_results_nii = []
    test_results_affine = []
    test_resample = []
    for dirpath, subdirs, files in os.walk(wdpath_test):
        for x in files:
            if x.endswith("labels_native.nii.gz"):
                test_results_nii.append(os.path.join(dirpath, x))
            if x.endswith("affine.txt"):
                test_results_affine.append(os.path.join(dirpath, x))

    #Resample the labels_native with the transform
    for i in range(0, len(test_results_affine)):
        test_transform = sitk.ReadTransform(test_results_affine[i])
        test_image = sitk.ReadImage(test_results_nii[i])
        test_resample_image = sitk.Resample(test_image, test_transform,
                                            sitk.sitkNearestNeighbor)
        test_resample.append(test_resample_image)
        #Without resample
        #test_resample.append(test_image)

    #Save the first test patient labels
    sitk.WriteImage(
        test_resample[0],
        'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/test.nii',
        False)

    # Compute the Dice coefficient (and further overlap metrics)
    label_list = [
        'White Matter', 'Grey Matter', 'Hippocampus', 'Amygdala', 'Thalamus'
    ]
    map_list = [
        white_matter_map, grey_matter_map, hippocampus_map, amygdala_map,
        thalamus_map
    ]
    dice_list = []
    for i in range(0, 5):
        evaluator = eval_.Evaluator(eval_.ConsoleEvaluatorWriter(5))
        evaluator.metrics = [
            metric.DiceCoefficient(),
            metric.Sensitivity(),
            metric.Precision(),
            metric.Fallout()
        ]
        evaluator.add_writer(
            eval_.CSVEvaluatorWriter(
                os.path.join(
                    'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result',
                    'Results_' + label_list[i] + '.csv')))
        evaluator.add_label(i + 1, label_list[i])
        for j in range(0, len(test_resample)):
            evaluator.evaluate(test_resample[j], map_list[i],
                               'Patient ' + str(j))
Example #18
def main(hdf_file: str, log_dir: str):
    # initialize the evaluator with the metrics and the labels to evaluate
    metrics = [metric.DiceCoefficient()]
    labels = {1: 'WHITEMATTER', 2: 'GREYMATTER', 5: 'THALAMUS'}
    evaluator = eval_.SegmentationEvaluator(metrics, labels)

    # we want to log the mean and standard deviation of the metrics among all subjects of the dataset
    functions = {'MEAN': np.mean, 'STD': np.std}
    statistics_aggregator = writer.StatisticsAggregator(functions=functions)

    # initialize TensorBoard writer
    tb = tensorboard.SummaryWriter(
        os.path.join(log_dir, 'logging-example-torch'))

    # initialize the data handling
    transform = tfm.Permute(permutation=(2, 0, 1), entries=(defs.KEY_IMAGES, ))
    dataset = extr.PymiaDatasource(
        hdf_file, extr.SliceIndexing(),
        extr.DataExtractor(categories=(defs.KEY_IMAGES, )), transform)
    pytorch_dataset = pymia_torch.PytorchDatasetAdapter(dataset)
    loader = torch_data.dataloader.DataLoader(pytorch_dataset,
                                              batch_size=100,
                                              shuffle=False)

    assembler = assm.SubjectAssembler(dataset)
    direct_extractor = extr.ComposeExtractor([
        extr.SubjectExtractor(),  # extraction of the subject name
        extr.ImagePropertiesExtractor(
        ),  # Extraction of image properties (origin, spacing, etc.) for storage
        extr.DataExtractor(
            categories=(defs.KEY_LABELS,
                        ))  # Extraction of "labels" entries for evaluation
    ])

    # initialize a dummy network, which returns a random prediction
    class DummyNetwork(nn.Module):
        def forward(self, x):
            return torch.randint(0, 6, (x.size(0), 1, *x.size()[2:]))

    dummy_network = DummyNetwork()
    torch.manual_seed(0)  # set seed for reproducibility

    nb_batches = len(loader)

    epochs = 10
    for epoch in range(epochs):
        print(f'Epoch {epoch + 1}/{epochs}')
        for i, batch in enumerate(loader):
            # get the data from batch and predict
            x, sample_indices = batch[defs.KEY_IMAGES], batch[
                defs.KEY_SAMPLE_INDEX]
            prediction = dummy_network(x)

            # translate the prediction to numpy and back to (B)HWC (channel last)
            numpy_prediction = prediction.numpy().transpose((0, 2, 3, 1))

            # add the batch prediction to the assembler
            is_last = i == nb_batches - 1
            assembler.add_batch(numpy_prediction, sample_indices.numpy(),
                                is_last)

            # process the subjects/images that are fully assembled
            for subject_index in assembler.subjects_ready:
                subject_prediction = assembler.get_assembled_subject(
                    subject_index)

                # extract the target and image properties via direct extract
                direct_sample = dataset.direct_extract(direct_extractor,
                                                       subject_index)
                reference, image_properties = direct_sample[
                    defs.KEY_LABELS], direct_sample[defs.KEY_PROPERTIES]

                # evaluate the prediction against the reference
                evaluator.evaluate(subject_prediction[..., 0], reference[..., 0],
                                   direct_sample[defs.KEY_SUBJECT])

        # calculate mean and standard deviation of each metric
        results = statistics_aggregator.calculate(evaluator.results)
        # log to TensorBoard into category train
        for result in results:
            tb.add_scalar(f'train/{result.metric}-{result.id_}', result.value,
                          epoch)

        # clear results such that the evaluator is ready for the next evaluation
        evaluator.clear()
Example #19
def load_atlas_custom_images(wdpath):
    # params_list = list(data_batch.items())
    # print(params_list[0] )
    t1w_list = []
    t2w_list = []
    gt_label_list = []
    brain_mask_list = []
    transform_list = []

    #Load the train labels_native with their transform
    for dirpath, subdirs, files in os.walk(wdpath):
        # print("dirpath", dirpath)
        # print("subdirs", subdirs)
        # print("files", files)
        for x in files:
            if x.endswith("T1native.nii.gz"):
                t1w_list.append(sitk.ReadImage(os.path.join(dirpath, x)))
            elif x.endswith("T2native.nii.gz"):
                t2w_list.append(sitk.ReadImage(os.path.join(dirpath, x)))
            elif x.endswith("labels_native.nii.gz"):
                gt_label_list.append(sitk.ReadImage(os.path.join(dirpath, x)))
            elif x.endswith("Brainmasknative.nii.gz"):
                brain_mask_list.append(sitk.ReadImage(os.path.join(dirpath,
                                                                   x)))
            elif x.endswith("affine.txt"):
                transform_list.append(
                    sitk.ReadTransform(os.path.join(dirpath, x)))
            # else:
            #     print("Problem in CustomAtlas in folder", dirpath)

    # Resample and threshold to get the labels
    white_matter_list = []
    grey_matter_list = []
    hippocampus_list = []
    amygdala_list = []
    thalamus_list = []
    for i in range(0, len(gt_label_list)):
        resample_img = sitk.Resample(gt_label_list[i], atlas_t1,
                                     transform_list[i],
                                     sitk.sitkNearestNeighbor, 0,
                                     gt_label_list[i].GetPixelIDValue())
        white_matter_list.append(sitk.Threshold(resample_img, 1, 1, 0))
        grey_matter_list.append(sitk.Threshold(resample_img, 2, 2, 0))
        hippocampus_list.append(sitk.Threshold(resample_img, 3, 3, 0))
        amygdala_list.append(sitk.Threshold(resample_img, 4, 4, 0))
        thalamus_list.append(sitk.Threshold(resample_img, 5, 5, 0))

    #Save each label from first data
    path_to_save = '../bin/custom_atlas_result/'
    if not os.path.exists(path_to_save):
        os.makedirs(path_to_save)
    sitk.WriteImage(hippocampus_list[0],
                    os.path.join(path_to_save, 'Hippocampus_label.nii'), True)
    sitk.WriteImage(white_matter_list[0],
                    os.path.join(path_to_save, 'White_matter_label.nii'), True)
    sitk.WriteImage(grey_matter_list[0],
                    os.path.join(path_to_save, 'Grey_matter_label.nii'), True)
    sitk.WriteImage(amygdala_list[0],
                    os.path.join(path_to_save, 'Amygdala_label.nii'), True)
    sitk.WriteImage(thalamus_list[0],
                    os.path.join(path_to_save, 'Thalamus_label.nii'), True)

    #Save an image resampled to show segmentation
    sitk.WriteImage(gt_label_list[0],
                    os.path.join(path_to_save, 'Train_image_1_resampled.nii'),
                    True)

    # sum them up and divide by their number of images to make a probability map
    white_matter_map = 0
    grey_matter_map = 0
    hippocampus_map = 0
    amygdala_map = 0
    thalamus_map = 0
    for i in range(0, len(gt_label_list)):  # include the first subject in the sum as well
        white_matter_map = sitk.Add(white_matter_map, white_matter_list[i])
        grey_matter_map = sitk.Add(grey_matter_map, grey_matter_list[i])
        hippocampus_map = sitk.Add(hippocampus_map, hippocampus_list[i])
        amygdala_map = sitk.Add(amygdala_map, amygdala_list[i])
        thalamus_map = sitk.Add(thalamus_map, thalamus_list[i])

    white_matter_map = sitk.Divide(white_matter_map, len(white_matter_list))
    grey_matter_map = sitk.Divide(grey_matter_map, len(grey_matter_list))
    hippocampus_map = sitk.Divide(hippocampus_map, len(hippocampus_list))
    amygdala_map = sitk.Divide(amygdala_map, len(amygdala_list))
    thalamus_map = sitk.Divide(thalamus_map, len(thalamus_list))
    #atlas = sitk.Divide(sum_images, len(test_resample))
    #slice = sitk.GetArrayFromImage(atlas)[90,:,:]
    #plt.imshow(slice)

    # Save the maps without thresholding
    path_to_save = '../bin/custom_atlas_result/'
    if not os.path.exists(path_to_save):
        os.makedirs(path_to_save)
    sitk.WriteImage(
        grey_matter_map,
        os.path.join(path_to_save, 'grey_matter_map_no_threshold.nii'), True)
    sitk.WriteImage(
        white_matter_map,
        os.path.join(path_to_save, 'white_matter_map_no_threshold.nii'), True)
    sitk.WriteImage(
        hippocampus_map,
        os.path.join(path_to_save, 'hippocampus_map_no_threshold.nii'), True)
    sitk.WriteImage(
        amygdala_map,
        os.path.join(path_to_save, 'amygdala_map_no_threshold.nii'), True)
    sitk.WriteImage(
        thalamus_map,
        os.path.join(path_to_save, 'thalamus_map_no_threshold.nii'), True)

    # Threshold the 5 different maps to get a binary map
    white_matter_map = sitk.BinaryThreshold(white_matter_map, 0.3, 1, 1, 0)
    grey_matter_map = sitk.BinaryThreshold(grey_matter_map, 0.6, 2, 2, 0)
    hippocampus_map = sitk.BinaryThreshold(hippocampus_map, 0.9, 3, 3, 0)
    amygdala_map = sitk.BinaryThreshold(amygdala_map, 1.2, 4, 4, 0)
    thalamus_map = sitk.BinaryThreshold(thalamus_map, 1.5, 5, 5, 0)

    #Save the images
    path_to_save = '../bin/custom_atlas_result/'
    if not os.path.exists(path_to_save):
        os.makedirs(path_to_save)
    sitk.WriteImage(grey_matter_map,
                    os.path.join(path_to_save, 'grey_matter_map.nii'), True)
    sitk.WriteImage(white_matter_map,
                    os.path.join(path_to_save, 'white_matter_map.nii'), True)
    sitk.WriteImage(hippocampus_map,
                    os.path.join(path_to_save, 'hippocampus_map.nii'), True)
    sitk.WriteImage(amygdala_map, os.path.join(path_to_save,
                                               'amygdala_map.nii'), True)
    sitk.WriteImage(thalamus_map, os.path.join(path_to_save,
                                               'thalamus_map.nii'), True)

    # Load the test labels_native and their transform
    path_to_test = '../data/test'
    test_gt_label_list = []
    test_transform_list = []

    for dirpath, subdirs, files in os.walk(path_to_test):
        for x in files:
            if x.endswith("labels_native.nii.gz"):
                test_gt_label_list.append(
                    sitk.ReadImage(os.path.join(dirpath, x)))
            if x.endswith("affine.txt"):
                test_transform_list.append(
                    sitk.ReadTransform(os.path.join(dirpath, x)))

    #Resample the labels_native with the transform
    test_resample_img = []
    for i in range(0, len(test_gt_label_list)):
        resample_img = sitk.Resample(test_gt_label_list[i], atlas_t1,
                                     test_transform_list[i],
                                     sitk.sitkNearestNeighbor, 0,
                                     test_gt_label_list[i].GetPixelIDValue())

        test_resample_img.append(resample_img)

    sitk.WriteImage(test_resample_img[0],
                    os.path.join(path_to_save, 'Test_data_1_resampled.nii'),
                    True)

    # Save the first test patient labels
    # path_to_save = '../bin/temp_test_result/'
    # if not os.path.exists(path_to_save):
    #     os.makedirs(path_to_save)
    # sitk.WriteImage(test_resample_img[0], os.path.join(path_to_save, 'FirstPatienFromTestList.nii'), False)

    # Compute the Dice coefficient (and the Hausdorff distance)
    label_list = [
        'White Matter', 'Grey Matter', 'Hippocampus', 'Amygdala', 'Thalamus'
    ]
    map_list = [
        white_matter_map, grey_matter_map, hippocampus_map, amygdala_map,
        thalamus_map
    ]
    dice_list = []

    path_to_save = '../bin/DiceTestResult/'
    if not os.path.exists(path_to_save):
        os.makedirs(path_to_save)
    for i in range(0, 5):
        evaluator = eval_.Evaluator(eval_.ConsoleEvaluatorWriter(5))
        evaluator.metrics = [
            metric.DiceCoefficient(),
            metric.HausdorffDistance()
        ]
        evaluator.add_writer(
            eval_.CSVEvaluatorWriter(
                os.path.join(path_to_save,
                             'DiceResults_' + label_list[i] + '.csv')))
        evaluator.add_label(i + 1, label_list[i])
        for j in range(0, len(test_resample_img)):
            evaluator.evaluate(test_resample_img[j], map_list[i],
                               'Patient ' + str(j))

    print("END Custom loadAtlas")