def evaluate_individual_metrics_probably_with_ids_no_pred(
        load_y,
        load_x,
        predict,
        metrics: dict,
        test_ids,
        results_path,
        exist_ok=False):
    assert len(metrics) > 0, 'No metric provided'
    os.makedirs(results_path, exist_ok=exist_ok)

    results = defaultdict(dict)
    for _id in tqdm(test_ids):
        target = load_y(_id)
        prediction = predict(load_x(_id))

        for metric_name, metric in metrics.items():
            # a metric may optionally accept the case identifier as a third
            # argument; otherwise fall back to the plain (target, prediction) call
            try:
                results[metric_name][_id] = metric(target, prediction, _id)
            except TypeError:
                results[metric_name][_id] = metric(target, prediction)

    for metric_name, result in results.items():
        save_json(result,
                  os.path.join(results_path, metric_name + '.json'),
                  indent=0)
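
A minimal usage sketch for the function above, not from the original source: dataset, predict, test_ids and the results directory are placeholders, and the import path for dice_score is an assumption.

from dpipe.im.metrics import dice_score  # assumed import path

evaluate_individual_metrics_probably_with_ids_no_pred(
    load_y=dataset.load_gt,             # ground-truth mask by case id
    load_x=dataset.load_image,          # input image by case id
    predict=predict,                    # image -> probability map
    metrics={'dice': lambda gt, pred: dice_score(gt.astype(bool), pred >= .5)},
    test_ids=test_ids,
    results_path='results/individual',  # writes one <metric>.json per metric
    exist_ok=True)

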
def evaluate_individual_metrics_with_hm(load_y,
                                        load_x,
                                        predict,
                                        metrics: dict,
                                        test_ids,
                                        train_ids,
                                        results_path,
                                        exist_ok=False):
    assert len(metrics) > 0, 'No metric provided'
    os.makedirs(results_path, exist_ok=exist_ok)

    # pool all training image intensities into a single flat reference array
    # used for histogram matching below
    all_train_img = np.float16([])
    for _id in train_ids:
        all_train_img = np.concatenate(
            (all_train_img, np.float16(load_x(_id)).ravel()))

    results = defaultdict(dict)
    for _id in tqdm(test_ids):
        target = load_y(_id)
        image = load_x(_id)
        # histogram-match the test image to the pooled training intensities,
        # then restore its shape and cast to float32 before prediction
        prediction = predict(
            np.float32(
                np.reshape(match_histograms(image.ravel(), all_train_img),
                           image.shape)))

        for metric_name, metric in metrics.items():
            try:
                results[metric_name][_id] = metric(target, prediction, _id)
            except TypeError:
                results[metric_name][_id] = metric(target, prediction)

    for metric_name, result in results.items():
        save_json(result,
                  os.path.join(results_path, metric_name + '.json'),
                  indent=0)
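
The function above differs from the previous one only in its intensity normalization: each test image is histogram-matched to the pooled training intensities before prediction. A standalone sketch of that step, assuming skimage is available and that load_x, train_ids and test_id are placeholders:

import numpy as np
from skimage.exposure import match_histograms

# pooled reference distribution, built once from all training images
reference = np.concatenate([np.float16(load_x(i)).ravel() for i in train_ids])

image = load_x(test_id)
# match the flattened image to the reference, restore its shape,
# and cast to float32 for the network
matched = np.float32(
    np.reshape(match_histograms(image.ravel(), reference), image.shape))
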
Example #3
def evaluate_individual_metrics_with_prc(load_y_true,
                                         metrics: dict,
                                         predictions_path,
                                         logits_path,
                                         results_path,
                                         exist_ok=False):
    assert len(metrics) > 0, 'No metric provided'
    os.makedirs(results_path, exist_ok=exist_ok)

    results = defaultdict(dict)
    for identifier, prediction in tqdm(load_from_folder(predictions_path)):
        target = load_y_true(identifier)

        for metric_name, metric in metrics.items():
            if metric_name == 'prc_records':
                # the precision-recall metric additionally needs the raw logits
                logit = load_pred(identifier, logits_path)
                results[metric_name][identifier] = metric(
                    target, prediction, logit)
            else:
                results[metric_name][identifier] = metric(target, prediction)

    for metric_name, result in results.items():
        save_json(result,
                  os.path.join(results_path, metric_name + '.json'),
                  indent=0)
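
Only the 'prc_records' entry receives the raw logits in addition to the thresholded prediction. The original metric is not shown here; a hypothetical implementation of that shape, built on scikit-learn, could look like this:

import numpy as np
from sklearn.metrics import precision_recall_curve

def prc_records(target, prediction, logit):
    # prediction is accepted to match the call above but is not needed here;
    # precision-recall points are computed from the continuous logits
    precision, recall, thresholds = precision_recall_curve(
        np.asarray(target).ravel().astype(int), np.asarray(logit).ravel())
    # lists keep the result JSON-serializable for save_json
    return {'precision': precision.tolist(),
            'recall': recall.tolist(),
            'thresholds': thresholds.tolist()}
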
def evaluate_individual_metrics_probably_with_ids(load_y_true,
                                                  metrics: dict,
                                                  predictions_path,
                                                  results_path,
                                                  exist_ok=False):
    assert len(metrics) > 0, 'No metric provided'
    os.makedirs(results_path, exist_ok=exist_ok)

    results = defaultdict(dict)
    for identifier, prediction in tqdm(load_from_folder(predictions_path)):
        target = load_y_true(identifier)

        for metric_name, metric in metrics.items():
            try:
                results[metric_name][identifier] = metric(
                    target, prediction, identifier)
            except TypeError:
                results[metric_name][identifier] = metric(target, prediction)

    for metric_name, result in results.items():
        save_json(result,
                  os.path.join(results_path, metric_name + '.json'),
                  indent=0)
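
The try/except TypeError dispatch above lets the metrics dict mix two call signatures: plain (target, prediction) metrics and metrics that also need the case identifier. A hypothetical pair, where dataset.spacing and the tolerance value are assumptions:

def dice(target, prediction):
    return dice_score(target.astype(bool), prediction >= .5)

def sdice(target, prediction, identifier):
    # the identifier is used to look up per-case voxel spacing (assumed helper)
    return surface_dice(target.astype(bool), prediction >= .5,
                        spacing=dataset.spacing(identifier), tolerance=1.0)

metrics = {'dice': dice, 'surface_dice': sdice}
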
Example #5
@slicewisely
@add_extract_dims(1, 2)
def predict(image):
    return inference_step(image, architecture=model, activation=torch.sigmoid)


# metrics
individual_metrics = {
    'dice': lambda gt, pred: dice_score(gt.astype(bool), pred >= .5)
}
val_metrics = convert_to_aggregated(individual_metrics)


# run experiment
logger = TBLogger(EXPERIMENT_PATH / FOLD / 'logs')
save_json(CONFIG, EXPERIMENT_PATH / 'config.json')
train(train_step, batch_iter, n_epochs=CONFIG['n_epochs'], logger=logger,
      validate=lambda: compute_metrics(predict, dataset.load_image, dataset.load_gt, val_ids, val_metrics),
      architecture=model, optimizer=optimizer, criterion=criterion, lr=CONFIG['lr'])
save_model_state(model, EXPERIMENT_PATH / FOLD / 'model.pth')

commands.predict(
    ids=test_ids,
    output_path=EXPERIMENT_PATH / FOLD / 'predictions',
    load_x=dataset.load_image,
    predict_fn=predict
)
commands.evaluate_individual_metrics(
    load_y_true=dataset.load_gt,
    metrics=individual_metrics,
    predictions_path=EXPERIMENT_PATH / FOLD / 'predictions',

Example #6
    for id1, id2 in tqdm(zip(test_ids1, test_ids2)):
        predict1 = load_numpy(
            exp_path / f'experiment_{n}/test_predictions_cancer500/{id1}.npy',
            decompress=True)
        predict2 = load_numpy(
            exp_path / f'experiment_{n}/test_predictions_cancer500/{id2}.npy',
            decompress=True)
        if predict1.shape == predict2.shape:
            spacing = cfg.dataset_to_test.spacing(id1)
            # if either thresholded mask is empty, the overlap metrics for
            # this pair are defined as zero
            if np.sum(predict1 >= 0.5) == 0 or np.sum(predict2 >= 0.5) == 0:
                dice_records[f'{id1}_{id2}'] = 0.0
                sdice05_records[f'{id1}_{id2}'] = 0.0
                sdice10_records[f'{id1}_{id2}'] = 0.0
            else:
                dice_records[f'{id1}_{id2}'] = dice_score(
                    predict1 >= 0.5, predict2 >= 0.5)
                sdice10_records[f'{id1}_{id2}'] = surface_dice(predict1 >= 0.5,
                                                               predict2 >= 0.5,
                                                               spacing=spacing,
                                                               tolerance=1.0)
                sdice05_records[f'{id1}_{id2}'] = surface_dice(predict1 >= 0.5,
                                                               predict2 >= 0.5,
                                                               spacing=spacing,
                                                               tolerance=0.5)

    dice_path = exp_path / f'experiment_{n}/test_pairwise_metrics/dice.json'
    sdice05_path = exp_path / f'experiment_{n}/test_pairwise_metrics/sdice_0.5.json'

    save_json(dice_records, dice_path, indent=0)
    save_json(sdice05_records, sdice05_path, indent=0)
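
As a quick sanity check of what the pairwise Dice values above represent, a toy computation with plain numpy (illustrative only; the examples themselves use dice_score):

import numpy as np

a = np.zeros((4, 4), bool)
b = np.zeros((4, 4), bool)
a[1:3, 1:3] = True   # 4 foreground voxels
b[1:3, 1:4] = True   # 6 foreground voxels, 4 of them shared with a
dice = 2 * (a & b).sum() / (a.sum() + b.sum())   # 2 * 4 / (4 + 6) = 0.8
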
Example #7
import sys
from pathlib import Path

from dpipe.split import train_val_test_split
from dpipe.io import save_json

from two_and_half_d.dataset import BraTS2013

BRATS_PATH = Path(sys.argv[1])
SPLIT_PATH = Path(sys.argv[2])
VAL_SIZE = 1
N_FOLDS = 5
RANDOM_STATE = 42

save_json(
    train_val_test_split(BraTS2013(BRATS_PATH).ids,
                         val_size=VAL_SIZE,
                         n_splits=N_FOLDS,
                         random_state=RANDOM_STATE), SPLIT_PATH)
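
train_val_test_split produces one (train, val, test) triple of ids per fold, so the saved JSON can be read back fold by fold. A sketch of consuming it, assuming dpipe.io.load_json and using the first fold:

from dpipe.io import load_json

splits = load_json(SPLIT_PATH)
train_ids, val_ids, test_ids = splits[0]  # ids for the first of the N_FOLDS folds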