Example #1
0
def get_results(path, metric='dice', metrics_dir='test_metrics'):
    """Collect per-id ``metric`` scores from consecutive ``experiment_{i}`` folders.

    Scans ``path/experiment_0``, ``path/experiment_1``, ... until the first
    missing folder, reading ``{metrics_dir}/{metric}.json`` from each one.

    Parameters
    ----------
    path: str
        Root folder that contains the ``experiment_{i}`` subfolders.
    metric: str
        Name of the metric JSON file to read (without extension).
    metrics_dir: str
        Subfolder of each experiment that holds the metric files.

    Returns
    -------
    scores, ids, n_experiments
        ``np.ndarray`` of all metric values, ``np.ndarray`` of the matching
        record ids, and the number of experiment folders found.
    """
    from os.path import exists, join as jp
    scores = []
    ids = []
    i = 0
    while exists(jp(path, f'experiment_{i}')):
        # build the file path with `jp` as well, instead of mixing in
        # manual '/'-concatenation (the original f-string path)
        temp_json = load_json(
            jp(path, f'experiment_{i}', metrics_dir, f'{metric}.json'))
        # dict views are iterable; no need to wrap them in list() first
        scores.extend(temp_json.values())
        ids.extend(temp_json.keys())
        i += 1
    return np.array(scores), np.array(ids), i
Example #2
0
def exp2prc_df(exp_path, n_val=5, specific_ids=None):
    """Constructs pandas DataFrame with prc data from all predictions in ``exp_path``.

    Reads ``experiment_{n}/test_metrics/prc_records.json`` for each of the
    ``n_val`` folds and concatenates all records into a single DataFrame,
    tagging every record with its case id.

    Parameters
    ----------
    exp_path: str
        Root folder containing the ``experiment_{n}`` subfolders.
    n_val: int
        Number of validation folds (experiment folders) to read.
    specific_ids: Collection[str], optional
        If given, only records whose id is in this collection are kept.

    Returns
    -------
    pd.DataFrame
        Concatenated prc records with an added ``'id'`` column.
    """
    dfs = []
    for n in range(n_val):
        prc_path = jp(exp_path, f'experiment_{n}', 'test_metrics',
                      'prc_records.json')
        prc_dicts = load_json(prc_path)

        for _id, records in prc_dicts.items():
            # guard clause instead of duplicating the append logic in
            # two branches (the original repeated it verbatim)
            if specific_ids is not None and _id not in specific_ids:
                continue
            # plain loop for the side effect — a list comprehension used
            # only for `d.update(...)` builds a throwaway list of Nones
            for d in records:
                d['id'] = _id
            dfs.append(pd.DataFrame.from_records(records))

    df = pd.concat(dfs)
    return df
Example #3
0
def load_results(experiment_path):
    """Aggregate per-fold metric files into one mapping.

    Walks every ``experiment*`` folder under ``experiment_path`` and, for each
    combination of the module-level ``sets`` and ``metrics``, merges the
    contents of ``{set}_metrics/{metric}.json`` into
    ``results['{set}_{metric}']``. Missing files are skipped silently.
    Iterable metric values are flattened to 1-D numpy arrays.
    """
    results = defaultdict(dict)

    for fold_name in os.listdir(experiment_path):
        # only fold folders are of interest; skip everything else early
        if not fold_name.startswith('experiment'):
            continue
        for s in sets:
            for metric in metrics:
                json_path = join(experiment_path, fold_name,
                                 s + '_metrics', metric + '.json')
                try:
                    partial_result = load_json(json_path)
                except FileNotFoundError:
                    # this set/metric was not produced for this fold
                    continue
                # peek at one value to decide whether flattening is needed
                first_value = partial_result[list(partial_result.keys())[0]]
                if hasattr(first_value, '__iter__'):
                    partial_result = {
                        i: np.array(r).flatten()
                        for i, r in partial_result.items()
                    }
                results[f'{s}_{metric}'].update(partial_result)

    return results
Example #4
0
# CLI arguments (argv[1] is presumably consumed elsewhere — TODO confirm)
SPLIT_PATH = Path(sys.argv[2])       # JSON file with the cross-validation splits
EXPERIMENT_PATH = Path(sys.argv[3])  # experiment output directory
FOLD = sys.argv[4]                   # fold index as a string; converted with int() below

# Default training configuration; may be overridden by an optional config file.
CONFIG = {
    'source_slice_spacing': 1.,
    # 9 evenly spaced target spacings from 1 to 5 — presumably mm; verify
    'target_slice_spacing': np.linspace(1, 5, 9),
    'batch_size': 5,
    'batches_per_epoch': 100,
    'n_epochs': 300,
    'lr': 1e-4,
    'device': 'cuda',
}

# Optional 5th CLI argument: path to a JSON config that overrides the defaults.
# Best-effort: silently keep the defaults when the argument or file is absent.
try:
    CONFIG.update(load_json(sys.argv[5]))
except (IndexError, FileNotFoundError):
    pass

# Training patch size in voxels — assumes (x, y, z) axis order; TODO confirm
PATCH_SIZE = np.array([64, 64, 32])

# dataset
raw_dataset = BraTS2013(BRATS_PATH)
# crop to the brain bounding box and min-max scale each image along axis 0
dataset = apply(CropToBrain(raw_dataset), load_image=partial(min_max_scale, axes=0))
# resample to the configured source slice spacing; cache_methods memoizes dataset methods
train_dataset = cache_methods(ChangeSliceSpacing(dataset, new_slice_spacing=CONFIG['source_slice_spacing']))

# cross validation
split = load_json(SPLIT_PATH)
# split is indexed by fold: each entry holds (train_ids, val_ids, test_ids)
train_ids, val_ids, test_ids = split[int(FOLD)]

# batch iterator