Example 1
votes: 0
def run_main(context):
    """Plot metadata clustering decision boundaries for each data split.

    Loads the saved train/valid/test split and the pre-fitted clustering
    models from ``context["path_output"]``, gathers every metadata value
    per subset, prints min/max/median for each metadata type, and writes
    one decision-boundary plot per metadata type under
    ``<path_output>/cluster_metadata``.

    Args:
        context (dict): experiment configuration; must provide
            "path_output", "path_data", "contrast_train_validation" and
            "contrast_test".
    """
    no_transform = torch_transforms.Compose([
        imed_transforms.CenterCrop([128, 128]),
        imed_transforms.NumpyToTensor(),
        imed_transforms.NormalizeInstance(),
    ])

    out_dir = context["path_output"]
    split_dct = joblib.load(os.path.join(out_dir, "split_datasets.joblib"))
    metadata_dct = {}
    for subset in ['train', 'valid', 'test']:
        metadata_dct[subset] = {}
        ds = imed_loader.BidsDataset(context["path_data"],
                                     subject_lst=split_dct[subset],
                                     contrast_lst=context["contrast_train_validation"]
                                     if subset != "test" else context["contrast_test"],
                                     transform=no_transform,
                                     slice_filter_fn=imed_loader_utils.SliceFilter())

        for m in metadata_type:
            # BUGFIX: the membership test must look inside the per-subset
            # dict. The original checked the outer dict (whose keys are the
            # subset names), so the merge branch could never be taken for a
            # metadata type and existing values would be overwritten.
            if m in metadata_dct[subset]:
                # Merge the dataset's values with those already collected.
                metadata_dct[subset][m] = [v for m_lst in [metadata_dct[subset][m], ds.metadata[m]] for v in m_lst]
            else:
                metadata_dct[subset][m] = ds.metadata[m]

    cluster_dct = joblib.load(os.path.join(out_dir, "clustering_models.joblib"))

    out_dir = os.path.join(out_dir, "cluster_metadata")
    # exist_ok avoids the race between an isdir() check and makedirs().
    os.makedirs(out_dir, exist_ok=True)

    for m in metadata_type:
        # Flatten values across all three subsets for the summary stats.
        values = [v for s in ['train', 'valid', 'test'] for v in metadata_dct[s][m]]
        print('\n{}: Min={}, Max={}, Median={}'.format(m, min(values), max(values), np.median(values)))
        plot_decision_boundaries(metadata_dct, cluster_dct[m], metadata_range[m], m, os.path.join(out_dir, m + '.png'))
Example 2
votes: 0
def run_main(args):
    """Report the foreground/background class balance of a BIDS dataset.

    Reads the JSON config given by ``args.c``, splits the dataset into
    train/valid/test, loads each subset slice by slice, computes the
    percentage of foreground (label == 1) voxels per axial slice, and
    prints balance statistics per subset and for the full set.

    Args:
        args: parsed CLI arguments; ``args.c`` is the path to the JSON
            configuration file.
    """
    with open(args.c, "r") as fhandle:
        context = json.load(fhandle)

    transform_lst = torch_transforms.Compose([
        imed_transforms.Resample(wspace=0.75, hspace=0.75),
        imed_transforms.CenterCrop([128, 128]),
        imed_transforms.NumpyToTensor(),
        imed_transforms.NormalizeInstance(),
    ])

    train_lst, valid_lst, test_lst = imed_loader_utils.split_dataset(
        context["bids_path"], context["center_test"], context["split_method"],
        context["random_seed"])

    balance_dct = {}
    for ds_lst, ds_name in zip([train_lst, valid_lst, test_lst],
                               ['train', 'valid', 'test']):
        print("\nLoading {} set.\n".format(ds_name))
        ds = imed_loader.BidsDataset(
            context["bids_path"],
            subject_lst=ds_lst,
            target_suffix=context["target_suffix"],
            contrast_lst=context["contrast_test"]
            if ds_name == 'test' else context["contrast_train_validation"],
            metadata_choice=context["metadata"],
            contrast_balance=context["contrast_balance"],
            transform=transform_lst,
            slice_filter_fn=imed_utils.SliceFilter())

        print("Loaded {} axial slices for the {} set.".format(
            len(ds), ds_name))
        ds_loader = DataLoader(ds,
                               batch_size=1,
                               shuffle=False,
                               pin_memory=False,
                               collate_fn=imed_loader_utils.imed_collate,
                               num_workers=1)

        balance_lst = []
        for i, batch in enumerate(ds_loader):
            # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in
            # 1.24 — astype(int) is the supported equivalent.
            gt_sample = batch["gt"].numpy().astype(int)[0, 0, :, :]
            nb_ones = (gt_sample == 1).sum()
            nb_voxels = gt_sample.size
            # Percentage of foreground voxels in this slice.
            balance_lst.append(nb_ones * 100.0 / nb_voxels)

        balance_dct[ds_name] = balance_lst

    for ds_name in balance_dct:
        print('\nClass balance in {} set:'.format(ds_name))
        print_stats(balance_dct[ds_name])

    print('\nClass balance in full set:')
    # Flatten the per-subset lists; iterate values directly instead of
    # re-indexing the dict by key.
    print_stats([e for lst in balance_dct.values() for e in lst])
Example 3
votes: 0
def run_main(context):
    """Collect the unique metadata values of each data split to a JSON file.

    For every subset (train/validation/test) it loads each BIDS dataset
    listed in ``context["bids_path_<subset>"]``, merges the metadata values
    of all datasets per metadata type, de-duplicates them, and dumps the
    result to ``<log_directory>/metadata_config.json``.

    Args:
        context (dict): experiment configuration; must provide
            "log_directory", "bids_path_train" / "bids_path_validation" /
            "bids_path_test", "contrast_train_validation" and
            "contrast_test".
    """
    no_transform = torch_transforms.Compose([
        imed_transforms.CenterCrop([128, 128]),
        imed_transforms.NumpyToTensor(),
        imed_transforms.NormalizeInstance(),
    ])

    out_dir = context["log_directory"]
    metadata_dct = {}
    for subset in ['train', 'validation', 'test']:
        metadata_dct[subset] = {}
        for bids_ds in tqdm(context["bids_path_" + subset],
                            desc="Loading " + subset + " set"):
            ds = imed_loader.BidsDataset(
                bids_ds,
                contrast_lst=context["contrast_train_validation"]
                if subset != "test" else context["contrast_test"],
                transform=no_transform,
                slice_filter_fn=imed_loader_utils.SliceFilter())

            for m in metadata_type:
                # BUGFIX: the membership test must look inside the
                # per-subset dict. The original checked the outer dict
                # (keyed by subset names), so when several BIDS datasets
                # were loaded for one subset, each new dataset silently
                # OVERWROTE the metadata of the previous ones instead of
                # merging with them.
                if m in metadata_dct[subset]:
                    metadata_dct[subset][m] = [
                        v
                        for m_lst in [metadata_dct[subset][m], ds.metadata[m]]
                        for v in m_lst
                    ]
                else:
                    metadata_dct[subset][m] = ds.metadata[m]

        for m in metadata_type:
            # Keep only the unique values per metadata type.
            metadata_dct[subset][m] = list(set(metadata_dct[subset][m]))

    # Use os.path.join for portability instead of string concatenation.
    with open(os.path.join(out_dir, "metadata_config.json"), 'w') as fp:
        json.dump(metadata_dct, fp)

    return