def create_dataset(self):
    """Build the train, validation and test PyTorch datasets.

    Generates the patient mapping via ``fm``, keeps only entries that
    carry an 'ENT' label, removes ids listed in the pickled filter
    file, then shuffles with a fixed seed before wrapping the same
    filtered list in the three phase-specific dataset classes.
    """
    mapper = fm(self.preop_patients, self.id_mapping, normalized=self.normalized)
    mapping = mapper.generate_mapping()

    with open(self.filter_ids, 'rb') as file:
        excluded = pickle.load(file)

    # Labelled entries only, then drop ids flagged for exclusion.
    labelled = [e for e in mapping if e['ENT'] is not None]
    self.dataset_filtered = [e for e in labelled if e['id'] not in excluded]

    # Fixed seed so the downstream phase split is reproducible.
    random.seed(4)
    random.shuffle(self.dataset_filtered)

    train_dataset = Dataset(self.dataset_filtered, phase='train',
                            normalize=self.normalization)
    val_dataset = Dataset(self.dataset_filtered, phase='val',
                          normalize=self.normalization)
    # NOTE(review): test phase uses dataset_2 rather than Dataset —
    # presumably a distinct evaluation wrapper; confirm this is intended.
    test_dataset = dataset_2(self.dataset_filtered, phase='test',
                             normalize=self.normalization)
    return train_dataset, val_dataset, test_dataset
# Example #2
def main():
    """Create the deterministic 70/10/20 id split and pickle it per phase."""
    # Collect preoperative scan paths (non-normalized set).
    preop_patients = list(Path('./data/preoperative_no_norm').glob('BMIAXNA*'))

    id_mapping = './data/pickles_jsons/id_surv_mapping_10_groups.json'
    mapping = fm(preop_patients, id_mapping, normalized=True).generate_mapping()

    with open('./data/pickles_jsons/filter_ids_v2_all.pkl', 'rb') as file:
        filter_ids = pickle.load(file)

    # Keep labelled entries only, then drop ids flagged for exclusion.
    entries = [e for e in mapping if e['ENT'] is not None]
    entries = [e for e in entries if e['id'] not in filter_ids]

    # Deterministic shuffle so the split is reproducible across runs.
    random.seed(4)
    random.shuffle(entries)

    # Split boundaries: first 70% train, next 10% val, last 20% test.
    total = len(entries)
    train_end = int(total * 0.7)
    val_end = int(total * 0.8)

    ids_per_phase = {
        'train': [e['id'] for e in entries[:train_end]],
        'val': [e['id'] for e in entries[train_end:val_end]],
        'test': [e['id'] for e in entries[val_end:]],
    }

    with open('./data/pickles_jsons/ids_per_phase.pkl', 'wb') as file:
        pickle.dump(ids_per_phase, file)
# Example #3
def main():
    """Build the filtered, deterministically shuffled patient list and convert its images."""
    # Gather preoperative scan paths (non-normalized set).
    patients = list(Path('./data/preoperative_no_norm').glob('BMIAXNA*'))

    id_mapping = './data/pickles_jsons/id_surv_mapping_10_groups.json'
    mapping = fm(patients, id_mapping, normalized=True).generate_mapping()

    with open('./data/pickles_jsons/filter_ids_v2_all.pkl', 'rb') as file:
        excluded = pickle.load(file)

    # Labelled entries only, minus the explicitly excluded ids.
    entries = [e for e in mapping if e['ENT'] is not None]
    entries = [e for e in entries if e['id'] not in excluded]

    # Fixed seed keeps the shuffle (and downstream ordering) reproducible.
    random.seed(4)
    random.shuffle(entries)
    convert_images(entries)
# Example #4
def main():
    """Collect the train/val items matching 'BMIAXNAT' and pickle them as filter ids."""
    # Recursive glob here (vs flat glob elsewhere) over the normalized set.
    patients = list(Path('./data/preoperative').rglob('BMIAXNA*'))

    mapping = fm(patients, './data/pickles_jsons/id_surv_mapping.json').generate_mapping()
    entries = [e for e in mapping if e['ENT'] is not None]

    train_dataset = Dataset(entries, phase='train')
    val_dataset = Dataset(entries, phase='val')

    # NOTE(review): the membership test 'BMIAXNAT' in data depends on what
    # Dataset yields (string path? dict keys?) — verify against Dataset.__iter__.
    filter_ids = []
    for split in (train_dataset, val_dataset):
        for data in split:
            if 'BMIAXNAT' in data:
                filter_ids.append(data)

    with open('./data/pickles_jsons/filter_ids.pkl', 'wb') as file:
        pickle.dump(filter_ids, file)