def _collect(self):
    self.subject_files.clear()

    img_dir, label_dir = self.get_img_and_label_dirs()
    assert os.path.exists(img_dir)
    assert os.path.exists(label_dir)

    # group the image, ground truth, and superpixel files by subject id
    # (the id is the first 12 characters of the file name)
    files_by_id = {}
    for file_path in glob.glob(img_dir + '/*') + glob.glob(label_dir + '/*'):
        base_name = os.path.basename(file_path)
        id_ = base_name[:12]

        if base_name.endswith('_superpixels.png'):
            files_by_id.setdefault(id_, {})['superpixel'] = file_path
        elif base_name.endswith('_segmentation.png'):
            files_by_id.setdefault(id_, {})['gt'] = file_path
        elif base_name.endswith('.jpg'):
            files_by_id.setdefault(id_, {})['image'] = file_path

    for id_, files in files_by_id.items():
        assert len(files) == 3, 'id "{}" does not have 3 entries'.format(id_)

        params = {'images': {'image': files['image']},
                  'labels': {'gt': files['gt']}}
        if self.with_super_pixels:
            params['misc'] = {'superpixel': files['superpixel']}

        sf = data.SubjectFile(id_, **params)
        self.subject_files.append(sf)
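# A minimal, standalone sketch of the grouping pattern above; the file names are
# hypothetical ISIC-style examples (the subject id is assumed to be the first
# 12 characters of the file name).
def _demo_group_by_id(file_names):
    files_by_id = {}
    for name in file_names:
        if name.endswith('_superpixels.png'):
            files_by_id.setdefault(name[:12], {})['superpixel'] = name
        elif name.endswith('_segmentation.png'):
            files_by_id.setdefault(name[:12], {})['gt'] = name
        elif name.endswith('.jpg'):
            files_by_id.setdefault(name[:12], {})['image'] = name
    return files_by_id

# _demo_group_by_id(['ISIC_0000000.jpg', 'ISIC_0000000_segmentation.png',
#                    'ISIC_0000000_superpixels.png'])
# -> {'ISIC_0000000': {'image': 'ISIC_0000000.jpg',
#                      'gt': 'ISIC_0000000_segmentation.png',
#                      'superpixel': 'ISIC_0000000_superpixels.png'}}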
def _collect(self):
    self.subject_files.clear()

    flair_paths = glob.glob(self.root_dir + '/**/*_flair.nii.gz', recursive=True)
    t1_paths = glob.glob(self.root_dir + '/**/*_t1.nii.gz', recursive=True)
    t2_paths = glob.glob(self.root_dir + '/**/*_t2.nii.gz', recursive=True)
    t1c_paths = glob.glob(self.root_dir + '/**/*_t1ce.nii.gz', recursive=True)
    label_paths = glob.glob(self.root_dir + '/**/*_seg.nii.gz', recursive=True)

    # sort the paths so that the sequences of one subject share the same index
    flair_paths.sort()
    t1_paths.sort()
    t2_paths.sort()
    t1c_paths.sort()
    label_paths.sort()

    if not (len(flair_paths) == len(t1_paths) == len(t2_paths) == len(t1c_paths)):
        raise ValueError('all sequences must have the same number of files in the dataset')

    has_gt = len(label_paths) > 0
    if has_gt and len(flair_paths) != len(label_paths):
        raise ValueError('labels must have the same number of files as the other sequences')

    for subject_index in range(len(flair_paths)):
        subject_dir = os.path.dirname(flair_paths[subject_index])
        identifier = os.path.basename(subject_dir)
        if self.crop_brats_prefix:
            # 'BratsXX_' only provides the prefix length to crop, e.g. 'Brats18_'
            identifier = identifier[len('BratsXX_'):]

        if self.with_grade:
            grade = os.path.basename(os.path.dirname(subject_dir))
            identifier = '{}_{}'.format(identifier, grade)

        image_files = {'flair': flair_paths[subject_index],
                       't1': t1_paths[subject_index],
                       't2': t2_paths[subject_index],
                       't1c': t1c_paths[subject_index]}
        label_files = {}
        if has_gt:
            label_files['gt'] = label_paths[subject_index]

        sf = data.SubjectFile(identifier, images=image_files, labels=label_files)
        self.subject_files.append(sf)
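# Standalone check (hypothetical paths) of the pairing assumption above: sorting the
# per-sequence path lists aligns them subject-by-subject, because all files of one
# subject live in the same directory and therefore share a common path prefix.
import os

_flair = sorted(['/data/HGG/Brats18_B/Brats18_B_flair.nii.gz',
                 '/data/HGG/Brats18_A/Brats18_A_flair.nii.gz'])
_t1 = sorted(['/data/HGG/Brats18_B/Brats18_B_t1.nii.gz',
              '/data/HGG/Brats18_A/Brats18_A_t1.nii.gz'])
assert [os.path.dirname(p) for p in _flair] == [os.path.dirname(p) for p in _t1]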
def _collect(self):
    self.subject_files.clear()

    subject_dirs = glob.glob(os.path.join(self.root_dir, '*'))
    subject_dirs = list(filter(lambda path: os.path.basename(path).lower().startswith('subject')
                               and os.path.isdir(path),
                               subject_dirs))
    subject_dirs.sort(key=lambda path: os.path.basename(path))

    # for each subject
    for subject_dir in subject_dirs:
        subject = os.path.basename(subject_dir)

        images = {data.FileTypes.Data.name: os.path.join(subject_dir, 'MRFreal.mha')}
        labels = {data.FileTypes.T1H2Omap.name: os.path.join(subject_dir, 'T1H2O.mha'),
                  data.FileTypes.FFmap.name: os.path.join(subject_dir, 'FF.mha'),
                  data.FileTypes.B1map.name: os.path.join(subject_dir, 'B1.mha')}
        mask_fg = {data.FileTypes.ForegroundTissueMask.name: os.path.join(subject_dir, 'MASK_FG.mha')}
        # note: the T1H2O tissue mask points to the same MASK_FG.mha file as the foreground mask
        mask_t1h2o = {data.FileTypes.T1H2OTissueMask.name: os.path.join(subject_dir, 'MASK_FG.mha')}

        sf = pymia_data.SubjectFile(subject, images=images, labels=labels,
                                    mask_fg=mask_fg, mask_t1h2o=mask_t1h2o)
        self.subject_files.append(sf)
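# The '.name' lookups above suggest that data.FileTypes is an enum; a minimal,
# hypothetical stand-in for illustration (the member values are placeholders):
import enum

class _FileTypesSketch(enum.Enum):
    Data = 1
    T1H2Omap = 2
    FFmap = 3
    B1map = 4

assert _FileTypesSketch.T1H2Omap.name == 'T1H2Omap'  # dict keys become plain strings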
def _collect(self):
    self.subject_files.clear()

    subject_dirs = glob.glob(os.path.join(self.root_dir, '*'))
    subject_dirs = list(filter(lambda path: os.path.basename(path).lower().startswith('subject')
                               and os.path.isdir(path),
                               subject_dirs))
    subject_dirs.sort(key=lambda path: os.path.basename(path))

    for subject_dir in subject_dirs:
        subject = os.path.basename(subject_dir)

        # we generate an entry for the coordinates of the points in our clouds
        # note that the "---" is an ugly hack to be able to pass two paths
        images = {data.FileTypes.COORDINATE.name:
                  os.path.join(subject_dir, '{}_PROBABILITY.mha'.format(subject)) + '---' +
                  os.path.join(subject_dir, '{}_GROUND_TRUTH.mha'.format(subject))}
        # we create an entry for the labels of each point
        labels = {data.FileTypes.LABEL.name: os.path.join(subject_dir, '{}_GROUND_TRUTH.mha'.format(subject))}
        indices = {data.FileTypes.INDICES.name: os.path.join(subject_dir, '{}_PROBABILITY.mha'.format(subject))}
        image_information = {data.FileTypes.IMAGE_INFORMATION.name:
                             os.path.join(subject_dir, '{}_PROBABILITY.mha'.format(subject))}
        # we also save the ground truth in image format for easier evaluation
        gt = {data.FileTypes.GTM.name: os.path.join(subject_dir, '{}_GROUND_TRUTH.mha'.format(subject))}

        sf = pymia_data.SubjectFile(subject, images=images, labels=labels, indices=indices,
                                    image_information=image_information, gt=gt)
        self.subject_files.append(sf)
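# A sketch (not the actual loader) of how the "---" hack above would presumably be
# undone on the reading side: the combined entry is split back into the two paths.
def _split_combined_paths(combined):
    probability_path, ground_truth_path = combined.split('---')
    return probability_path, ground_truth_path

# _split_combined_paths('/d/Subject_1/Subject_1_PROBABILITY.mha---/d/Subject_1/Subject_1_GROUND_TRUTH.mha')
# -> ('/d/Subject_1/Subject_1_PROBABILITY.mha', '/d/Subject_1/Subject_1_GROUND_TRUTH.mha')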
def _collect(self):
    self.subject_files.clear()

    # collect the prediction files by subject id, one entry per post-fix
    files_by_id = {}
    for post_fix in self.post_fixes:
        post_fix_paths = glob.glob(self.prediction_path + '/**/*_{}.nii.gz'.format(post_fix), recursive=True)
        for path_ in post_fix_paths:
            # the subject id is the file name without the '_{post_fix}.nii.gz' ending
            id_ = os.path.basename(path_)[:-len('_{}.nii.gz'.format(post_fix))]
            files_by_id.setdefault(id_, {})[post_fix] = path_

    for id_, files in files_by_id.items():
        assert set(files.keys()) == set(self.post_fixes), \
            'id "{}" does not have all required entries ({})'.format(id_, list(self.post_fixes))

        # group the files into SubjectFile categories (e.g. images, labels)
        categories = {}
        for post_fix, category in self.post_fix_to_category.items():
            categories.setdefault(category, {})[post_fix] = files[post_fix]

        sf = data.SubjectFile(id_, **categories)
        self.subject_files.append(sf)
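# Standalone illustration (hypothetical names) of the id extraction used above:
# stripping the '_{post_fix}.nii.gz' ending from the file name yields the subject id.
_example_name = 'Subject_1_prediction.nii.gz'
_post_fix = 'prediction'
assert _example_name[:-len('_{}.nii.gz'.format(_post_fix))] == 'Subject_1'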