# set path to the set of labels that we want to keep in the output label maps (called here segmentation labels)
segmentation_labels = '../../data/labels_classes_priors/segmentation_labels.npy'

# Because we enabled right/left flipping, and because our label maps contain different labels for contralateral
# structures, we need to sort the generation_labels into non-sided, left, and right structures.
# Thus we directly load the generation labels here, and sort them according to the FreeSurfer classification.
generation_labels, n_neutral_labels = utils.get_list_labels(generation_labels, FS_sort=True)

########################################################################################################

# instantiate BrainGenerator object
brain_generator = BrainGenerator(labels_dir=path_label_maps,
                                 generation_labels=generation_labels,
                                 output_labels=segmentation_labels,
                                 n_neutral_labels=n_neutral_labels,
                                 output_shape=output_shape,
                                 output_div_by_n=output_divisible_by_n,
                                 prior_distributions=prior_distributions,
                                 flipping=flipping)

utils.mkdir(result_folder)

for n in range(n_examples):

    # generate new image and corresponding labels
    start = time.time()
    im, lab = brain_generator.generate_brain()
    end = time.time()
    print('deformation {0:d} took {1:.01f}s'.format(n, end - start))

    # save image
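    # a minimal sketch of the save step, mirroring the other examples in this section
    # (the exact output filename is an illustrative assumption)
    utils.save_volume(im, brain_generator.aff, brain_generator.header,
                      os.path.join(result_folder, 'image_%s.nii.gz' % n))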
# load label list, classes list and intensity ranges if necessary
generation_labels, n_neutral_labels = utils.get_list_labels(generation_labels, FS_sort=True)

# instantiate BrainGenerator object
brain_generator = BrainGenerator(labels_dir=path_label_maps,
                                 generation_labels=generation_labels,
                                 output_labels=segmentation_labels,
                                 n_neutral_labels=n_neutral_labels,
                                 n_channels=channels,
                                 target_res=target_resolution,
                                 output_shape=output_shape,
                                 output_div_by_n=output_divisible_by_n,
                                 generation_classes=generation_classes,
                                 prior_distributions=prior_distribution,
                                 prior_means=prior_means,
                                 prior_stds=prior_stds,
                                 scaling_bounds=scaling_bounds,
                                 rotation_bounds=rotation_bounds,
                                 shearing_bounds=shearing_bounds,
                                 nonlin_std=nonlin_std,
                                 data_res=data_res,
                                 thickness=thickness,
                                 downsample=downsample,
                                 blur_range=blur_range,
                                 bias_field_std=bias_field_std)

utils.mkdir(result_folder)

for n in range(n_examples):
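    # a minimal sketch of the loop body, following the generate/save pattern of the other examples in this
    # section; the timing code and output filenames are illustrative assumptions (os and time imported as elsewhere)

    # generate new image and corresponding labels
    start = time.time()
    im, lab = brain_generator.generate_brain()
    end = time.time()
    print('generation {0:d} took {1:.01f}s'.format(n, end - start))

    # save output image and label map
    utils.save_volume(im, brain_generator.aff, brain_generator.header,
                      os.path.join(result_folder, 'image_%s.nii.gz' % n))
    utils.save_volume(lab, brain_generator.aff, brain_generator.header,
                      os.path.join(result_folder, 'labels_%s.nii.gz' % n))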
# Very simple script showing how to generate new images with lab2im

import os
import numpy as np
from ext.lab2im.utils import save_volume
from SynthSeg.brain_generator import BrainGenerator

# path of the input label map
path_label_map = '../data_example/brain_label_map.nii.gz'

# path where to save the generated image
result_dir = '../generated_images'

# generate an image from the label map.
# Because the image is spatially deformed, we also output the corresponding deformed label map.
brain_generator = BrainGenerator(path_label_map)
im, lab = brain_generator.generate_brain()

# save output image and label map
if not os.path.exists(result_dir):
    os.mkdir(result_dir)
save_volume(np.squeeze(im), brain_generator.aff, brain_generator.header,
            os.path.join(result_dir, 'brain.nii.gz'))
save_volume(np.squeeze(lab), brain_generator.aff, brain_generator.header,
            os.path.join(result_dir, 'labels.nii.gz'))
# As these prior distributions are Gaussians, they are each controlled by a mean and a standard deviation.
# Therefore, the numpy array pointed to by prior_means is of size (2, K), where K is the number of classes specified
# in generation_classes. The first row of prior_means corresponds to the means of the Gaussian priors, and the second
# row corresponds to their standard deviations.
# These hyperparameters were estimated with the function SynthSeg/estimate_priors.py/build_intensity_stats
prior_means = '../../data/labels_classes_priors/prior_means.npy'

# same as for prior_means, but for the standard deviations of the GMM.
prior_stds = '../../data/labels_classes_priors/prior_stds.npy'

########################################################################################################

# instantiate BrainGenerator object
brain_generator = BrainGenerator(labels_dir=path_label_map,
                                 generation_labels=generation_labels,
                                 output_labels=output_labels,
                                 generation_classes=generation_classes,
                                 prior_distributions=prior_distribution,
                                 prior_means=prior_means,
                                 prior_stds=prior_stds,
                                 output_shape=output_shape)

# create result dir
utils.mkdir(result_dir)

for n in range(n_examples):

    # generate new image and corresponding labels
    start = time.time()
    im, lab = brain_generator.generate_brain()
    end = time.time()
    print('generation {0:d} took {1:.01f}s'.format(n, end - start))
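# As a concrete illustration of the (2, K) layout described above, here is a minimal sketch for K = 3 classes,
# with purely illustrative intensity values (not the shipped hyperparameters). Row 0 holds the means of the
# Gaussian priors and row 1 their standard deviations; the array is saved to a .npy file so it can be passed
# by path, as in the script above.
import numpy as np
example_prior_means = np.array([[30., 110., 75.],   # prior means, one per class
                                [10., 15., 10.]])   # prior standard deviations, one per class
np.save('./example_prior_means.npy', example_prior_means)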
If you use this code, please cite one of the SynthSeg papers:
https://github.com/BBillot/SynthSeg/blob/master/bibtex.bib

Copyright 2020 Benjamin Billot

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""

from ext.lab2im import utils
from SynthSeg.brain_generator import BrainGenerator

# generate an image from the label map.
brain_generator = BrainGenerator('../../data/training_label_maps/training_seg_01.nii.gz')
im, lab = brain_generator.generate_brain()

# save output image and label map
utils.save_volume(im, brain_generator.aff, brain_generator.header, './generated_examples/image_default.nii.gz')
utils.save_volume(lab, brain_generator.aff, brain_generator.header, './generated_examples/labels_default.nii.gz')
# simulate, we decide here to downsample the Gaussian image to LR. If downsampled, the data will then be upsampled back
# to the target HR resolution (by default, that of the training label maps). This downsampling/upsampling step
# reproduces the process that will happen at test time: real LR scans will be upsampled to HR, and run through
# the network to obtain the HR regressed scan.
downsample = True

# ------------------------------------------------------ Generate ------------------------------------------------------

# instantiate BrainGenerator object
brain_generator = BrainGenerator(labels_dir=path_label_map,
                                 generation_labels=generation_labels,
                                 output_labels=output_labels,
                                 n_neutral_labels=n_neutral_labels,
                                 output_shape=output_shape,
                                 prior_distributions=prior_distributions,
                                 generation_classes=generation_classes,
                                 prior_means=prior_means,
                                 prior_stds=prior_stds,
                                 randomise_res=randomise_res,
                                 data_res=data_res,
                                 thickness=thickness,
                                 downsample=downsample,
                                 blur_range=blur_range)

for n in range(n_examples):

    # generate new image and corresponding labels
    im, lab = brain_generator.generate_brain()

    # save output image and label map
    utils.save_volume(im, brain_generator.aff, brain_generator.header,
                      os.path.join(result_dir, 'image_t1_%s.nii.gz' % n))
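    # a minimal sketch of the matching label-map save (the filename pattern is an illustrative assumption)
    utils.save_volume(lab, brain_generator.aff, brain_generator.header,
                      os.path.join(result_dir, 'labels_t1_%s.nii.gz' % n))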
# By default, the output label maps contain all the labels used for generation. We can also choose to keep only a
# subset of those, by specifying them in output_labels. This should only contain labels already present in the label
# maps (or in generation_labels if it is provided).
output_labels = '../data_example/segmentation_labels.npy'

# By default, each label will be associated with a Gaussian distribution when sampling a new image. We can also group
# labels into classes, to force them to share the same Gaussian. This can be done by providing generation_classes,
# which should be a sequence, a 1d numpy array, or the path to such an array, with the *same length* as
# generation_labels. Values in generation_classes should be between 0 and K-1, where K is the total number of classes.
generation_classes = '../data_example/generation_classes.npy'

########################################################################################################

# instantiate BrainGenerator object
brain_generator = BrainGenerator(labels_dir=path_label_map,
                                 generation_labels=generation_labels,
                                 output_labels=output_labels,
                                 generation_classes=generation_classes)

# create result dir
if not os.path.exists(result_dir):
    os.mkdir(result_dir)

for n in range(n_examples):

    # generate new image and corresponding labels
    im, lab = brain_generator.generate_brain()

    # save output image and label map
    save_volume(np.squeeze(im), brain_generator.aff, brain_generator.header,
                os.path.join(result_dir, 'brain_%s.nii.gz' % n))
    save_volume(np.squeeze(lab), brain_generator.aff, brain_generator.header,
                os.path.join(result_dir, 'labels_%s.nii.gz' % n))  # label filename is an illustrative assumption
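# A minimal sketch of how generation_labels and generation_classes relate; the label values below are illustrative
# FreeSurfer codes, not the contents of the shipped .npy files. Giving a left/right pair of structures the same
# class value forces them to share a single Gaussian during sampling.
import numpy as np
example_generation_labels = np.array([0, 24, 2, 41, 3, 42])  # background, CSF, left/right white matter, left/right cortex
example_generation_classes = np.array([0, 1, 2, 2, 3, 3])    # K = 4 classes, values between 0 and K-1, same length as labels
np.save('./example_generation_classes.npy', example_generation_classes)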
# This is achieved by multiplying the standard deviation of the Gaussian blurring kernel by a random coefficient
# drawn from the uniform distribution U(1/blur_range, blur_range).
blur_range = 1.03

# ------------------------------------------------------ Generate ------------------------------------------------------

# instantiate BrainGenerator object
brain_generator = BrainGenerator(labels_dir=path_label_map,
                                 generation_labels=generation_labels,
                                 n_neutral_labels=n_neutral_labels,
                                 prior_distributions=prior_distributions,
                                 generation_classes=generation_classes,
                                 output_labels=output_labels,
                                 n_channels=n_channels,
                                 target_res=target_res,
                                 output_shape=output_shape,
                                 flipping=flipping,
                                 scaling_bounds=scaling_bounds,
                                 rotation_bounds=rotation_bounds,
                                 shearing_bounds=shearing_bounds,
                                 translation_bounds=translation_bounds,
                                 nonlin_std=nonlin_std,
                                 bias_field_std=bias_field_std,
                                 randomise_res=randomise_res,
                                 blur_range=blur_range)

for n in range(n_examples):

    # generate new image and corresponding labels
    im, lab = brain_generator.generate_brain()

    # save output image and label map
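    # a minimal sketch of the save step, mirroring the other examples in this section; result_dir, the use of
    # utils and os, and the output filenames are illustrative assumptions for this excerpt
    utils.save_volume(im, brain_generator.aff, brain_generator.header,
                      os.path.join(result_dir, 'image_%s.nii.gz' % n))
    utils.save_volume(lab, brain_generator.aff, brain_generator.header,
                      os.path.join(result_dir, 'labels_%s.nii.gz' % n))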