Example #1
def generate_images(segs, outDir, blur_factor, foreground_mean, foreground_var, background_mean, background_var):
    imgDir = outDir + 'images/'
    make_dir(imgDir)
    index = 1
    for seg in segs:
        print("Generating image " + str(index) + " out of " + str(len(segs)))
        name = seg.replace('segmentations/','images/').replace('_seg.nrrd', '_blur' + str(blur_factor) + '.nrrd')
        img = sw.Image(seg)
        origin = img.origin()
        img_array = blur(img.toArray(), blur_factor)
        img_array = apply_noise(img_array, foreground_mean, foreground_var, background_mean, background_var)
        img_array = np.float32(img_array)
        img = sw.Image(np.float32(img_array)).setOrigin(origin)
        img.write(name, compressed=True)
        index += 1
    return get_files(imgDir)
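
A minimal usage sketch for the function above (paths and noise parameters are hypothetical; make_dir, blur, apply_noise, and get_files are helpers assumed to be defined in the same module). Note that outDir must end with a path separator, since the code concatenates 'images/' onto it directly:

seg_files = get_files('segmentations/')  # hypothetical directory of *_seg.nrrd files
image_files = generate_images(seg_files, 'output/', blur_factor=2,
                              foreground_mean=180, foreground_var=30,
                              background_mean=80, background_var=30)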
Example #2
def get_images(loader_dir, image_list, down_factor, down_dir):
    # get all images
    all_images = []
    for image_path in image_list:
        if down_dir is not None:
            if not os.path.exists(down_dir):
                os.makedirs(down_dir)
            img_name = os.path.basename(image_path)
            res_img = os.path.join(down_dir, img_name)
            if not os.path.exists(res_img):
                apply_down_sample(image_path, res_img, down_factor)
            image_path = res_img

        # for_viewing returns 'F' order, i.e., transpose, needed for this array
        img = sw.Image(image_path).toArray(copy=True, for_viewing=True)
        all_images.append(img)
    all_images = np.array(all_images)
    # get mean and std
    mean_path = loader_dir + 'mean_img.npy'
    std_path = loader_dir + 'std_img.npy'
    if not os.path.exists(mean_path) or not os.path.exists(std_path):
        mean_image = np.mean(all_images)
        std_image = np.std(all_images)
        np.save(mean_path, mean_image)
        np.save(std_path, std_image)
    else:
        mean_image = np.load(mean_path)
        std_image = np.load(std_path)
    # normalize
    norm_images = []
    for image in all_images:
        # the extra brackets add a channel dimension expected by the data loader
        norm_images.append([(image - mean_image) / std_image])
    return norm_images
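
A hedged usage sketch (directory names hypothetical; apply_down_sample is Example #3 below). Note that loader_dir is concatenated directly with the output file names, so it must end with a path separator:

import glob

image_list = sorted(glob.glob('data/images/*.nrrd'))  # hypothetical image paths
norm_images = get_images('loader/', image_list,
                         down_factor=0.5, down_dir='loader/downsampled/')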
Example #3
def apply_down_sample(image_path, output_path, factor=0.75):
    image = sw.Image(image_path)
    size = image.size()
    sizex = int(size[0] * factor)
    sizey = int(size[1] * factor)
    sizez = int(size[2] * factor)
    image.resize([sizex, sizey, sizez]).write(output_path)
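
A quick sketch (hypothetical paths) showing the default factor and an explicit one:

apply_down_sample('data/images/scan01.nrrd', 'down/scan01.nrrd')            # factor defaults to 0.75
apply_down_sample('data/images/scan01.nrrd', 'down/scan01_half.nrrd', 0.5)  # halve each dimension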
Example #4
def generate_image(out_dir, gen_particles, base_image, base_particles):
	"""Use warp between particles to warp original image into a new image"""
	image_name = os.path.basename(gen_particles).replace(".particles",".nrrd")
	gen_image = out_dir + "Generated-Images/" + image_name
	img = sw.Image(base_image)
	transform = sw.ImageUtils.createWarpTransform(base_particles, gen_particles, 2)
	img.applyTransform(transform).write(gen_image)
	return gen_image
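
A usage sketch under an assumed file layout (all paths hypothetical; note that out_dir must end with a separator and the Generated-Images/ subdirectory is assumed to already exist):

new_image = generate_image('Output/',
                           'Output/Generated-Particles/sample_00.particles',
                           'Output/mean.nrrd',
                           'Output/mean.particles')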
Example #5
def get_mesh_from_DT(DT_list, mesh_dir):
    if not os.path.exists(mesh_dir):
        os.makedirs(mesh_dir)
    mesh_files = []
    for input_file in DT_list:
        print('\t' + get_prefix(input_file))
        output_vtk = mesh_dir + "original_" + get_prefix(input_file) + ".vtk"
        image = sw.Image(input_file)
        image.toMesh(isovalue=0).write(output_vtk)
        mesh_files.append(output_vtk)
    return sorted(mesh_files)
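
A brief sketch (hypothetical directories; mesh_dir is concatenated directly with file names, so it needs a trailing separator):

import glob

dt_list = sorted(glob.glob('groomed/distance_transforms/*.nrrd'))
mesh_files = get_mesh_from_DT(dt_list, 'groomed/meshes/')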
Example #6
def get_optimize_input(distance_transform_files, mesh_mode=False):
    if mesh_mode:
        dt_dir = os.path.dirname(distance_transform_files[0])
        mesh_dir = dt_dir.replace("distance_transforms", "meshes")
        if not os.path.exists(mesh_dir):
            os.makedirs(mesh_dir)
        domain_type = 'mesh'
        files = []
        for file in distance_transform_files:
            mesh_file = file.replace(dt_dir, mesh_dir).replace(".nrrd", ".vtk")
            print("Writing: " + mesh_file)
            sw.Image(file).toMesh(0).write(mesh_file)
            files.append(mesh_file)
    else:
        domain_type = 'image'
        files = distance_transform_files
    return domain_type, files
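
A sketch of both modes (hypothetical dt_files; the mesh-mode call writes .vtk isosurfaces into a sibling meshes directory):

import glob

dt_files = sorted(glob.glob('groomed/distance_transforms/*.nrrd'))
domain_type, input_files = get_optimize_input(dt_files)                  # -> 'image', the DTs themselves
domain_type, input_files = get_optimize_input(dt_files, mesh_mode=True)  # -> 'mesh', extracted meshes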
Example #7
def Run_Pipeline(args):
    print("\nStep 1. Extract Data\n")
    """
    Step 1: EXTRACT DATA
    We define dataset_name which determines which dataset to download from 
    the portal and the directory to save output from the use case in. 
    """
    dataset_name = "left_atrium-v0"
    output_directory = "Output/left_atrium/"
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    # If running a tiny_test, then download subset of the data
    if args.tiny_test:
        args.use_single_scale = 1
        sw.data.download_subset(args.use_case, dataset_name, output_directory)
        file_list_img = sorted(
            glob.glob(output_directory + dataset_name + "/images/*.nrrd"))[:3]
        file_list_seg = sorted(
            glob.glob(output_directory + dataset_name +
                      "/segmentations/*.nrrd"))[:3]

    # Else download the entire dataset
    else:
        sw.data.download_and_unzip_dataset(dataset_name, output_directory)
        file_list_img = sorted(
            glob.glob(output_directory + dataset_name + "/images/*.nrrd"))
        file_list_seg = sorted(
            glob.glob(output_directory + dataset_name +
                      "/segmentations/*.nrrd"))

        # Select representative data if using subsample
        if args.use_subsample:
            inputSegs = [sw.Image(filename) for filename in file_list_seg]
            sample_idx = sw.data.sample_images(inputSegs,
                                               int(args.num_subsample))
            file_list_seg = [file_list_seg[i] for i in sample_idx]
            file_list_img = [file_list_img[i] for i in sample_idx]
        else:
            sample_idx = []

    # If skipping grooming, use the pregroomed distance transforms from the portal
    if args.skip_grooming:
        print("Skipping grooming.")
        dt_directory = output_directory + dataset_name + '/groomed/distance_transforms/'
        indices = []
        if args.tiny_test:
            indices = [0, 1, 2]
        elif args.use_subsample:
            indices = sample_idx
        dt_files = sw.data.get_file_list(dt_directory,
                                         ending=".nrrd",
                                         indices=indices)

    # Else groom the segmentations and get distance transforms for optimization
    else:
        print("\nStep 2. Groom - Data Pre-processing\n")
        """
        Step 2: GROOMING 
        
        The required grooming steps are: 
        1. Isotropic resampling
        2. Centering 
        3. Padding
        4. Center of Mass Alignment
        5. Find reference image
        6. Rigid Alignment
        7. Find largest bounding box
        8. Apply cropping and padding
        9. Create smooth signed distance transforms

        For more information on grooming see docs/workflow/groom.md
        http://sciinstitute.github.io/ShapeWorks/workflow/groom.html
        """

        # Create a directory for groomed output
        groom_dir = output_directory + 'groomed/'
        if not os.path.exists(groom_dir):
            os.makedirs(groom_dir)
        """
        First, we need to loop over the shape segmentation files and load the segmentations
        """
        # list of shape segmentations
        shape_seg_list = []
        # list of shape names (shape files prefixes) to be used for saving outputs
        shape_seg_names = []
        for shape_filename in file_list_seg:
            print('Loading: ' + shape_filename)
            # get current shape name
            shape_seg_names.append(
                shape_filename.split('/')[-1].replace('.nrrd', ''))
            # load segmentation
            shape_seg = sw.Image(shape_filename)
            # append to the shape list
            shape_seg_list.append(shape_seg)
        """
        If we are grooming with images, we need to loop over the image files and load the images
        """
        if args.groom_images and file_list_img:

            # list of shape images
            shape_img_list = []
            # list of shape names (shape files prefixes) to be used for saving outputs
            shape_img_names = []
            for shape_filename in file_list_img:
                print('Loading: ' + shape_filename)
                # get current shape name
                shape_img_names.append(
                    shape_filename.split('/')[-1].replace('.nrrd', ''))
                # load image
                shape_image = sw.Image(shape_filename)
                # append to the shape image list
                shape_img_list.append(shape_image)
            """
            If we are grooming with images, we will perform the same grooming operations
            on images

            """
            """
            Now we can loop over the images and apply the initial grooming steps to themm
            """
            print(
                "\nPerforming resampling, centering, and padding operations on images\n"
            )
            for shape_img, shape_name in zip(shape_img_list, shape_img_names):
                """
                Grooming Step 1: Resample images to have isotropic (uniform) spacing
                    - Antialiase the images to convert it to a smooth continuous-valued 
                    image for interpolation
                    - Resample the antialiased image using the same voxel spacing for all dimensions
                    - Binarize the resampled images to results in a binary image with an 
                    isotropic voxel spacing
                """
                print('Resampling image: ' + shape_name)
                # antialias for 30 iterations
                antialias_iterations = 30
                shape_img.antialias(antialias_iterations)
                # resample to isotropic spacing using linear interpolation
                iso_spacing = [1, 1, 1]
                shape_img.resample(iso_spacing, sw.InterpolationType.Linear)
                # make the image binary again
                shape_img.binarize()
                """
                Grooming Step 2:Recenter the image

                """
                print('Recentering image: ' + shape_name)
                shape_img.recenter()
                """
                Grooming Step 3: Padding the image

                """
                print('Padding image: ' + shape_name)
                # parameters for padding
                padding_size = 10  # number of voxels to pad for each dimension
                padding_value = 0  # the constant value used to pad the segmentations
                shape_img.pad(padding_size, padding_value)
        """
        Now we can loop over the segmentations and apply the initial grooming steps to them
        """
        print(
            "\nPerforming resampling, centering, and padding operations on segmentations\n"
        )
        for shape_seg, shape_name in zip(shape_seg_list, shape_seg_names):
            """
            Grooming Step 1: Resample segmentations to have isotropic (uniform) spacing
                - Antialiase the binary segmentation to convert it to a smooth continuous-valued 
                image for interpolation
                - Resample the antialiased image using the same voxel spacing for all dimensions
                - Binarize the resampled images to results in a binary segmentation with an 
                isotropic voxel spacing
            """
            print('Resampling segmentation: ' + shape_name)
            # antialias for 30 iterations
            antialias_iterations = 30
            shape_seg.antialias(antialias_iterations)
            # resample to isotropic spacing using linear interpolation
            iso_spacing = [1, 1, 1]
            shape_seg.resample(iso_spacing, sw.InterpolationType.Linear)
            # make segmentation binary again
            shape_seg.binarize()
            """
            Grooming Step 2:Recenter the segmentation image

            """
            print('Recentering segmentation: ' + shape_name)
            shape_seg.recenter()
            """
            Grooming Step 3: Padding the segmentation image

            """
            print('Padding segmentation: ' + shape_name)
            # parameters for padding
            padding_size = 10  # number of voxels to pad for each dimension
            padding_value = 0  # the constant value used to pad the segmentations
            shape_seg.pad(padding_size, padding_value)
        """
        Grooming Step 4: Center of mass alignment 
        This step translates the center of mass of the shape to the center of the 3D volume space
        as a precursor for rigid alignment. It involves:
            - Finding the center of mass of the segmentation
            - Finding the center of the image domain
            - Defining the translation vector
            - Applying the translation by antialiasing, translating, and binarizing. 
            (Because this step involves interpolation, we must antialias before and 
            binarize after as we did when resampling.)
        """
        print('\n')
        for i in range(len(shape_seg_list)):
            shape_name = shape_seg_names[i]
            shape_seg = shape_seg_list[i]
            print('Center of mass alignment: ' + shape_name)
            # compute the center of mass of this segmentation
            shape_center = shape_seg.centerOfMass()
            # get the center of the image domain
            image_center = shape_seg.center()
            # define the translation to move the shape to its center
            translationVector = image_center - shape_center
            # perform antialias-translate-binarize
            shape_seg.antialias(antialias_iterations).translate(
                translationVector).binarize()
            # if we are grooming the images, apply the COM alignment to the corresponding images
            if (args.groom_images):
                shape_img_name = shape_img_names[i]
                shape_img = shape_img_list[i]
                print("Center of mass alignment for corresponding image: " +
                      shape_img_name)

                # perform antialias-translate-binarize
                shape_img.antialias(antialias_iterations).translate(
                    translationVector).binarize()
        """
        Grooming Step 5: Select a reference
        This step requires breaking the loop to load all of the segmentations at once so the shape
        closest to the mean can be found and selected as the reference. 
        """
        print('\n')
        ref_index = sw.find_reference_image_index(shape_seg_list)
        # Make a copy of the reference segmentation
        ref_seg = shape_seg_list[ref_index].write(groom_dir + 'reference.nrrd')
        ref_name = shape_seg_names[ref_index]
        print("Reference found: " + ref_name)
        """
        Grooming Step 6: Rigid alignment
        This step rigidly aligns each shape to the selected reference.
        Rigid alignment involves interpolation, hence we need to convert binary segmentations
        to continuous-valued images again. The steps are:
            - computing the rigid transformation parameters that would align a segmentation
            to the reference shape
            - applying the rigid transformation to the segmentation
            - saving the aligned images for the next step
        """
        print('\n')
        # First antialias the reference segmentation
        ref_seg.antialias(antialias_iterations)
        # Set the alignment parameters
        iso_value = 1e-20
        icp_iterations = 200
        # Now loop through all the segmentations and apply rigid alignment
        for i in range(len(shape_seg_list)):
            shape_name = shape_seg_names[i]
            shape_seg = shape_seg_list[i]
            print('Aligning ' + shape_name + ' to ' + ref_name)
            # compute rigid transformation
            shape_seg.antialias(antialias_iterations)
            rigidTransform = shape_seg.createRigidRegistrationTransform(
                ref_seg, iso_value, icp_iterations)
            # then we apply the computed transformation; note that shape_seg has
            # already been antialiased, so we can directly apply the transformation
            shape_seg.applyTransform(rigidTransform, ref_seg.origin(),
                                     ref_seg.dims(), ref_seg.spacing(),
                                     ref_seg.coordsys(),
                                     sw.InterpolationType.Linear)
            # then turn the antialiased-transformed segmentation back into a binary segmentation
            shape_seg.binarize()
            # If we are grooming the images, apply the rigid alignment to the corresponding images
            if (args.groom_images):
                shape_img_name = shape_img_names[i]
                shape_img = shape_img_list[i]
                print('Aligning the corresponding image: ' + shape_img_name +
                      ' to ' + ref_name)
                # apply the computed transformation: first antialias the image,
                # then the transformation can be applied directly
                shape_img.antialias(antialias_iterations)
                shape_img.applyTransform(rigidTransform, ref_seg.origin(),
                                         ref_seg.dims(), ref_seg.spacing(),
                                         ref_seg.coordsys(),
                                         sw.InterpolationType.Linear)
                # then turn the antialiased-transformed image back into a binary image
                shape_img.binarize()
        """
        Grooming Step 7: Finding the largest bounding box
        We want to crop all of the segmentations to be the same size, so we need to find 
        the largest bounding box as this will contain all the segmentations. This requires 
        loading all segmentation files at once.
        """
        print('\n')
        # Compute bounding box - aligned segmentations are binary images, so a good iso_value is 0.5
        iso_value = 0.5
        segs_bounding_box = sw.ImageUtils.boundingBox(shape_seg_list,
                                                      iso_value)
        """
        Grooming Step 8: Apply cropping and padding
        Now we need to loop over the segmentations and crop them to the size of the bounding box.
        To avoid cropped segmentations to touch the image boundaries, we will crop then 
        pad the segmentations.
            - Crop to bounding box size
            - Pad segmentations
        """

        # parameters for padding
        padding_size = 10  # number of voxels to pad for each dimension
        padding_value = 0  # the constant value used to pad the segmentations
        # loop over segs to apply cropping and padding
        for i in range(len(shape_seg_list)):
            shape_name = shape_seg_names[i]
            shape_seg = shape_seg_list[i]
            print('Cropping & padding segmentation: ' + shape_name)
            shape_seg.crop(segs_bounding_box).pad(padding_size, padding_value)

            # if we are grooming images, apply same cropping and padding to images
            if (args.groom_images):
                shape_img_name = shape_img_names[i]
                shape_img = shape_img_list[i]
                print('Cropping & padding corresponding image: ' +
                      shape_img_name)
                shape_img.crop(segs_bounding_box).pad(padding_size,
                                                      padding_value)

        # Save groomed images
        if args.groom_images:
            print('\nSaving groomed images\n')
            sw.utils.save_images(groom_dir + 'images',
                                 shape_img_list,
                                 shape_img_names,
                                 extension='nrrd',
                                 compressed=True,
                                 verbose=True)
        """
        Grooming Step 9: Converting segmentations to smooth signed distance transforms.
        The computeDT API needs an iso_value that defines the foreground-background interface, to create 
        a smoother interface we first antialiasing the segmentation then compute the distance transform 
        at the zero-level set. We then need to smooth the DT as it will have some remaining aliasing effect 
        of binarization. 
        So the steps are:
            - Antialias 
            - Compute distance transform
            - Apply smoothing
            - Save the distance transform
        """
        print('\n')
        # Define distance transform parameters
        iso_value = 0
        sigma = 1.3
        # Loop over segs and compute smooth DT
        for shape_seg, shape_name in zip(shape_seg_list, shape_seg_names):
            print('Compute DT for segmentation: ' + shape_name)
            shape_seg.antialias(antialias_iterations).computeDT(
                iso_value).gaussianBlur(sigma)
        # Save distance transforms
        dt_files = sw.utils.save_images(groom_dir + 'distance_transforms/',
                                        shape_seg_list,
                                        shape_seg_names,
                                        extension='nrrd',
                                        compressed=True,
                                        verbose=True)

    print("\nStep 3. Optimize - Particle Based Optimization\n")
    """
    Step 3: OPTIMIZE - Particle Based Optimization
    Now that we have the distance transform representation of the data, we create
    the parameter files for the shapeworks particle optimization routine.
    For more details on the plethora of parameters for shapeworks, please refer
    to docs/workflow/optimize.md
    http://sciinstitute.github.io/ShapeWorks/workflow/optimize.html
    """

    # Make directory to save optimization output
    point_dir = output_directory + 'shape_models/' + args.option_set
    if not os.path.exists(point_dir):
        os.makedirs(point_dir)
    # Create a dictionary for all the parameters required by optimization
    parameter_dictionary = {
        "number_of_particles": 512,
        "use_normals": 0,
        "normal_weight": 10.0,
        "checkpointing_interval": 200,
        "keep_checkpoints": 0,
        "iterations_per_split": 4000,
        "optimization_iterations": 4000,
        "starting_regularization": 1000,
        "ending_regularization": 10,
        "recompute_regularization_interval": 2,
        "domains_per_shape": 1,
        "relative_weighting": 10,
        "domain_type": 'image',
        "initial_relative_weighting": 0.1,
        "procrustes_interval": 1,
        "procrustes_scaling": 1,
        "save_init_splits": 0,
        "verbosity": 0
    }
    # If running a tiny test, reduce some parameters
    if args.tiny_test:
        parameter_dictionary["number_of_particles"] = 32
        parameter_dictionary["optimization_iterations"] = 25
        parameter_dictionary["iterations_per_split"] = 25

    # Run multiscale optimization unless single scale is specified
    if not args.use_single_scale:
        parameter_dictionary["use_shape_statistics_after"] = 128

    # Get data input (meshes if running in mesh mode, else distance transforms)
    parameter_dictionary[
        "domain_type"], input_files = sw.data.get_optimize_input(
            dt_files, args.mesh_mode)

    # Execute the optimization function on the input files
    [local_point_files, world_point_files
     ] = OptimizeUtils.runShapeWorksOptimize(point_dir, input_files,
                                             parameter_dictionary)

    # Prepare analysis XML
    analyze_xml = point_dir + "/left_atrium_analyze.xml"
    AnalyzeUtils.create_analyze_xml(analyze_xml, input_files,
                                    local_point_files, world_point_files)

    # If tiny test or verify, check results and exit
    AnalyzeUtils.check_results(args, world_point_files)

    print(
        "\nStep 4. Analysis - Launch ShapeWorksStudio - sparse correspondence model.\n"
    )
    """
    Step 4: ANALYZE - Shape Analysis and Visualization
    Now we launch studio to analyze the resulting shape model.
    For more information about the analysis step, see docs/workflow/analyze.md
    http://sciinstitute.github.io/ShapeWorks/workflow/analyze.html
    """

    print("\nStep 5. Analysis - Launch ShapeWorksStudio.\n")
    AnalyzeUtils.launch_shapeworks_studio(analyze_xml)
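
The pipeline expects an args object carrying the flags referenced above. A hypothetical smoke-test invocation (the real driver builds args with argparse; attribute names here are inferred from their uses in this function):

from types import SimpleNamespace

args = SimpleNamespace(tiny_test=True, verify=False, use_case='left_atrium',
                       use_single_scale=1, use_subsample=False, num_subsample=3,
                       skip_grooming=False, groom_images=False, mesh_mode=False,
                       option_set='multiscale')  # option_set names the output subdirectory
Run_Pipeline(args)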
Example #8
def Run_Pipeline(args):
    print("\nStep 1. Extract Data\n")
    """
    Step 1: EXTRACT DATA
    We define dataset_name which determines which dataset to download from
    the portal and the directory to save output from the use case in.
    This data consists of femur meshes and corresponding hip CT scans.
    """
    dataset_name = "femur-v1"
    output_directory = "Output/femur_cut/"
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    # If running a tiny_test, then download subset of the data
    if args.tiny_test:
        args.use_single_scale = True
        sw.data.download_subset(args.use_case, dataset_name, output_directory)
        mesh_files = sorted(
            glob.glob(output_directory + dataset_name + "/meshes/*.ply"))[:3]
        image_files = sorted(
            glob.glob(output_directory + dataset_name + "/images/*.nrrd"))[:3]
    # else download the entire dataset
    else:
        sw.data.download_and_unzip_dataset(dataset_name, output_directory)
        mesh_files = sorted(
            glob.glob(output_directory + dataset_name + "/meshes/*.ply"))
        image_files = sorted(
            glob.glob(output_directory + dataset_name + "/images/*.nrrd"))

        # Select data if using subsample
        if args.use_subsample:
            inputMeshes = [sw.Mesh(filename) for filename in mesh_files]
            sample_idx = sw.data.sample_meshes(inputMeshes,
                                               int(args.num_subsample))
            mesh_files = [mesh_files[i] for i in sample_idx]

    # If skipping grooming, use the pregroomed meshes from the portal
    if args.skip_grooming:
        print("Skipping grooming.")
        mesh_directory = output_directory + dataset_name + '/groomed/meshes/'
        indices = []
        if args.tiny_test:
            indices = [0, 1, 2]
        elif args.use_subsample:
            indices = sample_idx
        mesh_files = sw.data.get_file_list(mesh_directory,
                                           ending=".vtk",
                                           indices=indices)

    # Else groom the meshes for optimization
    else:
        print("\nStep 2. Groom - Data Pre-processing\n")
        """
        Step 2: GROOMING
        The required grooming steps are:
        1. Reflect if necessary
        2. Apply smoothing and remeshing
        3. Centering
        4. Select reference mesh
        5. Rigidly align mesh to reference
        Option to groom corresponding images (includes applying transforms)
        For more information on grooming see docs/workflow/groom.md
        http://sciinstitute.github.io/ShapeWorks/workflow/groom.html
        """

        # Create a directory for groomed output
        groom_dir = output_directory + 'groomed/'
        if not os.path.exists(groom_dir):
            os.makedirs(groom_dir)

        # Set reference side (arbitrary)
        ref_side = "L"  # chosen so reflection happens in tiny test
        """
        To begin grooming, we loop over the files and load the meshes
        """
        names = []
        mesh_list = []
        reflections = []  # save in case grooming images
        center_translations = []  # save in case grooming images
        for mesh_filename in mesh_files:
            print('\nLoading: ' + mesh_filename)
            # Get shape name
            name = os.path.basename(mesh_filename).replace('.ply', '')
            names.append(name)
            # Get mesh
            mesh = sw.Mesh(mesh_filename)
            mesh_list.append(mesh)
            """
            Grooming Step 1: Get reflection transform - We have left and 
            right femurs, so we reflect the non-reference side meshes 
            so that all of the femurs can be aligned.
            """
            reflection = np.eye(4)  # Identity
            if ref_side in name:
                print("Reflecting: " + name)
                reflection[0][0] = -1  # Reflect across X
                mesh.applyTransform(reflection)
            reflections.append(reflection)
            """
            Grooming Step 1: Smooth and remeshing
            """
            print('Smoothing and remeshing: ' + name)
            mesh.smooth(iterations=10).remesh(numVertices=10000,
                                              adaptivity=1.0)
            """
            Grooming Step 3: Centering
            """
            print("Centering: " + name)
            translation = np.eye(4)  # Identity
            translation[:3, -1] = -mesh.center()  # Translate center to (0,0,0)
            mesh.applyTransform(translation)
            center_translations.append(translation)
        """
        Grooming Step 4: Select a reference
        This step requires loading all of the meshes at once so the shape
        closest to the mean can be found and selected as the reference. 
        """
        ref_index = sw.find_reference_mesh_index(mesh_list)
        # Make a copy of the reference mesh
        ref_mesh = mesh_list[ref_index].copy().write(groom_dir +
                                                     'reference.vtk')
        ref_name = names[ref_index]
        print("\nReference found: " + ref_name)

        rigid_transforms = []  # save in case grooming images
        for mesh, name in zip(mesh_list, names):
            """
            Grooming Step 5: Rigid alignment
            This step rigidly aligns each shape to the selected reference. 
            """
            print('\nAligning ' + name + ' to ' + ref_name)
            # compute rigid transformation
            rigid_transform = mesh.createTransform(ref_mesh,
                                                   sw.Mesh.AlignmentType.Rigid,
                                                   100)
            # apply rigid transform
            rigid_transforms.append(rigid_transform)
            mesh.applyTransform(rigid_transform)

        # Write groomed meshes
        print("\nWriting groomed meshes.")
        mesh_files = sw.utils.save_meshes(groom_dir + 'meshes/',
                                          mesh_list,
                                          names,
                                          extension='vtk',
                                          compressed=False,
                                          verbose=True)
        """
        Groom images
        """
        if args.groom_images:
            # Load corresponding images
            print("\nGrooming images:")
            image_list = []
            for name, reflection, translation in zip(names, reflections,
                                                     center_translations):
                # Get corresponding image path
                prefix = name.split("_")[0]
                for index in range(len(image_files)):
                    if prefix in image_files[index]:
                        corresponding_image_file = image_files[index]
                        break
                print('\nLoading image: ' + corresponding_image_file)
                image = sw.Image(corresponding_image_file)
                # Apply reflection to image
                print("Reflecting image: " + name)
                image.applyTransform(reflection)
                # Apply centering to image
                print("Centering image: " + name)
                image.setOrigin(image.origin() + translation[:3, -1])
                image_list.append(image)
            # Get reference image
            ref_image = image_list[ref_index].copy()
            ref_image.resample(
                [1, 1, 1], sw.InterpolationType.Linear).write(groom_dir +
                                                              'reference.nrrd')
            # Get bounding box
            bounding_box = sw.MeshUtils.boundingBox(mesh_list)
            for image, name, rigid_transform in zip(image_list, names,
                                                    rigid_transforms):
                # Align image
                print("\nAligning image: " + name)
                image.applyTransform(rigid_transform,
                                     ref_image.origin(),
                                     ref_image.dims(),
                                     ref_image.spacing(),
                                     ref_image.coordsys(),
                                     sw.InterpolationType.Linear,
                                     meshTransform=True)
                # Crop image
                print('Cropping image: ' + name)
                try:
                    image.crop(bounding_box)
                except Exception as e:
                    # report the failure instead of silently swallowing it
                    print('Cropping failed for ' + name + ': ' + str(e))
                    print(image)
                    print(bounding_box)
            # Write images
            print("\nWriting groomed images.")
            image_files = sw.utils.save_images(groom_dir + 'images/',
                                               image_list,
                                               names,
                                               extension='nrrd',
                                               compressed=True,
                                               verbose=True)

    print("\nStep 3. Optimize - Particle Based Optimization\n")
    """
    Step 3: OPTIMIZE - Particle Based Optimization

    Now that we have the groomed mesh representation of the data, we create
    the parameter files for the shapeworks particle optimization routine.
    For more details on the plethora of parameters for shapeworks, please refer
    to docs/workflow/optimize.md
    http://sciinstitute.github.io/ShapeWorks/workflow/optimize.html
    """

    # Make directory to save optimization output
    point_dir = output_directory + 'shape_models/' + args.option_set
    if not os.path.exists(point_dir):
        os.makedirs(point_dir)

    # Cutting planes: each plane is defined by three points, all lying in the plane z = -10
    cutting_planes = []
    cutting_plane_counts = []
    for i in range(len(mesh_files)):
        cutting_planes.append(
            np.array([[-1, -1, -10], [1, -1, -10], [-1, 1, -10]]))
        cutting_plane_counts.append(1)

    # Create a dictionary for all the parameters required by optimization
    parameter_dictionary = {
        "number_of_particles": 512,
        "use_normals": 0,
        "normal_weight": 10.0,
        "checkpointing_interval": 200,
        "keep_checkpoints": 0,
        "iterations_per_split": 1000,
        "optimization_iterations": 500,
        "starting_regularization": 100,
        "ending_regularization": 0.1,
        "recompute_regularization_interval": 2,
        "domains_per_shape": 1,
        "domain_type": 'mesh',
        "relative_weighting": 10,
        "initial_relative_weighting": 0.01,
        "procrustes_interval": 1,
        "procrustes_scaling": 1,
        "save_init_splits": 1,
        "debug_projection": 0,
        "verbosity": 0,
        "use_statistics_in_init": 0,
        "adaptivity_mode": 0,
        "cutting_plane_counts": cutting_plane_counts,
        "cutting_planes": cutting_planes
    }

    # If running a tiny test, reduce some parameters
    if args.tiny_test:
        parameter_dictionary["number_of_particles"] = 32
        parameter_dictionary["optimization_iterations"] = 25
        parameter_dictionary["iterations_per_split"] = 25
    # Run multiscale optimization unless single scale is specified
    if not args.use_single_scale:
        parameter_dictionary["use_shape_statistics_after"] = 64

    # Execute the optimization function on the groomed meshes
    [local_point_files, world_point_files
     ] = OptimizeUtils.runShapeWorksOptimize(point_dir, mesh_files,
                                             parameter_dictionary)

    # If tiny test or verify, check results and exit
    AnalyzeUtils.check_results(args, world_point_files)

    print(
        "\nStep 4. Analysis - Launch ShapeWorksStudio - sparse correspondence model.\n"
    )
    """
    Step 4: ANALYZE - Shape Analysis and Visualization

    Now we launch studio to analyze the resulting shape model.
    For more information about the analysis step, see docs/workflow/analyze.md
    http://sciinstitute.github.io/ShapeWorks/workflow/analyze.html
    """

    # Prepare analysis XML
    analyze_xml = point_dir + "/femur_cut_analyze.xml"
    AnalyzeUtils.create_analyze_xml(analyze_xml, mesh_files, local_point_files,
                                    world_point_files)
    AnalyzeUtils.launch_shapeworks_studio(analyze_xml)
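
The three points defining each cutting plane above all share z = -10, so every plane is the horizontal plane z = -10. A quick check of the implied normal (plain numpy, independent of ShapeWorks):

import numpy as np

pts = np.array([[-1, -1, -10], [1, -1, -10], [-1, 1, -10]])
normal = np.cross(pts[1] - pts[0], pts[2] - pts[0])
print(normal)  # [0 0 4]: the plane z = -10 with a +z-pointing normal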
Example #9
def Run_Pipeline(args):
    print("\nStep 1. Extract Data\n")
    """
    Step 1: EXTRACT DATA

    We define dataset_name which determines which dataset to download from 
    the portal and the directory to save output from the use case in. 
    """
    print("\nDataset options for running multiple domain use case: \n")
    print(
        "1. ellipsoid_joint_rotation \t 2. ellipsoid_joint_size \t 3. ellipsoid_joint_size_rotation \n"
    )
    print(
        "You can change the dataset name and output directory name to try out this use case with other datasets"
    )

    dataset_name = "ellipsoid_joint_rotation"
    output_directory = "Output/ellipsoid_multiple_domain/"
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    # If running a tiny_test, then download subset of the data
    if args.tiny_test:
        sw.data.download_subset(args.use_case, dataset_name, output_directory)
        file_list = sorted(
            glob.glob(output_directory + dataset_name +
                      "/segmentations/*.nrrd"))[:6]
    # Else download the entire dataset
    else:
        sw.data.download_and_unzip_dataset(dataset_name, output_directory)
        file_list = sorted(
            glob.glob(output_directory + dataset_name +
                      "/segmentations/*.nrrd"))

        if args.use_subsample:
            inputImages = [sw.Image(filename) for filename in file_list]
            sample_idx = sw.data.sample_images(inputImages,
                                               int(args.num_subsample),
                                               domains_per_shape=2)
            file_list = [file_list[i] for i in sample_idx]

    # If skipping grooming, use the pregroomed distance transforms from the portal
    if args.skip_grooming:
        print("Skipping grooming.")
        dt_directory = output_directory + dataset_name + '/groomed/distance_transforms/'
        indices = []
        if args.tiny_test:
            indices = list(range(6))
        elif args.use_subsample:
            indices = sample_idx
        dt_files = sw.data.get_file_list(dt_directory,
                                         ending=".nrrd",
                                         indices=indices)

    # Else groom the segmentations and get distance transforms for optimization
    else:
        print("\nStep 2. Groom - Data Pre-processing\n")
        """
        Step 2: GROOMING 
        The segmentaions are pre-alinged during generation( EllipsoidJointsGenerator) 
        such that they are centered w.r.t to each other. Hence we only perform the 
        following two steps
        The required grooming steps are: 
        1. Isotropic resampling
        2. Reference selection
        3. Rigid Alignment - ICP
        4. Find largest bounding box
        5. Apply cropping and padding
        6. Create smooth signed distance transforms

        For more information on grooming see docs/workflow/groom.md
        http://sciinstitute.github.io/ShapeWorks/workflow/groom.html
        """

        # Create a directory for groomed output
        groom_dir = output_directory + 'groomed/'
        if not os.path.exists(groom_dir):
            os.makedirs(groom_dir)
        """
        First, we need to loop over the shape segmentation files and load the segmentations
        """
        # list of shape segmentations
        shape_seg_list = []
        # list of shape names (shape files prefixes) to be used for saving outputs
        shape_names = []
        domain_ids = []
        for shape_filename in file_list:
            print('Loading: ' + shape_filename)
            # get current shape name
            shape_names.append(
                shape_filename.split('/')[-1].replace('.nrrd', ''))

            # get domain identifiers
            name = shape_filename.split('/')[-1].replace('.nrrd', '')
            domain_ids.append(name.split(".")[0].split("_")[-1])

            # load segmentation
            shape_seg = sw.Image(shape_filename)
            # append to the shape list
            shape_seg_list.append(shape_seg)
        # domain identifiers for all shapes
        domain_ids = np.array(domain_ids)
        # shape indices for all shapes in domain 1
        domain1_indx = list(np.where(domain_ids == 'd1')[0])
        # shape indices for all shapes in domain 2
        domain2_indx = list(np.where(domain_ids == 'd2')[0])
        """
        Now we can loop over the segmentations and apply the initial grooming steps to themm
        """

        for shape_seg, shape_name in zip(shape_seg_list, shape_names):
            """
            Grooming Step 1: Resample segmentations to have isotropic (uniform) spacing
                - Antialiase the binary segmentation to convert it to a smooth continuous-valued 
                image for interpolation
                - Resample the antialiased image using the same voxel spacing for all dimensions
                - Binarize the resampled images to results in a binary segmentation with an 
                isotropic voxel spacing
            """
            print('Resampling segmentation: ' + shape_name)
            # antialias for 30 iterations
            antialias_iterations = 30
            shape_seg.antialias(antialias_iterations)
            # resample to isotropic spacing using linear interpolation
            iso_spacing = [1, 1, 1]
            shape_seg.resample(iso_spacing, sw.InterpolationType.Linear)
            # make segmentation binary again
            shape_seg.binarize()
        """
        Grooming Step 2: Select a reference
        This step requires breaking the loop to load all of the segmentations at once so the shape
        closest to the mean can be found and selected as the reference.
        For the ellipsoid_joint available on the ShapeWorks portal, the mode of variation are 
        rotation and/or size of the second ellipsoid w.r.t to the first ellipsoid. 
        Hence, we align the shapes using the first domain as the reference. 

        If both the ellipsoids vary w.r.t each other, then we would use 'global alignment'. 
        For this, the reference shape can be estimated by combined the shapes from all domains. 


        ref_index,combined_mesh = sw.find_reference_image_index(shape_seg_list,domains_per_shape=2)
        for i in range(len(combined_mesh)):

            bbox = combined_mesh[i].boundingBox().pad(20.0)
            combined_segs.append(combined_mesh[i].toImage(bbox))

        After finding the combined reference image, the transformation will be calculated for the combined shapes
        but the transformation will be applied to each domain in each shape individually. 


        """

        iso_value = 1e-20
        icp_iterations = 200
        domains_per_shape = 2
        domain_1_shapes = []
        # get domain 1 shapes
        for i in range(int(len(shape_seg_list) / domains_per_shape)):
            domain_1_shapes.append(shape_seg_list[i * domains_per_shape])

        ref_index = sw.find_reference_image_index(domain_1_shapes)

        reference = domain_1_shapes[ref_index].copy()
        reference.antialias(antialias_iterations)
        ref_name = shape_names[ref_index * domains_per_shape]
        """
        Grooming Step 3: Rigid alignment
        This step rigidly aligns each shape to the selected reference.
        Rigid alignment involves interpolation, hence we need to convert binary segmentations
        to continuous-valued images again. The steps are:
            - computing the rigid transformation parameters that would align a segmentation
            to the reference shape
            - applying the rigid transformation to the segmentation
            - saving the aligned images for the next step
        """

        for i in range(len(domain_1_shapes)):

            # compute rigid transformation using the domain 1 segmentations
            shape_seg_list[i *
                           domains_per_shape].antialias(antialias_iterations)
            rigidTransform = shape_seg_list[
                i * domains_per_shape].createRigidRegistrationTransform(
                    reference, iso_value, icp_iterations)

            # apply the transformation to each domain of the subject
            for d in range(domains_per_shape):

                print("Aligning " + shape_names[i * domains_per_shape + d] +
                      ' to ' + ref_name)

                shape_seg_list[i * domains_per_shape +
                               d].antialias(antialias_iterations)

                shape_seg_list[i * domains_per_shape + d].applyTransform(
                    rigidTransform, reference.origin(), reference.dims(),
                    reference.spacing(), reference.coordsys(),
                    sw.InterpolationType.NearestNeighbor)
                # then turn the antialiased-transformed segmentation back into a binary segmentation
                shape_seg_list[i * domains_per_shape + d].binarize()
        """
        Grooming Step 4: Finding the largest bounding box
        We want to crop all of the segmentations to be the same size, so we need to find 
        the largest bounding box as this will contain all the segmentations. This requires 
        loading all segmentation files at once.
        """
        # Compute bounding box - aligned segmentations are binary images, so a good iso_value is 0.5
        iso_value = 0.5
        segs_bounding_box = sw.ImageUtils.boundingBox(shape_seg_list,
                                                      iso_value)
        """
        Grooming Step 5: Apply cropping and padding
        Now we need to loop over the segmentations and crop them to the size of the bounding box.
        To avoid cropped segmentations to touch the image boundaries, we will crop then 
        pad the segmentations.
            - Crop to bounding box size
            - Pad segmentations
        """

        # parameters for padding
        padding_size = 10  # number of voxels to pad for each dimension
        padding_value = 0  # the constant value used to pad the segmentations
        # loop over segs to apply cropping and padding
        for shape_seg, shape_name in zip(shape_seg_list, shape_names):
            print('Cropping & padding segmentation: ' + shape_name)
            shape_seg.crop(segs_bounding_box).pad(padding_size, padding_value)
        """
        Grooming Step 6: Converting segmentations to smooth signed distance transforms.
        The computeDT API needs an iso_value that defines the foreground-background interface, to create 
        a smoother interface we first antialiasing the segmentation then compute the distance transform 
        at the zero-level set. We then need to smooth the DT as it will have some remaining aliasing effect 
        of binarization. 
        So the steps are:
            - Antialias 
            - Compute distance transform
            - Apply smoothing
            - Save the distance transform
        """

        # Define distance transform parameters
        iso_value = 0
        sigma = 2
        # Loop over segs and compute smooth DT
        for shape_seg, shape_name in zip(shape_seg_list, shape_names):
            print('Compute DT for segmentation: ' + shape_name)
            shape_seg.antialias(antialias_iterations).computeDT(
                iso_value).gaussianBlur(sigma)
        # Save distance transforms
        dt_files = sw.utils.save_images(groom_dir + 'distance_transforms/',
                                        shape_seg_list,
                                        shape_names,
                                        extension='nrrd',
                                        compressed=True,
                                        verbose=True)

    print("\nStep 3. Optimize - Particle Based Optimization\n")
    """
    Step 3: OPTIMIZE - Particle Based Optimization

    Now that we have the distance transform representation of the data, we create
    the parameter files for the shapeworks particle optimization routine.
    For more details on the plethora of parameters for shapeworks, please refer
    to docs/workflow/optimize.md
    http://sciinstitute.github.io/ShapeWorks/workflow/optimize.html
    """

    # Make directory to save optimization output
    point_dir = output_directory + 'shape_models/' + args.option_set
    if not os.path.exists(point_dir):
        os.makedirs(point_dir)
    # Create a dictionary for all the parameters required by optimization
    parameter_dictionary = {
        "number_of_particles": [512, 512],
        "use_normals": [0, 0],
        "normal_weight": [10.0, 10.0],
        "checkpointing_interval": 200,
        "keep_checkpoints": 0,
        "iterations_per_split": 1000,
        "optimization_iterations": 1000,
        "starting_regularization": 1000,
        "ending_regularization": 0.5,
        "recompute_regularization_interval": 2,
        "domains_per_shape": 2,
        "domain_type": 'image',
        "relative_weighting": 100,
        "initial_relative_weighting": 0.1,
        "procrustes_interval": 0,
        "procrustes_scaling": 0,
        "save_init_splits": 0,
        "verbosity": 0
    }

    if args.tiny_test:
        parameter_dictionary["number_of_particles"] = [32, 32]
        parameter_dictionary["optimization_iterations"] = 25

    # Get data input (meshes if running in mesh mode, else distance transforms)
    parameter_dictionary[
        "domain_type"], input_files = sw.data.get_optimize_input(
            dt_files, args.mesh_mode)

    # Execute the optimization function on the input files
    [local_point_files, world_point_files
     ] = OptimizeUtils.runShapeWorksOptimize(point_dir, input_files,
                                             parameter_dictionary)

    # Prepare analysis XML
    analyze_xml = point_dir + "/ellipsoid_multiple_domain_analyze.xml"
    domains_per_shape = 2
    AnalyzeUtils.create_analyze_xml(analyze_xml, input_files,
                                    local_point_files, world_point_files,
                                    domains_per_shape)

    # If tiny test or verify, check results and exit
    AnalyzeUtils.check_results(args, world_point_files)

    print(
        "\nStep 4. Analysis - Launch ShapeWorksStudio - sparse correspondence model.\n"
    )
    """
    Step 4: ANALYZE - Shape Analysis and Visualization

    Now we launch studio to analyze the resulting shape model.
    For more information about the analysis step, see docs/workflow/analyze.md
    http://sciinstitute.github.io/ShapeWorks/workflow/analyze.html
    """
    AnalyzeUtils.launch_shapeworks_studio(analyze_xml)
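
The i * domains_per_shape + d indexing used throughout this example assumes the sorted file list interleaves domains per subject. A tiny sketch of that ordering (hypothetical names):

domains_per_shape = 2
names = ['joint01_d1', 'joint01_d2', 'joint02_d1', 'joint02_d2']
for i in range(len(names) // domains_per_shape):
    subject = [names[i * domains_per_shape + d] for d in range(domains_per_shape)]
    print(subject)  # ['joint01_d1', 'joint01_d2'], then ['joint02_d1', 'joint02_d2']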
Example #10
def Run_Pipeline(args):
    print("\nStep 1. Extract Data\n")
    """
    Step 1: EXTRACT DATA

    We define dataset_name which determines which dataset to download from 
    the portal and the directory to save output from the use case in. 
    """
    dataset_name = "ellipsoid_1mode"
    output_directory = "Output/ellipsoid_fd/"
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    sw.data.download_and_unzip_dataset(dataset_name, output_directory)

    file_list_dts = sorted(glob.glob(
        output_directory + dataset_name + "/groomed/distance_transforms/*.nrrd"))
    file_list_new_segs = sorted(
        glob.glob(output_directory + dataset_name + "/fd_segmentations/*.nrrd"))

    print("\nStep 2. Groom - Create distance transforms\n")
    """
    Step 2: GROOMING 
    
    The new segmentations are prealigned so the only grooming step 
    required is to convert them to distance transforms.

    For more information on grooming see docs/workflow/groom.md
    http://sciinstitute.github.io/ShapeWorks/workflow/groom.html
    """

    # Create a directory for groomed output
    groom_dir = output_directory + 'groomed/'
    if not os.path.exists(groom_dir):
        os.makedirs(groom_dir)

    """
    Converting segmentations to smooth signed distance transforms.
    The computeDT API needs an iso_value that defines the foreground-background interface, to create 
    a smoother interface we first antialiasing the segmentation then compute the distance transform 
    at the zero-level set. We then need to smooth the DT as it will have some remaining aliasing effect 
    of binarization. 
    So the steps are:
        - Antialias 
        - Compute distance transform
        - Apply smoothing
        - Save the distance transform
    """

    # Define distance transform parameters
    iso_value = 0
    sigma = 1.3
    antialias_iterations = 30
    dt_list = []
    shape_names = []
    # Loop over segs and compute smooth DT
    for shape_filename in file_list_new_segs:
        print('Loading: ' + shape_filename)
        # get current shape name
        shape_name = shape_filename.split('/')[-1].replace('.nrrd', '')
        shape_names.append(shape_name)
        # load segmentation
        shape_seg = sw.Image(shape_filename)
        print('Compute DT for segmentation: ' + shape_name)
        shape_seg.antialias(antialias_iterations).computeDT(
            iso_value).gaussianBlur(sigma)
        dt_list.append(shape_seg)
    # Save distance transforms
    new_dt_files = sw.utils.save_images(groom_dir + 'distance_transforms/', dt_list,
                                        shape_names, extension='nrrd', compressed=True, verbose=True)
    # Get list of original and new distance transforms
    dt_files = file_list_dts + new_dt_files

    print("\nStep 3. Optimize - Particle Based Optimization\n")
    """
    Step 3: OPTIMIZE - Particle Based Optimization

    Now that we have the distance transform representation of the data, we create
    the parameter files for the shapeworks particle optimization routine.
    For more details on the plethora of parameters for shapeworks, please refer
    to docs/workflow/optimize.md
    http://sciinstitute.github.io/ShapeWorks/workflow/optimize.html
    """

    # Make directory to save optimization output
    point_dir = output_directory + 'shape_models/' + args.option_set
    if not os.path.exists(point_dir):
        os.makedirs(point_dir)

    """
    Evaluate the meanshape of the existing shape model and use that to initialize the 
    particles on the new shapes
    """
    shape_model_dir = output_directory + dataset_name + "/shape_models/ellipsoid/128/"
    OptimizeUtils.findMeanShape(shape_model_dir)
    mean_shape_path = shape_model_dir + '/meanshape_local.particles'

    # Create a dictionary for all the parameters required by optimization
    parameter_dictionary = {
        "number_of_particles": 128,
        "use_normals": 0,
        "normal_weight": 15.0,
        "checkpointing_interval": 200,
        "keep_checkpoints": 0,
        "iterations_per_split": 100,
        "optimization_iterations": 2000,
        "starting_regularization": 100,
        "ending_regularization": 0.1,
        "recompute_regularization_interval": 2,
        "domains_per_shape": 1,
        "domain_type": 'image',
        "relative_weighting": 15,
        "initial_relative_weighting": 0.05,
        "procrustes_interval": 0,
        "procrustes_scaling": 0,
        "save_init_splits": 0,
        "verbosity": 0,
        "number_fixed_domains": len(file_list_dts),
        "fixed_domain_model_dir": shape_model_dir,
        "mean_shape_path": mean_shape_path,
    }

    # Execute the optimization function
    [local_point_files, world_point_files] = OptimizeUtils.runShapeWorksOptimize_FixedDomains(
        point_dir, dt_files, parameter_dictionary)

    # Prepare analysis XML
    analyze_xml = point_dir + "/ellipsoid_fd_analyze.xml"
    AnalyzeUtils.create_analyze_xml(analyze_xml, dt_files, local_point_files, world_point_files)

    # If tiny test or verify, check results and exit
    AnalyzeUtils.check_results(args, world_point_files)

    """
    Step 4: ANALYZE - Shape Analysis and Visualization

    Now we launch studio to analyze the resulting shape model.
    For more information about the analysis step, see docs/workflow/analyze.md
    http://sciinstitute.github.io/ShapeWorks/workflow/analyze.html
    """
    print("\nStep 4. Analysis - Launch ShapeWorksStudio - sparse correspondence model.\n")
    AnalyzeUtils.launch_shapeworks_studio(analyze_xml)
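
For intuition, the mean shape used above can be thought of as the per-particle average of the existing model's local particle files, where each .particles file is a whitespace-separated list of x y z coordinates, one particle per line. The helper below is a minimal sketch of that idea only; OptimizeUtils.findMeanShape's actual implementation may differ.

import glob
import os

import numpy as np

def compute_mean_shape_sketch(shape_model_dir):
    """Average corresponding particles across all local particle files (a sketch)."""
    particle_files = sorted(glob.glob(os.path.join(shape_model_dir, '*local.particles')))
    # Each file loads as an (N, 3) array; stacking gives (num_shapes, N, 3)
    all_points = np.array([np.loadtxt(f) for f in particle_files])
    # Averaging over shapes yields one (N, 3) mean shape
    mean_points = all_points.mean(axis=0)
    out_path = os.path.join(shape_model_dir, 'meanshape_local.particles')
    np.savetxt(out_path, mean_points)
    return out_path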
Example No. 11
0
def Run_Pipeline(args):
    print("\nStep 1. Extract Data\n")
    """
    Step 1: EXTRACT DATA

    We define dataset_name, which determines which dataset to download from
    the portal, and output_directory, where output from the use case is saved.
    """
    dataset_name = "ellipsoid_1mode_aligned"
    output_directory = "Output/ellipsoid_cut/"
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    # If running a tiny_test, then download subset of the data
    if args.tiny_test:
        args.use_single_scale = 1
        sw.data.download_subset(args.use_case, dataset_name, output_directory)
        file_list = sorted(
            glob.glob(output_directory + dataset_name +
                      "/segmentations/*.nrrd"))[:3]
    # Else download the entire dataset
    else:
        sw.data.download_and_unzip_dataset(dataset_name, output_directory)
        file_list = sorted(
            glob.glob(output_directory + dataset_name +
                      "/segmentations/*.nrrd"))

        # Select representative data if using subsample
        if args.use_subsample:
            inputImages = [sw.Image(filename) for filename in file_list]
            sample_idx = sw.data.sample_images(inputImages,
                                               int(args.num_subsample))
            file_list = [file_list[i] for i in sample_idx]

    print("\nStep 2. Groom - Create distance transforms\n")
    """
    Step 2: GROOMING 
    
    This use case uses prealigned data, so the only grooming step
    required is to convert the segmentations to distance transforms.

    For more information on grooming see docs/workflow/groom.md
    http://sciinstitute.github.io/ShapeWorks/workflow/groom.html
    """

    # Create a directory for groomed output
    groom_dir = output_directory + 'groomed/'
    if not os.path.exists(groom_dir):
        os.makedirs(groom_dir)
    """
    Converting segmentations to smooth signed distance transforms.
    The computeDT API needs an iso_value that defines the foreground-background interface, to create 
    a smoother interface we first antialiasing the segmentation then compute the distance transform 
    at the zero-level set. We then need to smooth the DT as it will have some remaining aliasing effect 
    of binarization. 
    So the steps are:
        - Antialias 
        - Compute distance transform
        - Apply smoothing
        - Save the distance transform
    """

    # Define distance transform parameters
    iso_value = 0               # extract the surface at the zero-level set
    sigma = 1.3                 # width of the Gaussian blur that smooths the DT
    antialias_iterations = 30   # antialiasing iterations applied before computeDT
    dt_list = []
    shape_names = []
    # Loop over segs and compute smooth DT
    for shape_filename in file_list:
        print('Loading: ' + shape_filename)
        # get current shape name
        shape_name = shape_filename.split('/')[-1].replace('.nrrd', '')
        shape_names.append(shape_name)
        # load segmentation
        shape_seg = sw.Image(shape_filename)
        print('Compute DT for segmentation: ' + shape_name)
        shape_seg.antialias(antialias_iterations).computeDT(
            iso_value).gaussianBlur(sigma)
        dt_list.append(shape_seg)
    # Save distance transforms
    dt_files = sw.utils.save_images(groom_dir + 'distance_transforms/',
                                    dt_list,
                                    shape_names,
                                    extension='nrrd',
                                    compressed=True,
                                    verbose=True)

    print("\nStep 3. Optimize - Particle Based Optimization\n")
    """
    Step 3: OPTIMIZE - Particle Based Optimization

    Now that we have the distance transform representation of the data, we create
    the parameter files for the ShapeWorks particle optimization routine.
    For more details on the plethora of parameters for ShapeWorks, please refer
    to docs/workflow/optimize.md
    http://sciinstitute.github.io/ShapeWorks/workflow/optimize.html
    """

    # Make directory to save optimization output
    point_dir = output_directory + 'shape_models/' + args.option_set
    if not os.path.exists(point_dir):
        os.makedirs(point_dir)

    # Define the cutting planes
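    # Each cutting plane is specified by three points: all points in the first set
    # lie on z = 0 and all points in the second set lie on y = 0, so the planes are
    # z = 0 and y = 0, and particles are constrained to one side of each plane
    # (a standalone geometric check appears after this example).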
    cutting_plane_points1 = [[10, 10, 0], [-10, -10, 0], [10, -10, 0]]
    cutting_plane_points2 = [[10, 0, 10], [-10, 0, 10], [10, 0, -10]]
    cp = [cutting_plane_points1, cutting_plane_points2]
    # Both planes apply to every shape
    cutting_planes = []
    cutting_plane_counts = []
    for _ in range(len(dt_files)):
        cutting_planes.extend(cp)
        cutting_plane_counts.append(len(cp))

    # Create a dictionary for all the parameters required by optimization
    parameter_dictionary = {
        "number_of_particles": 32,
        "use_normals": 1,
        "normal_weight": 15.0,
        "checkpointing_interval": 200,
        "keep_checkpoints": 0,
        "iterations_per_split": 3000,
        "optimization_iterations": 3000,
        "starting_regularization": 100,
        "ending_regularization": 10,
        "recompute_regularization_interval": 2,
        "domains_per_shape": 1,
        "domain_type": 'image',
        "relative_weighting": 15,
        "initial_relative_weighting": 0.05,
        "procrustes_interval": 0,
        "procrustes_scaling": 0,
        "save_init_splits": 0,
        "verbosity": 0,
        "adaptivity_mode": 0,
        "cutting_plane_counts": cutting_plane_counts,
        "cutting_planes": cutting_planes
    }
    # If running a tiny test, reduce some parameters
    if args.tiny_test:
        parameter_dictionary["number_of_particles"] = 16
        parameter_dictionary["optimization_iterations"] = 25
    # Run multiscale optimization unless single scale is specified
    if not args.use_single_scale:
        parameter_dictionary["use_shape_statistics_after"] = 16

    # Get data input (meshes if running in mesh mode, else distance transforms)
    parameter_dictionary[
        "domain_type"], input_files = sw.data.get_optimize_input(
            dt_files, args.mesh_mode)

    # Execute the optimization function on the input files (DTs, or meshes in mesh mode)
    [local_point_files, world_point_files
     ] = OptimizeUtils.runShapeWorksOptimize(point_dir, input_files,
                                             parameter_dictionary)

    # Prepare analysis XML
    analyze_xml = point_dir + "/ellipsoid_cut_analyze.xml"
    AnalyzeUtils.create_analyze_xml(analyze_xml, input_files,
                                    local_point_files, world_point_files)

    # If tiny test or verify, check results and exit
    AnalyzeUtils.check_results(args, world_point_files)

    print(
        "\nStep 4. Analysis - Launch ShapeWorksStudio - sparse correspondence model.\n"
    )
    """
    Step 4: ANALYZE - Shape Analysis and Visualization

    Now we launch studio to analyze the resulting shape model.
    For more information about the analysis step, see docs/workflow/analyze.md
    http://sciinstitute.github.io/ShapeWorks/workflow/analyze.html
    """
    AnalyzeUtils.launch_shapeworks_studio(analyze_xml)
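
As a quick standalone check (not part of the use case), the normal of a cutting plane can be recovered from its three points with a cross product, and the signed distance of a point tells which side of the plane it falls on. The helper below is hypothetical and only illustrates the geometry:

import numpy as np

def plane_normal_and_side(plane_points, query_point):
    """Unit normal of the plane through three points, plus the signed distance
    of query_point from that plane (a sketch)."""
    p0, p1, p2 = (np.asarray(p, dtype=float) for p in plane_points)
    # Two in-plane edge vectors; their cross product is normal to the plane
    normal = np.cross(p1 - p0, p2 - p0)
    normal /= np.linalg.norm(normal)
    signed_distance = np.dot(np.asarray(query_point, dtype=float) - p0, normal)
    return normal, signed_distance

# The first plane above is z = 0; a point at z = 5 lies on its positive side
normal, dist = plane_normal_and_side([[10, 10, 0], [-10, -10, 0], [10, -10, 0]], [0, 0, 5])
print(normal, dist)  # [0. 0. 1.] 5.0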