def profile_sampling(boundary_img,
                     intensity_img,
                     save_data=True,
                     base_name=None):

    # load the data as well as filenames and headers for saving later
    boundary_img = load_volume(boundary_img)
    boundary_data = boundary_img.get_data()
    hdr = boundary_img.get_header()
    aff = boundary_img.get_affine()

    intensity_data = load_volume(intensity_img).get_data()

    try:
        cbstoolsjcc.initVM(initialheap='6000m', maxheap='6000m')
    except ValueError:
        pass

    sampler = cbstoolsjcc.LaminarProfileSampling()
    sampler.setIntensityImage(
        cbstoolsjcc.JArray('float')(
            (intensity_data.flatten('F')).astype(float)))
    sampler.setProfileSurfaceImage(
        cbstoolsjcc.JArray('float')(
            (boundary_data.flatten('F')).astype(float)))
    zooms = [x.item() for x in hdr.get_zooms()]
    sampler.setResolutions(zooms[0], zooms[1], zooms[2])
    sampler.setDimensions(boundary_data.shape)
    sampler.execute()

    profile_data = np.reshape(
        np.array(sampler.getProfileMappedIntensityImage(), dtype=np.float32),
        boundary_data.shape, 'F')

    profile_img = nb.Nifti1Image(profile_data, aff, hdr)

    if save_data:
        if not base_name:
            if not isinstance(intensity_img, basestring):
                base_name = os.getcwd()
                print "saving to %s" % base_name
            else:
                dir_name = os.path.dirname(intensity_img)
                base_name = os.path.basename(intensity_img)
                base_name = os.path.join(dir_name,
                                         base_name[:base_name.find('.')])
        save_volume(base_name + '_profiles.nii.gz', profile_img)

    return profile_img
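
# Example usage (illustrative sketch, not part of the original module): the
# file paths are hypothetical, and it is assumed that load_volume/save_volume
# and the cbstoolsjcc JVM wrappers are importable on this installation.
#
#   profile_img = profile_sampling('sub01_layer_boundaries.nii.gz',
#                                  'sub01_T1map.nii.gz',
#                                  save_data=True,
#                                  base_name='/data/sub01/sub01')
#   # would write /data/sub01/sub01_profiles.nii.gz and return the Nifti1Image
#   # of intensities sampled along the laminar boundary surfaces
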
def create_levelsets(tissue_prob_img, save_data=True, base_name=None):

    # load the data as well as filenames and headers for saving later
    prob_img = load_volume(tissue_prob_img)
    prob_data = prob_img.get_data()
    hdr = prob_img.get_header()
    aff = prob_img.get_affine()

    try:
        cbstoolsjcc.initVM(initialheap='6000m', maxheap='6000m')
    except ValueError:
        pass

    prob2level = cbstoolsjcc.SurfaceProbabilityToLevelset()

    prob2level.setProbabilityImage(
        cbstoolsjcc.JArray('float')((prob_data.flatten('F')).astype(float)))
    prob2level.setDimensions(prob_data.shape)
    zooms = [x.item() for x in hdr.get_zooms()]
    prob2level.setResolutions(zooms[0], zooms[1], zooms[2])
    prob2level.execute()

    levelset_data = np.reshape(
        np.array(prob2level.getLevelSetImage(), dtype=np.float32),
        prob_data.shape, 'F')

    levelset_img = nb.Nifti1Image(levelset_data, aff, hdr)

    if save_data:
        if not base_name:
            if not isinstance(tissue_prob_img, basestring):
                base_name = os.getcwd()
                print "saving to %s" % base_name
            else:
                dir_name = os.path.dirname(tissue_prob_img)
                base_name = os.path.basename(tissue_prob_img)
                base_name = os.path.join(dir_name,
                                         base_name[:base_name.find('.')])
        save_volume(base_name + '_levelset.nii.gz', levelset_img)

    return levelset_img
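
# Example usage (hedged sketch; the input filename is hypothetical and assumed
# to be a tissue probability map readable by load_volume):
#
#   levelset_img = create_levelsets('sub01_gm_prob.nii.gz',
#                                   save_data=True,
#                                   base_name='/data/sub01/sub01_gm')
#   # would write /data/sub01/sub01_gm_levelset.nii.gz and return the image
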
def MGDMBrainSegmentation(input_filename_type_list,
                          output_dir=None,
                          num_steps=5,
                          atlas_file=None,
                          topology_lut_dir=None):
    """
    Perform MGDM segmentation
    :param input_filename_type_list: list of [[fname1,type1],[fname2,type2],...] - for a maximum of 4 inputs
    :param output_dir: full path to the output directory
    :param num_steps: number of MGDM steps (default 5; set to 0 for quick testing)
    :param atlas_file: full path to the atlas file, default set in defaults.py
    :param topology_lut_dir: full path to the directory with the topology files, default set in defaults.py
    :return:
    """

    from nibabel.orientations import io_orientation, inv_ornt_aff, apply_orientation, ornt_transform
    import os

    print(
        "Thank you for choosing the MGDM segmentation from the cbstools for your brain segmentation needs"
    )
    print("Sit back and relax, let the magic of algorithms happen...")
    print("")
    if output_dir is None:
        output_dir = os.path.dirname(input_filename_type_list[0][0])
    if atlas_file is None:
        atlas = os.path.join(ATLAS_DIR, 'brain-atlas-3.0.3.txt')
    else:
        atlas = atlas_file

    if topology_lut_dir is None:
        topology_lut_dir = TOPOLOGY_LUT_DIR  # grabbing this from the default settings in defaults.py
    else:
        if not (
                topology_lut_dir[-1] == os.sep
        ):  #if we don't end in a path sep, we need to make sure that we add it
            topology_lut_dir += os.sep

    print("Atlas file: " + atlas)
    print("Topology LUT durectory: " + topology_lut_dir)
    print("")

    if not any(isinstance(el, list)
               for el in input_filename_type_list):  #make into list of lists
        input_filename_type_list = [input_filename_type_list]

    # now we set up the MGDM-specific settings
    mgdm = cj.BrainMgdmMultiSegmentation2()
    mgdm.setAtlasFile(atlas)
    mgdm.setTopologyLUTdirectory(topology_lut_dir)

    mgdm.setOutputImages('segmentation')
    # --> mgdm.setOrientations(mgdm.AXIAL, mgdm.R2L, mgdm.A2P, mgdm.I2S) # this is the default for MGDM, <--
    # mgdm.setOrientations(mgdm.AXIAL, mgdm.L2R, mgdm.P2A, mgdm.I2S)  #LR,PA,IS is always how they are returned from nibabel
    mgdm.setAdjustIntensityPriors(False)  # default is True
    mgdm.setComputePosterior(False)
    mgdm.setDiffuseProbabilities(False)
    mgdm.setSteps(num_steps)
    mgdm.setTopology('wcs')  # {'wcs','no'} no=off for testing, wcs=default
    for idx, con in enumerate(input_filename_type_list):
        print("Input files and filetypes:")
        print("  " + str(idx + 1) + " "),
        print(con)
        #flipLR = False
        #flipAP = False
        #flipIS = False

        fname = con[0]
        type = con[1]
        d, d_aff, d_head = niiLoad(fname, return_header=True)

        ## usage example in the proc_file function of : https://github.com/nipy/nibabel/blob/master/bin/parrec2nii
        ornt_orig = io_orientation(d_aff)
        ornt_mgdm = io_orientation(np.diag(
            [-1, -1, 1,
             1]).dot(d_aff))  # -1 -1 1 LPS (mgdm default); 1 1 1 is RAS
        ornt_chng = ornt_transform(
            ornt_mgdm, ornt_orig)  # to get from MGDM to our original input

        # convert orientation information to mgdm slice and orientation info
        aff_orients, aff_slc = get_affine_orientation_slice(d_aff)
        print("data orientation: " + str(aff_orients)),
        print("slice settings: " + aff_slc)
        print("mgdm orientation: " + str(ornt_mgdm))
        print("data orientation: " + str(ornt_orig))

        if aff_slc == "AXIAL":
            SLC = mgdm.AXIAL
        elif aff_slc == "SAGITTAL":
            SLC = mgdm.SAGITTAL
        else:
            SLC = mgdm.CORONAL
        for aff_orient in aff_orients:  #TODO: if anything is different from the default MGDM settings, we need to flip axes of the data at the end
            if aff_orient == "L":
                LR = mgdm.R2L
            elif aff_orient == "R":
                LR = mgdm.L2R
            # flipLR = True
            elif aff_orient == "A":
                AP = mgdm.P2A
                #flipAP = True
            elif aff_orient == "P":
                AP = mgdm.A2P
            elif aff_orient == "I":
                IS = mgdm.S2I
                #flipIS = True
            elif aff_orient == "S":
                IS = mgdm.I2S
        mgdm.setOrientations(SLC, LR, AP,
                             IS)  #L2R,P2A,I2S is nibabel default (i.e., RAS)

        if idx + 1 == 1:
            # we use the first image to set the dimensions and resolutions
            res = d_head.get_zooms()
            res = [a1.item()
                   for a1 in res]  # cast to regular python float type
            mgdm.setDimensions(d.shape[0], d.shape[1], d.shape[2])
            mgdm.setResolutions(res[0], res[1], res[2])

            # keep the shape and affine from the first image for saving
            d_shape = np.array(d.shape)
            out_root_fname = os.path.basename(fname)[0:os.path.basename(
                fname).find('.')]  #assumes no periods in filename, :-/

            mgdm.setContrastImage1(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType1(type)
        elif idx + 1 == 2:
            mgdm.setContrastImage2(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType2(type)
        elif idx + 1 == 3:
            mgdm.setContrastImage3(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType3(type)
        elif idx + 1 == 4:
            mgdm.setContrastImage4(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType4(type)
    try:
        print("Executing MGDM on your inputs")
        print("Don't worry, the magic is happening!")
        mgdm.execute()
        print(os.path.join(output_dir, out_root_fname + '_seg_cjs.nii.gz'))

        # outputs
        # reshape Fortran-style to convert back to the format that nibabel likes
        seg_im = np.reshape(
            np.array(mgdm.getSegmentedBrainImage(), dtype=np.uint32), d_shape,
            'F')
        lbl_im = np.reshape(
            np.array(mgdm.getPosteriorMaximumLabels4D(), dtype=np.uint32),
            d_shape, 'F')
        ids_im = np.reshape(
            np.array(mgdm.getSegmentedIdsImage(), dtype=np.uint32), d_shape,
            'F')

        # fix orientation back to the input orientation :-/ not really working
        # seg_im = apply_orientation(seg_im, ornt_chng) # this takes care of the orientations between mipav and input
        # lbl_im = apply_orientation(lbl_im, ornt_chng) # TODO: fix the origin point offset?, 2x check possible RL flip
        # ids_im = apply_orientation(ids_im, ornt_chng) # alternative: register? https://github.com/pyimreg
        #

        # save
        seg_file = os.path.join(output_dir, out_root_fname + '_seg_cjs.nii.gz')
        lbl_file = os.path.join(output_dir, out_root_fname + '_lbl_cjs.nii.gz')
        ids_file = os.path.join(output_dir, out_root_fname + '_ids_cjs.nii.gz')

        ## this will work, but the solution with nibabel.orientations is much cleaner
        # if our settings were not the same as MGDM likes, we need to flip the relevant settings:
        #d_aff_new = flip_affine_orientation(d_aff, flipLR=flipLR, flipAP=flipAP, flipIS=flipIS)

        d_head['data_type'] = np.array(32).astype(
            'uint32')  #convert the header as well
        d_head['cal_max'] = np.max(seg_im)  #max for display
        niiSave(seg_file, seg_im, d_aff, header=d_head, data_type='uint32')
        d_head['cal_max'] = np.max(lbl_im)
        niiSave(lbl_file, lbl_im, d_aff, header=d_head, data_type='uint32')
        d_head['cal_max'] = np.max(ids_im)  # max for display
        niiSave(ids_file, ids_im, d_aff, header=d_head, data_type='uint32')
        print("Data stored in: " + output_dir)
    except:
        print("--- MGDM failed. Go cry. ---")
        return
    print("Execution completed")

    return seg_im, d_aff, d_head
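
# Example usage (illustrative only; the file names are hypothetical and the
# contrast type strings are placeholders, valid names depend on the atlas file,
# see get_MGDM_seg_contrast_names(atlas_file) in the v2 docstring below):
#
#   seg_im, d_aff, d_head = MGDMBrainSegmentation(
#       [['sub01_uni.nii.gz', 'Mprage3T'],
#        ['sub01_t1map.nii.gz', 'T1map3T']],
#       output_dir='/data/sub01/mgdm',
#       num_steps=5)
#   # saves *_seg_cjs.nii.gz, *_lbl_cjs.nii.gz and *_ids_cjs.nii.gz in
#   # output_dir (default: next to the first input) and returns the
#   # segmentation in memory
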
def MGDMBrainSegmentation_v2(con1_files,
                             con1_type,
                             con2_files=None,
                             con2_type=None,
                             con3_files=None,
                             con3_type=None,
                             con4_files=None,
                             con4_type=None,
                             output_dir=None,
                             num_steps=5,
                             topology='wcs',
                             atlas_file=None,
                             topology_lut_dir=None,
                             adjust_intensity_priors=False,
                             compute_posterior=False,
                             diffuse_probabilities=False,
                             file_suffix=None):
    """
    Perform MGDM segmentation
    simplified inputs
    adjust_intensity_priors is nominally True in MGDM itself, but enabling it
    severely degrades the segmentation, so it defaults to False here

    :param con1_files:              List of files for contrast 1, required
    :param con1_type:               Contrast 1 type (from get_MGDM_seg_contrast_names(atlas_file))
    :param con2_files:              List of files for contrast 2, optional, must be matched to con1_files
    :param con2_type:               Contrast 2 type
    :param con3_files:              List of files for contrast 3, optional, must be matched to con1_files
    :param con3_type:               Contrast 3 type
    :param con4_files:              List of files for contrast 4, optional, must be matched to con1_files
    :param con4_type:               Contrast 4 type
    :param output_dir:              Directory to place output, defaults to input directory if = None
    :param num_steps:               Number of steps for MGDM, default = 5, set to 0 for quicker testing (but worse quality segmentation)
    :param topology:                Topology setting {'wcs', 'no'} ('no' for no topology)
    :param atlas_file:              Atlas file full path and filename
    :param topology_lut_dir:        Directory for topology files
    :param adjust_intensity_priors: Adjust intensity priors based on dataset: True/False
    :param compute_posterior:       Compute posterior: True/False
    :param diffuse_probabilities:   Compute diffuse probabilities: True/False
    :param file_suffix:             Distinguishing text to add to the end of the filename
    :return:
    """

    #from nibabel.orientations import io_orientation, inv_ornt_aff, apply_orientation, ornt_transform
    import os
    print(
        "Thank you for choosing the MGDM segmentation from the cbstools for your brain segmentation needs"
    )
    print("Sit back and relax, let the magic of algorithms happen...")
    print("")

    out_files_seg = []
    out_files_lbl = []
    out_files_ids = []

    if output_dir is None:
        output_dir = os.path.dirname(con1_files[0])
    if atlas_file is None:
        atlas = os.path.join(ATLAS_DIR, 'brain-atlas-3.0.3.txt')
    else:
        atlas = atlas_file

    create_dir(output_dir)

    if topology_lut_dir is None:
        topology_lut_dir = TOPOLOGY_LUT_DIR  # grabbing this from the default settings in defaults.py
    else:
        if not (
                topology_lut_dir[-1] == pathsep
        ):  #if we don't end in a path sep, we need to make sure that we add it
            topology_lut_dir += pathsep

    print("Atlas file: " + atlas)
    print("Topology LUT durectory: " + topology_lut_dir)
    print("")

    if not isinstance(con1_files, list):  # make into lists if they were not
        con1_files = [con1_files]
    if con2_files is not None and not isinstance(
            con2_files, list):  # make into list of lists
        con2_files = [con2_files]
    if con3_files is not None and not isinstance(
            con3_files, list):  # make into list of lists
        con3_files = [con3_files]
    if con4_files is not None and not isinstance(
            con4_files, list):  # make into list of lists
        con4_files = [con4_files]

    # now we set up the MGDM-specific settings
    mgdm = cj.BrainMgdmMultiSegmentation2()
    mgdm.setAtlasFile(atlas)
    mgdm.setTopologyLUTdirectory(topology_lut_dir)

    mgdm.setOutputImages('segmentation')
    # --> mgdm.setOrientations(mgdm.AXIAL, mgdm.R2L, mgdm.A2P, mgdm.I2S) # this is the default for MGDM, <--
    # mgdm.setOrientations(mgdm.AXIAL, mgdm.L2R, mgdm.P2A, mgdm.I2S)  #LR,PA,IS is always how they are returned from nibabel
    mgdm.setAdjustIntensityPriors(adjust_intensity_priors)  # default is True
    mgdm.setComputePosterior(compute_posterior)
    mgdm.setDiffuseProbabilities(diffuse_probabilities)
    mgdm.setSteps(num_steps)
    mgdm.setTopology(topology)  # {'wcs','no'} no=off for testing, wcs=default

    for idx, con1 in enumerate(con1_files):
        print("Input files and filetypes:")
        print(con1_type + ":\t" + con1.split(pathsep)[-1])

        fname = con1
        type = con1_type
        d, d_aff, d_head = niiLoad(fname, return_header=True)

        # convert orientation information to mgdm slice and orientation info
        # aff_orients,aff_slc = get_affine_orientation_slice(d_aff)
        # print("data orientation: " + str(aff_orients)),
        # print("slice settings: " + aff_slc)
        # if aff_slc == "AXIAL":
        #     SLC=mgdm.AXIAL
        # elif aff_slc == "SAGITTAL":
        #     SLC=mgdm.SAGITTAL
        # else:
        #     SLC=mgdm.CORONAL
        # for aff_orient in aff_orients: #TODO: if anything is different from the default MGDM settings, we need to flip axes of the data at the end
        #     if aff_orient == "L":
        #         LR=mgdm.R2L
        #     elif aff_orient == "R":
        #         LR = mgdm.L2R
        #        # flipLR = True
        #     elif aff_orient == "A":
        #         AP = mgdm.P2A
        #         #flipAP = True
        #     elif aff_orient == "P":
        #         AP = mgdm.A2P
        #     elif aff_orient == "I":
        #         IS = mgdm.S2I
        #         #flipIS = True
        #     elif aff_orient == "S":
        #         IS = mgdm.I2S
        #mgdm.setOrientations(SLC, LR, AP, IS)  #L2R,P2A,I2S is nibabel default (i.e., RAS)

        # we use the first image to set the dimensions and resolutions
        res = d_head.get_zooms()
        res = [a1.item() for a1 in res]  # cast to regular python float type
        mgdm.setDimensions(d.shape[0], d.shape[1], d.shape[2])
        mgdm.setResolutions(res[0], res[1], res[2])

        # keep the shape and affine from the first image for saving
        d_shape = np.array(d.shape)
        out_root_fname = os.path.basename(fname)[0:os.path.basename(
            fname).find('.')]  # assumes no periods in filename, :-/
        mgdm.setContrastImage1(
            cj.JArray('float')((d.flatten('F')).astype(float)))
        mgdm.setContrastType1(type)

        if con2_files is not None:  #only bother with the other contrasts if something is in the one before it
            print(con2_type + ":\t" + con2_files[idx].split(pathsep)[-1])
            d, a = niiLoad(con2_files[idx], return_header=False)
            mgdm.setContrastImage2(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType2(con2_type)
            if con3_files is not None:
                print(con3_type + ":\t" + con3_files[idx].split(pathsep)[-1])
                d, a = niiLoad(con3_files[idx], return_header=False)
                mgdm.setContrastImage3(
                    cj.JArray('float')((d.flatten('F')).astype(float)))
                mgdm.setContrastType3(con3_type)
                if con4_files is not None:
                    print(con4_type + ":\t" +
                          con4_files[idx].split(pathsep)[-1])
                    d, a = niiLoad(con4_files[idx], return_header=False)
                    mgdm.setContrastImage4(
                        cj.JArray('float')((d.flatten('F')).astype(float)))
                    mgdm.setContrastType4(con4_type)
        try:
            print("Executing MGDM on your inputs")
            print("Don't worry, the magic is happening!")
            ## ---------------------------- MGDM MAGIC START ---------------------------- ##
            mgdm.execute()
            ## ---------------------------- MGDM MAGIC END   ---------------------------- ##

            # outputs
            # reshape Fortran-style to convert back to the format that nibabel likes
            seg_im = np.reshape(
                np.array(mgdm.getSegmentedBrainImage(), dtype=np.uint32),
                d_shape, 'F')
            lbl_im = np.reshape(
                np.array(mgdm.getPosteriorMaximumLabels4D(), dtype=np.uint32),
                d_shape, 'F')
            ids_im = np.reshape(
                np.array(mgdm.getSegmentedIdsImage(), dtype=np.uint32),
                d_shape, 'F')

            # filenames for saving
            if file_suffix is not None:
                seg_file = os.path.join(
                    output_dir,
                    out_root_fname + '_seg' + file_suffix + '.nii.gz')
                lbl_file = os.path.join(
                    output_dir,
                    out_root_fname + '_lbl' + file_suffix + '.nii.gz')
                ids_file = os.path.join(
                    output_dir,
                    out_root_fname + '_ids' + file_suffix + '.nii.gz')
            else:
                seg_file = os.path.join(output_dir,
                                        out_root_fname + '_seg.nii.gz')
                lbl_file = os.path.join(output_dir,
                                        out_root_fname + '_lbl.nii.gz')
                ids_file = os.path.join(output_dir,
                                        out_root_fname + '_ids.nii.gz')

            d_head['data_type'] = np.array(32).astype(
                'uint32')  #convert the header as well
            d_head['cal_max'] = np.max(seg_im)  #max for display
            niiSave(seg_file, seg_im, d_aff, header=d_head, data_type='uint32')
            d_head['cal_max'] = np.max(lbl_im)
            niiSave(lbl_file, lbl_im, d_aff, header=d_head, data_type='uint32')
            d_head['cal_max'] = np.max(ids_im)  # max for display
            niiSave(ids_file, ids_im, d_aff, header=d_head, data_type='uint32')
            print("Data stored in: " + output_dir)
            print("")
            out_files_seg.append(seg_file)
            out_files_lbl.append(lbl_file)
            out_files_ids.append(ids_file)
        except:
            print("--- MGDM failed. Go cry. ---")
            return
        print("Execution completed")

    return out_files_seg, out_files_lbl, out_files_ids
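
# Example usage (hedged sketch; paths and the contrast type string are
# assumptions, valid type names come from get_MGDM_seg_contrast_names(atlas_file)):
#
#   segs, lbls, ids = MGDMBrainSegmentation_v2(
#       con1_files=['/data/sub01/sub01_uni.nii.gz',
#                   '/data/sub02/sub02_uni.nii.gz'],
#       con1_type='Mprage3T',
#       output_dir='/data/mgdm_out',
#       num_steps=5,
#       file_suffix='_run1')
#   # returns three lists of output filenames (*_seg*, *_lbl*, *_ids*), one
#   # entry per input file
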
def layering(gwb_levelset,
             cgb_levelset,
             lut_dir,
             n_layers=10,
             save_data=True,
             base_name=None):

    # load the data as well as filenames and headers for saving later
    gwb_img = load_volume(gwb_levelset)
    gwb_data = gwb_img.get_data()
    hdr = gwb_img.get_header()
    aff = gwb_img.get_affine()

    cgb_data = load_volume(cgb_levelset).get_data()

    try:
        cbstoolsjcc.initVM(initialheap='6000m', maxheap='6000m')
    except ValueError:
        pass

    lamination = cbstoolsjcc.LaminarVolumetricLayering()
    lamination.setDimensions(gwb_data.shape[0], gwb_data.shape[1],
                             gwb_data.shape[2])
    zooms = [x.item() for x in hdr.get_zooms()]
    lamination.setResolutions(zooms[0], zooms[1], zooms[2])

    lamination.setInnerDistanceImage(
        cbstoolsjcc.JArray('float')((gwb_data.flatten('F')).astype(float)))
    lamination.setOuterDistanceImage(
        cbstoolsjcc.JArray('float')((cgb_data.flatten('F')).astype(float)))
    lamination.setNumberOfLayers(n_layers)
    lamination.setTopologyLUTdirectory(lut_dir)
    lamination.execute()

    depth_data = np.reshape(
        np.array(lamination.getContinuousDepthMeasurement(), dtype=np.float32),
        gwb_data.shape, 'F')
    layer_data = np.reshape(
        np.array(lamination.getDiscreteSampledLayers(), dtype=np.uint32),
        gwb_data.shape, 'F')

    boundary_len = lamination.getLayerBoundarySurfacesLength()
    boundary_data = np.reshape(
        np.array(lamination.getLayerBoundarySurfaces(), dtype=np.float32),
        (gwb_data.shape[0], gwb_data.shape[1], gwb_data.shape[2],
         boundary_len), 'F')

    depth_img = nb.Nifti1Image(depth_data, aff, hdr)
    layer_img = nb.Nifti1Image(layer_data, aff, hdr)
    boundary_img = nb.Nifti1Image(boundary_data, aff, hdr)

    if save_data:
        if not base_name:
            if not isinstance(gwb_levelset, basestring):
                base_name = os.getcwd()
                print "saving to %s" % base_name
            else:
                dir_name = os.path.dirname(gwb_levelset)
                base_name = os.path.basename(gwb_levelset)
                base_name = os.path.join(dir_name,
                                         base_name[:base_name.find('.')])

        save_volume(base_name + '_depth.nii.gz', depth_img)
        save_volume(base_name + '_layers.nii.gz', layer_img)
        save_volume(base_name + '_boundaries.nii.gz', boundary_img)

    return depth_img, layer_img, boundary_img
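
# Example usage (sketch under assumptions: the two levelsets would come from
# create_levelsets() above, the paths are hypothetical, and lut_dir points at
# the topology LUT directory, e.g. TOPOLOGY_LUT_DIR from defaults.py):
#
#   depth, layers, boundaries = layering('sub01_gwb_levelset.nii.gz',
#                                        'sub01_cgb_levelset.nii.gz',
#                                        lut_dir=TOPOLOGY_LUT_DIR,
#                                        n_layers=10,
#                                        base_name='/data/sub01/sub01')
#   # would write *_depth.nii.gz, *_layers.nii.gz and *_boundaries.nii.gz
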
def profile_meshing(profile_file, surf_mesh, save_data=True, base_name=None):

    profile_img = load_volume(profile_file)
    profile_data = profile_img.get_data()
    profile_len = profile_data.shape[3]
    hdr = profile_img.get_header()
    aff = profile_img.get_affine()

    in_coords = load_mesh_geometry(surf_mesh)['coords']
    in_faces = load_mesh_geometry(surf_mesh)['faces']

    try:
        cbstoolsjcc.initVM(initialheap='6000m', maxheap='6000m')
    except ValueError:
        pass

    mesher = cbstoolsjcc.LaminarProfileMeshing()

    mesher.setDimensions(profile_data.shape)
    zooms = [x.item() for x in hdr.get_zooms()]
    mesher.setResolutions(zooms[0], zooms[1], zooms[2])

    mesher.setProfileSurfaceImage(
        cbstoolsjcc.JArray('float')((profile_data.flatten('F')).astype(float)))
    mesher.setSurfacePoints(
        cbstoolsjcc.JArray('float')(in_coords.flatten().astype(float)))
    mesher.setSurfaceTriangles(
        cbstoolsjcc.JArray('int')(in_faces.flatten().astype(int)))

    mesher.execute()

    out_coords = np.zeros(
        (in_coords.shape[0], in_coords.shape[1], profile_len))
    out_faces = np.zeros((in_faces.shape[0], in_faces.shape[1], profile_len))

    mesh_list = []
    for i in range(profile_len):
        current_mesh = {}
        current_mesh['coords'] = np.reshape(
            np.array(mesher.getSampledSurfacePoints(i), dtype=np.float32),
            in_coords.shape)
        current_mesh['faces'] = np.reshape(
            np.array(mesher.getSampledSurfaceTriangles(i), dtype=np.float32),
            in_faces.shape)
        mesh_list.append(current_mesh)

    if save_data:
        if not base_name:
            if not isinstance(profile_file, basestring):
                base_name = os.getcwd()
                print "saving to %s" % base_name
            else:
                dir_name = os.path.dirname(profile_file)
                base_name = os.path.basename(profile_file)
                base_name = os.path.join(dir_name,
                                         base_name[:base_name.find('.')])

        for i in range(len(mesh_list)):
            save_mesh_geometry(base_name + '_%s.vtk' % str(i), mesh_list[i])

    return mesh_list
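
# Example usage (hedged; assumes the 4D profile image is the *_boundaries
# output of layering() above and that surf_mesh is a mesh file readable by
# load_mesh_geometry, all paths are hypothetical):
#
#   mesh_list = profile_meshing('sub01_boundaries.nii.gz',
#                               'sub01_mid_surface.vtk',
#                               base_name='/data/sub01/sub01_profile')
#   # would save one mesh per profile sample (sub01_profile_0.vtk, ...) and
#   # return the list of {'coords', 'faces'} dictionaries
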
def MGDMBrainSegmentation(input_filename_type_list,
                          output_dir=None,
                          num_steps=5,
                          atlas_file=None,
                          topology_lut_dir=None):
    """
    Perform MGDM segmentation
    :param input_filename_type_list: list of [[fname1,type1],[fname2,type2],...] - for a maximum of 4 inputs
    :param output_dir: full path to the output directory
    :param num_steps: number of MGDM steps (default 5; set to 0 for quick testing)
    :param atlas_file: full path to the atlas file, default set in defaults.py
    :param topology_lut_dir: full path to the directory with the topology files, default set in defaults.py
    :return:
    """
    print(
        "Thank you for choosing the MGDM segmentation from the cbstools for your brain segmentation needs"
    )
    print("Sit back and relax, let the magic of algorithms happen...")
    print("")
    if output_dir is None:
        output_dir = os.path.dirname(input_filename_type_list[0][0])
    if atlas_file is None:
        atlas = os.path.join(ATLAS_DIR, 'brain-atlas-3.0.3.txt')
    else:
        atlas = atlas_file

    if topology_lut_dir is None:
        topology_lut_dir = TOPOLOGY_LUT_DIR  # grabbing this from the default settings in defaults.py
    else:
        if not (
                topology_lut_dir[-1] == os.sep
        ):  #if we don't end in a path sep, we need to make sure that we add it
            topology_lut_dir += os.sep
    print("Atlas file: " + atlas)
    print("Topology LUT durectory: " + topology_lut_dir)
    print("")

    if not any(isinstance(el, list)
               for el in input_filename_type_list):  #make into list of lists
        input_filename_type_list = [input_filename_type_list]

    # now we set up the MGDM-specific settings
    mgdm = cj.BrainMgdmMultiSegmentation2()
    mgdm.setAtlasFile(atlas)
    mgdm.setTopologyLUTdirectory(topology_lut_dir)

    mgdm.setOutputImages('segmentation')
    mgdm.setOrientations(mgdm.AXIAL, mgdm.R2L, mgdm.A2P, mgdm.I2S)
    mgdm.setAdjustIntensityPriors(False)  # default is True
    mgdm.setComputePosterior(False)
    mgdm.setDiffuseProbabilities(False)
    mgdm.setSteps(num_steps)
    mgdm.setTopology('wcs')  # {'wcs','no'} no=off for testing, wcs=default

    for idx, con in enumerate(input_filename_type_list):
        print("Input files and filetypes:")
        print("  " + str(idx + 1) + " "),
        print(con)
        fname = con[0]
        type = con[1]
        img = nb.load(fname)
        d = img.get_data()
        if idx + 1 == 1:
            # we use the first image to set the dimensions and resolutions
            res = img.header.get_zooms()
            res = [a1.item()
                   for a1 in res]  # cast to regular python float type
            mgdm.setDimensions(d.shape[0], d.shape[1], d.shape[2])
            mgdm.setResolutions(res[0], res[1], res[2])

            # keep the shape and affine from the first image for saving
            d_shape = np.array(d.shape)
            d_aff = img.affine
            out_root_fname = os.path.basename(fname)[0:os.path.basename(
                fname).find('.')]  #assumes no periods in filename, :-/

            mgdm.setContrastImage1(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType1(type)
        elif idx + 1 == 2:
            mgdm.setContrastImage2(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType2(type)
        elif idx + 1 == 3:
            mgdm.setContrastImage3(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType3(type)
        elif idx + 1 == 4:
            mgdm.setContrastImage4(
                cj.JArray('float')((d.flatten('F')).astype(float)))
            mgdm.setContrastType4(type)
    try:
        print("Executing MGDM on your inputs")
        print("Don't worry, the magic is happening!")
        mgdm.execute()
        print(os.path.join(output_dir, out_root_fname + '_seg_cjs.nii.gz'))

        # outputs
        # reshape Fortran-style to convert back to the format that nibabel likes
        seg_im = np.reshape(
            np.array(mgdm.getSegmentedBrainImage(), dtype=np.uint32), d_shape,
            'F')
        lbl_im = np.reshape(
            np.array(mgdm.getPosteriorMaximumLabels4D(), dtype=np.uint32),
            d_shape, 'F')
        ids_im = np.reshape(
            np.array(mgdm.getSegmentedIdsImage(), dtype=np.uint32), d_shape,
            'F')

        # save
        out_im = nb.Nifti1Image(seg_im, d_aff)
        nb.save(out_im,
                os.path.join(output_dir, out_root_fname + '_seg_cjs.nii.gz'))
        out_im = nb.Nifti1Image(lbl_im, d_aff)
        nb.save(out_im,
                os.path.join(output_dir, out_root_fname + '_lbl_cjs.nii.gz'))
        out_im = nb.Nifti1Image(ids_im, d_aff)
        nb.save(out_im,
                os.path.join(output_dir, out_root_fname + '_ids_cjs.nii.gz'))

        print("Data stored in: " + output_dir)
    except:
        print("--- MGDM failed. Go cry. ---")
    print("Execution completed")