Code example #1
def check_if_rpi(fname):
    from sct_orientation import get_orientation
    if not get_orientation(fname) == 'RPI':
        printv(
            '\nERROR: ' + fname +
            ' is not in RPI orientation. Use sct_orientation to reorient your data. Exit program.\n',
            1, 'error')
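A minimal usage sketch for the check above (the file name is hypothetical; printv is assumed to come from sct_utils, as elsewhere in these examples):

# Hypothetical usage: abort early if the input is not already in RPI orientation.
check_if_rpi('t2_seg.nii.gz')
# ... from here on, the data axes can be assumed to follow the RPI convention ...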
Code example #2
    def change_orientation(self, orientation='RPI', inversion_orient=False):
        """
        This function changes the orientation of the data by swapping the image axis.
        Warning: the nifti image header is not changed!!!
        :param orientation: string of three character representing the new orientation (ex: AIL, default: RPI)
               inversion_orient: boolean. If True, the data change to match the orientation in the header, based on the orientation provided as the argument orientation.
        :return:
        """
        opposite_character = {'L': 'R', 'R': 'L', 'A': 'P', 'P': 'A', 'I': 'S', 'S': 'I'}

        if self.orientation is None:
            from sct_orientation import get_orientation
            self.orientation = get_orientation(self.file_name)
        # get orientation to return at the end of function
        raw_orientation = self.orientation

        if inversion_orient:
            temp_orientation = self.orientation
            self.orientation = orientation
            orientation = temp_orientation

        # change the orientation of the image
        perm = [0, 1, 2]
        inversion = [1, 1, 1]
        for i, character in enumerate(self.orientation):
            try:
                perm[i] = orientation.index(character)
            except ValueError:
                perm[i] = orientation.index(opposite_character[character])
                inversion[i] = -1

        # axes inversion
        self.data = self.data[::inversion[0], ::inversion[1], ::inversion[2]]

        # axes manipulations
        from numpy import swapaxes

        if perm == [1, 0, 2]:
            self.data = swapaxes(self.data, 0, 1)
        elif perm == [2, 1, 0]:
            self.data = swapaxes(self.data, 0, 2)
        elif perm == [0, 2, 1]:
            self.data = swapaxes(self.data, 1, 2)
        elif perm == [2, 0, 1]:
            self.data = swapaxes(self.data, 0, 2)  # transform [2, 0, 1] to [1, 0, 2]
            self.data = swapaxes(self.data, 0, 1)  # transform [1, 0, 2] to [0, 1, 2]
        elif perm == [1, 2, 0]:
            self.data = swapaxes(self.data, 0, 2)  # transform [1, 2, 0] to [0, 2, 1]
            self.data = swapaxes(self.data, 1, 2)  # transform [0, 2, 1] to [0, 1, 2]
        elif perm == [0, 1, 2]:
            # do nothing
            pass
        else:
            print 'Error: wrong orientation'
        # from numpy import array
        # self.dim = array(self.dim)[perm]
        self.orientation = orientation
        return raw_orientation
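A hedged usage sketch for the method above (assumes an Image object that exposes change_orientation as defined here; the file name is hypothetical):

# Hypothetical usage: work in RPI, then restore the native orientation.
im = Image('t2_seg.nii.gz')                      # assumed constructor that loads the file
raw_orientation = im.change_orientation('RPI')   # returns the orientation before the change
# ... process im.data with its axes in RPI order ...
im.change_orientation(raw_orientation)           # put the data back in its native orientation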
Code example #3
    def change_orientation(self, orientation="RPI", inversion_orient=False):
        """
        This function changes the orientation of the data by swapping the image axis.
        Warning: the nifti image header is not changed!!!
        :param orientation: string of three character representing the new orientation (ex: AIL, default: RPI)
               inversion_orient: boolean. If True, the data change to match the orientation in the header, based on the orientation provided as the argument orientation.
        :return:
        """
        opposite_character = {"L": "R", "R": "L", "A": "P", "P": "A", "I": "S", "S": "I"}

        if self.orientation is None:
            from sct_orientation import get_orientation

            self.orientation = get_orientation(self.file_name)

        if inversion_orient:
            temp_orientation = self.orientation
            self.orientation = orientation
            orientation = temp_orientation

        # change the orientation of the image
        perm = [0, 1, 2]
        inversion = [1, 1, 1]
        for i, character in enumerate(self.orientation):
            try:
                perm[i] = orientation.index(character)
            except ValueError:
                perm[i] = orientation.index(opposite_character[character])
                inversion[i] = -1

        # axes inversion
        self.data = self.data[:: inversion[0], :: inversion[1], :: inversion[2]]

        # axes manipulations
        from numpy import swapaxes

        if perm == [1, 0, 2]:
            self.data = swapaxes(self.data, 0, 1)
        elif perm == [2, 1, 0]:
            self.data = swapaxes(self.data, 0, 2)
        elif perm == [0, 2, 1]:
            self.data = swapaxes(self.data, 1, 2)
        elif perm == [2, 0, 1]:
            self.data = swapaxes(self.data, 0, 2)  # transform [2, 0, 1] to [1, 0, 2]
            self.data = swapaxes(self.data, 0, 1)  # transform [1, 0, 2] to [0, 1, 2]
        elif perm == [1, 2, 0]:
            self.data = swapaxes(self.data, 0, 2)  # transform [1, 2, 0] to [0, 2, 1]
            self.data = swapaxes(self.data, 1, 2)  # transform [0, 2, 1] to [0, 1, 2]
        elif perm == [0, 1, 2]:
            # do nothing
            pass
        else:
            print "Error: wrong orientation"

        self.orientation = orientation
Code example #4
    def loadFromPath(self, path, verbose):
        """
        This function load an image from an absolute path using nibabel library
        :param path: path of the file from which the image will be loaded
        :return:
        """
        from nibabel import load, spatialimages
        from sct_utils import check_file_exist, printv, extract_fname
        from sct_orientation import get_orientation

        check_file_exist(path, verbose=verbose)
        try:
            im_file = load(path)
        except spatialimages.ImageFileError:
            printv('Error: make sure ' + path + ' is an image.', 1, 'error')
        self.orientation = get_orientation(path)
        self.data = im_file.get_data()
        self.hdr = im_file.get_header()
        self.absolutepath = path
        self.path, self.file_name, self.ext = extract_fname(path)
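A short sketch of how loadFromPath might be driven (the no-argument constructor is an assumption; the path is hypothetical):

# Hypothetical usage: populate an empty Image from a file on disk.
im = Image()                                          # assumed no-argument constructor
im.loadFromPath('t2.nii.gz', verbose=1)
orientation, shape = im.orientation, im.data.shape    # e.g. 'RPI' and the 3-D array shape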
Code example #5
    def loadFromPath(self, path, verbose):
        """
        This function load an image from an absolute path using nibabel library
        :param path: path of the file from which the image will be loaded
        :return:
        """
        from nibabel import load, spatialimages
        from sct_utils import check_file_exist, printv, extract_fname
        from sct_orientation import get_orientation

        # check_file_exist(path, verbose=verbose)
        try:
            im_file = load(path)
        except spatialimages.ImageFileError:
            printv('Error: make sure ' + path + ' is an image.', 1, 'error')
        self.orientation = get_orientation(path)
        self.data = im_file.get_data()
        self.hdr = im_file.get_header()
        self.absolutepath = path
        self.path, self.file_name, self.ext = extract_fname(path)
        self.dim = get_dimension(im_file)
Code example #6
def compute_csa(fname_segmentation,
                name_method,
                volume_output,
                verbose,
                remove_temp_files,
                step,
                smoothing_param,
                figure_fit,
                name_output,
                slices,
                vert_levels,
                path_to_template,
                algo_fitting='hanning',
                type_window='hanning',
                window_length=80):

    # Extract path, file and extension
    fname_segmentation = os.path.abspath(fname_segmentation)
    path_data, file_data, ext_data = sct.extract_fname(fname_segmentation)

    # create temporary folder
    sct.printv('\nCreate temporary folder...', verbose)
    path_tmp = sct.slash_at_the_end('tmp.' + time.strftime("%y%m%d%H%M%S"), 1)
    sct.run('mkdir ' + path_tmp, verbose)

    # Copying input data to tmp folder and convert to nii
    sct.printv('\nCopying input data to tmp folder and convert to nii...',
               verbose)
    sct.run('isct_c3d ' + fname_segmentation + ' -o ' + path_tmp +
            'segmentation.nii')

    # go to tmp folder
    os.chdir(path_tmp)

    # Change orientation of the input segmentation into RPI
    sct.printv('\nChange orientation of the input segmentation into RPI...',
               verbose)
    fname_segmentation_orient = set_orientation('segmentation.nii', 'RPI',
                                                'segmentation_orient.nii')

    # Get size of data
    sct.printv('\nGet data dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_segmentation_orient).dim
    sct.printv('  ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose)

    # Open segmentation volume
    sct.printv('\nOpen segmentation volume...', verbose)
    file_seg = nibabel.load(fname_segmentation_orient)
    data_seg = file_seg.get_data()
    hdr_seg = file_seg.get_header()

    # # Extract min and max index in Z direction
    X, Y, Z = (data_seg > 0).nonzero()
    min_z_index, max_z_index = min(Z), max(Z)
    # Xp, Yp = (data_seg[:, :, 0] >= 0).nonzero()  # X and Y range

    # extract centerline and smooth it
    x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
        fname_segmentation_orient,
        algo_fitting=algo_fitting,
        type_window=type_window,
        window_length=window_length,
        verbose=verbose)
    z_centerline_scaled = [x * pz for x in z_centerline]

    # Compute CSA
    sct.printv('\nCompute CSA...', verbose)

    # Empty arrays in which CSA for each z slice will be stored
    csa = np.zeros(max_z_index - min_z_index + 1)
    # csa = [0.0 for i in xrange(0, max_z_index-min_z_index+1)]

    for iz in xrange(0, len(z_centerline)):

        # compute the vector normal to the plane
        normal = normalize(
            np.array([
                x_centerline_deriv[iz], y_centerline_deriv[iz],
                z_centerline_deriv[iz]
            ]))

        # compute the angle between the normal vector of the plane and the vector z
        angle = np.arccos(np.dot(normal, [0, 0, 1]))

        # compute the number of voxels, assuming the segmentation is coded for partial volume effect between 0 and 1.
        number_voxels = sum(sum(data_seg[:, :, iz + min_z_index]))

        # compute CSA, by scaling with voxel size (in mm) and adjusting for oblique plane
        csa[iz] = number_voxels * px * py * np.cos(angle)

    if smoothing_param:
        from msct_smooth import smoothing_window
        sct.printv('\nSmooth CSA across slices...', verbose)
        sct.printv('.. Hanning window: ' + str(smoothing_param) + ' mm',
                   verbose)
        csa_smooth = smoothing_window(csa,
                                      window_len=smoothing_param / pz,
                                      window='hanning',
                                      verbose=0)
        # display figure
        if verbose == 2:
            import matplotlib.pyplot as plt
            plt.figure()
            pltx, = plt.plot(z_centerline_scaled, csa, 'bo')
            pltx_fit, = plt.plot(z_centerline_scaled,
                                 csa_smooth,
                                 'r',
                                 linewidth=2)
            plt.title("Cross-sectional area (CSA)")
            plt.xlabel('z (mm)')
            plt.ylabel('CSA (mm^2)')
            plt.legend([pltx, pltx_fit], ['Raw', 'Smoothed'])
            plt.show()
        # update variable
        csa = csa_smooth

    # Create output text file
    sct.printv('\nWrite text file...', verbose)
    file_results = open('csa.txt', 'w')
    for i in range(min_z_index, max_z_index + 1):
        file_results.write(
            str(int(i)) + ',' + str(csa[i - min_z_index]) + '\n')
        # Display results
        sct.printv(
            'z=' + str(i - min_z_index) + ': ' + str(csa[i - min_z_index]) +
            ' mm^2', verbose, 'bold')
    file_results.close()

    # output volume of csa values
    if volume_output:
        sct.printv('\nCreate volume of CSA values...', verbose)
        # get orientation of the input data
        orientation = get_orientation('segmentation.nii')
        data_seg = data_seg.astype(np.float32, copy=False)
        # loop across slices
        for iz in range(min_z_index, max_z_index + 1):
            # retrieve seg pixels
            x_seg, y_seg = (data_seg[:, :, iz] > 0).nonzero()
            seg = [[x_seg[i], y_seg[i]] for i in range(0, len(x_seg))]
            # loop across pixels in segmentation
            for i in seg:
                # replace value with csa value
                data_seg[i[0], i[1], iz] = csa[iz - min_z_index]
        # create header
        hdr_seg.set_data_dtype('float32')  # set image type to float32
        # save volume
        img = nibabel.Nifti1Image(data_seg, None, hdr_seg)
        nibabel.save(img, 'csa_RPI.nii')
        # Change orientation of the output centerline into input orientation
        fname_csa_volume = set_orientation('csa_RPI.nii', orientation,
                                           'csa_RPI_orient.nii')

    # come back to parent folder
    os.chdir('..')

    # Generate output files
    sct.printv('\nGenerate output files...', verbose)
    from shutil import copyfile
    copyfile(path_tmp + 'csa.txt', path_data + param.fname_csa)
    # sct.generate_output_file(path_tmp+'csa.txt', path_data+param.fname_csa)  # extension already included in param.fname_csa
    if volume_output:
        sct.generate_output_file(
            fname_csa_volume, path_data +
            name_output)  # extension already included in name_output

    # average csa across vertebral levels or slices if asked (flag -z or -l)
    if slices or vert_levels:

        if vert_levels and not path_to_template:
            sct.printv('\nERROR: Path to template is missing. See usage.\n', 1,
                       'error')
            sys.exit(2)
        elif vert_levels and path_to_template:
            abs_path_to_template = os.path.abspath(path_to_template)

        # go to tmp folder
        os.chdir(path_tmp)

        # create temporary folder
        sct.printv('\nCreate temporary folder to average csa...', verbose)
        path_tmp_extract_metric = sct.slash_at_the_end('label_temp', 1)
        sct.run('mkdir ' + path_tmp_extract_metric, verbose)

        # Copying output CSA volume in the temporary folder
        sct.printv('\nCopy data to tmp folder...', verbose)
        sct.run('cp ' + fname_segmentation + ' ' + path_tmp_extract_metric)

        # create file info_label
        path_fname_seg, file_fname_seg, ext_fname_seg = sct.extract_fname(
            fname_segmentation)
        create_info_label('info_label.txt', path_tmp_extract_metric,
                          file_fname_seg + ext_fname_seg)

        # average CSA
        if slices:
            os.system("sct_extract_metric -i " + path_data + name_output +
                      " -f " + path_tmp_extract_metric +
                      " -m wa -o ../csa_mean.txt -z " + slices)
        if vert_levels:
            sct.run('cp -R ' + abs_path_to_template + ' .')
            os.system("sct_extract_metric -i " + path_data + name_output +
                      " -f " + path_tmp_extract_metric +
                      " -m wa -o ../csa_mean.txt -v " + vert_levels)

        os.chdir('..')

        # Remove temporary files
        print('\nRemove temporary folder used to average CSA...')
        sct.run('rm -rf ' + path_tmp_extract_metric)

    # Remove temporary files
    if remove_temp_files:
        print('\nRemove temporary files...')
        sct.run('rm -rf ' + path_tmp)
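For clarity, a minimal standalone sketch of the per-slice CSA formula used above (csa = number_voxels * px * py * cos(angle)), with illustrative values:

import numpy as np

slice_seg = np.zeros((4, 4))
slice_seg[1:3, 1:3] = 1                           # 4 segmented voxels in this axial slice
normal = np.array([0.0, 0.0, 1.0])                # unit tangent of the centerline at this slice
angle = np.arccos(np.dot(normal, [0, 0, 1]))      # angle between the tangent and the z axis
px = py = 0.5                                     # in-plane voxel size in mm
csa = slice_seg.sum() * px * py * np.cos(angle)   # 4 * 0.25 * 1.0 = 1.0 mm^2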
Code example #7
def extract_centerline(fname_segmentation,
                       remove_temp_files,
                       name_output='',
                       verbose=0,
                       algo_fitting='hanning',
                       type_window='hanning',
                       window_length=80):

    # Extract path, file and extension
    fname_segmentation = os.path.abspath(fname_segmentation)
    path_data, file_data, ext_data = sct.extract_fname(fname_segmentation)

    # create temporary folder
    path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir ' + path_tmp)

    # copy files into tmp folder
    sct.run('cp ' + fname_segmentation + ' ' + path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    # Change orientation of the input centerline into RPI
    sct.printv('\nOrient centerline to RPI orientation...', verbose)
    fname_segmentation_orient = 'segmentation_rpi' + ext_data
    set_orientation(file_data + ext_data, 'RPI', fname_segmentation_orient)

    # Get dimension
    sct.printv('\nGet dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_segmentation_orient).dim
    sct.printv(
        '.. matrix size: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz),
        verbose)
    sct.printv(
        '.. voxel size:  ' + str(px) + 'mm x ' + str(py) + 'mm x ' + str(pz) +
        'mm', verbose)

    # Extract orientation of the input segmentation
    orientation = get_orientation(file_data + ext_data)
    sct.printv('\nOrientation of segmentation image: ' + orientation, verbose)

    sct.printv('\nOpen segmentation volume...', verbose)
    file = nibabel.load(fname_segmentation_orient)
    data = file.get_data()
    hdr = file.get_header()

    # Extract min and max index in Z direction
    X, Y, Z = (data > 0).nonzero()
    min_z_index, max_z_index = min(Z), max(Z)
    x_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
    y_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
    z_centerline = [iz for iz in range(min_z_index, max_z_index + 1)]
    # Extract segmentation points and average per slice
    for iz in range(min_z_index, max_z_index + 1):
        x_seg, y_seg = (data[:, :, iz] > 0).nonzero()
        x_centerline[iz - min_z_index] = np.mean(x_seg)
        y_centerline[iz - min_z_index] = np.mean(y_seg)
    for k in range(len(X)):
        data[X[k], Y[k], Z[k]] = 0

    # extract centerline and smooth it
    x_centerline_fit, y_centerline_fit, z_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
        fname_segmentation_orient,
        type_window=type_window,
        window_length=window_length,
        algo_fitting=algo_fitting,
        verbose=verbose)

    if verbose == 2:
        import matplotlib.pyplot as plt

        # Creation of a vector x that takes into account the distance between the labels
        nz_nonz = len(z_centerline)
        x_display = [0 for i in range(x_centerline_fit.shape[0])]
        y_display = [0 for i in range(y_centerline_fit.shape[0])]
        for i in range(0, nz_nonz, 1):
            x_display[int(z_centerline[i] - z_centerline[0])] = x_centerline[i]
            y_display[int(z_centerline[i] - z_centerline[0])] = y_centerline[i]

        plt.figure(1)
        plt.subplot(2, 1, 1)
        plt.plot(z_centerline_fit, x_display, 'ro')
        plt.plot(z_centerline_fit, x_centerline_fit)
        plt.xlabel("Z")
        plt.ylabel("X")
        plt.title("x and x_fit coordinates")

        plt.subplot(2, 1, 2)
        plt.plot(z_centerline_fit, y_display, 'ro')
        plt.plot(z_centerline_fit, y_centerline_fit)
        plt.xlabel("Z")
        plt.ylabel("Y")
        plt.title("y and y_fit coordinates")
        plt.show()

    # Create an image with the centerline
    for iz in range(min_z_index, max_z_index + 1):
        data[
            round(x_centerline_fit[iz - min_z_index]),
            round(y_centerline_fit[iz - min_z_index]),
            iz] = 1  # if index is out of bounds here for hanning: either the segmentation has holes or labels have been added to the file
    # Write the centerline image in RPI orientation
    hdr.set_data_dtype('uint8')  # set imagetype to uint8
    sct.printv('\nWrite NIFTI volumes...', verbose)
    img = nibabel.Nifti1Image(data, None, hdr)
    nibabel.save(img, 'centerline.nii.gz')
    # Define name if output name is not specified
    if name_output == 'csa_volume.nii.gz' or name_output == '':
        # sct.generate_output_file('centerline.nii.gz', file_data+'_centerline'+ext_data, verbose)
        name_output = file_data + '_centerline' + ext_data
    sct.generate_output_file('centerline.nii.gz', name_output, verbose)

    # create a txt file with the centerline
    path, rad_output, ext = sct.extract_fname(name_output)
    name_output_txt = rad_output + '.txt'
    sct.printv('\nWrite text file...', verbose)
    file_results = open(name_output_txt, 'w')
    for i in range(min_z_index, max_z_index + 1):
        file_results.write(
            str(int(i)) + ' ' + str(x_centerline_fit[i - min_z_index]) + ' ' +
            str(y_centerline_fit[i - min_z_index]) + '\n')
    file_results.close()

    # Copy result into parent folder
    sct.run('cp ' + name_output_txt + ' ../')

    del data

    # come back to parent folder
    os.chdir('..')

    # Change orientation of the output centerline into input orientation
    sct.printv(
        '\nOrient centerline image to input orientation: ' + orientation,
        verbose)
    fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data
    set_orientation(path_tmp + '/' + name_output, orientation, name_output)

    # Remove temporary files
    if remove_temp_files:
        sct.printv('\nRemove temporary files...', verbose)
        sct.run('rm -rf ' + path_tmp, verbose)

    return name_output
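A hedged call sketch for extract_centerline above (mirrors the call made in code example #10 below; the file names are hypothetical):

fname_centerline = extract_centerline('t2_seg.nii.gz',
                                      remove_temp_files=1,
                                      name_output='t2_centerline.nii.gz',
                                      verbose=1,
                                      algo_fitting='hanning',
                                      type_window='hanning',
                                      window_length=80)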
Code example #8
def extract_centerline(
    fname_segmentation, remove_temp_files, verbose=0, algo_fitting="hanning", type_window="hanning", window_length=80
):

    # Extract path, file and extension
    fname_segmentation = os.path.abspath(fname_segmentation)
    path_data, file_data, ext_data = sct.extract_fname(fname_segmentation)

    # create temporary folder
    path_tmp = "tmp." + time.strftime("%y%m%d%H%M%S")
    sct.run("mkdir " + path_tmp)

    # copy files into tmp folder
    sct.run("cp " + fname_segmentation + " " + path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    # Change orientation of the input centerline into RPI
    sct.printv("\nOrient centerline to RPI orientation...", verbose)
    fname_segmentation_orient = "segmentation_rpi" + ext_data
    set_orientation(file_data + ext_data, "RPI", fname_segmentation_orient)

    # Get dimension
    sct.printv("\nGet dimensions...", verbose)
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_segmentation_orient)
    sct.printv(".. matrix size: " + str(nx) + " x " + str(ny) + " x " + str(nz), verbose)
    sct.printv(".. voxel size:  " + str(px) + "mm x " + str(py) + "mm x " + str(pz) + "mm", verbose)

    # Extract orientation of the input segmentation
    orientation = get_orientation(file_data + ext_data)
    sct.printv("\nOrientation of segmentation image: " + orientation, verbose)

    sct.printv("\nOpen segmentation volume...", verbose)
    file = nibabel.load(fname_segmentation_orient)
    data = file.get_data()
    hdr = file.get_header()

    # Extract min and max index in Z direction
    X, Y, Z = (data > 0).nonzero()
    min_z_index, max_z_index = min(Z), max(Z)
    x_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
    y_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
    z_centerline = [iz for iz in range(min_z_index, max_z_index + 1)]
    # Extract segmentation points and average per slice
    for iz in range(min_z_index, max_z_index + 1):
        x_seg, y_seg = (data[:, :, iz] > 0).nonzero()
        x_centerline[iz - min_z_index] = np.mean(x_seg)
        y_centerline[iz - min_z_index] = np.mean(y_seg)
    for k in range(len(X)):
        data[X[k], Y[k], Z[k]] = 0

    # extract centerline and smooth it
    x_centerline_fit, y_centerline_fit, z_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
        fname_segmentation_orient,
        type_window=type_window,
        window_length=window_length,
        algo_fitting=algo_fitting,
        verbose=verbose,
    )

    if verbose == 2:
        import matplotlib.pyplot as plt

        # Creation of a vector x that takes into account the distance between the labels
        nz_nonz = len(z_centerline)
        x_display = [0 for i in range(x_centerline_fit.shape[0])]
        y_display = [0 for i in range(y_centerline_fit.shape[0])]
        for i in range(0, nz_nonz, 1):
            x_display[int(z_centerline[i] - z_centerline[0])] = x_centerline[i]
            y_display[int(z_centerline[i] - z_centerline[0])] = y_centerline[i]

        plt.figure(1)
        plt.subplot(2, 1, 1)
        plt.plot(z_centerline_fit, x_display, "ro")
        plt.plot(z_centerline_fit, x_centerline_fit)
        plt.xlabel("Z")
        plt.ylabel("X")
        plt.title("x and x_fit coordinates")

        plt.subplot(2, 1, 2)
        plt.plot(z_centerline_fit, y_display, "ro")
        plt.plot(z_centerline_fit, y_centerline_fit)
        plt.xlabel("Z")
        plt.ylabel("Y")
        plt.title("y and y_fit coordinates")
        plt.show()

    # Create an image with the centerline
    for iz in range(min_z_index, max_z_index + 1):
        data[
            round(x_centerline_fit[iz - min_z_index]), round(y_centerline_fit[iz - min_z_index]), iz
        ] = (
            1
        )  # if index is out of bounds here for hanning: either the segmentation has holes or labels have been added to the file
    # Write the centerline image in RPI orientation
    hdr.set_data_dtype("uint8")  # set imagetype to uint8
    sct.printv("\nWrite NIFTI volumes...", verbose)
    img = nibabel.Nifti1Image(data, None, hdr)
    nibabel.save(img, "centerline.nii.gz")
    sct.generate_output_file("centerline.nii.gz", file_data + "_centerline" + ext_data, verbose)

    # create a txt file with the centerline
    file_name = file_data + "_centerline" + ".txt"
    sct.printv("\nWrite text file...", verbose)
    file_results = open(file_name, "w")
    for i in range(min_z_index, max_z_index + 1):
        file_results.write(
            str(int(i))
            + " "
            + str(x_centerline_fit[i - min_z_index])
            + " "
            + str(y_centerline_fit[i - min_z_index])
            + "\n"
        )
    file_results.close()

    # Copy result into parent folder
    sct.run("cp " + file_name + " ../")

    del data

    # come back to parent folder
    os.chdir("..")

    # Change orientation of the output centerline into input orientation
    sct.printv("\nOrient centerline image to input orientation: " + orientation, verbose)
    fname_segmentation_orient = "tmp.segmentation_rpi" + ext_data
    set_orientation(
        path_tmp + "/" + file_data + "_centerline" + ext_data, orientation, file_data + "_centerline" + ext_data
    )

    # Remove temporary files
    if remove_temp_files:
        sct.printv("\nRemove temporary files...", verbose)
        sct.run("rm -rf " + path_tmp, verbose)

    return file_data + "_centerline" + ext_data
Code example #9
def main():

    # Initialization
    fname_anat = ''
    fname_centerline = ''
    centerline_fitting = 'polynome'
    remove_temp_files = param.remove_temp_files
    interp = param.interp
    degree_poly = param.deg_poly

    # extract path of the script
    path_script = os.path.dirname(__file__) + '/'

    # Parameters for debug mode
    if param.debug == 1:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        status, path_sct_data = commands.getstatusoutput(
            'echo $SCT_TESTING_DATA_DIR')
        fname_anat = path_sct_data + '/t2/t2.nii.gz'
        fname_centerline = path_sct_data + '/t2/t2_seg.nii.gz'
    else:
        # Check input param
        try:
            opts, args = getopt.getopt(sys.argv[1:], 'hi:c:r:d:f:s:')
        except getopt.GetoptError as err:
            print str(err)
            usage()
        if not opts:
            usage()
        for opt, arg in opts:
            if opt == '-h':
                usage()
            elif opt in ('-i'):
                fname_anat = arg
            elif opt in ('-c'):
                fname_centerline = arg
            elif opt in ('-r'):
                remove_temp_files = int(arg)
            elif opt in ('-d'):
                degree_poly = int(arg)
            elif opt in ('-f'):
                centerline_fitting = str(arg)
            elif opt in ('-s'):
                interp = str(arg)

    # display usage if a mandatory argument is not provided
    if fname_anat == '' or fname_centerline == '':
        usage()

    # check existence of input files
    sct.check_file_exist(fname_anat)
    sct.check_file_exist(fname_centerline)

    # extract path/file/extension
    path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)

    # Display arguments
    print '\nCheck input arguments...'
    print '  Input volume ...................... ' + fname_anat
    print '  Centerline ........................ ' + fname_centerline
    print ''

    # Get input image orientation
    input_image_orientation = get_orientation(fname_anat)

    # Reorient input data into RL PA IS orientation
    set_orientation(fname_anat, 'RPI', 'tmp.anat_orient.nii')
    set_orientation(fname_centerline, 'RPI', 'tmp.centerline_orient.nii')

    # Open centerline
    #==========================================================================================
    print '\nGet dimensions of input centerline...'
    nx, ny, nz, nt, px, py, pz, pt = Image('tmp.centerline_orient.nii').dim
    print '.. matrix size: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz)
    print '.. voxel size:  ' + str(px) + 'mm x ' + str(py) + 'mm x ' + str(
        pz) + 'mm'

    print '\nOpen centerline volume...'
    file = nibabel.load('tmp.centerline_orient.nii')
    data = file.get_data()

    X, Y, Z = (data > 0).nonzero()
    min_z_index, max_z_index = min(Z), max(Z)

    # loop across z and associate x,y coordinate with the point having maximum intensity
    x_centerline = [0 for iz in range(min_z_index, max_z_index + 1, 1)]
    y_centerline = [0 for iz in range(min_z_index, max_z_index + 1, 1)]
    z_centerline = [iz for iz in range(min_z_index, max_z_index + 1, 1)]

    # Two possible scenarios:
    # 1. The centerline is probabilistic: each slice contains voxels with the probability of containing the centerline [0:...:1].
    # We only take the maximum value of the image to approximate the centerline.
    # 2. The centerline/segmentation image contains many pixels per slice with values {0,1}.
    # We take all the points and approximate the centerline on all these points.

    X, Y, Z = ((data < 1) * (data > 0)).nonzero()  # X is empty if binary image
    if (len(X) > 0):  # Scenario 1
        for iz in range(min_z_index, max_z_index + 1, 1):
            x_centerline[iz - min_z_index], y_centerline[
                iz - min_z_index] = numpy.unravel_index(
                    data[:, :, iz].argmax(), data[:, :, iz].shape)
    else:  # Scenario 2
        for iz in range(min_z_index, max_z_index + 1, 1):
            x_seg, y_seg = (data[:, :, iz] > 0).nonzero()
            if len(x_seg) > 0:
                x_centerline[iz - min_z_index] = numpy.mean(x_seg)
                y_centerline[iz - min_z_index] = numpy.mean(y_seg)

    # TODO: find a way to do the previous loop with this, which is more neat:
    # [numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape) for iz in range(0,nz,1)]

    # clear variable
    del data

    # Fit the centerline points with the kind of curve given as argument of the script and return the new smoothed coordinates
    if centerline_fitting == 'splines':
        try:
            x_centerline_fit, y_centerline_fit = b_spline_centerline(
                x_centerline, y_centerline, z_centerline)
        except ValueError:
            print "splines fitting doesn't work, trying with polynomial fitting...\n"
            x_centerline_fit, y_centerline_fit = polynome_centerline(
                x_centerline, y_centerline, z_centerline)
    elif centerline_fitting == 'polynome':
        x_centerline_fit, y_centerline_fit = polynome_centerline(
            x_centerline, y_centerline, z_centerline)

    #==========================================================================================
    # Split input volume
    print '\nSplit input volume...'
    from sct_split_data import split_data
    if not split_data('tmp.anat_orient.nii', 2, '_z'):
        sct.printv('ERROR in split_data.', 1, 'error')
    file_anat_split = [
        'tmp.anat_orient_z' + str(z).zfill(4) for z in range(0, nz, 1)
    ]

    # initialize variables
    file_mat_inv_cumul = [
        'tmp.mat_inv_cumul_z' + str(z).zfill(4) for z in range(0, nz, 1)
    ]
    z_init = min_z_index
    displacement_max_z_index = x_centerline_fit[
        z_init - min_z_index] - x_centerline_fit[max_z_index - min_z_index]

    # write centerline as text file
    print '\nGenerate fitted transformation matrices...'
    file_mat_inv_cumul_fit = [
        'tmp.mat_inv_cumul_fit_z' + str(z).zfill(4) for z in range(0, nz, 1)
    ]
    for iz in range(min_z_index, max_z_index + 1, 1):
        # compute inverse cumulative fitted transformation matrix
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        if (x_centerline[iz - min_z_index] == 0
                and y_centerline[iz - min_z_index] == 0):
            displacement = 0
        else:
            displacement = x_centerline_fit[
                z_init - min_z_index] - x_centerline_fit[iz - min_z_index]
        fid.write('%i %i %i %f\n' % (1, 0, 0, displacement))
        fid.write('%i %i %i %f\n' % (0, 1, 0, 0))
        fid.write('%i %i %i %i\n' % (0, 0, 1, 0))
        fid.write('%i %i %i %i\n' % (0, 0, 0, 1))
        fid.close()

    # we complete the displacement matrix in z direction
    for iz in range(0, min_z_index, 1):
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        fid.write('%i %i %i %f\n' % (1, 0, 0, 0))
        fid.write('%i %i %i %f\n' % (0, 1, 0, 0))
        fid.write('%i %i %i %i\n' % (0, 0, 1, 0))
        fid.write('%i %i %i %i\n' % (0, 0, 0, 1))
        fid.close()
    for iz in range(max_z_index + 1, nz, 1):
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        fid.write('%i %i %i %f\n' % (1, 0, 0, displacement_max_z_index))
        fid.write('%i %i %i %f\n' % (0, 1, 0, 0))
        fid.write('%i %i %i %i\n' % (0, 0, 1, 0))
        fid.write('%i %i %i %i\n' % (0, 0, 0, 1))
        fid.close()

    # apply transformations to data
    print '\nApply fitted transformation matrices...'
    file_anat_split_fit = [
        'tmp.anat_orient_fit_z' + str(z).zfill(4) for z in range(0, nz, 1)
    ]
    for iz in range(0, nz, 1):
        # forward cumulative transformation to data
        sct.run(fsloutput + 'flirt -in ' + file_anat_split[iz] + ' -ref ' +
                file_anat_split[iz] + ' -applyxfm -init ' +
                file_mat_inv_cumul_fit[iz] + ' -out ' +
                file_anat_split_fit[iz] + ' -interp ' + interp)

    # Merge into 4D volume
    print '\nMerge into 4D volume...'
    from sct_concat_data import concat_data
    from glob import glob
    concat_data(glob('tmp.anat_orient_fit_z*.nii'),
                'tmp.anat_orient_fit.nii',
                dim=2)
    # sct.run(fsloutput+'fslmerge -z tmp.anat_orient_fit tmp.anat_orient_fit_z*')

    # Reorient data as it was before
    print '\nReorient data back into native orientation...'
    set_orientation('tmp.anat_orient_fit.nii', input_image_orientation,
                    'tmp.anat_orient_fit_reorient.nii')

    # Generate output file (in current folder)
    print '\nGenerate output file (in current folder)...'
    sct.generate_output_file('tmp.anat_orient_fit_reorient.nii',
                             file_anat + '_flatten' + ext_anat)

    # Delete temporary files
    if remove_temp_files == 1:
        print '\nDelete temporary files...'
        sct.run('rm -rf tmp.*')

    # to view results
    print '\nDone! To view results, type:'
    print 'fslview ' + file_anat + ext_anat + ' ' + file_anat + '_flatten' + ext_anat + ' &\n'
Code example #10
def main(list_file,
         param,
         output_file_name=None,
         remove_temp_files=1,
         verbose=0):

    path, file, ext = sct.extract_fname(list_file[0])

    # create temporary folder
    path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir ' + path_tmp)

    # copy files into tmp folder
    sct.printv('\nCopy files into tmp folder...', verbose)
    for i in range(len(list_file)):
        file_temp = os.path.abspath(list_file[i])
        sct.run('cp ' + file_temp + ' ' + path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    ## Concatenation of the files

    # Concatenation : sum of matrices
    file_0 = load(file + ext)
    data_concatenation = file_0.get_data()
    hdr_0 = file_0.get_header()
    orientation_file_0 = get_orientation(list_file[0])
    if len(list_file) > 0:
        for i in range(1, len(list_file)):
            orientation_file_temp = get_orientation(list_file[i])
            if orientation_file_0 != orientation_file_temp:
                print "ERROR: The files ", list_file[0], " and ", list_file[
                    i], " are not in the same orientation. Use sct_orientation to change the orientation of a file."
                sys.exit(2)
            file_temp = load(list_file[i])
            data_temp = file_temp.get_data()
            data_concatenation = data_concatenation + data_temp

    # Save concatenation as a file
    print '\nWrite NIFTI volumes...'
    img = Nifti1Image(data_concatenation, None, hdr_0)
    save(img, 'concatenation_file.nii.gz')

    # Applying nurbs to the concatenation and save file as binary file
    fname_output = extract_centerline('concatenation_file.nii.gz',
                                      remove_temp_files=remove_temp_files,
                                      verbose=verbose,
                                      algo_fitting=param.algo_fitting,
                                      type_window=param.type_window,
                                      window_length=param.window_length)

    # Rename files after processing
    if output_file_name is None:
        output_file_name = "generated_centerline.nii.gz"

    os.rename(fname_output, output_file_name)
    path_binary, file_binary, ext_binary = sct.extract_fname(output_file_name)
    os.rename('concatenation_file_centerline.txt', file_binary + '.txt')

    # Process for a binary file as output:
    sct.run('cp ' + output_file_name + ' ../')

    # Process for a text file as output:
    sct.run('cp ' + file_binary + '.txt' + ' ../')

    os.chdir('../')
    # Remove temporary files
    if remove_temp_files:
        print('\nRemove temporary files...')
        sct.run('rm -rf ' + path_tmp)
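A hedged call sketch for the concatenation driver above (file names are hypothetical; param is assumed to expose algo_fitting, type_window and window_length exactly as used inside the function):

# Hypothetical usage: merge two partial segmentations and fit one centerline through them.
main(['seg_upper.nii.gz', 'seg_lower.nii.gz'], param,
     output_file_name='full_centerline.nii.gz',
     remove_temp_files=1, verbose=1)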
Code example #11
def create_mask():

    fsloutput = 'export FSLOUTPUTTYPE=NIFTI; '  # for faster processing, all outputs are in NIFTI

    # display usage if a mandatory argument is not provided
    if param.fname_data == '' or param.method == '':
        sct.printv('\nERROR: All mandatory arguments are not provided. See usage (add -h).\n', 1, 'error')

    # parse argument for method
    method_list = param.method.replace(' ', '').split(',')  # remove spaces and parse with comma
    # method_list = param.method.split(',')  # parse with comma
    method_type = method_list[0]

    # check existence of method type
    if not method_type in param.method_list:
        sct.printv('\nERROR in '+os.path.basename(__file__)+': Method "'+method_type+'" is not recognized. See usage (add -h).\n', 1, 'error')

    # check method val
    if not method_type == 'center':
        method_val = method_list[1]
    del method_list

    # check existence of shape
    if not param.shape in param.shape_list:
        sct.printv('\nERROR in '+os.path.basename(__file__)+': Shape "'+param.shape+'" is not recognized. See usage (add -h).\n', 1, 'error')

    # check existence of input files
    sct.printv('\ncheck existence of input files...', param.verbose)
    sct.check_file_exist(param.fname_data, param.verbose)
    if method_type == 'centerline':
        sct.check_file_exist(method_val, param.verbose)

    # check if orientation is RPI
    if not get_orientation(param.fname_data) == 'RPI':
        sct.printv('\nERROR in '+os.path.basename(__file__)+': Orientation of input image should be RPI. Use sct_orientation to put your image in RPI.\n', 1, 'error')

    # display input parameters
    sct.printv('\nInput parameters:', param.verbose)
    sct.printv('  data ..................'+param.fname_data, param.verbose)
    sct.printv('  method ................'+method_type, param.verbose)

    # Extract path/file/extension
    path_data, file_data, ext_data = sct.extract_fname(param.fname_data)

    # Get output folder and file name
    if param.fname_out == '':
        param.fname_out = param.file_prefix+file_data+ext_data
    #fname_out = os.path.abspath(path_out+file_out+ext_out)

    # create temporary folder
    sct.printv('\nCreate temporary folder...', param.verbose)
    path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1)
    sct.run('mkdir '+path_tmp, param.verbose)

    # Copying input data to tmp folder and convert to nii
    # NB: cannot use c3d here because c3d cannot convert 4D data.
    sct.printv('\nCopying input data to tmp folder and convert to nii...', param.verbose)
    sct.run('cp '+param.fname_data+' '+path_tmp+'data'+ext_data, param.verbose)
    if method_type == 'centerline':
        sct.run('isct_c3d '+method_val+' -o '+path_tmp+'/centerline.nii.gz')

    # go to tmp folder
    os.chdir(path_tmp)

    # convert to nii format
    sct.run('fslchfiletype NIFTI data', param.verbose)

    # Get dimensions of data
    sct.printv('\nGet dimensions of data...', param.verbose)
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('data.nii')
    sct.printv('  ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz)+ ' x ' + str(nt), param.verbose)
    # in case user input 4d data
    if nt != 1:
        sct.printv('WARNING in '+os.path.basename(__file__)+': Input image is 4D but output mask will be 3D.', param.verbose, 'warning')
        # extract first volume to have 3d reference
        sct.run(fsloutput+'fslroi data data -0 1', param.verbose)

    if method_type == 'coord':
        # parse to get coordinate
        coord = map(int, method_val.split('x'))

    if method_type == 'point':
        # get file name
        fname_point = method_val
        # extract coordinate of point
        sct.printv('\nExtract coordinate of point...', param.verbose)
        status, output = sct.run('sct_label_utils -i '+fname_point+' -t display-voxel', param.verbose)
        # parse to get coordinate
        coord = output[output.find('Position=')+10:-17].split(',')

    if method_type == 'center':
        # set coordinate at center of FOV
        coord = round(float(nx)/2), round(float(ny)/2)

    if method_type == 'centerline':
        # get name of centerline from user argument
        fname_centerline = 'centerline.nii.gz'
    else:
        # generate volume with line along Z at coordinates 'coord'
        sct.printv('\nCreate line...', param.verbose)
        fname_centerline = create_line('data.nii', coord, nz)

    # create mask
    sct.printv('\nCreate mask...', param.verbose)
    centerline = nibabel.load(fname_centerline)  # open centerline
    hdr = centerline.get_header()  # get header
    hdr.set_data_dtype('uint8')  # set imagetype to uint8
    data_centerline = centerline.get_data()  # get centerline
    z_centerline = [iz for iz in range(0, nz, 1) if data_centerline[:, :, iz].any()]
    nz = len(z_centerline)
    # get center of mass of the centerline
    cx = [0] * nz
    cy = [0] * nz
    for iz in range(0, nz, 1):
        cx[iz], cy[iz] = ndimage.measurements.center_of_mass(numpy.array(data_centerline[:, :, z_centerline[iz]]))
    # create 2d masks
    file_mask = 'data_mask'
    for iz in range(nz):
        center = numpy.array([cx[iz], cy[iz]])
        mask2d = create_mask2d(center, param.shape, param.size, nx, ny)
        # Write NIFTI volumes
        img = nibabel.Nifti1Image(mask2d, None, hdr)
        nibabel.save(img, (file_mask+str(iz)+'.nii'))
    # merge along Z
    cmd = 'fslmerge -z mask '
    for iz in range(nz):
        cmd = cmd + file_mask+str(iz)+' '
    status, output = sct.run(cmd, param.verbose)
    # copy geometry
    sct.run(fsloutput+'fslcpgeom data mask', param.verbose)
    # sct.run('fslchfiletype NIFTI mask', param.verbose)

    # come back to parent folder
    os.chdir('..')

    # Generate output files
    sct.printv('\nGenerate output files...', param.verbose)
    sct.generate_output_file(path_tmp+'mask.nii.gz', param.fname_out)

    # Remove temporary files
    if param.remove_tmp_files == 1:
        sct.printv('\nRemove temporary files...', param.verbose)
        sct.run('rm -rf '+path_tmp, param.verbose)

    # to view results
    sct.printv('\nDone! To view results, type:', param.verbose)
    sct.printv('fslview '+param.fname_data+' '+param.fname_out+' -l Red -t 0.5 &', param.verbose, 'info')
    print
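A minimal standalone sketch of the per-slice centre-of-mass step used in create_mask above (same scipy.ndimage call; the array values are illustrative):

import numpy
from scipy import ndimage

slice_data = numpy.zeros((5, 5))
slice_data[2, 3] = 1                                        # a single centerline voxel in this slice
cx, cy = ndimage.measurements.center_of_mass(slice_data)    # -> (2.0, 3.0)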
Code example #12
def compute_csa(fname_segmentation, name_method, volume_output, verbose, remove_temp_files, step, smoothing_param, figure_fit, name_output, slices, vert_levels, path_to_template, algo_fitting = 'hanning', type_window = 'hanning', window_length = 80):

    # Extract path, file and extension
    fname_segmentation = os.path.abspath(fname_segmentation)
    path_data, file_data, ext_data = sct.extract_fname(fname_segmentation)

    # create temporary folder
    sct.printv('\nCreate temporary folder...', verbose)
    path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1)
    sct.run('mkdir '+path_tmp, verbose)

    # Copying input data to tmp folder and convert to nii
    sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose)
    sct.run('isct_c3d '+fname_segmentation+' -o '+path_tmp+'segmentation.nii')

    # go to tmp folder
    os.chdir(path_tmp)

    # Change orientation of the input segmentation into RPI
    sct.printv('\nChange orientation of the input segmentation into RPI...', verbose)
    fname_segmentation_orient = set_orientation('segmentation.nii', 'RPI', 'segmentation_orient.nii')

    # Get size of data
    sct.printv('\nGet data dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_segmentation_orient).dim
    sct.printv('  ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose)

    # Open segmentation volume
    sct.printv('\nOpen segmentation volume...', verbose)
    file_seg = nibabel.load(fname_segmentation_orient)
    data_seg = file_seg.get_data()
    hdr_seg = file_seg.get_header()

    # # Extract min and max index in Z direction
    X, Y, Z = (data_seg > 0).nonzero()
    min_z_index, max_z_index = min(Z), max(Z)
    # Xp, Yp = (data_seg[:, :, 0] >= 0).nonzero()  # X and Y range

    # extract centerline and smooth it
    x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(fname_segmentation_orient, algo_fitting=algo_fitting, type_window=type_window, window_length=window_length, verbose=verbose)
    z_centerline_scaled = [x*pz for x in z_centerline]

    # Compute CSA
    sct.printv('\nCompute CSA...', verbose)

    # Empty arrays in which CSA for each z slice will be stored
    csa = np.zeros(max_z_index-min_z_index+1)
    # csa = [0.0 for i in xrange(0, max_z_index-min_z_index+1)]

    for iz in xrange(0, len(z_centerline)):

        # compute the vector normal to the plane
        normal = normalize(np.array([x_centerline_deriv[iz], y_centerline_deriv[iz], z_centerline_deriv[iz]]))

        # compute the angle between the normal vector of the plane and the vector z
        angle = np.arccos(np.dot(normal, [0, 0, 1]))

        # compute the number of voxels, assuming the segmentation is coded for partial volume effect between 0 and 1.
        number_voxels = sum(sum(data_seg[:, :, iz+min_z_index]))

        # compute CSA, by scaling with voxel size (in mm) and adjusting for oblique plane
        csa[iz] = number_voxels * px * py * np.cos(angle)

    if smoothing_param:
        from msct_smooth import smoothing_window
        sct.printv('\nSmooth CSA across slices...', verbose)
        sct.printv('.. Hanning window: '+str(smoothing_param)+' mm', verbose)
        csa_smooth = smoothing_window(csa, window_len=smoothing_param/pz, window='hanning', verbose=0)
        # display figure
        if verbose == 2:
            import matplotlib.pyplot as plt
            plt.figure()
            pltx, = plt.plot(z_centerline_scaled, csa, 'bo')
            pltx_fit, = plt.plot(z_centerline_scaled, csa_smooth, 'r', linewidth=2)
            plt.title("Cross-sectional area (CSA)")
            plt.xlabel('z (mm)')
            plt.ylabel('CSA (mm^2)')
            plt.legend([pltx, pltx_fit], ['Raw', 'Smoothed'])
            plt.show()
        # update variable
        csa = csa_smooth

    # Create output text file
    sct.printv('\nWrite text file...', verbose)
    file_results = open('csa.txt', 'w')
    for i in range(min_z_index, max_z_index+1):
        file_results.write(str(int(i)) + ',' + str(csa[i-min_z_index])+'\n')
        # Display results
        sct.printv('z='+str(i-min_z_index)+': '+str(csa[i-min_z_index])+' mm^2', verbose, 'bold')
    file_results.close()

    # output volume of csa values
    if volume_output:
        sct.printv('\nCreate volume of CSA values...', verbose)
        # get orientation of the input data
        orientation = get_orientation('segmentation.nii')
        data_seg = data_seg.astype(np.float32, copy=False)
        # loop across slices
        for iz in range(min_z_index, max_z_index+1):
            # retrieve seg pixels
            x_seg, y_seg = (data_seg[:, :, iz] > 0).nonzero()
            seg = [[x_seg[i],y_seg[i]] for i in range(0, len(x_seg))]
            # loop across pixels in segmentation
            for i in seg:
                # replace value with csa value
                data_seg[i[0], i[1], iz] = csa[iz-min_z_index]
        # create header
        hdr_seg.set_data_dtype('float32')  # set image type to float32
        # save volume
        img = nibabel.Nifti1Image(data_seg, None, hdr_seg)
        nibabel.save(img, 'csa_RPI.nii')
        # Change orientation of the output centerline into input orientation
        fname_csa_volume = set_orientation('csa_RPI.nii', orientation, 'csa_RPI_orient.nii')

    # come back to parent folder
    os.chdir('..')

    # Generate output files
    sct.printv('\nGenerate output files...', verbose)
    from shutil import copyfile
    copyfile(path_tmp+'csa.txt', path_data+param.fname_csa)
    # sct.generate_output_file(path_tmp+'csa.txt', path_data+param.fname_csa)  # extension already included in param.fname_csa
    if volume_output:
        sct.generate_output_file(fname_csa_volume, path_data+name_output)  # extension already included in name_output

    # average csa across vertebral levels or slices if asked (flag -z or -l)
    if slices or vert_levels:

        if vert_levels and not path_to_template:
            sct.printv('\nERROR: Path to template is missing. See usage.\n', 1, 'error')
            sys.exit(2)
        elif vert_levels and path_to_template:
            abs_path_to_template = os.path.abspath(path_to_template)

        # go to tmp folder
        os.chdir(path_tmp)

        # create temporary folder
        sct.printv('\nCreate temporary folder to average csa...', verbose)
        path_tmp_extract_metric = sct.slash_at_the_end('label_temp', 1)
        sct.run('mkdir '+path_tmp_extract_metric, verbose)

        # Copying output CSA volume in the temporary folder
        sct.printv('\nCopy data to tmp folder...', verbose)
        sct.run('cp '+fname_segmentation+' '+path_tmp_extract_metric)

        # create file info_label
        path_fname_seg, file_fname_seg, ext_fname_seg = sct.extract_fname(fname_segmentation)
        create_info_label('info_label.txt', path_tmp_extract_metric, file_fname_seg+ext_fname_seg)

        # average CSA
        if slices:
            os.system("sct_extract_metric -i "+path_data+name_output+" -f "+path_tmp_extract_metric+" -m wa -o ../csa_mean.txt -z "+slices)
        if vert_levels:
            sct.run('cp -R '+abs_path_to_template+' .')
            os.system("sct_extract_metric -i "+path_data+name_output+" -f "+path_tmp_extract_metric+" -m wa -o ../csa_mean.txt -v "+vert_levels)

        os.chdir('..')

        # Remove temporary files
        print('\nRemove temporary folder used to average CSA...')
        sct.run('rm -rf '+path_tmp_extract_metric)

    # Remove temporary files
    if remove_temp_files:
        print('\nRemove temporary files...')
        sct.run('rm -rf '+path_tmp)


def main():

    # Initialization
    fname_anat = ''
    fname_point = ''
    slice_gap = param.gap
    remove_tmp_files = param.remove_tmp_files
    gaussian_kernel = param.gaussian_kernel
    start_time = time.time()

    # get path of the toolbox
    status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
    path_sct = sct.slash_at_the_end(path_sct, 1)

    # Parameters for debug mode
    if param.debug == 1:
        sct.printv('\n*** WARNING: DEBUG MODE ON ***\n\t\t\tCurrent working directory: '+os.getcwd(), 'warning')
        status, path_sct_testing_data = commands.getstatusoutput('echo $SCT_TESTING_DATA_DIR')
        fname_anat = path_sct_testing_data+'/t2/t2.nii.gz'
        fname_point = path_sct_testing_data+'/t2/t2_centerline_init.nii.gz'
        slice_gap = 5

    else:
        # Check input param
        try:
            opts, args = getopt.getopt(sys.argv[1:],'hi:p:g:r:k:')
        except getopt.GetoptError as err:
            print str(err)
            usage()
        if not opts:
            usage()
        for opt, arg in opts:
            if opt == '-h':
                usage()
            elif opt in ('-i'):
                fname_anat = arg
            elif opt in ('-p'):
                fname_point = arg
            elif opt in ('-g'):
                slice_gap = int(arg)
            elif opt in ('-r'):
                remove_tmp_files = int(arg)
            elif opt in ('-k'):
                gaussian_kernel = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_anat == '' or fname_point == '':
        usage()

    # check existence of input files
    sct.check_file_exist(fname_anat)
    sct.check_file_exist(fname_point)

    # extract path/file/extension
    path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
    path_point, file_point, ext_point = sct.extract_fname(fname_point)

    # extract path of schedule file
    # TODO: include schedule file in sct
    # TODO: check existence of schedule file
    file_schedule = path_sct + param.schedule_file

    # Get input image orientation
    input_image_orientation = get_orientation(fname_anat)

    # Display arguments
    print '\nCheck input arguments...'
    print '  Anatomical image:     '+fname_anat
    print '  Orientation:          '+input_image_orientation
    print '  Point in spinal cord: '+fname_point
    print '  Slice gap:            '+str(slice_gap)
    print '  Gaussian kernel:      '+str(gaussian_kernel)
    print '  Degree of polynomial: '+str(param.deg_poly)

    # create temporary folder
    print('\nCreate temporary folder...')
    path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
    sct.create_folder(path_tmp)
    print '\nCopy input data...'
    sct.run('cp '+fname_anat+ ' '+path_tmp+'/tmp.anat'+ext_anat)
    sct.run('cp '+fname_point+ ' '+path_tmp+'/tmp.point'+ext_point)

    # go to temporary folder
    os.chdir(path_tmp)

    # convert to nii
    sct.run('fslchfiletype NIFTI tmp.anat')
    sct.run('fslchfiletype NIFTI tmp.point')

    # Reorient input anatomical volume into RL PA IS orientation
    print '\nReorient input volume to RL PA IS orientation...'
    #sct.run(sct.fsloutput + 'fslswapdim tmp.anat RL PA IS tmp.anat_orient')
    set_orientation('tmp.anat.nii', 'RPI', 'tmp.anat_orient.nii')
    # Reorient binary point into RL PA IS orientation
    print '\nReorient binary point into RL PA IS orientation...'
    sct.run(sct.fsloutput + 'fslswapdim tmp.point RL PA IS tmp.point_orient')
    set_orientation('tmp.point.nii', 'RPI', 'tmp.point_orient')

    # Get image dimensions
    print '\nGet image dimensions...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('tmp.anat_orient')
    print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
    print '.. voxel size:  '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'

    # Split input volume
    print '\nSplit input volume...'
    sct.run(sct.fsloutput + 'fslsplit tmp.anat_orient tmp.anat_orient_z -z')
    file_anat_split = ['tmp.anat_orient_z'+str(z).zfill(4) for z in range(0,nz,1)]

    # Get the coordinates of the input point
    print '\nGet the coordinates of the input point...'
    file = nibabel.load('tmp.point_orient.nii')
    data = file.get_data()
    x_init, y_init, z_init = (data > 0).nonzero()
    x_init = x_init[0]
    y_init = y_init[0]
    z_init = z_init[0]
    print '('+str(x_init)+', '+str(y_init)+', '+str(z_init)+')'

    # Extract the slice corresponding to z=z_init
    print '\nExtract the slice corresponding to z='+str(z_init)+'...'
    file_point_split = ['tmp.point_orient_z'+str(z).zfill(4) for z in range(0,nz,1)]
    sct.run(sct.fsloutput+'fslroi tmp.point_orient '+file_point_split[z_init]+' 0 -1 0 -1 '+str(z_init)+' 1')

    # Create gaussian mask from point
    print '\nCreate gaussian mask from point...'
    file_mask_split = ['tmp.mask_orient_z'+str(z).zfill(4) for z in range(0,nz,1)]
    sct.run(sct.fsloutput+'fslmaths '+file_point_split[z_init]+' -s '+str(gaussian_kernel)+' '+file_mask_split[z_init])

    # Obtain max value from mask
    print '\nFind maximum value from mask...'
    file = nibabel.load(file_mask_split[z_init]+'.nii')
    data = file.get_data()
    max_value_mask = numpy.max(data)
    print '..'+str(max_value_mask)

    # Normalize mask between 0 and 1
    print '\nNormalize mask between 0 and 1...'
    sct.run(sct.fsloutput+'fslmaths '+file_mask_split[z_init]+' -div '+str(max_value_mask)+' '+file_mask_split[z_init])

    ## Take the square of the mask
    #print '\nCalculate the square of the mask...'
    #sct.run(sct.fsloutput+'fslmaths '+file_mask_split[z_init]+' -mul '+file_mask_split[z_init]+' '+file_mask_split[z_init])

    # initialize variables
    file_mat = ['tmp.mat_z'+str(z).zfill(4) for z in range(0,nz,1)]
    file_mat_inv = ['tmp.mat_inv_z'+str(z).zfill(4) for z in range(0,nz,1)]
    file_mat_inv_cumul = ['tmp.mat_inv_cumul_z'+str(z).zfill(4) for z in range(0,nz,1)]

    # create identity matrix for initial transformation matrix
    fid = open(file_mat_inv_cumul[z_init], 'w')
    fid.write('%i %i %i %i\n' %(1, 0, 0, 0) )
    fid.write('%i %i %i %i\n' %(0, 1, 0, 0) )
    fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
    fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
    fid.close()
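    # The identity matrix seeds the cumulative chain at z_init, so the first
    # concatenation below simply equals the inverse of the first slice-to-slice transform.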

    # initialize centerline: give value corresponding to initial point
    x_centerline = [x_init]
    y_centerline = [y_init]
    z_centerline = [z_init]
    warning_count = 0

    # go up (1), then down (2) in reference to the binary point
    for iUpDown in range(1, 3):

        if iUpDown == 1:
            # z increases
            slice_gap_signed = slice_gap
        elif iUpDown == 2:
            # z decreases
            slice_gap_signed = -slice_gap
            # reverse centerline (because values will be appended at the end)
            x_centerline.reverse()
            y_centerline.reverse()
            z_centerline.reverse()

        # initialization before looping
        z_dest = z_init # point given by user
        z_src = z_dest + slice_gap_signed

        # continue looping if 0 < z < nz
        while 0 <= z_src <= nz-1:

            # print current z:
            print 'z='+str(z_src)+':'

            # estimate transformation
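            # (The custom -schedule file is expected to restrict FLIRT to an in-plane,
            # slice-to-slice transformation, and -inweight/-refweight use the Gaussian
            # mask so the normcorr cost is dominated by the region around the cord.)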
            sct.run(fsloutput+'flirt -in '+file_anat_split[z_src]+' -ref '+file_anat_split[z_dest]+' -schedule '+file_schedule+ ' -verbose 0 -omat '+file_mat[z_src]+' -cost normcorr -forcescaling -inweight '+file_mask_split[z_dest]+' -refweight '+file_mask_split[z_dest])

            # display transfo
            status, output = sct.run('cat '+file_mat[z_src])
            print output

            # check if transformation is bigger than 1.5x slice_gap
            tx = float(output.split()[3])
            ty = float(output.split()[7])
            norm_txy = numpy.linalg.norm([tx, ty],ord=2)
            if norm_txy > 1.5*slice_gap:
                print 'WARNING: Transformation is too large --> using previous one.'
                warning_count = warning_count + 1
                # if previous transformation exists, replace current one with previous one
                if os.path.isfile(file_mat[z_dest]):
                    sct.run('cp '+file_mat[z_dest]+' '+file_mat[z_src])

            # estimate inverse transformation matrix
            sct.run('convert_xfm -omat '+file_mat_inv[z_src]+' -inverse '+file_mat[z_src])

            # compute cumulative transformation
            sct.run('convert_xfm -omat '+file_mat_inv_cumul[z_src]+' -concat '+file_mat_inv[z_src]+' '+file_mat_inv_cumul[z_dest])
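            # convert_xfm -concat composes the two transforms, so the cumulative inverse
            # matrix maps the initial slice's space onto the current slice; it is applied
            # to the Gaussian mask below and also provides the centerline offsets.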

            # apply inverse cumulative transformation to initial gaussian mask (to put it in src space)
            sct.run(fsloutput+'flirt -in '+file_mask_split[z_init]+' -ref '+file_mask_split[z_init]+' -applyxfm -init '+file_mat_inv_cumul[z_src]+' -out '+file_mask_split[z_src])

            # open inverse cumulative transformation file and generate centerline
            fid = open(file_mat_inv_cumul[z_src])
            mat = fid.read().split()
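            # FLIRT .mat files contain a 4x4 affine written row by row, so after split()
            # mat[3] and mat[7] hold the x and y translations of the cumulative transform.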
            x_centerline.append(x_init + float(mat[3]))
            y_centerline.append(y_init + float(mat[7]))
            z_centerline.append(z_src)
            #z_index = z_index+1

            # define new z_dest (target slice) and new z_src (moving slice)
            z_dest = z_dest + slice_gap_signed
            z_src = z_src + slice_gap_signed


    # Reconstruct centerline
    # ====================================================================================================

    # reverse back centerline (because it's been reversed once, so now all values are in the right order)
    x_centerline.reverse()
    y_centerline.reverse()
    z_centerline.reverse()

    # fit centerline in the Z-X plane using polynomial function
    print '\nFit centerline in the Z-X plane using polynomial function...'
    coeffsx = numpy.polyfit(z_centerline, x_centerline, deg=param.deg_poly)
    polyx = numpy.poly1d(coeffsx)
    x_centerline_fit = numpy.polyval(polyx, z_centerline)
    # calculate RMSE
    rmse = numpy.linalg.norm(x_centerline_fit-x_centerline)/numpy.sqrt( len(x_centerline) )
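    # rmse = ||x_fit - x|| / sqrt(N), i.e. the root-mean-square residual in voxels;
    # it is converted to mm below by multiplying by the in-plane voxel size px.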
    # calculate max absolute error
    max_abs = numpy.max( numpy.abs(x_centerline_fit-x_centerline) )
    print '.. RMSE (in mm): '+str(rmse*px)
    print '.. Maximum absolute error (in mm): '+str(max_abs*px)

    # fit centerline in the Z-Y plane using polynomial function
    print '\nFit centerline in the Z-Y plane using polynomial function...'
    coeffsy = numpy.polyfit(z_centerline, y_centerline, deg=param.deg_poly)
    polyy = numpy.poly1d(coeffsy)
    y_centerline_fit = numpy.polyval(polyy, z_centerline)
    # calculate RMSE
    rmse = numpy.linalg.norm(y_centerline_fit-y_centerline)/numpy.sqrt( len(y_centerline) )
    # calculate max absolute error
    max_abs = numpy.max( numpy.abs(y_centerline_fit-y_centerline) )
    print '.. RMSE (in mm): '+str(rmse*py)
    print '.. Maximum absolute error (in mm): '+str(max_abs*py)

    # display
    if param.debug == 1:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.plot(z_centerline,x_centerline,'.',z_centerline,x_centerline_fit,'r')
        plt.legend(['Data','Polynomial Fit'])
        plt.title('Z-X plane polynomial interpolation')
        plt.show()

        plt.figure()
        plt.plot(z_centerline,y_centerline,'.',z_centerline,y_centerline_fit,'r')
        plt.legend(['Data','Polynomial Fit'])
        plt.title('Z-Y plane polynomial interpolation')
        plt.show()

    # generate full range z-values for centerline
    z_centerline_full = [iz for iz in range(0, nz, 1)]

    # calculate X and Y values for the full centerline
    x_centerline_fit_full = numpy.polyval(polyx, z_centerline_full)
    y_centerline_fit_full = numpy.polyval(polyy, z_centerline_full)

    # Generate fitted transformation matrices and write centerline coordinates in text file
    print '\nGenerate fitted transformation matrices and write centerline coordinates in text file...'
    file_mat_inv_cumul_fit = ['tmp.mat_inv_cumul_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
    file_mat_cumul_fit = ['tmp.mat_cumul_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
    fid_centerline = open('tmp.centerline_coordinates.txt', 'w')
    for iz in range(0, nz, 1):
        # compute inverse cumulative fitted transformation matrix
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        fid.write('%i %i %i %f\n' %(1, 0, 0, x_centerline_fit_full[iz]-x_init) )
        fid.write('%i %i %i %f\n' %(0, 1, 0, y_centerline_fit_full[iz]-y_init) )
        fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
        fid.close()
        # compute forward cumulative fitted transformation matrix
        sct.run('convert_xfm -omat '+file_mat_cumul_fit[iz]+' -inverse '+file_mat_inv_cumul_fit[iz])
        # write centerline coordinates in x, y, z format
        fid_centerline.write('%f %f %f\n' %(x_centerline_fit_full[iz], y_centerline_fit_full[iz], z_centerline_full[iz]) )
    fid_centerline.close()


    # Prepare output data
    # ====================================================================================================

    # write centerline as text file
    for iz in range(0, nz, 1):
        # compute inverse cumulative fitted transformation matrix
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        fid.write('%i %i %i %f\n' %(1, 0, 0, x_centerline_fit_full[iz]-x_init) )
        fid.write('%i %i %i %f\n' %(0, 1, 0, y_centerline_fit_full[iz]-y_init) )
        fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
        fid.close()

    # write polynomial coefficients
    numpy.savetxt('tmp.centerline_polycoeffs_x.txt',coeffsx)
    numpy.savetxt('tmp.centerline_polycoeffs_y.txt',coeffsy)

    # apply transformations to data
    print '\nApply fitted transformation matrices...'
    file_anat_split_fit = ['tmp.anat_orient_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
    file_mask_split_fit = ['tmp.mask_orient_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
    file_point_split_fit = ['tmp.point_orient_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
    for iz in range(0, nz, 1):
        # forward cumulative transformation to data
        sct.run(fsloutput+'flirt -in '+file_anat_split[iz]+' -ref '+file_anat_split[iz]+' -applyxfm -init '+file_mat_cumul_fit[iz]+' -out '+file_anat_split_fit[iz])
        # inverse cumulative transformation to mask
        sct.run(fsloutput+'flirt -in '+file_mask_split[z_init]+' -ref '+file_mask_split[z_init]+' -applyxfm -init '+file_mat_inv_cumul_fit[iz]+' -out '+file_mask_split_fit[iz])
        # inverse cumulative transformation to point
        sct.run(fsloutput+'flirt -in '+file_point_split[z_init]+' -ref '+file_point_split[z_init]+' -applyxfm -init '+file_mat_inv_cumul_fit[iz]+' -out '+file_point_split_fit[iz]+' -interp nearestneighbour')

    # Merge into 4D volume
    print '\nMerge into 4D volume...'
    sct.run(fsloutput+'fslmerge -z tmp.anat_orient_fit tmp.anat_orient_fit_z*')
    sct.run(fsloutput+'fslmerge -z tmp.mask_orient_fit tmp.mask_orient_fit_z*')
    sct.run(fsloutput+'fslmerge -z tmp.point_orient_fit tmp.point_orient_fit_z*')

    # Copy header geometry from input data
    print '\nCopy header geometry from input data...'
    sct.run(fsloutput+'fslcpgeom tmp.anat_orient.nii tmp.anat_orient_fit.nii ')
    sct.run(fsloutput+'fslcpgeom tmp.anat_orient.nii tmp.mask_orient_fit.nii ')
    sct.run(fsloutput+'fslcpgeom tmp.anat_orient.nii tmp.point_orient_fit.nii ')

    # Reorient outputs into the initial orientation of the input image
    print '\nReorient the centerline into the initial orientation of the input image...'
    set_orientation('tmp.point_orient_fit.nii', input_image_orientation, 'tmp.point_orient_fit.nii')
    set_orientation('tmp.mask_orient_fit.nii', input_image_orientation, 'tmp.mask_orient_fit.nii')

    # Generate output file (in current folder)
    print '\nGenerate output file (in current folder)...'
    os.chdir('..')  # come back to parent folder
    #sct.generate_output_file('tmp.centerline_polycoeffs_x.txt','./','centerline_polycoeffs_x','.txt')
    #sct.generate_output_file('tmp.centerline_polycoeffs_y.txt','./','centerline_polycoeffs_y','.txt')
    #sct.generate_output_file('tmp.centerline_coordinates.txt','./','centerline_coordinates','.txt')
    #sct.generate_output_file('tmp.anat_orient.nii','./',file_anat+'_rpi',ext_anat)
    #sct.generate_output_file('tmp.anat_orient_fit.nii', file_anat+'_rpi_align'+ext_anat)
    #sct.generate_output_file('tmp.mask_orient_fit.nii', file_anat+'_mask'+ext_anat)
    fname_output_centerline = sct.generate_output_file(path_tmp+'/tmp.point_orient_fit.nii', file_anat+'_centerline'+ext_anat)

    # Delete temporary files
    if remove_tmp_files == 1:
        print '\nRemove temporary files...'
        sct.run('rm -rf '+path_tmp)

    # print number of warnings
    print '\nNumber of warnings: '+str(warning_count)+' (if >10, you should probably reduce the gap and/or increase the kernel size).'

    # display elapsed time
    elapsed_time = time.time() - start_time
    print '\nFinished! \n\tGenerated file: '+fname_output_centerline+'\n\tElapsed time: '+str(int(round(elapsed_time)))+'s\n'
コード例 #14
0
def compute_csa(fname_segmentation, name_method, volume_output, verbose, remove_temp_files, spline_smoothing, step, smoothing_param, figure_fit, name_output, slices, vert_levels, path_to_template, algo_fitting = 'hanning', type_window = 'hanning', window_length = 80):

    #param.algo_fitting = 'hanning'

    # Extract path, file and extension
    fname_segmentation = os.path.abspath(fname_segmentation)
    path_data, file_data, ext_data = sct.extract_fname(fname_segmentation)

    # create temporary folder
    sct.printv('\nCreate temporary folder...', verbose)
    path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1)
    sct.run('mkdir '+path_tmp, verbose)

    # Copying input data to tmp folder and convert to nii
    sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose)
    sct.run('isct_c3d '+fname_segmentation+' -o '+path_tmp+'segmentation.nii')

    # go to tmp folder
    os.chdir(path_tmp)
        
    # Change orientation of the input segmentation into RPI
    sct.printv('\nChange orientation of the input segmentation into RPI...', verbose)
    fname_segmentation_orient = set_orientation('segmentation.nii', 'RPI', 'segmentation_orient.nii')

    # Get size of data
    sct.printv('\nGet data dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_segmentation_orient)
    sct.printv('  ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose)

    # Open segmentation volume
    sct.printv('\nOpen segmentation volume...', verbose)
    file_seg = nibabel.load(fname_segmentation_orient)
    data_seg = file_seg.get_data()
    hdr_seg = file_seg.get_header()

    # Extract min and max index in Z direction
    X, Y, Z = (data_seg > 0).nonzero()
    # coords_seg is needed below by the 'counting_ortho_plane' and 'ellipse_ortho_plane' methods
    # (looking up strings in an array of strings is much faster than comparing integer triplets)
    coords_seg = np.array([str([X[i], Y[i], Z[i]]) for i in xrange(0, len(Z))])
    min_z_index, max_z_index = min(Z), max(Z)
    Xp, Yp = (data_seg[:, :, 0] >= 0).nonzero()  # X and Y range
    #
    # x_centerline = [0 for i in xrange(0,max_z_index-min_z_index+1)]
    # y_centerline = [0 for i in xrange(0,max_z_index-min_z_index+1)]
    # z_centerline = np.array([iz for iz in xrange(min_z_index, max_z_index+1)])
    #
    # # Extract segmentation points and average per slice
    # for iz in xrange(min_z_index, max_z_index+1):
    #     x_seg, y_seg = (data_seg[:,:,iz]>0).nonzero()
    #     x_centerline[iz-min_z_index] = np.mean(x_seg)
    #     y_centerline[iz-min_z_index] = np.mean(y_seg)
    #
    # # Fit the centerline points with spline and return the new fitted coordinates
    # x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)

    # extract centerline and smooth it
    x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = smooth_centerline(fname_segmentation_orient, algo_fitting=algo_fitting, type_window=type_window, window_length=window_length, verbose = verbose)
    z_centerline_scaled = [x*pz for x in z_centerline]

    # # 3D plot of the fit
    # fig = plt.figure()
    # ax = Axes3D(fig)
    # ax.plot(x_centerline, y_centerline, z_centerline, zdir='z')
    # ax.plot(x_centerline_fit, y_centerline_fit, z_centerline, zdir='z')
    # plt.show()

    # Defining cartesian basis vectors 
    x = np.array([1, 0, 0])
    y = np.array([0, 1, 0])
    z = np.array([0, 0, 1])
    
    # Creating folder in which JPG files will be stored
    sct.printv('\nCreating folder in which JPG files will be stored...', verbose)
    sct.create_folder('JPG_Results')

    # Compute CSA
    sct.printv('\nCompute CSA...', verbose)

    # Empty arrays in which CSA for each z slice will be stored
    csa = [0.0 for i in xrange(0,max_z_index-min_z_index+1)]
    # sections_ortho_counting = [0 for i in xrange(0,max_z_index-min_z_index+1)]
    # sections_ortho_ellipse = [0 for i in xrange(0,max_z_index-min_z_index+1)]
    # sections_z_ellipse = [0 for i in xrange(0,max_z_index-min_z_index+1)]
    # sections_z_counting = [0 for i in xrange(0,max_z_index-min_z_index+1)]
    sct.printv('\nCross-Section Area:', verbose, 'bold')

    for iz in xrange(0, len(z_centerline)):

        # Equation of the plane which is orthogonal to the spline at z=iz
        a = x_centerline_deriv[iz]
        b = y_centerline_deriv[iz]
        c = z_centerline_deriv[iz]

        #vector normal to the plane
        normal = normalize(np.array([a, b, c]))

        # angle between normal vector and z
        angle = np.arccos(np.dot(normal, z))
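        # This tilt angle between the centerline and the slice axis is used below to
        # relate areas measured in the axial plane to the plane orthogonal to the cord.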

        if name_method == 'counting_ortho_plane' or name_method == 'ellipse_ortho_plane':

            x_center = x_centerline_fit[iz]
            y_center = y_centerline_fit[iz]
            z_center = z_centerline[iz]

            # use of x in order to get orientation of each plane, basis_1 is in the plane ax+by+cz+d=0
            basis_1 = normalize(np.cross(normal,x))
            basis_2 = normalize(np.cross(normal,basis_1))

            # maximum dimension of the tilted plane. Try multiply numerator by sqrt(2) ?
            max_diameter = (max([(max(X)-min(X))*px,(max(Y)-min(Y))*py]))/(np.cos(angle))

            # Forcing the step to be the min of x and y scale (default value is 1 mm)
            step = min([px,py])

            # discretized plane which will be filled with 0/1
            plane_seg = np.zeros((int(max_diameter/step),int(max_diameter/step)))

            # how the plane will be skimmed through
            plane_grid = np.linspace(-int(max_diameter/2),int(max_diameter/2),int(max_diameter/step))

            # we go through the plane
            for i_b1 in plane_grid :

                for i_b2 in plane_grid :

                    point = np.array([x_center*px,y_center*py,z_center*pz]) + i_b1*basis_1 +i_b2*basis_2

                    # to which voxel belongs each point of the plane
                    coord_voxel = str([ int(point[0]/px), int(point[1]/py), int(point[2]/pz)])

                    if coord_voxel in coords_seg:  # if this voxel is 1
                        plane_seg[int((plane_grid==i_b1).nonzero()[0])][int((plane_grid==i_b2).nonzero()[0])] = 1

                        # number of voxels that are in the intersection of each plane and the nonzeros values of segmentation, times the area of one cell of the discretized plane
                        if name_method == 'counting_ortho_plane':
                            csa[iz] = len((plane_seg>0).nonzero()[0])*step*step

            # if verbose ==1 and name_method == 'counting_ortho_plane' :

                # print('Cross-Section Area : ' + str(csa[iz]) + ' mm^2')

            if name_method == 'ellipse_ortho_plane':

                # import scipy stuff
                from scipy.misc import imsave

                os.chdir('JPG_Results')
                imsave('plane_ortho_' + str(iz) + '.jpg', plane_seg)

                # Thresholded gradient image
                mag = edge_detection('plane_ortho_' + str(iz) + '.jpg')

                #Coordinates of the contour
                x_contour,y_contour = (mag>0).nonzero()

                x_contour = x_contour*step
                y_contour = y_contour*step

                #Fitting an ellipse
                fit = Ellipse_fit(x_contour,y_contour)

                # Semi-minor axis, semi-major axis
                a_ellipse, b_ellipse = ellipse_dim(fit)

                #Section = pi*a*b
                csa[iz] = a_ellipse*b_ellipse*np.pi

                # if verbose == 1 and name_method == 'ellipse_ortho_plane':
                #     print('Cross-Section Area : ' + str(csa[iz]) + ' mm^2')
                # os.chdir('..')

        if name_method == 'counting_z_plane' or name_method == 'ellipse_z_plane':

            # getting the segmentation for each z plane
            x_seg, y_seg = (data_seg[:, :, iz+min_z_index] > 0).nonzero()
            seg = [[x_seg[i], y_seg[i]] for i in range(0, len(x_seg))]

            plane = np.zeros((max(Xp), max(Yp)))

            for i in seg:
                # filling the plane with 0 and 1 regarding to the segmentation
                plane[i[0] - 1][i[1] - 1] = data_seg[i[0] - 1, i[1] - 1, iz+min_z_index]

            if name_method == 'counting_z_plane':
                x, y = (plane > 0.0).nonzero()
                len_x = len(x)
                for i in range(0, len_x):
                    csa[iz] += plane[x[i], y[i]]*px*py
                csa[iz] *= np.cos(angle)
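                # Multiplying the in-plane area by cos(angle) projects it onto the plane
                # orthogonal to the centerline, correcting the overestimation that occurs
                # when the cord is tilted with respect to the z axis.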

            # if verbose == 1 and name_method == 'counting_z_plane':
            #     print('Cross-Section Area : ' + str(csa[iz]) + ' mm^2')

            if name_method == 'ellipse_z_plane':

                # import scipy stuff
                from scipy.misc import imsave

                os.chdir('JPG_Results')
                imsave('plane_z_' + str(iz) + '.jpg', plane)

                # Thresholded gradient image
                mag = edge_detection('plane_z_' + str(iz) + '.jpg')

                x_contour,y_contour = (mag>0).nonzero()

                x_contour = x_contour*px
                y_contour = y_contour*py

                # Fitting an ellipse
                fit = Ellipse_fit(x_contour,y_contour)
                a_ellipse, b_ellipse = ellipse_dim(fit)
                csa[iz] = a_ellipse*b_ellipse*np.pi*np.cos(angle)

                 # if verbose == 1 and name_method == 'ellipse_z_plane':
                 #     print('Cross-Section Area : ' + str(csa[iz]) + ' mm^2')

    if spline_smoothing == 1:
        sct.printv('\nSmoothing results with spline...', verbose)
        tck = scipy.interpolate.splrep(z_centerline_scaled, csa, s=smoothing_param)
        csa_smooth = scipy.interpolate.splev(z_centerline_scaled, tck)
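        # splrep fits a smoothing B-spline to CSA as a function of z (smoothing_param
        # sets the trade-off between fidelity and smoothness); splev evaluates it at the same z.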
        if figure_fit == 1:
            import matplotlib.pyplot as plt
            plt.figure()
            plt.plot(z_centerline_scaled, csa)
            plt.plot(z_centerline_scaled, csa_smooth)
            plt.legend(['CSA values', 'Smoothed values'], 2)
            plt.savefig('Spline_fit.png')
        csa = csa_smooth  # update variable

    # Create output text file
    sct.printv('\nWrite text file...', verbose)
    file_results = open('csa.txt', 'w')
    for i in range(min_z_index, max_z_index+1):
        file_results.write(str(int(i)) + ',' + str(csa[i-min_z_index])+'\n')
        # Display results
        sct.printv('z='+str(i-min_z_index)+': '+str(csa[i-min_z_index])+' mm^2', verbose, 'bold')
    file_results.close()

    # output volume of csa values
    if volume_output:
        sct.printv('\nCreate volume of CSA values...', verbose)
        # get orientation of the input data
        orientation = get_orientation('segmentation.nii')
        data_seg = data_seg.astype(np.float32, copy=False)
        # loop across slices
        for iz in range(min_z_index, max_z_index+1):
            # retrieve seg pixels
            x_seg, y_seg = (data_seg[:, :, iz] > 0).nonzero()
            seg = [[x_seg[i],y_seg[i]] for i in range(0, len(x_seg))]
            # loop across pixels in segmentation
            for i in seg:
                # replace value with csa value
                data_seg[i[0], i[1], iz] = csa[iz-min_z_index]
        # create header
        hdr_seg.set_data_dtype('float32')  # set data type to float32
        # save volume
        img = nibabel.Nifti1Image(data_seg, None, hdr_seg)
        nibabel.save(img, 'csa_RPI.nii')
        # Change orientation of the output centerline into input orientation
        fname_csa_volume = set_orientation('csa_RPI.nii', orientation, 'csa_RPI_orient.nii')

    # come back to parent folder
    os.chdir('..')

    # Generate output files
    sct.printv('\nGenerate output files...', verbose)
    sct.generate_output_file(path_tmp+'csa.txt', path_data+param.fname_csa)  # extension already included in param.fname_csa
    if volume_output:
        sct.generate_output_file(fname_csa_volume, path_data+name_output)  # extension already included in name_output

    # average csa across vertebral levels or slices if asked (flag -z or -l)
    if slices or vert_levels:

        if vert_levels and not path_to_template:
            sct.printv('\nERROR: Path to template is missing. See usage.\n', 1, 'error')
            sys.exit(2)
        elif vert_levels and path_to_template:
            abs_path_to_template = os.path.abspath(path_to_template)

        # go to tmp folder
        os.chdir(path_tmp)

        # create temporary folder
        sct.printv('\nCreate temporary folder to average csa...', verbose)
        path_tmp_extract_metric = sct.slash_at_the_end('label_temp', 1)
        sct.run('mkdir '+path_tmp_extract_metric, verbose)

        # Copying output CSA volume in the temporary folder
        sct.printv('\nCopy data to tmp folder...', verbose)
        sct.run('cp '+fname_segmentation+' '+path_tmp_extract_metric)

        # create file info_label
        path_fname_seg, file_fname_seg, ext_fname_seg = sct.extract_fname(fname_segmentation)
        create_info_label('info_label.txt', path_tmp_extract_metric, file_fname_seg+ext_fname_seg)

        if slices:
            # average CSA
            os.system("sct_extract_metric -i "+path_data+name_output+" -f "+path_tmp_extract_metric+" -m wa -o "+sct.slash_at_the_end(path_data)+"mean_csa -z "+slices)
        if vert_levels:
            sct.run('cp -R '+abs_path_to_template+' .')
            # average CSA
            os.system("sct_extract_metric -i "+path_data+name_output+" -f "+path_tmp_extract_metric+" -m wa -o "+sct.slash_at_the_end(path_data)+"mean_csa -v "+vert_levels)

        os.chdir('..')

        # Remove temporary files
        print('\nRemove temporary folder used to average CSA...')
        sct.run('rm -rf '+path_tmp_extract_metric)

    # Remove temporary files
    if remove_temp_files:
        print('\nRemove temporary files...')
        sct.run('rm -rf '+path_tmp)
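
# Hedged usage sketch (illustrative only): the file name, method and flag values below
# are assumptions, not taken from this script.
# compute_csa('t2_seg.nii.gz', 'counting_z_plane', volume_output=1, verbose=1,
#             remove_temp_files=1, spline_smoothing=0, step=1, smoothing_param=700,
#             figure_fit=0, name_output='csa_volume.nii.gz', slices='', vert_levels='',
#             path_to_template='')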
コード例 #15
0
def main(segmentation_file=None,
         label_file=None,
         output_file_name=None,
         parameter="binary_centerline",
         remove_temp_files=1,
         verbose=0):

    #Process for a binary file as output:
    if parameter == "binary_centerline":

        # Binary_centerline: Process for only a segmentation file:
        if "-i" in arguments and "-l" not in arguments:
            # Extract path, file and extension
            segmentation_file = os.path.abspath(segmentation_file)
            path_data, file_data, ext_data = sct.extract_fname(
                segmentation_file)

            # create temporary folder
            path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
            sct.run('mkdir ' + path_tmp)

            # copy files into tmp folder
            sct.run('cp ' + segmentation_file + ' ' + path_tmp)

            # go to tmp folder
            os.chdir(path_tmp)

            # Change orientation of the input segmentation into RPI
            print '\nOrient segmentation image to RPI orientation...'
            fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data
            set_orientation(file_data + ext_data, 'RPI',
                            fname_segmentation_orient)

            # Extract orientation of the input segmentation
            orientation = get_orientation(file_data + ext_data)
            print '\nOrientation of segmentation image: ' + orientation

            # Get size of data
            print '\nGet dimensions data...'
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(
                fname_segmentation_orient)
            print '.. ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt)

            print '\nOpen segmentation volume...'
            file = nibabel.load(fname_segmentation_orient)
            data = file.get_data()
            hdr = file.get_header()

            # Extract min and max index in Z direction
            X, Y, Z = (data > 0).nonzero()
            min_z_index, max_z_index = min(Z), max(Z)
            x_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            y_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            z_centerline = [iz for iz in range(min_z_index, max_z_index + 1)]
            # Extract segmentation points and average per slice
            for iz in range(min_z_index, max_z_index + 1):
                x_seg, y_seg = (data[:, :, iz] > 0).nonzero()
                x_centerline[iz - min_z_index] = np.mean(x_seg)
                y_centerline[iz - min_z_index] = np.mean(y_seg)

            # clear the segmentation voxels (original note: "serves no purpose")
            for k in range(len(X)):
                data[X[k], Y[k], Z[k]] = 0

            print len(x_centerline)
            # Fit the centerline points with splines and return the new fitted coordinates
            #done with nurbs for now
            x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_centerline(
                x_centerline, y_centerline, z_centerline)
            # Create an image with the centerline
            for iz in range(min_z_index, max_z_index + 1):
                data[int(round(x_centerline_fit[iz - min_z_index])),
                     int(round(y_centerline_fit[iz - min_z_index])),
                     iz] = 1  # with nurbs fitting
                #data[round(x_centerline[iz-min_z_index]), round(y_centerline[iz-min_z_index]), iz] = 1             #without nurbs fitting

            # Write the centerline image in RPI orientation
            hdr.set_data_dtype('uint8')  # set imagetype to uint8
            print '\nWrite NIFTI volumes...'
            img = nibabel.Nifti1Image(data, None, hdr)
            if output_file_name != None:
                file_name = output_file_name
            else:
                file_name = file_data + '_centerline' + ext_data
            nibabel.save(img, 'tmp.centerline.nii')
            sct.generate_output_file('tmp.centerline.nii', file_name)

            del data

            # come back to parent folder
            os.chdir('..')

            # Change orientation of the output centerline into input orientation
            print '\nOrient centerline image to input orientation: ' + orientation
            set_orientation(path_tmp + '/' + file_name, orientation, file_name)

            # Remove temporary files
            if remove_temp_files:
                print('\nRemove temporary files...')
                sct.run('rm -rf ' + path_tmp)

            return file_name

        # Binary_centerline: Process for only a label file:
        if "-l" in arguments and "-i" not in arguments:
            file = os.path.abspath(label_file)
            path_data, file_data, ext_data = sct.extract_fname(file)

            file = nibabel.load(label_file)
            data = file.get_data()
            hdr = file.get_header()

            X, Y, Z = (data > 0).nonzero()
            Z_new = np.linspace(min(Z), max(Z), (max(Z) - min(Z) + 1))

            # sort X and Y arrays using Z
            X = [X[i] for i in Z[:].argsort()]
            Y = [Y[i] for i in Z[:].argsort()]
            Z = [Z[i] for i in Z[:].argsort()]

            #print X, Y, Z

            f1 = interpolate.UnivariateSpline(Z, X)
            f2 = interpolate.UnivariateSpline(Z, Y)
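            # The labels are sparse along Z, so X(Z) and Y(Z) are each fitted with a
            # smoothing spline and then evaluated on every slice between min(Z) and
            # max(Z) (Z_new) to obtain a continuous centerline.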

            X_fit = f1(Z_new)
            Y_fit = f2(Z_new)

            #print X_fit
            #print Y_fit

            if verbose == 1:
                import matplotlib.pyplot as plt

                plt.figure()
                plt.plot(Z_new, X_fit)
                plt.plot(Z, X, 'o', linestyle='None')
                plt.show()

                plt.figure()
                plt.plot(Z_new, Y_fit)
                plt.plot(Z, Y, 'o', linestyle='None')
                plt.show()

            data = data * 0

            for i in xrange(len(X_fit)):
                data[int(round(X_fit[i])), int(round(Y_fit[i])), int(Z_new[i])] = 1

            # Create NIFTI image
            print '\nSave volume ...'
            hdr.set_data_dtype('float32')  # set data type to float32
            img = nibabel.Nifti1Image(data, None, hdr)
            if output_file_name != None:
                file_name = output_file_name
            else:
                file_name = file_data + '_centerline' + ext_data
            # save volume
            nibabel.save(img, file_name)
            print '\nFile created : ' + file_name

            del data

        #### Binary_centerline: Process for a segmentation file and a label file:
        if "-l" and "-i" in arguments:

            ## Creation of a temporary file that will contain each centerline file of the process
            path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
            sct.run('mkdir ' + path_tmp)

            ##From label file create centerline image
            print '\nPROCESS PART 1: From label file create centerline image.'
            file_label = os.path.abspath(label_file)
            path_data_label, file_data_label, ext_data_label = sct.extract_fname(
                file_label)

            file_label = nibabel.load(label_file)

            #Copy label_file into temporary folder
            sct.run('cp ' + label_file + ' ' + path_tmp)

            data_label = file_label.get_data()
            hdr_label = file_label.get_header()

            if verbose == 1:
                from copy import copy
                data_label_to_show = copy(data_label)

            X, Y, Z = (data_label > 0).nonzero()
            Z_new = np.linspace(min(Z), max(Z), (max(Z) - min(Z) + 1))

            # sort X and Y arrays using Z
            X = [X[i] for i in Z[:].argsort()]
            Y = [Y[i] for i in Z[:].argsort()]
            Z = [Z[i] for i in Z[:].argsort()]

            #print X, Y, Z

            f1 = interpolate.UnivariateSpline(Z, X)
            f2 = interpolate.UnivariateSpline(Z, Y)

            X_fit = f1(Z_new)
            Y_fit = f2(Z_new)

            #print X_fit
            #print Y_fit

            if verbose == 1:
                import matplotlib.pyplot as plt

                plt.figure()
                plt.plot(Z_new, X_fit)
                plt.plot(Z, X, 'o', linestyle='None')
                plt.show()

                plt.figure()
                plt.plot(Z_new, Y_fit)
                plt.plot(Z, Y, 'o', linestyle='None')
                plt.show()

            data_label = data_label * 0

            for i in xrange(len(X_fit)):
                data_label[int(round(X_fit[i])), int(round(Y_fit[i])), int(Z_new[i])] = 1

            # Create NIFTI image
            print '\nSave volume ...'
            hdr_label.set_data_dtype('float32')  # set data type to float32
            img = nibabel.Nifti1Image(data_label, None, hdr_label)
            # save volume
            file_name_label = file_data_label + '_centerline' + ext_data_label
            nibabel.save(img, file_name_label)
            print '\nFile created : ' + file_name_label

            # copy files into tmp folder
            sct.run('cp ' + file_name_label + ' ' + path_tmp)
            # delete the file from the parent folder
            os.remove(file_name_label)
            del data_label

            ##From segmentation file create centerline image
            print '\nPROCESS PART 2: From segmentation file create centerline image.'
            # Extract path, file and extension
            segmentation_file = os.path.abspath(segmentation_file)
            path_data_seg, file_data_seg, ext_data_seg = sct.extract_fname(
                segmentation_file)

            # copy files into tmp folder
            sct.run('cp ' + segmentation_file + ' ' + path_tmp)

            # go to tmp folder
            os.chdir(path_tmp)

            # Change orientation of the input segmentation into RPI
            print '\nOrient segmentation image to RPI orientation...'
            fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data_seg
            set_orientation(file_data_seg + ext_data_seg, 'RPI',
                            fname_segmentation_orient)

            # Extract orientation of the input segmentation
            orientation = get_orientation(file_data_seg + ext_data_seg)
            print '\nOrientation of segmentation image: ' + orientation

            # Get size of data
            print '\nGet dimensions data...'
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(
                fname_segmentation_orient)
            print '.. ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt)

            print '\nOpen segmentation volume...'
            file_seg = nibabel.load(fname_segmentation_orient)
            data_seg = file_seg.get_data()
            hdr_seg = file_seg.get_header()

            if verbose == 1:
                data_seg_to_show = copy(data_seg)

            # Extract min and max index in Z direction
            X, Y, Z = (data_seg > 0).nonzero()
            min_z_index, max_z_index = min(Z), max(Z)
            x_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            y_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            z_centerline = [iz for iz in range(min_z_index, max_z_index + 1)]
            # Extract segmentation points and average per slice
            for iz in range(min_z_index, max_z_index + 1):
                x_seg, y_seg = (data_seg[:, :, iz] > 0).nonzero()
                x_centerline[iz - min_z_index] = np.mean(x_seg)
                y_centerline[iz - min_z_index] = np.mean(y_seg)
            for k in range(len(X)):
                data_seg[X[k], Y[k], Z[k]] = 0
            # Fit the centerline points with splines and return the new fitted coordinates
            #done with nurbs for now
            x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_centerline(
                x_centerline, y_centerline, z_centerline)

            # Create an image with the centerline
            for iz in range(min_z_index, max_z_index + 1):
                data_seg[int(round(x_centerline_fit[iz - min_z_index])),
                         int(round(y_centerline_fit[iz - min_z_index])), iz] = 1
            # Write the centerline image in RPI orientation
            hdr_seg.set_data_dtype('uint8')  # set imagetype to uint8
            print '\nWrite NIFTI volumes...'
            img = nibabel.Nifti1Image(data_seg, None, hdr_seg)
            nibabel.save(img, 'tmp.centerline.nii')
            file_name_seg = file_data_seg + '_centerline' + ext_data_seg
            sct.generate_output_file('tmp.centerline.nii',
                                     file_name_seg)  # problem here

            # copy files into parent folder
            #sct.run('cp '+file_name_seg+' ../')

            del data_seg

            # come back to parent folder
            #            os.chdir('..')

            # Change orientation of the output centerline into input orientation
            print '\nOrient centerline image to input orientation: ' + orientation
            set_orientation(file_name_seg, orientation, file_name_seg)

            print '\nRemoving any overlap of the centerline obtained from the label file:'

            ## Remove overlap from the centerline file obtained from the label file
            remove_overlap(file_name_label, file_name_seg,
                           "generated_centerline_without_overlap.nii.gz")

            ## Concatenation of the two centerline files
            print '\nConcatenation of the two centerline files:'
            if output_file_name != None:
                file_name = output_file_name
            else:
                file_name = 'centerline_total_from_label_and_seg'

            sct.run(
                'fslmaths generated_centerline_without_overlap.nii.gz -add ' +
                file_name_seg + ' ' + file_name)

            if verbose == 1:
                import matplotlib.pyplot as plt
                from scipy import ndimage

                #Get back concatenation of segmentation and labels before any processing
                data_concatenate = data_seg_to_show + data_label_to_show
                z_centerline = [
                    iz for iz in range(0, nz, 1)
                    if data_concatenate[:, :, iz].any()
                ]
                nz_nonz = len(z_centerline)
                x_centerline = [0 for iz in range(0, nz_nonz, 1)]
                y_centerline = [0 for iz in range(0, nz_nonz, 1)]

                # Calculate centerline coordinates and create image of the centerline
                for iz in range(0, nz_nonz, 1):
                    x_centerline[iz], y_centerline[
                        iz] = ndimage.measurements.center_of_mass(
                            data_concatenate[:, :, z_centerline[iz]])

                #Load file with resulting centerline
                file_centerline_fit = nibabel.load(file_name)
                data_centerline_fit = file_centerline_fit.get_data()

                z_centerline_fit = [
                    iz for iz in range(0, nz, 1)
                    if data_centerline_fit[:, :, iz].any()
                ]
                nz_nonz_fit = len(z_centerline_fit)
                x_centerline_fit_total = [0 for iz in range(0, nz_nonz_fit, 1)]
                y_centerline_fit_total = [0 for iz in range(0, nz_nonz_fit, 1)]

                #Convert to array
                x_centerline_fit_total = np.asarray(x_centerline_fit_total)
                y_centerline_fit_total = np.asarray(y_centerline_fit_total)
                #Calculate overlap between seg and label
                length_overlap = X_fit.shape[0] + x_centerline_fit.shape[
                    0] - x_centerline_fit_total.shape[0]
                # The total fit is the concatenation of the two fits (segmentation part first, then the non-overlapping tail of the label fit)
                for i in range(x_centerline_fit.shape[0]):
                    x_centerline_fit_total[i] = x_centerline_fit[i]
                    y_centerline_fit_total[i] = y_centerline_fit[i]
                for i in range(X_fit.shape[0] - length_overlap):
                    x_centerline_fit_total[x_centerline_fit.shape[0] +
                                           i] = X_fit[i + length_overlap]
                    y_centerline_fit_total[x_centerline_fit.shape[0] +
                                           i] = Y_fit[i + length_overlap]
                    print x_centerline_fit.shape[0] + i

                #for iz in range(0, nz_nonz_fit, 1):
                #    x_centerline_fit[iz], y_centerline_fit[iz] = ndimage.measurements.center_of_mass(data_centerline_fit[:, :, z_centerline_fit[iz]])

                #Creation of a vector x that takes into account the distance between the labels
                #x_centerline_fit = np.asarray(x_centerline_fit)
                #y_centerline_fit = np.asarray(y_centerline_fit)
                x_display = [0 for i in range(x_centerline_fit_total.shape[0])]
                y_display = [0 for i in range(y_centerline_fit_total.shape[0])]

                for i in range(0, nz_nonz, 1):
                    x_display[z_centerline[i] -
                              z_centerline[0]] = x_centerline[i]
                    y_display[z_centerline[i] -
                              z_centerline[0]] = y_centerline[i]

                plt.figure(1)
                plt.subplot(2, 1, 1)
                plt.plot(z_centerline_fit, x_display, 'ro')
                plt.plot(z_centerline_fit, x_centerline_fit_total)
                plt.xlabel("Z")
                plt.ylabel("X")
                plt.title("x and x_fit coordinates")

                plt.subplot(2, 1, 2)
                plt.plot(z_centerline_fit, y_display, 'ro')
                plt.plot(z_centerline_fit, y_centerline_fit_total)
                plt.xlabel("Z")
                plt.ylabel("Y")
                plt.title("y and y_fit coordinates")
                plt.show()

                del data_concatenate, data_label_to_show, data_seg_to_show, data_centerline_fit

            # Copy result into parent folder
            sct.run('cp ' + file_name + ' ../')

            # Come back to parent folder
            os.chdir('..')

            # Remove temporary centerline files
            if remove_temp_files:
                print('\nRemove temporary files...')
                sct.run('rm -rf ' + path_tmp)

#Process for a text file as output:
    if parameter == "text_file":
        print "\nText file process"
        #Process for only a segmentation file:
        if "-i" in arguments and "-l" not in arguments:

            # Extract path, file and extension
            segmentation_file = os.path.abspath(segmentation_file)
            path_data, file_data, ext_data = sct.extract_fname(
                segmentation_file)

            # create temporary folder
            path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
            sct.run('mkdir ' + path_tmp)

            # copy files into tmp folder
            sct.run('cp ' + segmentation_file + ' ' + path_tmp)

            # go to tmp folder
            os.chdir(path_tmp)

            # Change orientation of the input segmentation into RPI
            print '\nOrient segmentation image to RPI orientation...'
            fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data
            set_orientation(file_data + ext_data, 'RPI',
                            fname_segmentation_orient)

            # Extract orientation of the input segmentation
            orientation = get_orientation(file_data + ext_data)
            print '\nOrientation of segmentation image: ' + orientation

            # Get size of data
            print '\nGet dimensions data...'
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(
                fname_segmentation_orient)
            print '.. ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt)

            print '\nOpen segmentation volume...'
            file = nibabel.load(fname_segmentation_orient)
            data = file.get_data()
            hdr = file.get_header()

            # Extract min and max index in Z direction
            X, Y, Z = (data > 0).nonzero()
            min_z_index, max_z_index = min(Z), max(Z)
            x_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            y_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            z_centerline = [iz for iz in range(min_z_index, max_z_index + 1)]
            # Extract segmentation points and average per slice
            for iz in range(min_z_index, max_z_index + 1):
                x_seg, y_seg = (data[:, :, iz] > 0).nonzero()
                x_centerline[iz - min_z_index] = np.mean(x_seg)
                y_centerline[iz - min_z_index] = np.mean(y_seg)
            for k in range(len(X)):
                data[X[k], Y[k], Z[k]] = 0
            # Fit the centerline points with splines and return the new fitted coordinates
            x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_centerline(
                x_centerline, y_centerline, z_centerline)

            # Create output text file
            if output_file_name != None:
                file_name = output_file_name
            else:
                file_name = file_data + '_centerline' + '.txt'

            sct.printv('\nWrite text file...', verbose)
            #file_results = open("../"+file_name, 'w')
            file_results = open(file_name, 'w')
            for i in range(min_z_index, max_z_index + 1):
                file_results.write(
                    str(int(i)) + ' ' +
                    str(x_centerline_fit[i - min_z_index]) + ' ' +
                    str(y_centerline_fit[i - min_z_index]) + '\n')
            file_results.close()

            # Copy result into parent folder
            sct.run('cp ' + file_name + ' ../')

            del data

            # come back to parent folder
            os.chdir('..')

            # Remove temporary files
            if remove_temp_files:
                print('\nRemove temporary files...')
                sct.run('rm -rf ' + path_tmp)

            return file_name

        #Process for only a label file:
        if "-l" in arguments and "-i" not in arguments:
            file = os.path.abspath(label_file)
            path_data, file_data, ext_data = sct.extract_fname(file)

            file = nibabel.load(label_file)
            data = file.get_data()
            hdr = file.get_header()

            X, Y, Z = (data > 0).nonzero()
            Z_new = np.linspace(min(Z), max(Z), (max(Z) - min(Z) + 1))

            # sort X and Y arrays using Z
            X = [X[i] for i in Z[:].argsort()]
            Y = [Y[i] for i in Z[:].argsort()]
            Z = [Z[i] for i in Z[:].argsort()]

            #print X, Y, Z

            f1 = interpolate.UnivariateSpline(Z, X)
            f2 = interpolate.UnivariateSpline(Z, Y)

            X_fit = f1(Z_new)
            Y_fit = f2(Z_new)

            #print X_fit
            #print Y_fit

            if verbose == 1:
                import matplotlib.pyplot as plt

                plt.figure()
                plt.plot(Z_new, X_fit)
                plt.plot(Z, X, 'o', linestyle='None')
                plt.show()

                plt.figure()
                plt.plot(Z_new, Y_fit)
                plt.plot(Z, Y, 'o', linestyle='None')
                plt.show()

            data = data * 0

            for iz in xrange(len(X_fit)):
                data[int(round(X_fit[iz])), int(round(Y_fit[iz])), int(Z_new[iz])] = 1

            # Create output text file
            sct.printv('\nWrite text file...', verbose)
            if output_file_name != None:
                file_name = output_file_name
            else:
                file_name = file_data + '_centerline' + '.txt'
            file_results = open(file_name, 'w')
            min_z_index, max_z_index = min(Z), max(Z)
            for i in range(min_z_index, max_z_index + 1):
                file_results.write(
                    str(int(i)) + ' ' + str(X_fit[i - min_z_index]) + ' ' +
                    str(Y_fit[i - min_z_index]) + '\n')
            file_results.close()

            del data

        #Process for a segmentation file and a label file:
        if "-l" and "-i" in arguments:

            ## Creation of a temporary file that will contain each centerline file of the process
            path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
            sct.run('mkdir ' + path_tmp)

            ##From label file create centerline text file
            print '\nPROCESS PART 1: From label file create centerline text file.'
            file_label = os.path.abspath(label_file)
            path_data_label, file_data_label, ext_data_label = sct.extract_fname(
                file_label)

            file_label = nibabel.load(label_file)

            #Copy label_file into temporary folder
            sct.run('cp ' + label_file + ' ' + path_tmp)

            data_label = file_label.get_data()
            hdr_label = file_label.get_header()

            X, Y, Z = (data_label > 0).nonzero()
            Z_new = np.linspace(min(Z), max(Z), (max(Z) - min(Z) + 1))

            # sort X and Y arrays using Z
            X = [X[i] for i in Z[:].argsort()]
            Y = [Y[i] for i in Z[:].argsort()]
            Z = [Z[i] for i in Z[:].argsort()]

            #print X, Y, Z

            f1 = interpolate.UnivariateSpline(Z, X)
            f2 = interpolate.UnivariateSpline(Z, Y)

            X_fit = f1(Z_new)
            Y_fit = f2(Z_new)

            #print X_fit
            #print Y_fit

            if verbose == 1:
                import matplotlib.pyplot as plt

                plt.figure()
                plt.plot(Z_new, X_fit)
                plt.plot(Z, X, 'o', linestyle='None')
                plt.show()

                plt.figure()
                plt.plot(Z_new, Y_fit)
                plt.plot(Z, Y, 'o', linestyle='None')
                plt.show()

            data_label = data_label * 0

            for i in xrange(len(X_fit)):
                data_label[int(round(X_fit[i])), int(round(Y_fit[i])), int(Z_new[i])] = 1

            # Create output text file
            sct.printv('\nWrite text file...', verbose)
            file_name_label = file_data_label + '_centerline' + '.txt'
            file_results = open(path_tmp + '/' + file_name_label, 'w')
            min_z_index, max_z_index = min(Z), max(Z)
            for i in range(min_z_index, max_z_index + 1):
                file_results.write(
                    str(int(i)) + ' ' + str(X_fit[i - min_z_index]) + ' ' +
                    str(Y_fit[i - min_z_index]) + '\n')
            file_results.close()

            # copy files into tmp folder
            #sct.run('cp '+file_name_label+' '+path_tmp)

            del data_label

            ##From segmentation file create centerline text file
            print '\nPROCESS PART 2: From segmentation file create centerline image.'
            # Extract path, file and extension
            segmentation_file = os.path.abspath(segmentation_file)
            path_data_seg, file_data_seg, ext_data_seg = sct.extract_fname(
                segmentation_file)

            # copy files into tmp folder
            sct.run('cp ' + segmentation_file + ' ' + path_tmp)

            # go to tmp folder
            os.chdir(path_tmp)

            # Change orientation of the input segmentation into RPI
            print '\nOrient segmentation image to RPI orientation...'
            fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data_seg
            set_orientation(file_data_seg + ext_data_seg, 'RPI',
                            fname_segmentation_orient)

            # Extract orientation of the input segmentation
            orientation = get_orientation(file_data_seg + ext_data_seg)
            print '\nOrientation of segmentation image: ' + orientation

            # Get size of data
            print '\nGet dimensions data...'
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(
                fname_segmentation_orient)
            print '.. ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt)

            print '\nOpen segmentation volume...'
            file_seg = nibabel.load(fname_segmentation_orient)
            data_seg = file_seg.get_data()
            hdr_seg = file_seg.get_header()

            # Extract min and max index in Z direction
            X, Y, Z = (data_seg > 0).nonzero()
            min_z_index, max_z_index = min(Z), max(Z)
            x_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            y_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            z_centerline = [iz for iz in range(min_z_index, max_z_index + 1)]
            # Extract segmentation points and average per slice
            for iz in range(min_z_index, max_z_index + 1):
                x_seg, y_seg = (data_seg[:, :, iz] > 0).nonzero()
                x_centerline[iz - min_z_index] = np.mean(x_seg)
                y_centerline[iz - min_z_index] = np.mean(y_seg)
            for k in range(len(X)):
                data_seg[X[k], Y[k], Z[k]] = 0
            # Fit the centerline points with splines and return the new fitted coordinates
            #done with nurbs for now
            x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_centerline(
                x_centerline, y_centerline, z_centerline)

            # Create output text file
            file_name_seg = file_data_seg + '_centerline' + '.txt'
            sct.printv('\nWrite text file...', verbose)
            file_results = open(file_name_seg, 'w')
            for i in range(min_z_index, max_z_index + 1):
                file_results.write(
                    str(int(i)) + ' ' +
                    str(x_centerline_fit[i - min_z_index]) + ' ' +
                    str(y_centerline_fit[i - min_z_index]) + '\n')
            file_results.close()

            del data_seg

            print '\nRemoving overlap of the centerline obtained with the label file, if there is any:'

            ## Remove overlap from centerline file obtained with label file
            remove_overlap(file_name_label,
                           file_name_seg,
                           "generated_centerline_without_overlap1.txt",
                           parameter=1)

            ## Concatenation of the two centerline files
            print '\nConcatenation of the two centerline files:'
            if output_file_name != None:
                file_name = output_file_name
            else:
                file_name = 'centerline_total_from_label_and_seg.txt'

            f_output = open(file_name, "w")
            f_output.close()
            with open(file_name_seg, "r") as f_seg:
                with open("generated_centerline_without_overlap1.txt",
                          "r") as f:
                    with open(file_name, "w") as f_output:
                        data_line_seg = f_seg.readlines()
                        data_line = f.readlines()
                        for line in data_line_seg:
                            f_output.write(line)
                        for line in data_line:
                            f_output.write(line)

            # Copy result into parent folder
            sct.run('cp ' + file_name + ' ../')

            # Come back to parent folder
            os.chdir('..')

            # Remove temporary centerline files
            if remove_temp_files:
                print('\nRemove temporary files...')
                sct.run('rm -rf ' + path_tmp)
def main(segmentation_file=None, label_file=None, output_file_name=None, parameter = "binary_centerline", remove_temp_files = 1, verbose = 0 ):

#Process for a binary file as output:
    if parameter == "binary_centerline":

        # Binary_centerline: Process for only a segmentation file:
        if "-i" in arguments and "-l" not in arguments:
            # Extract path, file and extension
            segmentation_file = os.path.abspath(segmentation_file)
            path_data, file_data, ext_data = sct.extract_fname(segmentation_file)

            # create temporary folder
            path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
            sct.run('mkdir '+path_tmp)

            # copy files into tmp folder
            sct.run('cp '+segmentation_file+' '+path_tmp)

            # go to tmp folder
            os.chdir(path_tmp)

            # Change orientation of the input segmentation into RPI
            print '\nOrient segmentation image to RPI orientation...'
            fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data
            set_orientation(file_data+ext_data, 'RPI', fname_segmentation_orient)

            # Extract orientation of the input segmentation
            orientation = get_orientation(file_data+ext_data)
            print '\nOrientation of segmentation image: ' + orientation

            # Get size of data
            print '\nGet dimensions data...'
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_segmentation_orient)
            print '.. '+str(nx)+' x '+str(ny)+' x '+str(nz)+' x '+str(nt)

            print '\nOpen segmentation volume...'
            file = nibabel.load(fname_segmentation_orient)
            data = file.get_data()
            hdr = file.get_header()

            # Extract min and max index in Z direction
            X, Y, Z = (data>0).nonzero()
            min_z_index, max_z_index = min(Z), max(Z)
            x_centerline = [0 for i in range(0,max_z_index-min_z_index+1)]
            y_centerline = [0 for i in range(0,max_z_index-min_z_index+1)]
            z_centerline = [iz for iz in range(min_z_index, max_z_index+1)]
            # Extract segmentation points and average per slice
            for iz in range(min_z_index, max_z_index+1):
                x_seg, y_seg = (data[:,:,iz]>0).nonzero()
                x_centerline[iz-min_z_index] = np.mean(x_seg)
                y_centerline[iz-min_z_index] = np.mean(y_seg)

            # serves no purpose
            for k in range(len(X)):
                data[X[k],Y[k],Z[k]] = 0

            print len(x_centerline)
            # Fit the centerline points with splines and return the new fitted coordinates
            # done with nurbs for now
            x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)
            # Create an image with the centerline
            for iz in range(min_z_index, max_z_index+1):
                data[int(round(x_centerline_fit[iz-min_z_index])), int(round(y_centerline_fit[iz-min_z_index])), iz] = 1    # with nurbs fitting (indices cast to int)
                #data[round(x_centerline[iz-min_z_index]), round(y_centerline[iz-min_z_index]), iz] = 1             #without nurbs fitting


            # Write the centerline image in RPI orientation
            hdr.set_data_dtype('uint8') # set imagetype to uint8
            print '\nWrite NIFTI volumes...'
            img = nibabel.Nifti1Image(data, None, hdr)
            if output_file_name != None :
                file_name = output_file_name
            else: file_name = file_data+'_centerline'+ext_data
            nibabel.save(img,'tmp.centerline.nii')
            sct.generate_output_file('tmp.centerline.nii',file_name)

            del data

            # come back to parent folder
            os.chdir('..')

            # Change orientation of the output centerline into input orientation
            print '\nOrient centerline image to input orientation: ' + orientation
            set_orientation(path_tmp+'/'+file_name, orientation, file_name)

           # Remove temporary files
            if remove_temp_files:
                print('\nRemove temporary files...')
                sct.run('rm -rf '+path_tmp)

            return file_name


        # Binary_centerline: Process for only a label file:
        if "-l" in arguments and "-i" not in arguments:
            file = os.path.abspath(label_file)
            path_data, file_data, ext_data = sct.extract_fname(file)

            file = nibabel.load(label_file)
            data = file.get_data()
            hdr = file.get_header()

            X,Y,Z = (data>0).nonzero()
            Z_new = np.linspace(min(Z),max(Z),(max(Z)-min(Z)+1))

            # sort X and Y arrays using Z
            X = [X[i] for i in Z[:].argsort()]
            Y = [Y[i] for i in Z[:].argsort()]
            Z = [Z[i] for i in Z[:].argsort()]

            #print X, Y, Z

            f1 = interpolate.UnivariateSpline(Z, X)
            f2 = interpolate.UnivariateSpline(Z, Y)

            X_fit = f1(Z_new)
            Y_fit = f2(Z_new)

            #print X_fit
            #print Y_fit

            if verbose==1 :
                import matplotlib.pyplot as plt

                plt.figure()
                plt.plot(Z_new,X_fit)
                plt.plot(Z,X,'o',linestyle = 'None')
                plt.show()

                plt.figure()
                plt.plot(Z_new,Y_fit)
                plt.plot(Z,Y,'o',linestyle = 'None')
                plt.show()

            data = data*0

            for i in xrange(len(X_fit)):
                data[int(round(X_fit[i])), int(round(Y_fit[i])), int(Z_new[i])] = 1


            # Create NIFTI image
            print '\nSave volume ...'
            hdr.set_data_dtype('float32') # set image type to float32
            img = nibabel.Nifti1Image(data, None, hdr)
            if output_file_name != None :
                file_name = output_file_name
            else: file_name = file_data+'_centerline'+ext_data
            # save volume
            nibabel.save(img,file_name)
            print '\nFile created : ' + file_name

            del data



        #### Binary_centerline: Process for a segmentation file and a label file:
        if "-l" and "-i" in arguments:

            ## Creation of a temporary file that will contain each centerline file of the process
            path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
            sct.run('mkdir '+path_tmp)

            ##From label file create centerline image
            print '\nPROCESS PART 1: From label file create centerline image.'
            file_label = os.path.abspath(label_file)
            path_data_label, file_data_label, ext_data_label = sct.extract_fname(file_label)

            file_label = nibabel.load(label_file)

            #Copy label_file into temporary folder
            sct.run('cp '+label_file+' '+path_tmp)

            data_label = file_label.get_data()
            hdr_label = file_label.get_header()

            if verbose == 1:
                from copy import copy
                data_label_to_show = copy(data_label)

            X,Y,Z = (data_label>0).nonzero()
            Z_new = np.linspace(min(Z),max(Z),(max(Z)-min(Z)+1))

            # sort X and Y arrays using Z
            X = [X[i] for i in Z[:].argsort()]
            Y = [Y[i] for i in Z[:].argsort()]
            Z = [Z[i] for i in Z[:].argsort()]

            #print X, Y, Z

            f1 = interpolate.UnivariateSpline(Z, X)
            f2 = interpolate.UnivariateSpline(Z, Y)

            X_fit = f1(Z_new)
            Y_fit = f2(Z_new)

            #print X_fit
            #print Y_fit

            if verbose==1 :
                import matplotlib.pyplot as plt

                plt.figure()
                plt.plot(Z_new,X_fit)
                plt.plot(Z,X,'o',linestyle = 'None')
                plt.show()

                plt.figure()
                plt.plot(Z_new,Y_fit)
                plt.plot(Z,Y,'o',linestyle = 'None')
                plt.show()

            data_label = data_label*0

            for i in xrange(len(X_fit)):
                data_label[int(round(X_fit[i])), int(round(Y_fit[i])), int(Z_new[i])] = 1

            # Create NIFTI image
            print '\nSave volume ...'
            hdr_label.set_data_dtype('float32') # set image type to float32
            img = nibabel.Nifti1Image(data_label, None, hdr_label)
            # save volume
            file_name_label = file_data_label + '_centerline' + ext_data_label
            nibabel.save(img, file_name_label)
            print '\nFile created : ' + file_name_label

            # copy files into tmp folder
            sct.run('cp '+file_name_label+' '+path_tmp)
            # delete the file from the parent folder
            os.remove(file_name_label)
            del data_label


            ##From segmentation file create centerline image
            print '\nPROCESS PART 2: From segmentation file create centerline image.'
            # Extract path, file and extension
            segmentation_file = os.path.abspath(segmentation_file)
            path_data_seg, file_data_seg, ext_data_seg = sct.extract_fname(segmentation_file)

            # copy files into tmp folder
            sct.run('cp '+segmentation_file+' '+path_tmp)

            # go to tmp folder
            os.chdir(path_tmp)

            # Change orientation of the input segmentation into RPI
            print '\nOrient segmentation image to RPI orientation...'
            fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data_seg
            set_orientation(file_data_seg+ext_data_seg, 'RPI', fname_segmentation_orient)

            # Extract orientation of the input segmentation
            orientation = get_orientation(file_data_seg+ext_data_seg)
            print '\nOrientation of segmentation image: ' + orientation

            # Get size of data
            print '\nGet dimensions data...'
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_segmentation_orient)
            print '.. '+str(nx)+' x '+str(ny)+' x '+str(nz)+' x '+str(nt)

            print '\nOpen segmentation volume...'
            file_seg = nibabel.load(fname_segmentation_orient)
            data_seg = file_seg.get_data()
            hdr_seg = file_seg.get_header()

            if verbose == 1:
                data_seg_to_show = copy(data_seg)

            # Extract min and max index in Z direction
            X, Y, Z = (data_seg>0).nonzero()
            min_z_index, max_z_index = min(Z), max(Z)
            x_centerline = [0 for i in range(0,max_z_index-min_z_index+1)]
            y_centerline = [0 for i in range(0,max_z_index-min_z_index+1)]
            z_centerline = [iz for iz in range(min_z_index, max_z_index+1)]
            # Extract segmentation points and average per slice
            for iz in range(min_z_index, max_z_index+1):
                x_seg, y_seg = (data_seg[:,:,iz]>0).nonzero()
                x_centerline[iz-min_z_index] = np.mean(x_seg)
                y_centerline[iz-min_z_index] = np.mean(y_seg)
            for k in range(len(X)):
                data_seg[X[k],Y[k],Z[k]] = 0
            # Fit the centerline points with splines and return the new fitted coordinates
            # done with nurbs for now
            x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)


            # Create an image with the centerline
            for iz in range(min_z_index, max_z_index+1):
                data_seg[int(round(x_centerline_fit[iz-min_z_index])), int(round(y_centerline_fit[iz-min_z_index])), iz] = 1
            # Write the centerline image in RPI orientation
            hdr_seg.set_data_dtype('uint8') # set imagetype to uint8
            print '\nWrite NIFTI volumes...'
            img = nibabel.Nifti1Image(data_seg, None, hdr_seg)
            nibabel.save(img,'tmp.centerline.nii')
            file_name_seg = file_data_seg+'_centerline'+ext_data_seg
            sct.generate_output_file('tmp.centerline.nii', file_name_seg)   # problem here

            # copy files into parent folder
            #sct.run('cp '+file_name_seg+' ../')

            del data_seg

            # come back to parent folder
#            os.chdir('..')

            # Change orientation of the output centerline into input orientation
            print '\nOrient centerline image to input orientation: ' + orientation
            set_orientation(file_name_seg, orientation, file_name_seg)



            print '\nRemoving overlap of the centerline obtained with the label file, if there is any:'

            ## Remove overlap from centerline file obtained with label file
            remove_overlap(file_name_label, file_name_seg, "generated_centerline_without_overlap.nii.gz")


            ## Concatenation of the two centerline files
            print '\nConcatenation of the two centerline files:'
            if output_file_name != None :
                file_name = output_file_name
            else: file_name = 'centerline_total_from_label_and_seg'

            sct.run('fslmaths generated_centerline_without_overlap.nii.gz -add ' + file_name_seg + ' ' + file_name)



            if verbose == 1 :
                import matplotlib.pyplot as plt
                from scipy import ndimage

                #Get back concatenation of segmentation and labels before any processing
                data_concatenate = data_seg_to_show + data_label_to_show
                z_centerline = [iz for iz in range(0, nz, 1) if data_concatenate[:, :, iz].any()]
                nz_nonz = len(z_centerline)
                x_centerline = [0 for iz in range(0, nz_nonz, 1)]
                y_centerline = [0 for iz in range(0, nz_nonz, 1)]


                # Calculate centerline coordinates and create image of the centerline
                for iz in range(0, nz_nonz, 1):
                    x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(data_concatenate[:, :, z_centerline[iz]])

                #Load file with resulting centerline
                file_centerline_fit = nibabel.load(file_name)
                data_centerline_fit = file_centerline_fit.get_data()

                z_centerline_fit = [iz for iz in range(0, nz, 1) if data_centerline_fit[:, :, iz].any()]
                nz_nonz_fit = len(z_centerline_fit)
                x_centerline_fit_total = [0 for iz in range(0, nz_nonz_fit, 1)]
                y_centerline_fit_total = [0 for iz in range(0, nz_nonz_fit, 1)]

                #Convert to array
                x_centerline_fit_total = np.asarray(x_centerline_fit_total)
                y_centerline_fit_total = np.asarray(y_centerline_fit_total)
                #Calculate overlap between seg and label
                length_overlap = X_fit.shape[0] + x_centerline_fit.shape[0] - x_centerline_fit_total.shape[0]
                # The total fitting is the concatenation of the two fittings (segmentation part first, then the label part)
                for i in range(x_centerline_fit.shape[0]):
                    x_centerline_fit_total[i] = x_centerline_fit[i]
                    y_centerline_fit_total[i] = y_centerline_fit[i]
                for i in range(X_fit.shape[0]-length_overlap):
                    x_centerline_fit_total[x_centerline_fit.shape[0] + i] = X_fit[i+length_overlap]
                    y_centerline_fit_total[x_centerline_fit.shape[0] + i] = Y_fit[i+length_overlap]
                    print x_centerline_fit.shape[0] + i

                #for iz in range(0, nz_nonz_fit, 1):
                #    x_centerline_fit[iz], y_centerline_fit[iz] = ndimage.measurements.center_of_mass(data_centerline_fit[:, :, z_centerline_fit[iz]])

                #Creation of a vector x that takes into account the distance between the labels
                #x_centerline_fit = np.asarray(x_centerline_fit)
                #y_centerline_fit = np.asarray(y_centerline_fit)
                x_display = [0 for i in range(x_centerline_fit_total.shape[0])]
                y_display = [0 for i in range(y_centerline_fit_total.shape[0])]


                for i in range(0, nz_nonz, 1):
                    x_display[z_centerline[i]-z_centerline[0]] = x_centerline[i]
                    y_display[z_centerline[i]-z_centerline[0]] = y_centerline[i]

                plt.figure(1)
                plt.subplot(2,1,1)
                plt.plot(z_centerline_fit, x_display, 'ro')
                plt.plot(z_centerline_fit, x_centerline_fit_total)
                plt.xlabel("Z")
                plt.ylabel("X")
                plt.title("x and x_fit coordinates")

                plt.subplot(2,1,2)
                plt.plot(z_centerline_fit, y_display, 'ro')
                plt.plot(z_centerline_fit, y_centerline_fit_total)
                plt.xlabel("Z")
                plt.ylabel("Y")
                plt.title("y and y_fit coordinates")
                plt.show()

                del data_concatenate, data_label_to_show, data_seg_to_show, data_centerline_fit

            # Copy result into parent folder
            sct.run('cp '+file_name+' ../')

            # Come back to parent folder
            os.chdir('..')

            # Remove temporary centerline files
            if remove_temp_files:
                print('\nRemove temporary files...')
                sct.run('rm -rf '+path_tmp)


    # Process for a text file as output:
    if parameter == "text_file":
        print "\nText file process"
        #Process for only a segmentation file:
        if "-i" in arguments and "-l" not in arguments:

            # Extract path, file and extension
            segmentation_file = os.path.abspath(segmentation_file)
            path_data, file_data, ext_data = sct.extract_fname(segmentation_file)


            # create temporary folder
            path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
            sct.run('mkdir '+path_tmp)

            # copy files into tmp folder
            sct.run('cp '+segmentation_file+' '+path_tmp)

            # go to tmp folder
            os.chdir(path_tmp)

            # Change orientation of the input segmentation into RPI
            print '\nOrient segmentation image to RPI orientation...'
            fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data
            set_orientation(file_data+ext_data, 'RPI', fname_segmentation_orient)

            # Extract orientation of the input segmentation
            orientation = get_orientation(file_data+ext_data)
            print '\nOrientation of segmentation image: ' + orientation

            # Get size of data
            print '\nGet dimensions data...'
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_segmentation_orient)
            print '.. '+str(nx)+' x '+str(ny)+' x '+str(nz)+' x '+str(nt)

            print '\nOpen segmentation volume...'
            file = nibabel.load(fname_segmentation_orient)
            data = file.get_data()
            hdr = file.get_header()

            # Extract min and max index in Z direction
            X, Y, Z = (data>0).nonzero()
            min_z_index, max_z_index = min(Z), max(Z)
            x_centerline = [0 for i in range(0,max_z_index-min_z_index+1)]
            y_centerline = [0 for i in range(0,max_z_index-min_z_index+1)]
            z_centerline = [iz for iz in range(min_z_index, max_z_index+1)]
            # Extract segmentation points and average per slice
            for iz in range(min_z_index, max_z_index+1):
                x_seg, y_seg = (data[:,:,iz]>0).nonzero()
                x_centerline[iz-min_z_index] = np.mean(x_seg)
                y_centerline[iz-min_z_index] = np.mean(y_seg)
            for k in range(len(X)):
                data[X[k],Y[k],Z[k]] = 0
            # Fit the centerline points with splines and return the new fitted coordinates
            x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)

            # Create output text file
            if output_file_name != None :
                file_name = output_file_name
            else: file_name = file_data+'_centerline'+'.txt'

            sct.printv('\nWrite text file...', verbose)
            #file_results = open("../"+file_name, 'w')
            file_results = open(file_name, 'w')
            for i in range(min_z_index, max_z_index+1):
                file_results.write(str(int(i)) + ' ' + str(x_centerline_fit[i-min_z_index]) + ' ' + str(y_centerline_fit[i-min_z_index]) + '\n')
            file_results.close()

            # Copy result into parent folder
            sct.run('cp '+file_name+' ../')

            del data

            # come back to parent folder
            os.chdir('..')


           # Remove temporary files
            if remove_temp_files:
                print('\nRemove temporary files...')
                sct.run('rm -rf '+path_tmp)

            return file_name


        #Process for only a label file:
        if "-l" in arguments and "-i" not in arguments:
            file = os.path.abspath(label_file)
            path_data, file_data, ext_data = sct.extract_fname(file)

            file = nibabel.load(label_file)
            data = file.get_data()
            hdr = file.get_header()

            X,Y,Z = (data>0).nonzero()
            Z_new = np.linspace(min(Z),max(Z),(max(Z)-min(Z)+1))

            # sort X and Y arrays using Z
            X = [X[i] for i in Z[:].argsort()]
            Y = [Y[i] for i in Z[:].argsort()]
            Z = [Z[i] for i in Z[:].argsort()]

            #print X, Y, Z

            f1 = interpolate.UnivariateSpline(Z, X)
            f2 = interpolate.UnivariateSpline(Z, Y)

            X_fit = f1(Z_new)
            Y_fit = f2(Z_new)

            #print X_fit
            #print Y_fit

            if verbose==1 :
                import matplotlib.pyplot as plt

                plt.figure()
                plt.plot(Z_new,X_fit)
                plt.plot(Z,X,'o',linestyle = 'None')
                plt.show()

                plt.figure()
                plt.plot(Z_new,Y_fit)
                plt.plot(Z,Y,'o',linestyle = 'None')
                plt.show()

            data = data*0

            for iz in xrange(len(X_fit)):
                data[int(round(X_fit[iz])), int(round(Y_fit[iz])), int(Z_new[iz])] = 1

            # Create output text file
            sct.printv('\nWrite text file...', verbose)
            if output_file_name != None :
                file_name = output_file_name
            else: file_name = file_data+'_centerline'+'.txt'
            file_results = open(file_name, 'w')
            min_z_index, max_z_index = min(Z), max(Z)
            for i in range(min_z_index, max_z_index+1):
                file_results.write(str(int(i)) + ' ' + str(X_fit[i-min_z_index]) + ' ' + str(Y_fit[i-min_z_index]) + '\n')
            file_results.close()

            del data

        #Process for a segmentation file and a label file:
        if "-l" and "-i" in arguments:

            ## Creation of a temporary file that will contain each centerline file of the process
            path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
            sct.run('mkdir '+path_tmp)

            ##From label file create centerline text file
            print '\nPROCESS PART 1: From label file create centerline text file.'
            file_label = os.path.abspath(label_file)
            path_data_label, file_data_label, ext_data_label = sct.extract_fname(file_label)

            file_label = nibabel.load(label_file)

            #Copy label_file into temporary folder
            sct.run('cp '+label_file+' '+path_tmp)

            data_label = file_label.get_data()
            hdr_label = file_label.get_header()

            X,Y,Z = (data_label>0).nonzero()
            Z_new = np.linspace(min(Z),max(Z),(max(Z)-min(Z)+1))

            # sort X and Y arrays using Z
            X = [X[i] for i in Z[:].argsort()]
            Y = [Y[i] for i in Z[:].argsort()]
            Z = [Z[i] for i in Z[:].argsort()]

            #print X, Y, Z

            f1 = interpolate.UnivariateSpline(Z, X)
            f2 = interpolate.UnivariateSpline(Z, Y)

            X_fit = f1(Z_new)
            Y_fit = f2(Z_new)

            #print X_fit
            #print Y_fit

            if verbose==1 :
                import matplotlib.pyplot as plt

                plt.figure()
                plt.plot(Z_new,X_fit)
                plt.plot(Z,X,'o',linestyle = 'None')
                plt.show()

                plt.figure()
                plt.plot(Z_new,Y_fit)
                plt.plot(Z,Y,'o',linestyle = 'None')
                plt.show()

            data_label = data_label*0

            for i in xrange(len(X_fit)):
                data_label[int(round(X_fit[i])), int(round(Y_fit[i])), int(Z_new[i])] = 1

            # Create output text file
            sct.printv('\nWrite text file...', verbose)
            file_name_label = file_data_label+'_centerline'+'.txt'
            file_results = open(path_tmp + '/' + file_name_label, 'w')
            min_z_index, max_z_index = min(Z), max(Z)
            for i in range(min_z_index, max_z_index+1):
                file_results.write(str(int(i)) + ' ' + str(X_fit[i-min_z_index]) + ' ' + str(Y_fit[i-min_z_index]) + '\n')
            file_results.close()

            # copy files into tmp folder
            #sct.run('cp '+file_name_label+' '+path_tmp)

            del data_label


            ##From segmentation file create centerline text file
            print '\nPROCESS PART 2: From segmentation file create centerline text file.'
            # Extract path, file and extension
            segmentation_file = os.path.abspath(segmentation_file)
            path_data_seg, file_data_seg, ext_data_seg = sct.extract_fname(segmentation_file)

            # copy files into tmp folder
            sct.run('cp '+segmentation_file+' '+path_tmp)

            # go to tmp folder
            os.chdir(path_tmp)

            # Change orientation of the input segmentation into RPI
            print '\nOrient segmentation image to RPI orientation...'
            fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data_seg
            set_orientation(file_data_seg+ext_data_seg, 'RPI', fname_segmentation_orient)

            # Extract orientation of the input segmentation
            orientation = get_orientation(file_data_seg+ext_data_seg)
            print '\nOrientation of segmentation image: ' + orientation

            # Get size of data
            print '\nGet dimensions data...'
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_segmentation_orient)
            print '.. '+str(nx)+' x '+str(ny)+' x '+str(nz)+' x '+str(nt)

            print '\nOpen segmentation volume...'
            file_seg = nibabel.load(fname_segmentation_orient)
            data_seg = file_seg.get_data()
            hdr_seg = file_seg.get_header()

            # Extract min and max index in Z direction
            X, Y, Z = (data_seg>0).nonzero()
            min_z_index, max_z_index = min(Z), max(Z)
            x_centerline = [0 for i in range(0,max_z_index-min_z_index+1)]
            y_centerline = [0 for i in range(0,max_z_index-min_z_index+1)]
            z_centerline = [iz for iz in range(min_z_index, max_z_index+1)]
            # Extract segmentation points and average per slice
            for iz in range(min_z_index, max_z_index+1):
                x_seg, y_seg = (data_seg[:,:,iz]>0).nonzero()
                x_centerline[iz-min_z_index] = np.mean(x_seg)
                y_centerline[iz-min_z_index] = np.mean(y_seg)
            for k in range(len(X)):
                data_seg[X[k],Y[k],Z[k]] = 0
            # Fit the centerline points with splines and return the new fitted coordinates
            # done with nurbs for now
            x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)


            # Create output text file
            file_name_seg = file_data_seg+'_centerline'+'.txt'
            sct.printv('\nWrite text file...', verbose)
            file_results = open(file_name_seg, 'w')
            for i in range(min_z_index, max_z_index+1):
                file_results.write(str(int(i)) + ' ' + str(x_centerline_fit[i-min_z_index]) + ' ' + str(y_centerline_fit[i-min_z_index]) + '\n')
            file_results.close()

            del data_seg


            print '\nRemoving overlap of the centerline obtained with the label file, if there is any:'

            ## Remove overlap from centerline file obtained with label file
            remove_overlap(file_name_label, file_name_seg, "generated_centerline_without_overlap1.txt", parameter=1)

            ## Concatenation of the two centerline files
            print '\nConcatenation of the two centerline files:'
            if output_file_name != None :
                file_name = output_file_name
            else: file_name = 'centerline_total_from_label_and_seg.txt'

            f_output = open(file_name, "w")
            f_output.close()
            with open(file_name_seg, "r") as f_seg:
                with open("generated_centerline_without_overlap1.txt", "r") as f:
                    with open(file_name, "w") as f_output:
                        data_line_seg = f_seg.readlines()
                        data_line = f.readlines()
                        for line in data_line_seg :
                            f_output.write(line)
                        for line in data_line :
                            f_output.write(line)

            # Copy result into parent folder
            sct.run('cp '+file_name+' ../')

            # Come back to parent folder
            os.chdir('..')

            # Remove temporary centerline files
            if remove_temp_files:
                print('\nRemove temporary files...')
                sct.run('rm -rf '+path_tmp)
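
The per-slice centerline extraction repeated throughout this example (collect the non-zero voxels of a mask, average their x and y coordinates for each z slice, then fit the result) can be summarized in a small standalone sketch. The helper and file names below are illustrative and not part of the code above; only nibabel and numpy are assumed:

import nibabel
import numpy as np

def centerline_points_from_mask(fname_mask):
    # Return (z, mean x, mean y) tuples, one per slice that contains mask voxels.
    data = nibabel.load(fname_mask).get_data()
    X, Y, Z = (data > 0).nonzero()
    points = []
    for iz in range(min(Z), max(Z) + 1):
        x_seg, y_seg = (data[:, :, iz] > 0).nonzero()
        if x_seg.size:  # skip empty slices instead of producing NaN means
            points.append((iz, float(np.mean(x_seg)), float(np.mean(y_seg))))
    return points

# Example: points = centerline_points_from_mask('tmp.segmentation_rpi.nii.gz')
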
コード例 #17
0
def main(list_file, param, output_file_name=None, remove_temp_files = 1, verbose = 0):

    path, file, ext = sct.extract_fname(list_file[0])

    # create temporary folder
    path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir '+path_tmp)

    # copy files into tmp folder
    sct.printv('\nCopy files into tmp folder...', verbose)
    for i in range(len(list_file)):
        file_temp = os.path.abspath(list_file[i])
        sct.run('cp '+file_temp+' '+path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    ## Concatenation of the files

    # Concatenation : sum of matrices
    file_0 = load(file+ext)
    data_concatenation = file_0.get_data()
    hdr_0 = file_0.get_header()
    orientation_file_0 = get_orientation(list_file[0])
    if len(list_file) > 1:
        for i in range(1, len(list_file)):
            orientation_file_temp = get_orientation(list_file[i])
            if orientation_file_0 != orientation_file_temp:
                print "ERROR: The files ", list_file[0], " and ", list_file[i], " are not in the same orientation. Use sct_orientation to change the orientation of a file."
                sys.exit(2)
            file_temp = load(list_file[i])
            data_temp = file_temp.get_data()
            data_concatenation = data_concatenation + data_temp

    # Save concatenation as a file
    print '\nWrite NIFTI volumes...'
    img = Nifti1Image(data_concatenation, None, hdr_0)
    save(img,'concatenation_file.nii.gz')


    # Applying nurbs to the concatenation and save file as binary file
    fname_output = extract_centerline('concatenation_file.nii.gz', remove_temp_files = remove_temp_files, verbose = verbose, algo_fitting=param.algo_fitting, type_window=param.type_window, window_length=param.window_length)

    # Rename files after processing
    if output_file_name is None:
        output_file_name = "generated_centerline.nii.gz"

    os.rename(fname_output, output_file_name)
    path_binary, file_binary, ext_binary = sct.extract_fname(output_file_name)
    os.rename('concatenation_file_centerline.txt', file_binary+'.txt')

    # Process for a binary file as output:
    sct.run('cp '+output_file_name+' ../')

    # Process for a text file as output:
    sct.run('cp '+file_binary+ '.txt'+ ' ../')

    os.chdir('../')
    # Remove temporary files
    if remove_temp_files:
        print('\nRemove temporary files...')
        sct.run('rm -rf '+path_tmp)
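
A hedged usage sketch for the main() defined above. The Param class is a hypothetical stand-in: the function only reads the algo_fitting, type_window and window_length attributes from the param argument before passing them to extract_centerline; file names and values are illustrative.

class Param:
    # Hypothetical container exposing the three attributes read from `param` above.
    algo_fitting = 'hanning'
    type_window = 'hanning'
    window_length = 80

# Illustrative call: sum two label images and extract a single centerline from the result.
main(['labels_part1.nii.gz', 'labels_part2.nii.gz'], Param(),
     output_file_name='centerline_concatenated.nii.gz',
     remove_temp_files=1, verbose=0)
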
コード例 #18
0
            zmax_anatomic = line.split(',')[1]
            zmin_seg = line.split(',')[2]
            zmax_seg = line.split(',')[3]
            if len(line_list) == 6:
                ymin_anatomic = line.split(',')[4]
                ymax_anatomic = line.split(',')[5]
        file_results.close()

        os.chdir(PATH_OUTPUT + '/subjects/' + subject + '/' + 'T2')

        # Convert to RPI
        # Input:
        # - data.nii.gz
        # - data_RPI.nii.gz
        print '\nConvert to RPI'
        orientation = get_orientation('data.nii.gz')
        sct.run('sct_orientation -i data.nii.gz -s RPI')
        # Crop image
        if ymin_anatomic == None and ymax_anatomic == None:
            sct.run(
                'sct_crop_image -i data_RPI.nii.gz -o data_RPI_crop.nii.gz -dim 2 -start '
                + zmin_anatomic + ' -end ' + zmax_anatomic)
        else:
            sct.run(
                'sct_crop_image -i data_RPI.nii.gz -o data_RPI_crop.nii.gz -dim 1,2 -start '
                + ymin_anatomic + ',' + zmin_anatomic + ' -end ' +
                ymax_anatomic + ',' + zmax_anatomic)
        # sct.run('sct_orientation -i data_RPI_crop.nii.gz -o data_crop.nii.gz -s '+ orientation)

        # denoising
        # input:
コード例 #19
0
def check_if_rpi(fname):
    from sct_orientation import get_orientation
    if not get_orientation(fname) == 'RPI':
        printv('\nERROR: '+fname+' is not in RPI orientation. Use sct_orientation to reorient your data. Exit program.\n', 1, 'error')
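
check_if_rpi() simply aborts when the input is not in RPI. A short sketch of the alternative pattern used by the other examples on this page, reorienting instead of aborting, is given below; the file names are illustrative and set_orientation is assumed to be available in the calling script, as it is elsewhere on this page.

from sct_orientation import get_orientation

fname_in = 't2.nii.gz'        # illustrative input
fname_rpi = 't2_rpi.nii.gz'   # illustrative output
if get_orientation(fname_in) != 'RPI':
    # set_orientation(input, orientation, output), as used by the other examples above
    set_orientation(fname_in, 'RPI', fname_rpi)
else:
    fname_rpi = fname_in
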
コード例 #20
0
def extract_centerline(fname_segmentation, remove_temp_files, name_output='', verbose = 0, algo_fitting = 'hanning', type_window = 'hanning', window_length = 80):

    # Extract path, file and extension
    fname_segmentation = os.path.abspath(fname_segmentation)
    path_data, file_data, ext_data = sct.extract_fname(fname_segmentation)

    # create temporary folder
    path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir '+path_tmp)

    # copy files into tmp folder
    sct.run('cp '+fname_segmentation+' '+path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    # Change orientation of the input centerline into RPI
    sct.printv('\nOrient centerline to RPI orientation...', verbose)
    fname_segmentation_orient = 'segmentation_rpi' + ext_data
    set_orientation(file_data+ext_data, 'RPI', fname_segmentation_orient)

    # Get dimension
    sct.printv('\nGet dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_segmentation_orient).dim
    sct.printv('.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz), verbose)
    sct.printv('.. voxel size:  '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm', verbose)

    # Extract orientation of the input segmentation
    orientation = get_orientation(file_data+ext_data)
    sct.printv('\nOrientation of segmentation image: ' + orientation, verbose)

    sct.printv('\nOpen segmentation volume...', verbose)
    file = nibabel.load(fname_segmentation_orient)
    data = file.get_data()
    hdr = file.get_header()

    # Extract min and max index in Z direction
    X, Y, Z = (data>0).nonzero()
    min_z_index, max_z_index = min(Z), max(Z)
    x_centerline = [0 for i in range(0,max_z_index-min_z_index+1)]
    y_centerline = [0 for i in range(0,max_z_index-min_z_index+1)]
    z_centerline = [iz for iz in range(min_z_index, max_z_index+1)]
    # Extract segmentation points and average per slice
    for iz in range(min_z_index, max_z_index+1):
        x_seg, y_seg = (data[:,:,iz]>0).nonzero()
        x_centerline[iz-min_z_index] = np.mean(x_seg)
        y_centerline[iz-min_z_index] = np.mean(y_seg)
    for k in range(len(X)):
        data[X[k], Y[k], Z[k]] = 0

    # extract centerline and smooth it
    x_centerline_fit, y_centerline_fit, z_centerline_fit, x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = smooth_centerline(fname_segmentation_orient, type_window = type_window, window_length = window_length, algo_fitting = algo_fitting, verbose = verbose)

    if verbose == 2:
            import matplotlib.pyplot as plt

            #Creation of a vector x that takes into account the distance between the labels
            nz_nonz = len(z_centerline)
            x_display = [0 for i in range(x_centerline_fit.shape[0])]
            y_display = [0 for i in range(y_centerline_fit.shape[0])]
            for i in range(0, nz_nonz, 1):
                x_display[int(z_centerline[i]-z_centerline[0])] = x_centerline[i]
                y_display[int(z_centerline[i]-z_centerline[0])] = y_centerline[i]

            plt.figure(1)
            plt.subplot(2,1,1)
            plt.plot(z_centerline_fit, x_display, 'ro')
            plt.plot(z_centerline_fit, x_centerline_fit)
            plt.xlabel("Z")
            plt.ylabel("X")
            plt.title("x and x_fit coordinates")

            plt.subplot(2,1,2)
            plt.plot(z_centerline_fit, y_display, 'ro')
            plt.plot(z_centerline_fit, y_centerline_fit)
            plt.xlabel("Z")
            plt.ylabel("Y")
            plt.title("y and y_fit coordinates")
            plt.show()


    # Create an image with the centerline
    for iz in range(min_z_index, max_z_index+1):
        data[int(round(x_centerline_fit[iz-min_z_index])), int(round(y_centerline_fit[iz-min_z_index])), iz] = 1  # if index is out of bounds here for hanning: either the segmentation has holes or labels have been added to the file
    # Write the centerline image in RPI orientation
    hdr.set_data_dtype('uint8') # set imagetype to uint8
    sct.printv('\nWrite NIFTI volumes...', verbose)
    img = nibabel.Nifti1Image(data, None, hdr)
    nibabel.save(img, 'centerline.nii.gz')
    # Define name if output name is not specified
    if name_output=='csa_volume.nii.gz' or name_output=='':
        # sct.generate_output_file('centerline.nii.gz', file_data+'_centerline'+ext_data, verbose)
        name_output = file_data+'_centerline'+ext_data
    sct.generate_output_file('centerline.nii.gz', name_output, verbose)

    # create a txt file with the centerline
    path, rad_output, ext = sct.extract_fname(name_output)
    name_output_txt = rad_output + '.txt'
    sct.printv('\nWrite text file...', verbose)
    file_results = open(name_output_txt, 'w')
    for i in range(min_z_index, max_z_index+1):
        file_results.write(str(int(i)) + ' ' + str(x_centerline_fit[i-min_z_index]) + ' ' + str(y_centerline_fit[i-min_z_index]) + '\n')
    file_results.close()

    # Copy result into parent folder
    sct.run('cp '+name_output_txt+' ../')

    del data

    # come back to parent folder
    os.chdir('..')

    # Change orientation of the output centerline into input orientation
    sct.printv('\nOrient centerline image to input orientation: ' + orientation, verbose)
    fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data
    set_orientation(path_tmp+'/'+name_output, orientation, name_output)

   # Remove temporary files
    if remove_temp_files:
        sct.printv('\nRemove temporary files...', verbose)
        sct.run('rm -rf '+path_tmp, verbose)

    return name_output
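
A hedged usage sketch for extract_centerline() as defined above; the segmentation file name and the output name are illustrative, and the keyword values simply spell out the defaults. Besides returning the centerline image name, the function also writes a text file with one "z x y" line per slice and copies it to the parent folder.

fname_centerline = extract_centerline('t2_seg.nii.gz',
                                      remove_temp_files=1,
                                      name_output='t2_centerline.nii.gz',
                                      verbose=1,
                                      algo_fitting='hanning',
                                      type_window='hanning',
                                      window_length=80)
print('Centerline image: ' + fname_centerline)
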
コード例 #21
0
def generate_warping_field(fname_dest,
                           x_trans,
                           y_trans,
                           theta_rot=None,
                           center_rotation=None,
                           fname='warping_field.nii.gz',
                           verbose=1):
    """Generation of a warping field towards an image and given transformation parameters.

    Given a destination image and transformation parameters this functions creates a NIFTI 3D warping field that can be
    applied afterwards. The transformation parameters corresponds to a slice-by-slice registration of images, thus the
    transformation parameters must be precised for each slice of the image.

    inputs:
        fname_dest: name of destination image (type: string)
        x_trans: list of translations along x axis for each slice (type: list, length: height of fname_dest)
        y_trans: list of translations along y axis for each slice (type: list, length: height of fname_dest)
        theta_rot[optional]: list of rotation angles in radian (and in ITK's coordinate system) for each slice (type: list)
        center_rotation[optional]: pixel coordinates in plan xOy of the wanted center of rotation (type: list,
            length: 2, example: [0,ny/2])
        fname[optional]: name of output warp (type: string)
        verbose: display parameter (type: int)

    output:
        creation of a warping field of name 'fname' with an header similar to the destination image.
    """
    from nibabel import load
    from math import cos, sin
    from sct_orientation import get_orientation

    #Make sure image is in rpi format
    sct.printv(
        '\nChecking if the image of destination is in RPI orientation for the warping field generation ...',
        verbose)
    orientation = get_orientation(fname_dest)
    if orientation != 'RPI':
        sct.printv(
            '\nWARNING: The image of destination is not in RPI format. Dimensions of the warping field might be inverted.',
            verbose)
    else:
        sct.printv('\tOK', verbose)

    sct.printv(
        '\n\nCreating warping field ' + fname +
        ' for transformations along z...', verbose)

    file_dest = load(fname_dest)
    hdr_file_dest = file_dest.get_header()
    hdr_warp = hdr_file_dest.copy()

    # Get image dimensions
    sct.printv('\nGet image dimensions of destination image...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_dest).dim
    sct.printv(
        '.. matrix size: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz),
        verbose)
    sct.printv(
        '.. voxel size:  ' + str(px) + 'mm x ' + str(py) + 'mm x ' + str(pz) +
        'mm', verbose)

    #Center of rotation
    if center_rotation == None:
        x_a = int(round(nx / 2))
        y_a = int(round(ny / 2))
    else:
        x_a = center_rotation[0]
        y_a = center_rotation[1]

    # Calculate displacement for each voxel
    data_warp = zeros((nx, ny, nz, 1, 3))
    # For translations
    if theta_rot == None:
        for i in range(nx):
            for j in range(ny):
                for k in range(nz):
                    data_warp[i, j, k, 0, 0] = x_trans[k]
                    data_warp[i, j, k, 0, 1] = y_trans[k]
                    data_warp[i, j, k, 0, 2] = 0
    # # For rigid transforms (not optimized)
    # if theta_rot != None:
    #     for k in range(nz):
    #         for i in range(nx):
    #             for j in range(ny):
    #                 # data_warp[i, j, k, 0, 0] = (cos(theta_rot[k])-1) * (i - x_a) - sin(theta_rot[k]) * (j - y_a) - x_trans[k]
    #                 # data_warp[i, j, k, 0, 1] = -(sin(theta_rot[k]) * (i - x_a) + (cos(theta_rot[k])-1) * (j - y_a)) + y_trans[k]
    #
    #                 data_warp[i, j, k, 0, 0] = (cos(theta_rot[k]) - 1) * (i - x_a) - sin(theta_rot[k]) * (j - y_a) + x_trans[k] #+ sin(theta_rot[k]) * (int(round(nx/2))-x_a)
    #                 data_warp[i, j, k, 0, 1] = - sin(theta_rot[k]) * (i - x_a) - (cos(theta_rot[k]) - 1) * (j - y_a) + y_trans[k] #- sin(theta_rot[k]) * (int(round(nx/2))-x_a)
    #                 data_warp[i, j, k, 0, 2] = 0

    # For rigid transforms with array (time optimization)
    if theta_rot != None:
        vector_i = [[[i - x_a], [j - y_a]] for i in range(nx)
                    for j in range(ny)]
        for k in range(nz):
            matrix_rot_a = asarray([[cos(theta_rot[k]), -sin(theta_rot[k])],
                                    [-sin(theta_rot[k]), -cos(theta_rot[k])]])
            tmp = matrix_rot_a + array(((-1, 0), (0, 1)))
            result = dot(tmp,
                         array(vector_i).T[0]) + array([[x_trans[k]],
                                                        [y_trans[k]]])
            for i in range(nx):
                # vector_i is ordered with j (the y index) varying fastest, so each block of ny values belongs to one i
                data_warp[i, :, k, 0, 0] = result[0][i * ny:i * ny + ny]
                data_warp[i, :, k, 0, 1] = result[1][i * ny:i * ny + ny]

    # Generate warp file as a warping field
    hdr_warp.set_intent('vector', (), '')
    hdr_warp.set_data_dtype('float32')
    img = nibabel.Nifti1Image(data_warp, None, hdr_warp)
    nibabel.save(img, fname)
    sct.printv('\nDONE ! Warping field generated: ' + fname, verbose)
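
A hedged usage sketch for generate_warping_field() (it applies equally to the identical definition in the next example). File names and transformation values are illustrative; as the docstring states, one translation (and optionally one rotation angle) is expected per slice of the destination image, and the values are written into the field as-is.

from math import pi

nz = 25                                            # illustrative number of slices in the destination image
x_trans = [2.0 for _ in range(nz)]                 # one x translation per slice (illustrative values)
y_trans = [-1.0 for _ in range(nz)]                # one y translation per slice
theta_rot = [5.0 * pi / 180.0 for _ in range(nz)]  # optional: one rotation angle (radians) per slice

generate_warping_field('t2_dest.nii.gz', x_trans, y_trans,
                       theta_rot=theta_rot,
                       fname='warp_slicewise.nii.gz', verbose=1)
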
コード例 #22
0
def generate_warping_field(fname_dest, x_trans, y_trans, theta_rot=None, center_rotation=None, fname='warping_field.nii.gz', verbose=1):
    """Generation of a warping field towards an image and given transformation parameters.

    Given a destination image and transformation parameters this functions creates a NIFTI 3D warping field that can be
    applied afterwards. The transformation parameters corresponds to a slice-by-slice registration of images, thus the
    transformation parameters must be precised for each slice of the image.

    inputs:
        fname_dest: name of destination image (type: string)
        x_trans: list of translations along x axis for each slice (type: list, length: height of fname_dest)
        y_trans: list of translations along y axis for each slice (type: list, length: height of fname_dest)
        theta_rot[optional]: list of rotation angles in radian (and in ITK's coordinate system) for each slice (type: list)
        center_rotation[optional]: pixel coordinates in plan xOy of the wanted center of rotation (type: list,
            length: 2, example: [0,ny/2])
        fname[optional]: name of output warp (type: string)
        verbose: display parameter (type: int)

    output:
        creation of a warping field of name 'fname' with an header similar to the destination image.
    """
    from nibabel import load
    from math import cos, sin
    from sct_orientation import get_orientation

    #Make sure image is in rpi format
    sct.printv('\nChecking if the image of destination is in RPI orientation for the warping field generation ...', verbose)
    orientation = get_orientation(fname_dest)
    if orientation != 'RPI':
        sct.printv('\nWARNING: The image of destination is not in RPI format. Dimensions of the warping field might be inverted.', verbose)
    else: sct.printv('\tOK', verbose)

    sct.printv('\n\nCreating warping field ' + fname + ' for transformations along z...', verbose)

    file_dest = load(fname_dest)
    hdr_file_dest = file_dest.get_header()
    hdr_warp = hdr_file_dest.copy()

    # Get image dimensions
    sct.printv('\nGet image dimensions of destination image...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_dest).dim
    sct.printv('.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz), verbose)
    sct.printv('.. voxel size:  '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm', verbose)

    #Center of rotation
    if center_rotation == None:
        x_a = int(round(nx/2))
        y_a = int(round(ny/2))
    else:
        x_a = center_rotation[0]
        y_a = center_rotation[1]

    # Calculate displacement for each voxel
    data_warp = zeros((nx, ny, nz, 1, 3))
    # For translations
    if theta_rot == None:
        for i in range(nx):
            for j in range(ny):
                for k in range(nz):
                    data_warp[i, j, k, 0, 0] = x_trans[k]
                    data_warp[i, j, k, 0, 1] = y_trans[k]
                    data_warp[i, j, k, 0, 2] = 0
    # # For rigid transforms (not optimized)
    # if theta_rot != None:
    #     for k in range(nz):
    #         for i in range(nx):
    #             for j in range(ny):
    #                 # data_warp[i, j, k, 0, 0] = (cos(theta_rot[k])-1) * (i - x_a) - sin(theta_rot[k]) * (j - y_a) - x_trans[k]
    #                 # data_warp[i, j, k, 0, 1] = -(sin(theta_rot[k]) * (i - x_a) + (cos(theta_rot[k])-1) * (j - y_a)) + y_trans[k]
    #
    #                 data_warp[i, j, k, 0, 0] = (cos(theta_rot[k]) - 1) * (i - x_a) - sin(theta_rot[k]) * (j - y_a) + x_trans[k] #+ sin(theta_rot[k]) * (int(round(nx/2))-x_a)
    #                 data_warp[i, j, k, 0, 1] = - sin(theta_rot[k]) * (i - x_a) - (cos(theta_rot[k]) - 1) * (j - y_a) + y_trans[k] #- sin(theta_rot[k]) * (int(round(nx/2))-x_a)
    #                 data_warp[i, j, k, 0, 2] = 0

    # For rigid transforms with array (time optimization)
    if theta_rot != None:
        vector_i = [[[i-x_a],[j-y_a]] for i in range(nx) for j in range(ny)]
        for k in range(nz):
            matrix_rot_a = asarray([[cos(theta_rot[k]), - sin(theta_rot[k])],[-sin(theta_rot[k]), -cos(theta_rot[k])]])
            tmp = matrix_rot_a + array(((-1,0),(0,1)))
            result = dot(tmp, array(vector_i).T[0]) + array([[x_trans[k]], [y_trans[k]]])
            for i in range(nx):
                # vector_i is ordered with j (the y index) varying fastest, so each block of ny values belongs to one i
                data_warp[i, :, k, 0, 0] = result[0][i*ny:i*ny+ny]
                data_warp[i, :, k, 0, 1] = result[1][i*ny:i*ny+ny]

    # Generate warp file as a warping field
    hdr_warp.set_intent('vector', (), '')
    hdr_warp.set_data_dtype('float32')
    img = nibabel.Nifti1Image(data_warp, None, hdr_warp)
    nibabel.save(img, fname)
    sct.printv('\nDONE ! Warping field generated: '+fname, verbose)
コード例 #23
0
def main():
    # Initialization to defaults parameters
    fname_data = ''  # data is empty by default
    path_label = ''  # empty by default
    method = param.method # extraction mode by default
    labels_of_interest = param.labels_of_interest
    slices_of_interest = param.slices_of_interest
    vertebral_levels = param.vertebral_levels
    average_all_labels = param.average_all_labels
    fname_output = param.fname_output
    fname_vertebral_labeling = param.fname_vertebral_labeling
    fname_normalizing_label = ''  # optional then default is empty
    normalization_method = ''  # optional then default is empty
    actual_vert_levels = None  # variable used in case the vertebral levels requested by the user don't correspond exactly to the vertebral levels available in the metric data
    warning_vert_levels = None  # variable used to warn the user in case the requested vertebral levels don't correspond exactly to the vertebral levels available in the metric data
    verbose = param.verbose
    flag_h = 0
    ml_clusters = param.ml_clusters
    adv_param = param.adv_param
    adv_param_user = ''

    # Parameters for debug mode
    if param.debug:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        status, path_sct_data = commands.getstatusoutput('echo $SCT_TESTING_DATA_DIR')
        fname_data = '/Users/julien/data/temp/sct_example_data/mt/mtr.nii.gz'
        path_label = '/Users/julien/data/temp/sct_example_data/mt/label/atlas/'
        method = 'map'
        ml_clusters = '0:29,30,31'
        labels_of_interest = '0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29'
        slices_of_interest = ''
        vertebral_levels = ''
        average_all_labels = 1
        fname_normalizing_label = ''  #path_sct+'/testing/data/errsm_23/mt/label/template/MNI-Poly-AMU_CSF.nii.gz'
        normalization_method = ''  #'whole'
    else:
        # Check input parameters
        try:
            opts, args = getopt.getopt(sys.argv[1:], 'haf:i:l:m:n:o:p:v:w:z:') # define flags
        except getopt.GetoptError as err: # check if the arguments are defined
            print str(err) # error
            usage() # display usage
        if not opts:
            usage()
        for opt, arg in opts: # explore flags
            if opt in '-a':
                average_all_labels = 1
            elif opt in '-f':
                path_label = os.path.abspath(arg)  # save path of labels folder
            elif opt == '-h':  # help option
                flag_h = 1
            elif opt in '-i':
                fname_data = arg
            elif opt in '-l':
                labels_of_interest = arg
            elif opt in '-m':  # method for metric extraction
                method = arg
            elif opt in '-n':  # filename of the label by which the user wants to normalize
                fname_normalizing_label = arg
            elif opt in '-o': # output option
                fname_output = arg  # fname of output file
            elif opt in '-p':
                adv_param_user = arg
            elif opt in '-v':
                # vertebral levels option, if the user wants to average the metric across specific vertebral levels
                vertebral_levels = arg
            elif opt in '-w':
                # method used for the normalization by the metric estimation into the normalizing label (see flag -n): 'sbs' for slice-by-slice or 'whole' for normalization after estimation in the whole labels
                normalization_method = arg
            elif opt in '-z':  # slices numbers option
                slices_of_interest = arg # save labels numbers

    # Display usage with tract parameters by default in case files aren't chosen in arguments inputs
    if fname_data == '' or path_label == '' or flag_h:
        param.path_label = path_label
        usage()

    # Check existence of data file
    sct.printv('\ncheck existence of input files...', verbose)
    sct.check_file_exist(fname_data)
    sct.check_folder_exist(path_label)
    if fname_normalizing_label:
        sct.check_file_exist(fname_normalizing_label)

    # add slash at the end
    path_label = sct.slash_at_the_end(path_label, 1)

    # Find path to the vertebral labeling file if vertebral levels were specified by the user
    if vertebral_levels:
        if slices_of_interest:  # impossible to select BOTH specific slices and specific vertebral levels
            print '\nERROR: You cannot select BOTH vertebral levels AND slice numbers.'
            usage()
        else:
            fname_vertebral_labeling_list = sct.find_file_within_folder(fname_vertebral_labeling, path_label + '..')
            if len(fname_vertebral_labeling_list) > 1:
                print color.red + 'ERROR: More than one file named \'' + fname_vertebral_labeling + '\' was found in ' + path_label + '. Exit program.' + color.end
                sys.exit(2)
            elif len(fname_vertebral_labeling_list) == 0:
                print color.red + 'ERROR: No file named \'' + fname_vertebral_labeling + '\' was found in ' + path_label + '. Exit program.' + color.end
                sys.exit(2)
            else:
                fname_vertebral_labeling = os.path.abspath(fname_vertebral_labeling_list[0])

    # Check input parameters
    check_method(method, fname_normalizing_label, normalization_method)

    # parse argument for param
    if not adv_param_user == '':
        adv_param = adv_param_user.replace(' ', '').split(',')  # remove spaces and parse with comma
        del adv_param_user  # clean variable
        # TODO: check integrity of input

    # Extract label info
    label_id, label_name, label_file = read_label_file(path_label, param.file_info_label)
    nb_labels_total = len(label_id)

    # check consistency of label input parameter.
    label_id_user, average_all_labels = check_labels(labels_of_interest, nb_labels_total, average_all_labels, method)  # If 'labels_of_interest' is empty, then
    # 'label_id_user' contains the index of all labels in the file info_label.txt

    # print parameters
    print '\nChecked parameters:'
    print '  data ...................... '+fname_data
    print '  folder label .............. '+path_label
    print '  selected labels ........... '+str(label_id_user)
    print '  estimation method ......... '+method
    print '  slices of interest ........ '+slices_of_interest
    print '  vertebral levels .......... '+vertebral_levels
    print '  vertebral labeling file.... '+fname_vertebral_labeling
    print '  advanced parameters ....... '+str(adv_param)

    # Check if the orientation of the data is RPI
    orientation_data = get_orientation(fname_data)

    # If orientation is not RPI, change to RPI
    if orientation_data != 'RPI':
        sct.printv('\nCreate temporary folder to change the orientation of the NIFTI files into RPI...', verbose)
        path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1)
        sct.create_folder(path_tmp)
        # change orientation and load data
        sct.printv('\nChange image orientation and load it...', verbose)
        data = nib.load(set_orientation(fname_data, 'RPI', path_tmp+'orient_data.nii')).get_data()
        # Do the same for labels
        sct.printv('\nChange labels orientation and load them...', verbose)
        labels = np.empty([nb_labels_total], dtype=object)  # labels(nb_labels_total, x, y, z)
        for i_label in range(0, nb_labels_total):
            labels[i_label] = nib.load(set_orientation(path_label+label_file[i_label], 'RPI', path_tmp+'orient_'+label_file[i_label])).get_data()
        if fname_normalizing_label:  # if the "normalization" option is wanted,
            normalizing_label = np.empty([1], dtype=object)  # choose this kind of structure so as to keep easily the
            # compatibility with the rest of the code (dimensions: (1, x, y, z))
            normalizing_label[0] = nib.load(set_orientation(fname_normalizing_label, 'RPI', path_tmp+'orient_normalizing_volume.nii')).get_data()
        if vertebral_levels:  # if vertebral levels were selected,
            data_vertebral_labeling = nib.load(set_orientation(fname_vertebral_labeling, 'RPI', path_tmp+'orient_vertebral_labeling.nii.gz')).get_data()
        # Remove the temporary folder used to change the NIFTI files orientation into RPI
        sct.printv('\nRemove the temporary folder...', verbose)
        status, output = commands.getstatusoutput('rm -rf ' + path_tmp)
    else:
        # Load image
        sct.printv('\nLoad image...', verbose)
        data = nib.load(fname_data).get_data()

        # Load labels
        sct.printv('\nLoad labels...', verbose)
        labels = np.empty([nb_labels_total], dtype=object)  # labels(nb_labels_total, x, y, z)
        for i_label in range(0, nb_labels_total):
            labels[i_label] = nib.load(path_label+label_file[i_label]).get_data()
        if fname_normalizing_label:  # if the "normalization" option is wanted,
            normalizing_label = np.empty([1], dtype=object)  # choose this kind of structure so as to keep easily the
            # compatibility with the rest of the code (dimensions: (1, x, y, z))
            normalizing_label[0] = nib.load(fname_normalizing_label).get_data()  # load the data of the normalizing label
        if vertebral_levels:  # if vertebral levels were selected,
            data_vertebral_labeling = nib.load(fname_vertebral_labeling).get_data()

    # Change metric data type into floats for future manipulations (normalization)
    data = np.float64(data)

    # Get dimensions of data
    sct.printv('\nGet dimensions of data...', verbose)
    nx, ny, nz = data.shape
    sct.printv('  ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose)

    # Get dimensions of labels
    sct.printv('\nGet dimensions of label...', verbose)
    nx_atlas, ny_atlas, nz_atlas = labels[0].shape
    sct.printv('.. '+str(nx_atlas)+' x '+str(ny_atlas)+' x '+str(nz_atlas)+' x '+str(nb_labels_total), verbose)

    # Check dimensions consistency between atlas and data
    if (nx, ny, nz) != (nx_atlas, ny_atlas, nz_atlas):
        print '\nERROR: Metric data and labels DO NOT HAVE SAME DIMENSIONS.'
        sys.exit(2)

    # Update the flag "slices_of_interest" according to the vertebral levels selected by user (if it's the case)
    if vertebral_levels:
        slices_of_interest, actual_vert_levels, warning_vert_levels = \
            get_slices_matching_with_vertebral_levels(data, vertebral_levels, data_vertebral_labeling)

    # select slice of interest by cropping data and labels
    if slices_of_interest:
        data = remove_slices(data, slices_of_interest)
        for i_label in range(0, nb_labels_total):
            labels[i_label] = remove_slices(labels[i_label], slices_of_interest)
        if fname_normalizing_label:  # if the "normalization" option was selected,
            normalizing_label[0] = remove_slices(normalizing_label[0], slices_of_interest)

    # if user wants to get unique value across labels, then combine all labels together
    if average_all_labels == 1:
        sum_labels_user = np.sum(labels[label_id_user])  # sum the labels selected by user
        if method == 'ml' or method == 'map':  # in case the maximum likelihood and the average across different labels are wanted
            labels_tmp = np.empty([nb_labels_total - len(label_id_user) + 1], dtype=object)
            labels = np.delete(labels, label_id_user)  # remove the labels selected by user
            labels_tmp[0] = sum_labels_user  # put the sum of the labels selected by user in first position of the tmp
            # variable
            for i_label in range(1, len(labels_tmp)):
                labels_tmp[i_label] = labels[i_label-1]  # fill the temporary array with the values of the non-selected labels
            labels = labels_tmp  # replace the initial labels value by the updated ones (with the summed labels)
            del labels_tmp  # delete the temporary labels
        else:  # in other cases than the maximum likelihood, we can remove other labels (not needed for estimation)
            labels = np.empty(1, dtype=object)
            labels[0] = sum_labels_user  # we create a new label array that includes only the summed labels

    if fname_normalizing_label:  # if the "normalization" option is wanted
        sct.printv('\nExtract normalization values...', verbose)
        if normalization_method == 'sbs':  # case: the user wants to normalize slice-by-slice
            for z in range(0, data.shape[-1]):
                normalizing_label_slice = np.empty([1], dtype=object)  # in order to keep compatibility with the function
                # 'extract_metric_within_tract', define a new array for the slice z of the normalizing labels
                normalizing_label_slice[0] = normalizing_label[0][..., z]
                metric_normalizing_label = extract_metric_within_tract(data[..., z], normalizing_label_slice, method, 0)
                # estimate the metric mean in the normalizing label for the slice z
                if metric_normalizing_label[0][0] != 0:
                    data[..., z] = data[..., z]/metric_normalizing_label[0][0]  # divide all the slice z by this value

        elif normalization_method == 'whole':  # case: the user wants to normalize after estimations in the whole labels
            metric_mean_norm_label, metric_std_norm_label = extract_metric_within_tract(data, normalizing_label, method, param.verbose)  # mean and std are lists

    # identify cluster for each tract (for use with robust ML)
    ml_clusters_array = get_clusters(ml_clusters, labels)

    # extract metrics within labels
    sct.printv('\nExtract metric within labels...', verbose)
    metric_mean, metric_std = extract_metric_within_tract(data, labels, method, verbose, ml_clusters_array, adv_param)  # mean and std are lists

    if fname_normalizing_label and normalization_method == 'whole':  # case: user wants to normalize after estimations in the whole labels
        metric_mean, metric_std = np.divide(metric_mean, metric_mean_norm_label), np.divide(metric_std, metric_std_norm_label)

    # update label name if average
    if average_all_labels == 1:
        label_name[0] = 'AVERAGED'+' -'.join(label_name[i] for i in label_id_user)  # concatenate the names of the
        # labels selected by the user if the average tag was asked
        label_id_user = [0]  # update "label_id_user" to select the "averaged" label (which is in first position)

    metric_mean = metric_mean[label_id_user]
    metric_std = metric_std[label_id_user]

    # display metrics
    sct.printv('\nEstimation results:', 1)
    for i in range(0, metric_mean.size):
        sct.printv(str(label_id_user[i])+', '+str(label_name[label_id_user[i]])+':    '+str(metric_mean[i])+' +/- '+str(metric_std[i]), 1, 'info')

    # save and display metrics
    save_metrics(label_id_user, label_name, slices_of_interest, metric_mean, metric_std, fname_output, fname_data,
                 method, fname_normalizing_label, actual_vert_levels, warning_vert_levels)
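A note on the slice-by-slice normalization above ('-w sbs'): each z-slice of the metric volume is divided by the metric estimated within the normalizing label on that same slice. The sketch below only illustrates the idea with a plain binary mask and a simple mean; it is a simplified, hypothetical helper, since the script itself delegates the per-slice estimation to extract_metric_within_tract.

import numpy as np

def normalize_slice_by_slice(data, mask):
    # data: 3D metric volume; mask: 3D binary mask of the normalizing label
    # (hypothetical helper; a plain mean replaces the method-dependent estimator)
    data = np.float64(data)
    for z in range(data.shape[-1]):
        values = data[..., z][mask[..., z] > 0]
        if values.size > 0 and values.mean() != 0:
            data[..., z] = data[..., z] / values.mean()
    return data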
Code example #24
0
    def change_orientation(self, orientation='RPI', inversion_orient=False):
        """
        This function changes the orientation of the data by swapping the image axis.
        Warning: the nifti image header is not changed!!!
        :param orientation: string of three characters representing the new orientation (ex: AIL, default: RPI)
               inversion_orient: boolean. If True, the data are changed to match the orientation in the header, based on the orientation provided as the argument orientation.
        :return:
        """
        opposite_character = {
            'L': 'R',
            'R': 'L',
            'A': 'P',
            'P': 'A',
            'I': 'S',
            'S': 'I'
        }

        if self.orientation is None:
            from sct_orientation import get_orientation
            self.orientation = get_orientation(self.file_name)

        if inversion_orient:
            temp_orientation = self.orientation
            self.orientation = orientation
            orientation = temp_orientation

        # change the orientation of the image
        perm = [0, 1, 2]
        inversion = [1, 1, 1]
        for i, character in enumerate(self.orientation):
            try:
                perm[i] = orientation.index(character)
            except ValueError:
                perm[i] = orientation.index(opposite_character[character])
                inversion[i] = -1

        # axes inversion
        self.data = self.data[::inversion[0], ::inversion[1], ::inversion[2]]

        # axes manipulations
        from numpy import swapaxes

        if perm == [1, 0, 2]:
            self.data = swapaxes(self.data, 0, 1)
        elif perm == [2, 1, 0]:
            self.data = swapaxes(self.data, 0, 2)
        elif perm == [0, 2, 1]:
            self.data = swapaxes(self.data, 1, 2)
        elif perm == [2, 0, 1]:
            self.data = swapaxes(self.data, 0, 2)  # transform [2, 0, 1] to [1, 0, 2]
            self.data = swapaxes(self.data, 0, 1)  # transform [1, 0, 2] to [0, 1, 2]
        elif perm == [1, 2, 0]:
            self.data = swapaxes(self.data, 0, 2)  # transform [1, 2, 0] to [0, 2, 1]
            self.data = swapaxes(self.data, 1, 2)  # transform [0, 2, 1] to [0, 1, 2]
        elif perm == [0, 1, 2]:
            # do nothing
            pass
        else:
            print 'Error: wrong orientation'

        self.orientation = orientation
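A note on the example above: the chain of swapaxes calls enumerates each three-axis permutation by hand. Under the same convention (perm[i] is the destination position of source axis i), the whole branch can be collapsed into a single numpy.transpose call. The snippet below is a sketch of that equivalence, not part of the original class.

from numpy import argsort, transpose

def apply_axis_permutation(data, perm, inversion):
    # hypothetical helper equivalent to the inversion + swapaxes branch above
    data = data[::inversion[0], ::inversion[1], ::inversion[2]]  # axis flips
    # output axis j must come from the source axis i with perm[i] == j,
    # i.e. from argsort(perm)[j]
    return transpose(data, argsort(perm))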
Code example #25
0
            zmax_anatomic = line.split(',')[1]
            zmin_seg = line.split(',')[2]
            zmax_seg = line.split(',')[3]
            if len(line_list)==6:
                ymin_anatomic = line.split(',')[4]
                ymax_anatomic = line.split(',')[5]
        file_results.close()

        os.chdir(PATH_OUTPUT + '/subjects/'+subject+'/'+'T2')

        # Convert to RPI
        # Input:
        # - data.nii.gz
        # - data_RPI.nii.gz
        print '\nConvert to RPI'
        orientation = get_orientation('data.nii.gz')
        sct.run('sct_orientation -i data.nii.gz -s RPI')
        # Crop image
        if ymin_anatomic is None and ymax_anatomic is None:
            sct.run('sct_crop_image -i data_RPI.nii.gz -o data_RPI_crop.nii.gz -dim 2 -start ' + zmin_anatomic + ' -end ' + zmax_anatomic)
        else:
            sct.run('sct_crop_image -i data_RPI.nii.gz -o data_RPI_crop.nii.gz -dim 1,2 -start ' + ymin_anatomic + ',' + zmin_anatomic + ' -end ' + ymax_anatomic + ',' + zmax_anatomic)
        # sct.run('sct_orientation -i data_RPI_crop.nii.gz -o data_crop.nii.gz -s '+ orientation)

        # denoising
        # input:
        # - data_crop.nii.gz
        # output:
        # - data_crop_denoised.nii.gz
        #print '\nDenoising image data_RPI_crop.nii.gz...'
        #sct.printv('sct_denoising_onlm.py -i data_RPI_crop.nii.gz')
        #sct.run('sct_denoising_onlm.py -i data_RPI_crop.nii.gz')
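A note on the two sct.run crop calls above: they differ only in whether y-bounds were present in the results file. The snippet below is a small sketch of how that command string could be factored out, reusing the flags that already appear in those calls; the helper itself is hypothetical and not part of the script.

def build_crop_command(zmin, zmax, ymin=None, ymax=None):
    # hypothetical helper; bounds are passed as strings, as in the excerpt above
    if ymin is None and ymax is None:
        return ('sct_crop_image -i data_RPI.nii.gz -o data_RPI_crop.nii.gz'
                ' -dim 2 -start ' + zmin + ' -end ' + zmax)
    return ('sct_crop_image -i data_RPI.nii.gz -o data_RPI_crop.nii.gz'
            ' -dim 1,2 -start ' + ymin + ',' + zmin + ' -end ' + ymax + ',' + zmax)

# usage sketch: sct.run(build_crop_command(zmin_anatomic, zmax_anatomic, ymin_anatomic, ymax_anatomic))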
Code example #26
0
def main():
    
    # Initialization
    fname_anat = ''
    fname_centerline = ''
    centerline_fitting = 'polynome'
    remove_temp_files = param.remove_temp_files
    interp = param.interp
    degree_poly = param.deg_poly
    
    # extract path of the script
    path_script = os.path.dirname(__file__)+'/'
    
    # Parameters for debug mode
    if param.debug == 1:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        status, path_sct_data = commands.getstatusoutput('echo $SCT_TESTING_DATA_DIR')
        fname_anat = path_sct_data+'/t2/t2.nii.gz'
        fname_centerline = path_sct_data+'/t2/t2_seg.nii.gz'
    else:
        # Check input param
        try:
            opts, args = getopt.getopt(sys.argv[1:],'hi:c:r:d:f:s:')
        except getopt.GetoptError as err:
            print str(err)
            usage()
        if not opts:
            usage()
        for opt, arg in opts:
            if opt == '-h':
                usage()
            elif opt in ('-i'):
                fname_anat = arg
            elif opt in ('-c'):
                fname_centerline = arg
            elif opt in ('-r'):
                remove_temp_files = int(arg)
            elif opt in ('-d'):
                degree_poly = int(arg)
            elif opt in ('-f'):
                centerline_fitting = str(arg)
            elif opt in ('-s'):
                interp = str(arg)
    
    # display usage if a mandatory argument is not provided
    if fname_anat == '' or fname_centerline == '':
        usage()
    
    # check existence of input files
    sct.check_file_exist(fname_anat)
    sct.check_file_exist(fname_centerline)
    
    # extract path/file/extension
    path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
    
    # Display arguments
    print '\nCheck input arguments...'
    print '  Input volume ...................... '+fname_anat
    print '  Centerline ........................ '+fname_centerline
    print ''
    
    # Get input image orientation
    input_image_orientation = get_orientation(fname_anat)

    # Reorient input data into RL PA IS orientation
    set_orientation(fname_anat, 'RPI', 'tmp.anat_orient.nii')
    set_orientation(fname_centerline, 'RPI', 'tmp.centerline_orient.nii')

    # Open centerline
    #==========================================================================================
    print '\nGet dimensions of input centerline...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('tmp.centerline_orient.nii')
    print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
    print '.. voxel size:  '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'
    
    print '\nOpen centerline volume...'
    file = nibabel.load('tmp.centerline_orient.nii')
    data = file.get_data()

    X, Y, Z = (data>0).nonzero()
    min_z_index, max_z_index = min(Z), max(Z)
    
    
    # loop across z and associate x,y coordinate with the point having maximum intensity
    x_centerline = [0 for iz in range(min_z_index, max_z_index+1, 1)]
    y_centerline = [0 for iz in range(min_z_index, max_z_index+1, 1)]
    z_centerline = [iz for iz in range(min_z_index, max_z_index+1, 1)]

    # Two possible scenarios:
    # 1. the centerline is probabilistic: each slice contains voxels with the probability of containing the centerline [0:...:1]
    # We only take the maximum value of the image to approximate the centerline.
    # 2. The centerline/segmentation image contains many pixels per slice with values {0,1}.
    # We take all the points and approximate the centerline on all these points.

    X, Y, Z = ((data<1)*(data>0)).nonzero() # X is empty if binary image
    if (len(X) > 0): # Scenario 1
        for iz in range(min_z_index, max_z_index+1, 1):
            x_centerline[iz-min_z_index], y_centerline[iz-min_z_index] = numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape)
    else: # Scenario 2
        for iz in range(min_z_index, max_z_index+1, 1):
            x_seg, y_seg = (data[:,:,iz]>0).nonzero()
            if len(x_seg) > 0:
                x_centerline[iz-min_z_index] = numpy.mean(x_seg)
                y_centerline[iz-min_z_index] = numpy.mean(y_seg)

    # TODO: find a way to do the previous loop with this, which is more neat:
    # [numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape) for iz in range(0,nz,1)]
    
    # clear variable
    del data
    
    # Fit the centerline points with the kind of curve given as argument of the script and return the new smoothed coordinates
    if centerline_fitting == 'splines':
        try:
            x_centerline_fit, y_centerline_fit = b_spline_centerline(x_centerline,y_centerline,z_centerline)
        except ValueError:
            print "splines fitting doesn't work, trying with polynomial fitting...\n"
            x_centerline_fit, y_centerline_fit = polynome_centerline(x_centerline,y_centerline,z_centerline)
    elif centerline_fitting == 'polynome':
        x_centerline_fit, y_centerline_fit = polynome_centerline(x_centerline,y_centerline,z_centerline)

    #==========================================================================================
    # Split input volume
    print '\nSplit input volume...'
    sct.run(sct.fsloutput + 'fslsplit tmp.anat_orient.nii tmp.anat_z -z')
    file_anat_split = ['tmp.anat_z'+str(z).zfill(4) for z in range(0,nz,1)]

    # initialize variables
    file_mat_inv_cumul = ['tmp.mat_inv_cumul_z'+str(z).zfill(4) for z in range(0,nz,1)]
    z_init = min_z_index
    displacement_max_z_index = x_centerline_fit[z_init-min_z_index]-x_centerline_fit[max_z_index-min_z_index]

    # generate one fitted transformation matrix (text file) per slice
    print '\nGenerate fitted transformation matrices...'
    file_mat_inv_cumul_fit = ['tmp.mat_inv_cumul_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
    for iz in range(min_z_index, max_z_index+1, 1):
        # compute inverse cumulative fitted transformation matrix
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        if (x_centerline[iz-min_z_index] == 0 and y_centerline[iz-min_z_index] == 0):
            displacement = 0
        else:
            displacement = x_centerline_fit[z_init-min_z_index]-x_centerline_fit[iz-min_z_index]
        fid.write('%i %i %i %f\n' %(1, 0, 0, displacement) )
        fid.write('%i %i %i %f\n' %(0, 1, 0, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
        fid.close()

    # complete the displacement matrices for the slices below and above the fitted centerline range
    for iz in range(0, min_z_index, 1):
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        fid.write('%i %i %i %f\n' %(1, 0, 0, 0) )
        fid.write('%i %i %i %f\n' %(0, 1, 0, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
        fid.close()
    for iz in range(max_z_index+1, nz, 1):
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        fid.write('%i %i %i %f\n' %(1, 0, 0, displacement_max_z_index) )
        fid.write('%i %i %i %f\n' %(0, 1, 0, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
        fid.close()

    # apply transformations to data
    print '\nApply fitted transformation matrices...'
    file_anat_split_fit = ['tmp.anat_orient_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
    for iz in range(0, nz, 1):
        # forward cumulative transformation to data
        sct.run(fsloutput+'flirt -in '+file_anat_split[iz]+' -ref '+file_anat_split[iz]+' -applyxfm -init '+file_mat_inv_cumul_fit[iz]+' -out '+file_anat_split_fit[iz]+' -interp '+interp)

    # Merge into 4D volume
    print '\nMerge into 4D volume...'
    sct.run(fsloutput+'fslmerge -z tmp.anat_orient_fit tmp.anat_orient_fit_z*')

    # Reorient data as it was before
    print '\nReorient data back into native orientation...'
    set_orientation('tmp.anat_orient_fit.nii', input_image_orientation, 'tmp.anat_orient_fit_reorient.nii')

    # Generate output file (in current folder)
    print '\nGenerate output file (in current folder)...'
    sct.generate_output_file('tmp.anat_orient_fit_reorient.nii', file_anat+'_flatten'+ext_anat)

    # Delete temporary files
    if remove_temp_files == 1:
        print '\nDelete temporary files...'
        sct.run('rm -rf tmp.*')

    # to view results
    print '\nDone! To view results, type:'
    print 'fslview '+file_anat+ext_anat+' '+file_anat+'_flatten'+ext_anat+' &\n'
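A note on the matrix-writing loops above: each of them emits the same kind of file, a 4x4 affine in FLIRT's plain-text format whose only non-trivial entry is an x-translation, later applied with flirt -applyxfm -init. The snippet below is a hedged sketch of a helper that factors out that writing; it is hypothetical and not part of the script.

def write_x_translation_matrix(fname, tx=0.0):
    # write a 4x4 FLIRT-style text affine that translates by tx along x
    fid = open(fname, 'w')
    fid.write('%i %i %i %f\n' % (1, 0, 0, tx))
    fid.write('%i %i %i %f\n' % (0, 1, 0, 0))
    fid.write('%i %i %i %i\n' % (0, 0, 1, 0))
    fid.write('%i %i %i %i\n' % (0, 0, 0, 1))
    fid.close()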
Code example #27
0
def main():
    # Initialization to defaults parameters
    fname_data = ''  # data is empty by default
    path_label = ''  # empty by default
    method = param.method  # extraction mode by default
    labels_of_interest = param.labels_of_interest
    slices_of_interest = param.slices_of_interest
    vertebral_levels = param.vertebral_levels
    average_all_labels = param.average_all_labels
    fname_output = param.fname_output
    fname_vertebral_labeling = param.fname_vertebral_labeling
    fname_normalizing_label = ''  # optional then default is empty
    normalization_method = ''  # optional then default is empty
    actual_vert_levels = None  # variable used in case the vertebral levels asked by the user don't correspond exactly to the vertebral levels available in the metric data
    warning_vert_levels = None  # variable used to warn the user in case the vertebral levels he asked don't correspond exactly to the vertebral levels available in the metric data
    verbose = param.verbose
    flag_h = 0
    ml_clusters = param.ml_clusters
    adv_param = param.adv_param
    adv_param_user = ''

    # Parameters for debug mode
    if param.debug:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        status, path_sct_data = commands.getstatusoutput(
            'echo $SCT_TESTING_DATA_DIR')
        fname_data = '/Users/julien/data/temp/sct_example_data/mt/mtr.nii.gz'
        path_label = '/Users/julien/data/temp/sct_example_data/mt/label/atlas/'
        method = 'map'
        ml_clusters = '0:29,30,31'
        labels_of_interest = '0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29'
        slices_of_interest = ''
        vertebral_levels = ''
        average_all_labels = 1
        fname_normalizing_label = ''  #path_sct+'/testing/data/errsm_23/mt/label/template/MNI-Poly-AMU_CSF.nii.gz'
        normalization_method = ''  #'whole'
    else:
        # Check input parameters
        try:
            opts, args = getopt.getopt(
                sys.argv[1:], 'haf:i:l:m:n:o:p:v:w:z:')  # define flags
        except getopt.GetoptError as err:  # check if the arguments are defined
            print str(err)  # error
            usage()  # display usage
        if not opts:
            usage()
        for opt, arg in opts:  # explore flags
            if opt in '-a':
                average_all_labels = 1
            elif opt in '-f':
                path_label = os.path.abspath(arg)  # save path of labels folder
            elif opt == '-h':  # help option
                flag_h = 1
            elif opt in '-i':
                fname_data = arg
            elif opt in '-l':
                labels_of_interest = arg
            elif opt in '-m':  # method for metric extraction
                method = arg
            elif opt in '-n':  # filename of the label by which the user wants to normalize
                fname_normalizing_label = arg
            elif opt in '-o':  # output option
                fname_output = arg  # fname of output file
            elif opt in '-p':
                adv_param_user = arg
            elif opt in '-v':
                # vertebral levels option, if the user wants to average the metric across specific vertebral levels
                vertebral_levels = arg
            elif opt in '-w':
                # method used for the normalization by the metric estimation into the normalizing label (see flag -n): 'sbs' for slice-by-slice or 'whole' for normalization after estimation in the whole labels
                normalization_method = arg
            elif opt in '-z':  # slices numbers option
                slices_of_interest = arg  # save labels numbers

    # Display usage (with default tract parameters) if a mandatory file was not provided or if help was requested
    if fname_data == '' or path_label == '' or flag_h:
        param.path_label = path_label
        usage()

    # Check existence of data file
    sct.printv('\ncheck existence of input files...', verbose)
    sct.check_file_exist(fname_data)
    sct.check_folder_exist(path_label)
    if fname_normalizing_label:
        sct.check_file_exist(fname_normalizing_label)

    # add slash at the end
    path_label = sct.slash_at_the_end(path_label, 1)

    # Find path to the vertebral labeling file if vertebral levels were specified by the user
    if vertebral_levels:
        if slices_of_interest:  # impossible to select BOTH specific slices and specific vertebral levels
            print '\nERROR: You cannot select BOTH vertebral levels AND slice numbers.'
            usage()
        else:
            fname_vertebral_labeling_list = sct.find_file_within_folder(
                fname_vertebral_labeling, path_label + '..')
            if len(fname_vertebral_labeling_list) > 1:
                print color.red + 'ERROR: More than one file named \'' + fname_vertebral_labeling + '\' was found in ' + path_label + '. Exit program.' + color.end
                sys.exit(2)
            elif len(fname_vertebral_labeling_list) == 0:
                print color.red + 'ERROR: No file named \'' + fname_vertebral_labeling + '\' was found in ' + path_label + '. Exit program.' + color.end
                sys.exit(2)
            else:
                fname_vertebral_labeling = os.path.abspath(
                    fname_vertebral_labeling_list[0])

    # Check input parameters
    check_method(method, fname_normalizing_label, normalization_method)

    # parse argument for param
    if not adv_param_user == '':
        adv_param = adv_param_user.replace(' ', '').split(
            ',')  # remove spaces and parse with comma
        del adv_param_user  # clean variable
        # TODO: check integrity of input

    # Extract label info
    label_id, label_name, label_file = read_label_file(path_label,
                                                       param.file_info_label)
    nb_labels_total = len(label_id)

    # check consistency of label input parameter.
    label_id_user, average_all_labels = check_labels(
        labels_of_interest, nb_labels_total, average_all_labels,
        method)  # If 'labels_of_interest' is empty, then
    # 'label_id_user' contains the index of all labels in the file info_label.txt

    # print parameters
    print '\nChecked parameters:'
    print '  data ...................... ' + fname_data
    print '  folder label .............. ' + path_label
    print '  selected labels ........... ' + str(label_id_user)
    print '  estimation method ......... ' + method
    print '  slices of interest ........ ' + slices_of_interest
    print '  vertebral levels .......... ' + vertebral_levels
    print '  vertebral labeling file.... ' + fname_vertebral_labeling
    print '  advanced parameters ....... ' + str(adv_param)

    # Check if the orientation of the data is RPI
    orientation_data = get_orientation(fname_data)

    # If orientation is not RPI, change to RPI
    if orientation_data != 'RPI':
        sct.printv(
            '\nCreate temporary folder to change the orientation of the NIFTI files into RPI...',
            verbose)
        path_tmp = sct.slash_at_the_end('tmp.' + time.strftime("%y%m%d%H%M%S"),
                                        1)
        sct.create_folder(path_tmp)
        # change orientation and load data
        sct.printv('\nChange image orientation and load it...', verbose)
        data = nib.load(
            set_orientation(fname_data, 'RPI',
                            path_tmp + 'orient_data.nii')).get_data()
        # Do the same for labels
        sct.printv('\nChange labels orientation and load them...', verbose)
        labels = np.empty([nb_labels_total],
                          dtype=object)  # labels(nb_labels_total, x, y, z)
        for i_label in range(0, nb_labels_total):
            labels[i_label] = nib.load(
                set_orientation(path_label + label_file[i_label], 'RPI',
                                path_tmp + 'orient_' +
                                label_file[i_label])).get_data()
        if fname_normalizing_label:  # if the "normalization" option is wanted,
            normalizing_label = np.empty(
                [1], dtype=object
            )  # choose this kind of structure so as to keep easily the
            # compatibility with the rest of the code (dimensions: (1, x, y, z))
            normalizing_label[0] = nib.load(
                set_orientation(fname_normalizing_label, 'RPI', path_tmp +
                                'orient_normalizing_volume.nii')).get_data()
        if vertebral_levels:  # if vertebral levels were selected,
            data_vertebral_labeling = nib.load(
                set_orientation(
                    fname_vertebral_labeling, 'RPI',
                    path_tmp + 'orient_vertebral_labeling.nii.gz')).get_data()
        # Remove the temporary folder used to change the NIFTI files orientation into RPI
        sct.printv('\nRemove the temporary folder...', verbose)
        status, output = commands.getstatusoutput('rm -rf ' + path_tmp)
    else:
        # Load image
        sct.printv('\nLoad image...', verbose)
        data = nib.load(fname_data).get_data()

        # Load labels
        sct.printv('\nLoad labels...', verbose)
        labels = np.empty([nb_labels_total],
                          dtype=object)  # labels(nb_labels_total, x, y, z)
        for i_label in range(0, nb_labels_total):
            labels[i_label] = nib.load(path_label +
                                       label_file[i_label]).get_data()
        if fname_normalizing_label:  # if the "normalization" option is wanted,
            normalizing_label = np.empty(
                [1], dtype=object
            )  # choose this kind of structure so as to keep easily the
            # compatibility with the rest of the code (dimensions: (1, x, y, z))
            normalizing_label[0] = nib.load(fname_normalizing_label).get_data(
            )  # load the data of the normalizing label
        if vertebral_levels:  # if vertebral levels were selected,
            data_vertebral_labeling = nib.load(
                fname_vertebral_labeling).get_data()

    # Change metric data type into floats for future manipulations (normalization)
    data = np.float64(data)

    # Get dimensions of data
    sct.printv('\nGet dimensions of data...', verbose)
    nx, ny, nz = data.shape
    sct.printv('  ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose)

    # Get dimensions of labels
    sct.printv('\nGet dimensions of label...', verbose)
    nx_atlas, ny_atlas, nz_atlas = labels[0].shape
    sct.printv(
        '.. ' + str(nx_atlas) + ' x ' + str(ny_atlas) + ' x ' + str(nz_atlas) +
        ' x ' + str(nb_labels_total), verbose)

    # Check dimensions consistency between atlas and data
    if (nx, ny, nz) != (nx_atlas, ny_atlas, nz_atlas):
        print '\nERROR: Metric data and labels DO NOT HAVE SAME DIMENSIONS.'
        sys.exit(2)

    # Update the flag "slices_of_interest" according to the vertebral levels selected by user (if it's the case)
    if vertebral_levels:
        slices_of_interest, actual_vert_levels, warning_vert_levels = \
            get_slices_matching_with_vertebral_levels(data, vertebral_levels, data_vertebral_labeling)

    # select slice of interest by cropping data and labels
    if slices_of_interest:
        data = remove_slices(data, slices_of_interest)
        for i_label in range(0, nb_labels_total):
            labels[i_label] = remove_slices(labels[i_label],
                                            slices_of_interest)
        if fname_normalizing_label:  # if the "normalization" option was selected,
            normalizing_label[0] = remove_slices(normalizing_label[0],
                                                 slices_of_interest)

    # if user wants to get unique value across labels, then combine all labels together
    if average_all_labels == 1:
        sum_labels_user = np.sum(
            labels[label_id_user])  # sum the labels selected by user
        if method == 'ml' or method == 'map':  # in case the maximum likelihood and the average across different labels are wanted
            labels_tmp = np.empty([nb_labels_total - len(label_id_user) + 1],
                                  dtype=object)
            labels = np.delete(
                labels, label_id_user)  # remove the labels selected by user
            labels_tmp[0] = sum_labels_user  # put the sum of the labels selected by user in first position of the tmp variable
            for i_label in range(1, len(labels_tmp)):
                labels_tmp[i_label] = labels[i_label - 1]  # fill the temporary array with the values of the non-selected labels
            labels = labels_tmp  # replace the initial labels value by the updated ones (with the summed labels)
            del labels_tmp  # delete the temporary labels
        else:  # in other cases than the maximum likelihood, we can remove other labels (not needed for estimation)
            labels = np.empty(1, dtype=object)
            labels[0] = sum_labels_user  # we create a new label array that includes only the summed labels

    if fname_normalizing_label:  # if the "normalization" option is wanted
        sct.printv('\nExtract normalization values...', verbose)
        if normalization_method == 'sbs':  # case: the user wants to normalize slice-by-slice
            for z in range(0, data.shape[-1]):
                normalizing_label_slice = np.empty(
                    [1], dtype=object
                )  # in order to keep compatibility with the function
                # 'extract_metric_within_tract', define a new array for the slice z of the normalizing labels
                normalizing_label_slice[0] = normalizing_label[0][..., z]
                metric_normalizing_label = extract_metric_within_tract(
                    data[..., z], normalizing_label_slice, method, 0)
                # estimate the metric mean in the normalizing label for the slice z
                if metric_normalizing_label[0][0] != 0:
                    data[..., z] = data[..., z] / metric_normalizing_label[0][0]  # divide all the slice z by this value

        elif normalization_method == 'whole':  # case: the user wants to normalize after estimations in the whole labels
            metric_mean_norm_label, metric_std_norm_label = extract_metric_within_tract(
                data, normalizing_label, method,
                param.verbose)  # mean and std are lists

    # identify cluster for each tract (for use with robust ML)
    ml_clusters_array = get_clusters(ml_clusters, labels)

    # extract metrics within labels
    sct.printv('\nExtract metric within labels...', verbose)
    metric_mean, metric_std = extract_metric_within_tract(
        data, labels, method, verbose, ml_clusters_array,
        adv_param)  # mean and std are lists

    if fname_normalizing_label and normalization_method == 'whole':  # case: user wants to normalize after estimations in the whole labels
        metric_mean, metric_std = np.divide(metric_mean,
                                            metric_mean_norm_label), np.divide(
                                                metric_std,
                                                metric_std_norm_label)

    # update label name if average
    if average_all_labels == 1:
        label_name[0] = 'AVERAGED' + ' -'.join(label_name[i] for i in label_id_user)  # concatenate the names of the labels selected by the user if the average tag was asked
        label_id_user = [0]  # update "label_id_user" to select the "averaged" label (which is in first position)

    metric_mean = metric_mean[label_id_user]
    metric_std = metric_std[label_id_user]

    # display metrics
    sct.printv('\nEstimation results:', 1)
    for i in range(0, metric_mean.size):
        sct.printv(
            str(label_id_user[i]) + ', ' + str(label_name[label_id_user[i]]) +
            ':    ' + str(metric_mean[i]) + ' +/- ' + str(metric_std[i]), 1,
            'info')

    # save and display metrics
    save_metrics(label_id_user, label_name, slices_of_interest, metric_mean,
                 metric_std, fname_output, fname_data, method,
                 fname_normalizing_label, actual_vert_levels,
                 warning_vert_levels)
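A note on the label-averaging branch above: when -a is combined with the ml or map method, the selected labels are summed into a single label that is prepended to the non-selected ones (the maximum-likelihood model still needs the full set of tracts), whereas for the other methods only the summed label is kept. The snippet below is a compact, hypothetical sketch of that array manipulation, operating on a NumPy object array like labels above.

import numpy as np

def combine_selected_labels(labels, selected_ids, keep_others=True):
    # sum the selected labels into one label; optionally keep the non-selected ones
    summed = np.sum(labels[selected_ids])
    if not keep_others:
        out = np.empty(1, dtype=object)
        out[0] = summed
        return out
    others = np.delete(labels, selected_ids)
    out = np.empty(len(others) + 1, dtype=object)
    out[0] = summed
    for i, lab in enumerate(others):
        out[i + 1] = lab
    return out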