def resample_labels(fname_labels, fname_dest, fname_output):
    """
    This function re-creates labels in a space that has been resampled. It works by redefining the location of
    each label using the ratio between the old and new matrix dimensions.
    """
    # get dimensions of input and destination files
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_labels)
    nxd, nyd, nzd, ntd, pxd, pyd, pzd, ptd = sct.get_dimension(fname_dest)
    sampling_factor = [float(nx)/nxd, float(ny)/nyd, float(nz)/nzd]
    # read labels
    from sct_label_utils import ProcessLabels
    processor = ProcessLabels(fname_labels)
    label_list = processor.display_voxel().split(':')
    # parse to get each label
    # TODO: modify sct_label_utils to output list of coordinates instead of string.
    label_new_list = []
    for label in label_list:
        label_sub = label.split(',')
        label_sub_new = []
        for i_label in range(0, 3):
            label_single = round(int(label_sub[i_label])/sampling_factor[i_label])
            label_sub_new.append(str(int(label_single)))
        label_sub_new.append(str(int(float(label_sub[3]))))
        label_new_list.append(','.join(label_sub_new))
    label_new_list = ':'.join(label_new_list)
    # create new labels
    sct.run('sct_label_utils -i '+fname_dest+' -t create -x '+label_new_list+' -v 1 -o '+fname_output)
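
# A minimal, self-contained sketch of the coordinate rescaling performed by resample_labels above, assuming the
# labels are already available as (x, y, z, value) tuples rather than the sct_label_utils string format (the
# helper name and the example values are hypothetical):
def rescale_label_coordinates(labels, dim_src, dim_dest):
    """Map (x, y, z, value) labels from a source matrix size to a destination matrix size."""
    factors = [float(ns) / nd for ns, nd in zip(dim_src, dim_dest)]
    rescaled = []
    for x, y, z, value in labels:
        new_coord = [int(round(c / f)) for c, f in zip((x, y, z), factors)]
        rescaled.append(tuple(new_coord) + (int(value),))
    return rescaled

# Example: a label at (100, 80, 30) in a 256x256x64 image resampled to 128x128x64 lands at (50, 40, 30):
# rescale_label_coordinates([(100, 80, 30, 3)], (256, 256, 64), (128, 128, 64))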
    def __init__(self, target_fname, sc_seg_fname, t2_data=None):

        self.t2star = 't2star.nii.gz'
        self.sc_seg = 't2star_sc_seg.nii.gz'
        self.t2 = 't2.nii.gz'
        self.t2_seg = 't2_seg.nii.gz'
        self.t2_landmarks = 't2_landmarks.nii.gz'

        # copy the target image and its spinal cord segmentation into the current (temporary) folder
        sct.run('cp ../' + target_fname + ' ./' + self.t2star)
        sct.run('cp ../' + sc_seg_fname + ' ./' + self.sc_seg)

        nx, ny, nz, nt, self.original_px, self.original_py, pz, pt = sct.get_dimension(self.t2star)

        # resample to a 0.3 mm in-plane resolution if needed
        if round(self.original_px, 2) != 0.3 or round(self.original_py, 2) != 0.3:
            self.t2star = resample_image(self.t2star)
            self.sc_seg = resample_image(self.sc_seg, binary=True)

        # store the original orientation, parsed from the sct_orientation output
        status, t2_star_orientation = sct.run('sct_orientation -i ' + self.t2star)
        self.original_orientation = t2_star_orientation[4:7]

        self.square_mask = crop_t2_star(self.t2star, self.sc_seg, box_size=75)

        self.treated_target = self.t2star[:-7] + '_seg_in_croped.nii.gz'

        self.level_fname = None
        if t2_data is not None:
            sct.run('cp ../' + t2_data[0] + ' ./' + self.t2)
            sct.run('cp ../' + t2_data[1] + ' ./' + self.t2_seg)
            sct.run('cp ../' + t2_data[2] + ' ./' + self.t2_landmarks)

            self.level_fname = compute_level_file(self.t2star, self.sc_seg, self.t2, self.t2_seg, self.t2_landmarks)
def fraction_volume(data, fname_ref, fname_label_output):
    """
    For each y slice, compute the fraction of volume in `data` relative to the reference image and write the
    result to a text file (one line per slice). Assumes 1 mm isotropic resolution.
    """
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_ref)
    img_ref = nibabel.load(fname_ref)
    # 3d array for each x y z voxel values for the input nifti image
    data_ref = img_ref.get_data()
    Xr, Yr, Zr = (data_ref > 0).nonzero()
    ref_matrix = [Xr, Yr, Zr]
    indices = range(0,len(Xr))
    indices.sort(key = ref_matrix[1].__getitem__)
    for i, sublist in enumerate(ref_matrix):
        ref_matrix[i] = [sublist[j] for j in indices]
    Xr = []; Yr = []; Zr = []
    for i in range(0,len(ref_matrix[1])):
        Xr.append(ref_matrix[0][i])
        Yr.append(ref_matrix[1][i])
        Zr.append(ref_matrix[2][i])
    
    X, Y, Z = (data > 0).nonzero()
    data_matrix = [X, Y, Z]
    indices = range(0,len(X))
    indices.sort(key = data_matrix[1].__getitem__)
    for i, sublist in enumerate(data_matrix):
        data_matrix[i] = [sublist[j] for j in indices]
    X = []; Y = []; Z = []
    for i in range(0,len(data_matrix[1])):
        X.append(data_matrix[0][i])
        Y.append(data_matrix[1][i])
        Z.append(data_matrix[2][i])

    volume_fraction = []
    for i in range(ny):
        r = []
        for j,p in enumerate(Yr):
            if p == i:
                r.append(j)
        d = []
        for j,p in enumerate(Y):
            if p == i:
                d.append(j)
        volume_ref = 0.0
        for k in range(len(r)):
            value = data_ref[Xr[r[k]]][Yr[r[k]]][Zr[r[k]]]
            if value > 0.5:
                volume_ref = volume_ref + value  # assumes 1 mm isotropic resolution
        volume_data = 0.0
        for k in range(len(d)):
            value = data[X[d[k]]][Y[d[k]]][Z[d[k]]]
            if value > 0.5:
                volume_data = volume_data + value  # assumes 1 mm isotropic resolution
        if volume_ref!=0:
            volume_fraction.append(volume_data/volume_ref)
        else:
            volume_fraction.append(0)

    fo = open(fname_label_output, "wb")
    for i in range(ny):
        fo.write("%i %f\n" %(i,volume_fraction[i]))
    fo.close()
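
# The per-slice bookkeeping above can be expressed more compactly with numpy; this is a sketch assuming data and
# data_ref are numpy arrays on the same grid and that values above 0.5 count toward the volume (the function and
# variable names are hypothetical):
def fraction_volume_per_slice(data, data_ref, ny):
    """Return, for each y slice, sum(data)/sum(data_ref) over voxels with value > 0.5."""
    fractions = []
    for i in range(ny):
        vol_ref = float(data_ref[:, i, :][data_ref[:, i, :] > 0.5].sum())
        vol_data = float(data[:, i, :][data[:, i, :] > 0.5].sum())
        fractions.append(vol_data / vol_ref if vol_ref != 0 else 0.0)
    return fractions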
    def addfiles(self, file):

        path_data, file_data, ext_data = sct.extract_fname(file)
        # check that the files have the same size
        if len(self.list_file) > 0:
            self.dimension = sct.get_dimension(self.list_file[0])
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(file)

            #if self.dimension != (nx, ny, nz, nt, px, py, pz, pt) :
            if self.dimension[0:3] != (nx, ny, nz) or self.dimension[4:7] != (px, py, pz) :
                # Print an error and exit the program if the sizes differ
                print('\nError: Files are not of the same size.')
                sys.exit()
        # Add file if same size
        self.list_file.append(file)

        image_input = Image(file)
        self.list_image.append(image_input)
        print('\nFile', file_data+ext_data,' added to the list.')
    def __init__(self,
                 target_fname,
                 sc_seg_fname,
                 t2_data=None,
                 denoising=True):

        self.t2star = 't2star.nii.gz'
        self.sc_seg = 't2star_sc_seg.nii.gz'
        self.t2 = 't2.nii.gz'
        self.t2_seg = 't2_seg.nii.gz'
        self.t2_landmarks = 't2_landmarks.nii.gz'
        self.resample_to = 0.3

        sct.run('cp ../' + target_fname + ' ./' + self.t2star)
        sct.run('cp ../' + sc_seg_fname + ' ./' + self.sc_seg)

        nx, ny, nz, nt, self.original_px, self.original_py, pz, pt = sct.get_dimension(
            self.t2star)

        if round(self.original_px, 2) != self.resample_to or round(
                self.original_py, 2) != self.resample_to:
            self.t2star = resample_image(self.t2star,
                                         npx=self.resample_to,
                                         npy=self.resample_to)
            self.sc_seg = resample_image(self.sc_seg,
                                         binary=True,
                                         npx=self.resample_to,
                                         npy=self.resample_to)

        t2star_im = Image(self.t2star)
        if denoising:
            t2star_im.denoise_ornlm()
            t2star_im.save()
            self.t2star = t2star_im.file_name + t2star_im.ext
        '''
        status, t2_star_orientation = sct.run('sct_orientation -i ' + self.t2star)
        self.original_orientation = t2_star_orientation[4:7]
        '''
        self.original_orientation = t2star_im.orientation

        self.square_mask = crop_t2_star(self.t2star, self.sc_seg, box_size=75)

        self.treated_target = sct.extract_fname(
            self.t2star)[1] + '_seg_in_croped.nii.gz'

        self.level_fname = None
        if t2_data is not None:
            sct.run('cp ../' + t2_data[0] + ' ./' + self.t2)
            sct.run('cp ../' + t2_data[1] + ' ./' + self.t2_seg)
            sct.run('cp ../' + t2_data[2] + ' ./' + self.t2_landmarks)

            self.level_fname = compute_level_file(self.t2star, self.sc_seg,
                                                  self.t2, self.t2_seg,
                                                  self.t2_landmarks)
def create_cross(contrast):
    # Define list to gather all distances
    list_distances_1 = []
    list_distances_2 = []
    for i in range(0,len(SUBJECTS_LIST)):
        subject = SUBJECTS_LIST[i][0]

        # go to output folder
        print '\nGo to output folder '+ PATH_OUTPUT + '/subjects/'+ subject+ '/' + contrast
        os.chdir(PATH_OUTPUT + '/subjects/' + subject + '/' + contrast )

        # Calculate distances between (last label and bottom) and (first label and top)
        print '\nCalculating distances between (last label and bottom) and (first label and top)...'
        img_label = nibabel.load('labels_vertebral_dilated_reg_2point_crop.nii.gz')
        data_labels = img_label.get_data()
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('labels_vertebral_dilated_reg_2point_crop.nii.gz')
        X, Y, Z = (data_labels > 0).nonzero()
        list_coordinates = [([X[i], Y[i], Z[i], data_labels[X[i], Y[i], Z[i]]]) for i in range(0, len(X))]
        for i in range(len(list_coordinates)):
            if list_coordinates[i][3] == 1:
                coordinates_first_label = list_coordinates[i]
            if list_coordinates[i][3] == 20:
                coordinates_last_label = list_coordinates[i]
        # Distance 1st label top
        distance_1 = nz - 1 - coordinates_first_label[2]
        distance_2 = nz - 1 - coordinates_last_label[2]

        # Complete list to gather all distances
        list_distances_1.append(distance_1)
        list_distances_2.append(distance_2)

        # Create a cross on each subject at first and last labels
        print '\nCreating a cross at first and last labels...'
        os.system('sct_create_cross.py -i data_RPI_crop_normalized_straight_crop.nii.gz -x ' +str(int(round(nx/2.0)))+' -y '+str(int(round(ny/2.0)))+ ' -s '+str(coordinates_last_label[2])+ ' -e '+ str(coordinates_first_label[2]))

        # Write into a txt file the list of distances
        # os.chdir('../')
        # f_distance = open('list_distances.txt', 'w')
        # f_distance.write(str(distance_1))
        # f_distance.write(' ')
        # f_distance.write(str(distance_2))
        # f_distance.write('\n')

    # Calculate mean cross height for template and create file of reference
    print '\nCalculating mean cross height for the template and creating the reference file...'
    mean_distance_1 = int(round(sum(list_distances_1)/len(list_distances_1)))
    mean_distance_2 = int(round(sum(list_distances_2)/len(list_distances_2)))
    L = height_of_template_space - 2 * mean_distance_2
    H = height_of_template_space - 2 * mean_distance_1
    os.chdir(path_sct+'/dev/template_creation')
    os.system('sct_create_cross.py -i template_landmarks-mm.nii.gz -x ' +str(x_size_of_template_space/2)+' -y '+str(y_size_of_template_space/2)+ ' -s '+str(L)+ ' -e '+ str(H))
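
# A condensed sketch of the label bookkeeping in create_cross: find the z index of the first (value 1) and last
# (value 20) vertebral labels and their distance to the top slice. The function name is hypothetical; data_labels
# is assumed to be a numpy array containing both label values.
def label_distances_to_top(data_labels):
    nz = data_labels.shape[2]
    x, y, z = (data_labels > 0).nonzero()
    values = data_labels[x, y, z]
    z_first = int(z[values == 1][0])
    z_last = int(z[values == 20][0])
    return nz - 1 - z_first, nz - 1 - z_last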
    def writeCenterline(self, output_file_name=None):
        # Compute the centerline and write the float coordinates into a txt file

        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(self.list_file[0])

        # Define output image (size matters)
        image_concatenation = self.list_image[0].copy()
        image_concatenation.data *= 0
        image_output = self.list_image[0].copy()
        image_output.data *= 0
        # Concatenate all files by addition
        for i in range(0, len(self.list_image)):
            for s in range(0, nz) :
                image_concatenation.data[:,:,s] = image_concatenation.data[:,:,s] + self.list_image[i].data[:,:,s] #* (1/len(self.list_image))


        # get center of mass of the centerline/segmentation
        sct.printv('\nGet center of mass of the concatenated file...')
        z_centerline = [iz for iz in range(0, nz, 1) if image_concatenation.data[:, :, iz].any()]
        nz_nonz = len(z_centerline)
        x_centerline = [0 for iz in range(0, nz_nonz, 1)]
        y_centerline = [0 for iz in range(0, nz_nonz, 1)]


        # Calculate centerline coordinates and create image of the centerline
        for iz in range(0, nz_nonz, 1):
            x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(image_concatenation.data[:, :, z_centerline[iz]])

        #x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)

        points = [[x_centerline[n], y_centerline[n], z_centerline[n]] for n in range(nz_nonz)]
        nurbs = NURBS(3, 1000, points)
        P = nurbs.getCourbe3D()
        x_centerline_fit = P[0]
        y_centerline_fit = P[1]
        z_centerline_fit = P[2]

        # Create output text file
        if output_file_name is not None:
            file_name = output_file_name
        else:
            file_name = 'generated_centerline.txt'

        sct.printv('\nWrite text file...')
        #file_results = open("../"+file_name, 'w')
        file_results = open(file_name, 'w')
        for i in range(0, z_centerline_fit.shape[0], 1):
            file_results.write(str(int(z_centerline_fit[i])) + ' ' + str(x_centerline_fit[i]) + ' ' + str(y_centerline_fit[i]) + '\n')
        file_results.close()
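
# The per-slice center-of-mass extraction used above, as a standalone sketch (it assumes a 3D numpy array in
# which non-zero voxels mark the segmentation; the NURBS smoothing step is omitted):
from scipy import ndimage

def centerline_from_mask(data):
    z_nonzero = [iz for iz in range(data.shape[2]) if data[:, :, iz].any()]
    centers = [ndimage.center_of_mass(data[:, :, iz]) for iz in z_nonzero]
    return [(x, y, z) for (x, y), z in zip(centers, z_nonzero)]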
def align_vertebrae(contrast):
    for i in range(0,len(SUBJECTS_LIST)):
        subject = SUBJECTS_LIST[i][0]

        # go to output folder
        print '\nGo to output folder '+ PATH_OUTPUT + '/subjects/'+subject+ '/' + contrast + '\n'
        os.chdir(PATH_OUTPUT + '/subjects/' + subject + '/' + contrast)

        print '\nAligning vertebrae for subject '+subject+'...'
        sct.printv('\nsct_align_vertebrae.py -i data_RPI_crop_normalized_straight_crop_2temp.nii.gz -l ' + PATH_OUTPUT + '/subjects/' + subject + '/' + contrast + '/labels_vertebral_dilated_reg_2point_crop_2temp.nii.gz -R ' +PATH_OUTPUT +'/labels_vertebral_' + contrast + '/template_landmarks.nii.gz -o '+ subject+'_aligned.nii.gz -t SyN -w spline')
        os.system('sct_align_vertebrae.py -i data_RPI_crop_normalized_straight_crop_2temp.nii.gz -l ' + PATH_OUTPUT + '/subjects/' + subject + '/' + contrast + '/labels_vertebral_dilated_reg_2point_crop_2temp.nii.gz -R ' +PATH_OUTPUT +'/labels_vertebral_' + contrast + '/template_landmarks.nii.gz -o '+ subject+'_aligned.nii.gz -t SyN -w spline')

        # # Normalize intensity of result
        # print'\nNormalizing intensity of results...'
        # sct.run('sct_normalize.py -i '+subject+'_aligned_' + contrast + '.nii.gz')

        # Inform that the results for the subject are ready
        print'\nThe results for subject '+subject+' are ready. You can visualize them by typing: fslview '+subject+'_aligned_normalized.nii.gz'

        # Copy final results into final results
        if not os.path.isdir(PATH_OUTPUT +'/Final_results'):
            os.makedirs(PATH_OUTPUT +'/Final_results')
        sct.run('cp '+subject+'_aligned.nii.gz ' +PATH_OUTPUT +'/Final_results/'+subject+'_aligned_' + contrast + '.nii.gz')
        # sct.run('cp '+subject+'_aligned_normalized.nii.gz ' +PATH_OUTPUT +'/Final_results/'+subject+'_aligned_normalized_' + contrast + '.nii.gz')

        #Save png images of the results into a different folder
        print '\nSaving png image of the final result into ' + PATH_OUTPUT +'/Image_results...'
        if not os.path.isdir(PATH_OUTPUT +'/Image_results'):
            os.makedirs(PATH_OUTPUT +'/Image_results')
        f = nibabel.load(PATH_OUTPUT +'/Final_results/'+subject+'_aligned_' + contrast + '.nii.gz')
        data = f.get_data()
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(PATH_OUTPUT +'/Final_results/'+subject+'_aligned_' + contrast + '.nii.gz')
        sagital_middle = nx / 2
        coronal_middle = ny / 2
        sagittal = data[sagital_middle, :, :].T
        coronal = data[:, coronal_middle, :].T
        fig, ax = plt.subplots(1, 2)
        ax[0].imshow(sagittal, cmap='gray', origin='lower')
        ax[0].set_title('sagittal')
        ax[1].imshow(coronal, cmap='gray', origin='lower')
        ax[1].set_title('coronal')
        for i in range(2):
            ax[i].set_axis_off()
        fig1 = plt.gcf()
        #plt.show()
        fig1.savefig(PATH_OUTPUT +'/Image_results'+'/'+subject+'_aligned_' + contrast + '.png', format='png')
def align_vertebrae(contrast):
    for i in range(0,len(SUBJECTS_LIST)):
        subject = SUBJECTS_LIST[i][0]

        # go to output folder
        print '\nGo to output folder '+ PATH_OUTPUT + '/subjects/'+subject+ '/' + contrast + '\n'
        os.chdir(PATH_OUTPUT + '/subjects/' + subject + '/' + contrast)

        print '\nAligning vertebrae for subject '+subject+'...'
        sct.printv('\nsct_align_vertebrae.py -i data_RPI_crop_normalized_straight_crop_2temp.nii.gz -l ' + PATH_OUTPUT + '/subjects/' + subject + '/' + contrast + '/labels_vertebral_dilated_reg_2point_crop_2temp.nii.gz -R ' +PATH_OUTPUT +'/labels_vertebral_' + contrast + '/template_landmarks.nii.gz -o '+ subject+'_aligned.nii.gz -t SyN -w spline')
        os.system('sct_align_vertebrae.py -i data_RPI_crop_normalized_straight_crop_2temp.nii.gz -l ' + PATH_OUTPUT + '/subjects/' + subject + '/' + contrast + '/labels_vertebral_dilated_reg_2point_crop_2temp.nii.gz -R ' +PATH_OUTPUT +'/labels_vertebral_' + contrast + '/template_landmarks.nii.gz -o '+ subject+'_aligned.nii.gz -t SyN -w spline')

        # Change image type from float64 to uint16
        sct.run('sct_change_image_type.py -i ' + subject+'_aligned.nii.gz -o ' + subject+'_aligned.nii.gz -t uint16')

        # Inform that the results for the subject are ready
        print'\nThe results for subject '+subject+' are ready. You can visualize them by typing: fslview '+subject+'_aligned_normalized.nii.gz'

        # Copy final results into final results
        if not os.path.isdir(PATH_OUTPUT +'/Final_results'):
            os.makedirs(PATH_OUTPUT +'/Final_results')
        sct.run('cp '+subject+'_aligned.nii.gz ' +PATH_OUTPUT +'/Final_results/'+subject+'_aligned_' + contrast + '.nii.gz')

        #Save png images of the results into a different folder
        print '\nSaving png image of the final result into ' + PATH_OUTPUT +'/Image_results...'
        if not os.path.isdir(PATH_OUTPUT +'/Image_results'):
            os.makedirs(PATH_OUTPUT +'/Image_results')
        f = nibabel.load(PATH_OUTPUT +'/Final_results/'+subject+'_aligned_' + contrast + '.nii.gz')
        data = f.get_data()
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(PATH_OUTPUT +'/Final_results/'+subject+'_aligned_' + contrast + '.nii.gz')
        sagital_middle = nx / 2
        coronal_middle = ny / 2
        sagittal = data[sagital_middle, :, :].T
        coronal = data[:, coronal_middle, :].T
        fig, ax = plt.subplots(1, 2)
        ax[0].imshow(sagittal, cmap='gray', origin='lower')
        ax[0].set_title('sagittal')
        ax[1].imshow(coronal, cmap='gray', origin='lower')
        ax[1].set_title('coronal')
        for i in range(2):
            ax[i].set_axis_off()
        fig1 = plt.gcf()
        fig1.savefig(PATH_OUTPUT +'/Image_results'+'/'+subject+'_aligned_' + contrast + '.png', format='png')
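
# A trimmed-down version of the PNG snapshot step used in align_vertebrae: take the middle sagittal and coronal
# slices of a volume and save them side by side (a sketch; the file names are hypothetical and get_data() follows
# the nibabel usage above):
import nibabel as nib
import matplotlib.pyplot as plt

def save_midplane_png(fname_nifti, fname_png):
    data = nib.load(fname_nifti).get_data()
    nx, ny = data.shape[0], data.shape[1]
    fig, ax = plt.subplots(1, 2)
    ax[0].imshow(data[nx // 2, :, :].T, cmap='gray', origin='lower')
    ax[0].set_title('sagittal')
    ax[1].imshow(data[:, ny // 2, :].T, cmap='gray', origin='lower')
    ax[1].set_title('coronal')
    for a in ax:
        a.set_axis_off()
    fig.savefig(fname_png, format='png')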
def compute_length(fname_segmentation, remove_temp_files, verbose=0):
    from math import sqrt

    # Extract path, file and extension
    fname_segmentation = os.path.abspath(fname_segmentation)
    path_data, file_data, ext_data = sct.extract_fname(fname_segmentation)

    # create temporary folder
    path_tmp = "tmp." + time.strftime("%y%m%d%H%M%S")
    sct.run("mkdir " + path_tmp)

    # copy files into tmp folder
    sct.run("cp " + fname_segmentation + " " + path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    # Change orientation of the input centerline into RPI
    sct.printv("\nOrient centerline to RPI orientation...", param.verbose)
    fname_segmentation_orient = "segmentation_rpi" + ext_data
    set_orientation(file_data + ext_data, "RPI", fname_segmentation_orient)

    # Get dimension
    sct.printv("\nGet dimensions...", param.verbose)
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_segmentation_orient)
    sct.printv(".. matrix size: " + str(nx) + " x " + str(ny) + " x " + str(nz), param.verbose)
    sct.printv(".. voxel size:  " + str(px) + "mm x " + str(py) + "mm x " + str(pz) + "mm", param.verbose)

    # smooth segmentation/centerline
    # x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = smooth_centerline(fname_segmentation_orient, param, 'hanning', 1)
    x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
        fname_segmentation_orient, type_window="hanning", window_length=80, algo_fitting="hanning", verbose=verbose
    )
    # compute length of centerline
    result_length = 0.0
    for i in range(len(x_centerline_fit) - 1):
        result_length += sqrt(
            ((x_centerline_fit[i + 1] - x_centerline_fit[i]) * px) ** 2
            + ((y_centerline_fit[i + 1] - y_centerline_fit[i]) * py) ** 2
            + ((z_centerline[i + 1] - z_centerline[i]) * pz) ** 2
        )

    return result_length
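
# The length computation in compute_length reduces to a discrete arc length in millimetres; a minimal sketch
# assuming three coordinate lists (in voxels) and voxel sizes px, py, pz in mm:
from math import sqrt

def centerline_length(xs, ys, zs, px, py, pz):
    length = 0.0
    for i in range(len(xs) - 1):
        length += sqrt(((xs[i + 1] - xs[i]) * px) ** 2
                       + ((ys[i + 1] - ys[i]) * py) ** 2
                       + ((zs[i + 1] - zs[i]) * pz) ** 2)
    return length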
    def cross(self):
        image_output = Image(self.image_input)
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(self.image_input.absolutepath)

        coordinates_input = self.image_input.getNonZeroCoordinates()
        d = self.cross_radius  # cross radius in mm
        dx = d / px  # cross radius in voxels along x
        dy = d / py  # cross radius in voxels along y

        # for all points with non-zeros neighbors, force the neighbors to 0
        for coord in coordinates_input:
            image_output.data[coord.x][coord.y][coord.z] = 0  # remove point on the center of the spinal cord
            image_output.data[coord.x][coord.y + dy][
                coord.z] = coord.value * 10 + 1  # add point at distance from center of spinal cord
            image_output.data[coord.x + dx][coord.y][coord.z] = coord.value * 10 + 2
            image_output.data[coord.x][coord.y - dy][coord.z] = coord.value * 10 + 3
            image_output.data[coord.x - dx][coord.y][coord.z] = coord.value * 10 + 4

            # dilate cross to 3x3
            if self.dilate:
                image_output.data[coord.x - 1][coord.y + dy - 1][coord.z] = image_output.data[coord.x][coord.y + dy - 1][coord.z] = \
                    image_output.data[coord.x + 1][coord.y + dy - 1][coord.z] = image_output.data[coord.x + 1][coord.y + dy][coord.z] = \
                    image_output.data[coord.x + 1][coord.y + dy + 1][coord.z] = image_output.data[coord.x][coord.y + dy + 1][coord.z] = \
                    image_output.data[coord.x - 1][coord.y + dy + 1][coord.z] = image_output.data[coord.x - 1][coord.y + dy][coord.z] = \
                    image_output.data[coord.x][coord.y + dy][coord.z]
                image_output.data[coord.x + dx - 1][coord.y - 1][coord.z] = image_output.data[coord.x + dx][coord.y - 1][coord.z] = \
                    image_output.data[coord.x + dx + 1][coord.y - 1][coord.z] = image_output.data[coord.x + dx + 1][coord.y][coord.z] = \
                    image_output.data[coord.x + dx + 1][coord.y + 1][coord.z] = image_output.data[coord.x + dx][coord.y + 1][coord.z] = \
                    image_output.data[coord.x + dx - 1][coord.y + 1][coord.z] = image_output.data[coord.x + dx - 1][coord.y][coord.z] = \
                    image_output.data[coord.x + dx][coord.y][coord.z]
                image_output.data[coord.x - 1][coord.y - dy - 1][coord.z] = image_output.data[coord.x][coord.y - dy - 1][coord.z] = \
                    image_output.data[coord.x + 1][coord.y - dy - 1][coord.z] = image_output.data[coord.x + 1][coord.y - dy][coord.z] = \
                    image_output.data[coord.x + 1][coord.y - dy + 1][coord.z] = image_output.data[coord.x][coord.y - dy + 1][coord.z] = \
                    image_output.data[coord.x - 1][coord.y - dy + 1][coord.z] = image_output.data[coord.x - 1][coord.y - dy][coord.z] = \
                    image_output.data[coord.x][coord.y - dy][coord.z]
                image_output.data[coord.x - dx - 1][coord.y - 1][coord.z] = image_output.data[coord.x - dx][coord.y - 1][coord.z] = \
                    image_output.data[coord.x - dx + 1][coord.y - 1][coord.z] = image_output.data[coord.x - dx + 1][coord.y][coord.z] = \
                    image_output.data[coord.x - dx + 1][coord.y + 1][coord.z] = image_output.data[coord.x - dx][coord.y + 1][coord.z] = \
                    image_output.data[coord.x - dx - 1][coord.y + 1][coord.z] = image_output.data[coord.x - dx - 1][coord.y][coord.z] = \
                    image_output.data[coord.x - dx][coord.y][coord.z]

        return image_output
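
# A compact sketch of the cross placement above: given a point and a radius in mm, mark four points at +/- the
# radius (converted to voxels) along x and y. The function name and example values are hypothetical; like the
# code above, no bounds checking is done.
import numpy as np

def place_cross(data, x, y, z, value, radius_mm, px, py):
    dx, dy = int(round(radius_mm / px)), int(round(radius_mm / py))
    data[x, y, z] = 0
    data[x, y + dy, z] = value * 10 + 1
    data[x + dx, y, z] = value * 10 + 2
    data[x, y - dy, z] = value * 10 + 3
    data[x - dx, y, z] = value * 10 + 4
    return data

# Example: place_cross(np.zeros((50, 50, 10)), 25, 25, 5, 3, 2.0, 0.5, 0.5)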
    def compute(self):
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(self.list_file[0])

        # Define output image (size matters)
        image_concatenation = self.list_image[0].copy()
        image_concatenation.data *= 0
        image_output = self.list_image[0].copy()
        image_output.data *= 0
        # Concatenate all files by addition
        for i in range(0, len(self.list_image)):
            for s in range(0, nz) :
                image_concatenation.data[:,:,s] = image_concatenation.data[:,:,s] + self.list_image[i].data[:,:,s] #* (1/len(self.list_image))


        # get center of mass of the centerline/segmentation
        sct.printv('\nGet center of mass of the concatenated file...')
        z_centerline = [iz for iz in range(0, nz, 1) if image_concatenation.data[:, :, iz].any()]

        nz_nonz = len(z_centerline)
        x_centerline = [0 for iz in range(0, nz_nonz, 1)]
        y_centerline = [0 for iz in range(0, nz_nonz, 1)]


        # Calculate centerline coordinates and create image of the centerline
        for iz in range(0, nz_nonz, 1):
            x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(image_concatenation.data[:, :, z_centerline[iz]])

        points = [[x_centerline[n],y_centerline[n], z_centerline[n]] for n in range(len(z_centerline))]
        nurbs = NURBS(3, 1000, points)
        P = nurbs.getCourbe3D()
        x_centerline_fit = P[0]
        y_centerline_fit = P[1]
        z_centerline_fit = P[2]

        #x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)

        for iz in range(0, z_centerline_fit.shape[0], 1):
            image_output.data[x_centerline_fit[iz], y_centerline_fit[iz], z_centerline_fit[iz]] = 1


        return image_output
    def pretreat(self):
        import scipy.ndimage.filters as scp_filters
        '''
        status, orientation = sct.run('sct_orientation.py -i ' + self.image.path + self.image.file_name + self.image.ext)
        orientation = orientation[4:7]
        if orientation != 'RPI':
            sct.run('sct_orientation.py -i ' + self.image.path + self.image.file_name + self.image.ext + ' -s RPI')
        fname = self.image.file_name[:-7] + '_RPI.nii.gz'
        '''
        fname = self.image.file_name

        pretreated = self.image.copy()
        pretreated.file_name = fname

        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(pretreated.file_name)
        sc_size = 3  # in mm
        sc_npix = ((sc_size/px) + (sc_size/py))/2.0
        pretreated.data = scp_filters.gaussian_filter(pretreated.data, sigma=sc_npix)
        pretreated.file_name = pretreated.file_name + '_gaussian'
        pretreated.save()
        return pretreated
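
# The sigma used in pretreat is expressed in voxels: for a 3 mm spinal cord and, say, a 0.5 mm in-plane
# resolution, sc_npix = ((3 / 0.5) + (3 / 0.5)) / 2.0 = 6 voxels (the 0.5 mm value is only an example).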
    def __init__(self, target_fname, sc_seg_fname, t2_data=None, denoising=True):

        self.t2star = 't2star.nii.gz'
        self.sc_seg = 't2star_sc_seg.nii.gz'
        self.t2 = 't2.nii.gz'
        self.t2_seg = 't2_seg.nii.gz'
        self.t2_landmarks = 't2_landmarks.nii.gz'
        self.resample_to = 0.3

        sct.run('cp ../' + target_fname + ' ./' + self.t2star)
        sct.run('cp ../' + sc_seg_fname + ' ./' + self.sc_seg)

        nx, ny, nz, nt, self.original_px, self.original_py, pz, pt = sct.get_dimension(self.t2star)

        if round(self.original_px, 2) != self.resample_to or round(self.original_py, 2) != self.resample_to:
            self.t2star = resample_image(self.t2star, npx=self.resample_to, npy=self.resample_to)
            self.sc_seg = resample_image(self.sc_seg, binary=True, npx=self.resample_to, npy=self.resample_to)

        t2star_im = Image(self.t2star)
        if denoising:
            t2star_im.denoise_ornlm()
            t2star_im.save()
            self.t2star = t2star_im.file_name + t2star_im.ext
        '''
        status, t2_star_orientation = sct.run('sct_orientation -i ' + self.t2star)
        self.original_orientation = t2_star_orientation[4:7]
        '''
        self.original_orientation = t2star_im.orientation

        self.square_mask = crop_t2_star(self.t2star, self.sc_seg, box_size=75)

        self.treated_target = sct.extract_fname(self.t2star)[1] + '_seg_in_croped.nii.gz'

        self.level_fname = None
        if t2_data is not None:
            sct.run('cp ../' + t2_data[0] + ' ./' + self.t2)
            sct.run('cp ../' + t2_data[1] + ' ./' + self.t2_seg)
            sct.run('cp ../' + t2_data[2] + ' ./' + self.t2_landmarks)

            self.level_fname = compute_level_file(self.t2star, self.sc_seg, self.t2, self.t2_seg, self.t2_landmarks)
    def loadFromPath(self, path, verbose):
        """
        This function loads an image from an absolute path using the nibabel library.
        :param path: path of the file from which the image will be loaded
        :return:
        """
        from nibabel import load, spatialimages
        from sct_utils import check_file_exist, printv, extract_fname, get_dimension
        from sct_orientation import get_orientation

        check_file_exist(path, verbose=verbose)
        try:
            im_file = load(path)
        except spatialimages.ImageFileError:
            printv('Error: make sure ' + path + ' is an image.', 1, 'error')
        self.orientation = get_orientation(path)
        self.data = im_file.get_data()
        self.hdr = im_file.get_header()
        self.absolutepath = path
        self.path, self.file_name, self.ext = extract_fname(path)
        nx, ny, nz, nt, px, py, pz, pt = get_dimension(path)
        self.dim = [nx, ny, nz]
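
# get_dimension is an SCT helper; a rough equivalent with plain nibabel reads the matrix size and voxel size
# directly from the header (a sketch; the file name is hypothetical):
import nibabel as nib

img = nib.load('t2.nii.gz')
nx, ny, nz = img.shape[:3]
px, py, pz = img.get_header().get_zooms()[:3]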
def test(data_path):

    # parameters
    folder_data = "t2/"
    file_data = ["t2.nii.gz", "t2_seg.nii.gz"]

    # test normal crop
    cmd = (
        "sct_crop_image -i "
        + data_path
        + folder_data
        + file_data[0]
        + " -o cropped_normal.nii.gz -dim 1 -start 10 -end 50"
    )

    status, output = sct.run(cmd, 0)

    if status == 0:
        # check if cropping was correct
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension("cropped_normal.nii.gz")
        if ny != 41:
            status = 1

    return status, output
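
# Why ny == 41 is the expected value in the test above: the crop keeps indices 10 through 50 inclusive along
# dimension 1, i.e. 50 - 10 + 1 = 41 slices.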
def filter_2Dgaussian(input_padded_file,
                      size_filter,
                      output_file_name='Result'):
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(input_padded_file)
    print nx, ny, nz, nt, px, py, pz, pt
    gaussian_filter = sct_create_mask.create_mask2d(
        center=((int(size_filter / px) - 1.0) / 2.0,
                (int(size_filter / py) - 1.0) / 2.0),
        shape='gaussian',
        size=size_filter,
        nx=int(size_filter / px),
        ny=int(size_filter / py))  #pb center
    # note: the multiplicative factor of the standard Gaussian filter was left out (create_mask2d does not include it)
    print(int(size_filter / px) - 1.0) / 2.0, (int(size_filter / py) -
                                               1.0) / 2.0
    print int(size_filter / px), int(size_filter / py)

    plt.plot(gaussian_filter)
    plt.grid()
    plt.show()
    #center=(int(size_filter/px)/2.0,int(size_filter/py)/2.0)

    #Pad
    #gaussian_filter_pad = 'pad_' + gaussian_filter
    #sct.run('sct_c2d ' + gaussian_filter + ' -pad ' + pad + 'x0vox ' + pad + 'x' + pad + 'x0vox 0 -o ' + gaussian_filter_pad)   #+ pad+ 'x'

    image_input_padded_file = Image(input_padded_file)

    print('1: numpy.sum(image_input_padded_file.data[:,:,:])',
          numpy.sum(image_input_padded_file.data[:, :, :]))

    #Create the output file
    #im_output=image_input_padded_file
    im_output = image_input_padded_file.data * 0  # here, image_input_padded_file.data is also set to zero
    #im_output_freq=image_input_padded_file
    im_output_freq = image_input_padded_file.data * 0

    #Create padded filter in frequency domain
    gaussian_filter_freq = numpy.fft.fft2(
        gaussian_filter,
        s=(image_input_padded_file.data.shape[0],
           image_input_padded_file.data.shape[1]))

    plt.plot(gaussian_filter_freq)
    plt.grid()
    plt.show()

    hauteur_image = image_input_padded_file.data.shape[2]

    print('2: numpy.sum(image_input_padded_file.data[:,:,:])',
          numpy.sum(image_input_padded_file.data[:, :, :]))

    #Apply 2D filter to every slice of the image
    for i in range(hauteur_image):
        image_input_padded_file_frequentiel = numpy.fft.fft2(
            image_input_padded_file.data[:, :, i], axes=(0, 1))
        im_output_freq[:, :,
                       i] = gaussian_filter_freq * image_input_padded_file_frequentiel
        im_output[:, :, i] = numpy.fft.ifft2(im_output_freq[:, :, i],
                                             axes=(0, 1))

    print('numpy.sum(im_output[:,:,:])', numpy.sum(im_output[:, :, :]))

    #Save the file
    #im_output.setFileName(output_file_name)
    #im_output.save('minimize')

    # Generate the filtered volume as a NIFTI file with the right header
    path_spgr, file_name, ext_spgr = sct.extract_fname(input_padded_file)
    fname_output = path_spgr + output_file_name + ext_spgr
    sct.printv('Generate the NIFTI file with the right header...')
    # Associate the header of the input file with the filtered data
    hdr = nib.load(input_padded_file).get_header()
    img_with_hdr = nib.Nifti1Image(im_output, None, hdr)
    # Save the filtered volume
    nib.save(
        img_with_hdr, fname_output
    )  # problem: during tests, the Result file is saved inside the temporary folder (e.g. tmp.150317111945)

    return img_with_hdr
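
# The core of the slice-wise frequency-domain filtering above, as a minimal sketch (the kernel and volume are
# hypothetical numpy arrays; multiplying the FFTs corresponds to a circular convolution in the image domain):
import numpy as np

def filter_slices_fft(volume, kernel2d):
    out = np.zeros(volume.shape, dtype=float)
    kernel_freq = np.fft.fft2(kernel2d, s=volume.shape[:2])
    for k in range(volume.shape[2]):
        slice_freq = np.fft.fft2(volume[:, :, k])
        out[:, :, k] = np.real(np.fft.ifft2(kernel_freq * slice_freq))
    return out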
def main():

    # Initialization
    path_script = os.path.dirname(__file__)
    fsloutput = 'export FSLOUTPUTTYPE=NIFTI; ' # for faster processing, all outputs are in NIFTI
    # THIS DOES NOT WORK ON MY LAPTOP: path_sct = os.environ['SCT_DIR'] # path to spinal cord toolbox
    path_sct = path_script[:-8] # TODO: make it cleaner!
    fname_data = ''
    fname_bvecs = ''
    verbose = param.verbose
    start_time = time.time()

    # Parameters for debug mode
    if param.debug:
        fname_data = os.path.expanduser("~")+'/code/spinalcordtoolbox_dev/testing/data/errsm_22/dmri/dmri.nii.gz'
        fname_bvecs = os.path.expanduser("~")+'/code/spinalcordtoolbox_dev/testing/data/errsm_22/dmri/bvecs.txt'
        verbose = 1

    # Check input parameters
    try:
        opts, args = getopt.getopt(sys.argv[1:],'hb:i:v:')
    except getopt.GetoptError:
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ("-b"):
            fname_bvecs = arg
        elif opt in ("-i"):
            fname_data = arg
        elif opt in ('-v'):
            verbose = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_data == '' or fname_bvecs == '':
        usage()

    # check existence of input files
    sct.check_file_exist(fname_data)
    sct.check_file_exist(fname_bvecs)

    # print arguments
    print '\nCheck parameters:'
    print '.. DWI data:             '+fname_data
    print '.. bvecs file:           '+fname_bvecs

    # Extract path, file and extension
    path_data, file_data, ext_data = sct.extract_fname(fname_data)

    # create temporary folder
    path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir '+path_tmp)

    # copy files into tmp folder
    sct.run('cp '+fname_data+' '+path_tmp)
    sct.run('cp '+fname_bvecs+' '+path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    # Get size of data
    print '\nGet dimensions data...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_data)
    print '.. '+str(nx)+' x '+str(ny)+' x '+str(nz)+' x '+str(nt)

    # Open bvecs file
    bvecs = []
    with open(fname_bvecs) as f:
        for line in f:
            bvecs_new = map(float, line.split())
            bvecs.append(bvecs_new)

    # Check if bvecs file is nx3
    if not len(bvecs[0][:]) == 3:
        print 'WARNING: bvecs file is 3xn instead of nx3. Consider using sct_dmri_transpose_bvecs'
        # transpose bvecs
        bvecs = zip(*bvecs)

    # Identify b=0 and DW images
    print '\nIdentify b=0 and DW images...'
    index_b0 = []
    index_dwi = []
    for it in xrange(0,nt):
        if math.sqrt(math.fsum([i**2 for i in bvecs[it]])) < 0.01:
            index_b0.append(it)
        else:
            index_dwi.append(it)
    nb_b0 = len(index_b0)
    nb_dwi = len(index_dwi)
    print '.. Number of b=0: '+str(nb_b0)+' '+str(index_b0)
    print '.. Number of DWI: '+str(nb_dwi)+' '+str(index_dwi)

    #TODO: check if number of bvecs and nt match

    # Split into T dimension
    print '\nSplit along T dimension...'
    sct.run(fsloutput+' fslsplit '+fname_data+' data_splitT')

    # retrieve output names
    status, output = sct.run('ls data_splitT*.*')
    file_data_split = output.split()
    # Remove .nii extension
    file_data_split = [file_data_split[i].replace('.nii','') for i in xrange (0,len(file_data_split))]

    # Merge b=0 images
    print '\nMerge b=0...'
    cmd = fsloutput+'fslmerge -t b0'
    for it in xrange(0,nb_b0):
        cmd += ' '+file_data_split[index_b0[it]]
    sct.run(cmd)

    # Merge DWI images
    print '\nMerge DWI...'
    cmd = fsloutput+'fslmerge -t dwi'
    for it in xrange(0,nb_dwi):
        cmd += ' '+file_data_split[index_dwi[it]]
    sct.run(cmd)

    # come back to parent folder
    os.chdir('..')

    # Generate output files
    print('\nGenerate output files...')
    sct.generate_output_file(path_tmp+'/b0.nii',path_data,'b0',ext_data)
    sct.generate_output_file(path_tmp+'/dwi.nii',path_data,'dwi',ext_data)

    # Remove temporary files
    print('\nRemove temporary files...')
    sct.run('rm -rf '+path_tmp)

    # display elapsed time
    elapsed_time = time.time() - start_time
    print '\nFinished! Elapsed time: '+str(int(round(elapsed_time)))+'s'

    # to view results
    print '\nTo view results, type:'
    print 'fslview b0 dwi &\n'
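
# Identifying b=0 volumes reduces to checking the norm of each gradient direction; a small sketch assuming
# bvecs is a list of [x, y, z] directions (one per volume), using the same 0.01 threshold as above:
import math

def split_b0_dwi(bvecs):
    index_b0, index_dwi = [], []
    for it, vec in enumerate(bvecs):
        if math.sqrt(sum(v ** 2 for v in vec)) < 0.01:
            index_b0.append(it)
        else:
            index_dwi.append(it)
    return index_b0, index_dwi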
def main():

    #Initialization
    fname = ''
    verbose = param.verbose

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hi:v:')
    except getopt.GetoptError:
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ("-i"):
            fname = arg
        elif opt in ('-v'):
            verbose = int(arg)

    # display usage if a mandatory argument is not provided
    if fname == '':
        usage()

    # check existence of input files
    print '\nCheck if file exists ...'

    sct.check_file_exist(fname)

    # Display arguments
    print '\nCheck input arguments...'
    print '  Input volume ...................... ' + fname
    print '  Verbose ........................... ' + str(verbose)

    # create temporary folder
    path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir ' + path_tmp)

    fname = os.path.abspath(fname)
    path_data, file_data, ext_data = sct.extract_fname(fname)

    # copy files into tmp folder
    sct.run('cp ' + fname + ' ' + path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    # Get size of data
    print '\nGet dimensions of template...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname)
    print '.. ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt)

    # extract left side and right side
    sct.run('sct_crop_image -i ' + fname + ' -o left.nii.gz -dim 0 -start ' +
            str(int(0)) + ' -end ' + str(int(floor(nx / 2) - 1)))
    sct.run('sct_crop_image -i ' + fname + ' -o right.nii.gz -dim 0 -start ' +
            str(int(floor(nx / 2))) + ' -end ' + str(int(nx - 1)))

    # create mirror right
    right = nibabel.load('right.nii.gz')
    data_right = right.get_data()
    hdr_right = right.get_header()

    nx_r, ny_r, nz_r, nt_r, px_r, py_r, pz_r, pt_r = sct.get_dimension(
        'right.nii.gz')

    mirror_right = data_right * 0

    for i in xrange(nx_r):
        for j in xrange(ny_r):
            for k in xrange(nz_r):

                mirror_right[i, j, k] = data_right[(nx_r - 1) - i, j, k]

    print '\nSave volume ...'

    img = nibabel.Nifti1Image(mirror_right, None, hdr_right)
    file_name = 'mirror_right.nii.gz'
    nibabel.save(img, file_name)

    #copy header of left to mirror right
    sct.run('fslcpgeom left.nii.gz mirror_right.nii.gz')

    # compute transfo from left to mirror right
    #MI [fixed,moving]
    ### Because this step takes time, a precomputed output is available on guillimin at /home/django/jtouati/data/test_templateANTS/final_preprocessed/MI/test/tmp.141015123447

    #
    cmd = 'isct_antsRegistration \
    --dimensionality 3 \
    --transform Syn[0.5,3,0] \
    --metric MI[mirror_right.nii.gz,left.nii.gz,1,32] \
    --convergence 50x20 \
    --shrink-factors 4x1 \
    --smoothing-sigmas 1x1mm \
    --Restrict-Deformation 1x1x0 \
    --output [l2r,l2r.nii.gz]'

    status, output = sct.run(cmd)
    if verbose:
        print output

    # outputs are: l2r0InverseWarp.nii.gz l2r.nii.gz l2r0Warp.nii.gz

    # separate the 2 warping fields along the 3 directions
    status, output = sct.run(
        'isct_c3d -mcs l2r0Warp.nii.gz -oo l2rwarpx.nii.gz l2rwarpy.nii.gz l2rwarpz.nii.gz'
    )
    status, output = sct.run(
        'isct_c3d -mcs l2r0InverseWarp.nii.gz -oo l2rinvwarpx.nii.gz l2rinvwarpy.nii.gz l2rinvwarpz.nii.gz'
    )
    print 'Loading ..'
    # load warping fields
    warpx = nibabel.load('l2rwarpx.nii.gz')
    data_warpx = warpx.get_data()
    hdr_warpx = warpx.get_header()

    warpy = nibabel.load('l2rwarpy.nii.gz')
    data_warpy = warpy.get_data()
    hdr_warpy = warpy.get_header()

    warpz = nibabel.load('l2rwarpz.nii.gz')
    data_warpz = warpz.get_data()
    hdr_warpz = warpz.get_header()

    invwarpx = nibabel.load('l2rinvwarpx.nii.gz')
    data_invwarpx = invwarpx.get_data()
    hdr_invwarpx = invwarpx.get_header()

    invwarpy = nibabel.load('l2rinvwarpy.nii.gz')
    data_invwarpy = invwarpy.get_data()
    hdr_invwarpy = invwarpy.get_header()

    invwarpz = nibabel.load('l2rinvwarpz.nii.gz')
    data_invwarpz = invwarpz.get_data()
    hdr_invwarpz = invwarpz.get_header()
    print 'Creating..'
    # create demi warping fields
    data_warpx = (data_warpx - data_warpx[::-1, :, :]) / 2
    data_warpy = (data_warpy + data_warpy[::-1, :, :]) / 2
    data_warpz = (data_warpz + data_warpz[::-1, :, :]) / 2
    data_invwarpx = (data_invwarpx - data_invwarpx[::-1, :, :]) / 2
    data_invwarpy = (data_invwarpy + data_invwarpy[::-1, :, :]) / 2
    data_invwarpz = (data_invwarpz + data_invwarpz[::-1, :, :]) / 2
    print 'Saving ..'
    # save demi warping fields
    img = nibabel.Nifti1Image(data_warpx, None, hdr_warpx)
    file_name = 'warpx_demi.nii.gz'
    nibabel.save(img, file_name)

    img = nibabel.Nifti1Image(data_warpy, None, hdr_warpy)
    file_name = 'warpy_demi.nii.gz'
    nibabel.save(img, file_name)

    img = nibabel.Nifti1Image(data_warpz, None, hdr_warpz)
    file_name = 'warpz_demi.nii.gz'
    nibabel.save(img, file_name)

    img = nibabel.Nifti1Image(data_invwarpx, None, hdr_invwarpx)
    file_name = 'invwarpx_demi.nii.gz'
    nibabel.save(img, file_name)

    img = nibabel.Nifti1Image(data_invwarpy, None, hdr_invwarpy)
    file_name = 'invwarpy_demi.nii.gz'
    nibabel.save(img, file_name)

    img = nibabel.Nifti1Image(data_invwarpz, None, hdr_invwarpz)
    file_name = 'invwarpz_demi.nii.gz'
    nibabel.save(img, file_name)
    print 'Copy ..'
    # copy transform
    status, output = sct.run(
        'isct_c3d l2rwarpx.nii.gz warpx_demi.nii.gz -copy-transform -o warpx_demi.nii.gz'
    )
    status, output = sct.run(
        'isct_c3d l2rwarpy.nii.gz warpy_demi.nii.gz -copy-transform -o warpy_demi.nii.gz'
    )
    status, output = sct.run(
        'isct_c3d l2rwarpz.nii.gz warpz_demi.nii.gz -copy-transform -o warpz_demi.nii.gz'
    )
    status, output = sct.run(
        'isct_c3d l2rinvwarpx.nii.gz invwarpx_demi.nii.gz -copy-transform -o invwarpx_demi.nii.gz'
    )
    status, output = sct.run(
        'isct_c3d l2rinvwarpy.nii.gz invwarpy_demi.nii.gz -copy-transform -o invwarpy_demi.nii.gz'
    )
    status, output = sct.run(
        'isct_c3d l2rinvwarpz.nii.gz invwarpz_demi.nii.gz -copy-transform -o invwarpz_demi.nii.gz'
    )

    # combine warping fields
    print 'Combine ..'
    sct.run(
        'isct_c3d warpx_demi.nii.gz warpy_demi.nii.gz warpz_demi.nii.gz -omc 3 warpl2r_demi.nii.gz'
    )
    sct.run(
        'isct_c3d invwarpx_demi.nii.gz invwarpy_demi.nii.gz invwarpz_demi.nii.gz -omc 3 invwarpl2r_demi.nii.gz'
    )

    #warpl2r_demi.nii.gz invwarpl2r_demi.nii.gz

    # apply demi warping fields
    sct.run(
        'sct_apply_transfo -i left.nii.gz -d left.nii.gz -w warpl2r_demi.nii.gz -o left_demi.nii.gz'
    )
    sct.run(
        'sct_apply_transfo -i mirror_right.nii.gz -d mirror_right.nii.gz -w invwarpl2r_demi.nii.gz -o mirror_right_demi.nii.gz'
    )

    #unmirror right

    demi_right = nibabel.load('mirror_right_demi.nii.gz')
    data_demi_right = demi_right.get_data()
    hdr_demi_right = demi_right.get_header()

    nx_r, ny_r, nz_r, nt_r, px_r, py_r, pz_r, pt_r = sct.get_dimension(
        'mirror_right_demi.nii.gz')

    unmirror_right = data_demi_right * 0

    for i in xrange(nx_r):
        for j in xrange(ny_r):
            for k in xrange(nz_r):

                unmirror_right[i, j, k] = data_demi_right[(nx_r - 1) - i, j, k]

    print '\nSave volume ...'

    img = nibabel.Nifti1Image(unmirror_right, None, hdr_right)
    file_name = 'un_mirror_right.nii.gz'
    nibabel.save(img, file_name)

    sct.run(
        'fslmaths left_demi.nii.gz -add un_mirror_right.nii.gz symetrize_template.nii.gz'
    )
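
# The voxel-by-voxel mirroring loops above can be written as a single numpy slice; a sketch assuming data is a
# 3D numpy array (the flip is along the first, x, axis):
def mirror_x(data):
    return data[::-1, :, :].copy()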
def main():

    # Initialization
    fname_anat = ''
    fname_centerline = ''
    centerline_fitting = ''
    start_time = time.time()

    # Parameters initialization
    # Width and length of xy zone to smooth in terms of voxels
    x_width_sc = 15
    y_width_sc = 17
    # Length of the vector to smooth in terms of number of voxels
    smooth_extend = 81
    # Standard deviation for Gaussian kernel (in terms of number of voxels) of the filter to apply
    sigma = 16

    # Check input param
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hi:c:f:')
    except getopt.GetoptError as err:
        print str(err)
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ('-i'):
            fname_anat = arg
        elif opt in ('-c'):
            fname_centerline = arg
        elif opt in ('-f'):
            centerline_fitting = str(arg)


    # Display usage if a mandatory argument is not provided
    if fname_anat == '' or fname_centerline == '':
        print '\n\nNot all mandatory arguments were provided\n\n'
        usage()

    # Display usage if optional arguments are not correctly provided
    if centerline_fitting == '':
        centerline_fitting = 'splines'
    elif centerline_fitting != 'splines' and centerline_fitting != 'polynome':
        print '\n\nThe -f argument is not valid\n\n'
        usage()

    # Check existence of input files
    sct.check_file_exist(fname_anat)
    sct.check_file_exist(fname_centerline)

    # Extract path/file/extension of the original image file
    path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
    # Extract path/file/extension of the centerline
    path_centerline, file_centerline, ext_centerline = sct.extract_fname(fname_centerline)

    # Get input image orientation
    status, output = sct.run('sct_orientation -i ' + fname_anat + ' -get')
    input_image_orientation = output[-3:]
    # Get centerline orientation
    status, output = sct.run('sct_orientation -i ' + fname_centerline + ' -get')
    centerline_orientation = output[-3:]

    # Display arguments
    print '\nCheck input arguments...'
    print '.. Anatomical image:           ' + fname_anat
    print '.... orientation:              ' + input_image_orientation
    print '.. Centerline:                 ' + fname_centerline
    print '.... orientation:              ' + centerline_orientation
    print '.... Centerline fitting option:' + centerline_fitting


    # Change orientation of the input image into RPI
    print '\nOrient input volume to RPI orientation...'
    fname_anat_orient = path_anat+ file_anat+'_rpi'+ ext_anat
    sct.run('sct_orientation -i ' + fname_anat + ' -o ' + fname_anat_orient + ' -orientation RPI')
    # Change orientation of the input image into RPI
    print '\nOrient centerline to RPI orientation...'
    fname_centerline_orient = path_centerline + file_centerline +'_rpi' + ext_centerline
    sct.run('sct_orientation -i ' + fname_centerline + ' -o ' + fname_centerline_orient + ' -orientation RPI')

    # Read nifti anat file
    img = nibabel.load(fname_anat_orient)
    # 3d array for each x y z voxel values for the input nifti image
    data_anat = img.get_data()
    hdr_anat = img.get_header()

    # Read nifti centerline file
    img = nibabel.load(fname_centerline_orient)
    # 3d array for each x y z voxel values for the input nifti image
    data_centerline = img.get_data()
    hdr_centerline = img.get_header()

    # Get dimensions of input centerline
    print '\nGet dimensions of input centerline...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(str(fname_centerline_orient))

    # Make a copy of the anatomic image data to be smoothed
    data_anat_smoothed=numpy.copy(data_anat)
    data_anat_smoothed = data_anat_smoothed.astype(float)

    #Loop across z and associate x,y coordinate with the point having maximum intensity
    x_centerline = [0 for iz in range(0, nz, 1)]
    y_centerline = [0 for iz in range(0, nz, 1)]
    z_centerline = [iz for iz in range(0, nz, 1)]
    for iz in range(0, nz, 1):
        x_centerline[iz], y_centerline[iz] = numpy.unravel_index(data_centerline[:, :, iz].argmax(),
                                                                 data_centerline[:, :, iz].shape)
    del data_centerline


    # Fit the centerline points with the kind of curve given as argument of the script and return the new smoothed coordinates
    if centerline_fitting == 'splines':
        x_centerline_fit, y_centerline_fit = b_spline_centerline(x_centerline,y_centerline,z_centerline)
    elif centerline_fitting == 'polynome':
        x_centerline_fit, y_centerline_fit = polynome_centerline(x_centerline,y_centerline,z_centerline)



    # Loop across the z-slices of the spinal cord
    for z_cl in range(0,nz):

        # Find the nearest coordinates of the centerline point at this z-slice in the anatomic image
        x_cl_anat = round(x_centerline_fit[z_cl])
        y_cl_anat = round(y_centerline_fit[z_cl])



        # Loop across the points of the defined zone of the plane z=z_cl in the anatomical image
        for x in range(-int(math.floor(x_width_sc/2)+1),int(math.floor(x_width_sc/2)+1),1):
            for y in range(-int(math.floor(y_width_sc/2)+1),int(math.floor(y_width_sc/2)+1),1):

                # Initialization of the vector to smooth
                vector_to_smooth = numpy.zeros((1,smooth_extend))
                # Filling of the vector to smooth
                for i in range(0,smooth_extend):

                    # Calculate the z coordinate of the slice below and above the considered anatomic z-slice
                    zi = -int(math.floor(smooth_extend/2)) + i + z_cl
                    # CASE: slices beyond the edges of the image are clamped to the nearest valid slice
                    if zi < 0:
                        zi = 0
                    elif zi >= nz:
                        zi = nz - 1

                    # Calculate the (x,y) coordinates of the point to interpolate in slice zi
                    x_point_to_interpolate = x_cl_anat + x + (x_centerline_fit[zi]-x_centerline_fit[z_cl])
                    y_point_to_interpolate = y_cl_anat + y + (y_centerline_fit[zi]-y_centerline_fit[z_cl])


                    vector_to_smooth[0][i] = bilin_interpol(data_anat,x_point_to_interpolate,y_point_to_interpolate,zi)

                # Smooth the vector
                vector_smoothed = scipy.ndimage.filters.gaussian_filter1d(vector_to_smooth[0],sigma)

                # Replace the original value by the smoothed value
                data_anat_smoothed[x_cl_anat+x,y_cl_anat+y,z_cl] = vector_smoothed[int(math.floor(smooth_extend/2))]


    # Return the nifti corrected data
    hdr_anat.set_data_dtype('uint8') # set imagetype to uint8
    print '\nWrite NIFTI volumes...'
    img = nibabel.Nifti1Image(data_anat_smoothed, None, hdr_anat)
    nibabel.save(img, 'tmp.anat_smoothed.nii')
    fname_output_image = sct.generate_output_file('tmp.anat_smoothed.nii', './', file_anat+'_centerline_shift_smoothed', ext_anat)

    # Reorient the output image into its initial orientation
    print '\nReorient the output image into its initial orientation...'
    sct.run('sct_orientation -i '+fname_output_image +' -o ' +fname_output_image+' -orientation '+input_image_orientation)

    # Delete temporary files
    print '\nDelete temporary files...'
    sct.run('rm '+fname_anat_orient)
    sct.run('rm '+fname_centerline_orient)



    #Display elapsed time
    elapsed_time = time.time() - start_time
    print '\nFinished!'
    print '.. '+str(int(round(elapsed_time)))+'s\n'
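
The loop above samples the anatomic volume at fractional (x, y) positions through bilin_interpol, which is not shown in this excerpt. A minimal sketch of such a bilinear interpolation within slice z, assuming the requested point lies inside the slice, could look like:

import math

def bilin_interpol(data, x, y, z):
    # Sketch only (assumed behaviour, not the toolbox implementation):
    # bilinear interpolation of data[:, :, z] at the fractional position (x, y).
    x0, y0 = int(math.floor(x)), int(math.floor(y))
    x1 = min(x0 + 1, data.shape[0] - 1)
    y1 = min(y0 + 1, data.shape[1] - 1)
    dx, dy = x - x0, y - y0
    return (data[x0, y0, z] * (1 - dx) * (1 - dy)
            + data[x1, y0, z] * dx * (1 - dy)
            + data[x0, y1, z] * (1 - dx) * dy
            + data[x1, y1, z] * dx * dy)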
            sct.run(
                "sct_propseg -i data_RPI_registered.nii.gz -t t1 -init-centerline "
                + PATH_INFO
                + "/"
                + subject
                + "/centerline_propseg_RPI.nii.gz"
            )
        else:
            sct.printv("sct_propseg -i data_RPI_registered.nii.gz -t t1")
            sct.run("sct_propseg -i data_RPI_registered.nii.gz -t t1")

        # Erase 3 top and 3 bottom slices of the segmentation to avoid edge effects  (Done because propseg tends to diverge on edges)
        print "\nErasing 3 top and 3 bottom slices of the segmentation to avoid edge effects..."
        path_seg, file_seg, ext_seg = sct.extract_fname("data_RPI_registered_seg.nii.gz")
        image_seg = nibabel.load("data_RPI_registered_seg.nii.gz")
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension("data_RPI_registered_seg.nii.gz")
        data_seg = image_seg.get_data()
        hdr_seg = image_seg.get_header()
        # List slices that contain non zero values
        z_centerline = [iz for iz in range(0, nz, 1) if data_seg[:, :, iz].any()]

        for k in range(0, 3):
            data_seg[:, :, z_centerline[-1] - k] = 0
            if z_centerline[0] + k < nz:
                data_seg[:, :, z_centerline[0] + k] = 0
        img_seg = nibabel.Nifti1Image(data_seg, None, hdr_seg)
        nibabel.save(img_seg, file_seg + "_mod" + ext_seg)

        # crop segmentation along z(but keep same dimension)
        # input:
        # - data_RPI_registered_seg_mod.nii.gz
Example no. 25
            sct.printv(
                'sct_propseg -i data_RPI_crop.nii.gz -t t2 -init-centerline ' +
                PATH_INFO + '/' + subject + '/centerline_propseg_RPI.nii.gz')
            os.system(
                'sct_propseg -i data_RPI_crop.nii.gz -t t2 -init-centerline ' +
                PATH_INFO + '/' + subject + '/centerline_propseg_RPI.nii.gz')
        else:
            sct.printv('sct_propseg -i data_RPI_crop.nii.gz -t t2')
            os.system('sct_propseg -i data_RPI_crop.nii.gz -t t2')

        # Erase 3 top and 3 bottom slices of the segmentation to avoid edge effects  (Done because propseg tends to diverge on edges)
        print '\nErasing 3 top and 3 bottom slices of the segmentation to avoid edge effects...'
        path_seg, file_seg, ext_seg = sct.extract_fname(
            'data_RPI_crop_seg.nii.gz')
        image_seg = nibabel.load('data_RPI_crop_seg.nii.gz')
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(
            'data_RPI_crop_seg.nii.gz')
        data_seg = image_seg.get_data()
        hdr_seg = image_seg.get_header()
        # List slices that contain non zero values
        z_centerline = [
            iz for iz in range(0, nz, 1) if data_seg[:, :, iz].any()
        ]

        for k in range(0, 3):
            data_seg[:, :, z_centerline[-1] - k] = 0
            if z_centerline[0] + k < nz:
                data_seg[:, :, z_centerline[0] + k] = 0
        img_seg = nibabel.Nifti1Image(data_seg, None, hdr_seg)
        nibabel.save(img_seg, file_seg + '_mod' + ext_seg)
        #nibabel.save(img_seg, file_seg + ext_seg)
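
The "erase 3 top and 3 bottom slices" block above is repeated almost verbatim for each contrast in this file. As an illustration only (the helper name is hypothetical, not part of the toolbox), the same logic could be factored into a small function:

import nibabel

def erase_edge_slices(fname_seg, fname_out, n_slices=3):
    # Hypothetical helper: zero out the n_slices top and bottom non-empty slices
    # of a segmentation, since propseg tends to diverge near the edges.
    img = nibabel.load(fname_seg)
    data = img.get_data()
    hdr = img.get_header()
    nz = data.shape[2]
    z_nonzero = [iz for iz in range(nz) if data[:, :, iz].any()]
    for k in range(n_slices):
        data[:, :, z_nonzero[-1] - k] = 0
        if z_nonzero[0] + k < nz:
            data[:, :, z_nonzero[0] + k] = 0
    nibabel.save(nibabel.Nifti1Image(data, None, hdr), fname_out)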
Example no. 26
        centerline_proseg = False
        for k in range(len(list_dir)):
            if list_dir[k] == 'centerline_propseg_RPI.nii.gz':
                centerline_proseg = True
        if centerline_proseg:
            sct.printv('sct_propseg -i data_RPI_crop.nii.gz -t t1 -init-centerline ' + PATH_INFO + '/' + subject + '/centerline_propseg_RPI.nii.gz')
            sct.run('sct_propseg -i data_RPI_crop.nii.gz -t t1 -init-centerline ' + PATH_INFO + '/' + subject + '/centerline_propseg_RPI.nii.gz')
        else:
            sct.printv('sct_propseg -i data_RPI_crop.nii.gz -t t1')
            sct.run('sct_propseg -i data_RPI_crop.nii.gz -t t1')

        # Erase 3 top and 3 bottom slices of the segmentation to avoid edge effects  (Done because propseg tends to diverge on edges)
        print '\nErasing 3 top and 3 bottom slices of the segmentation to avoid edge effects...'
        path_seg, file_seg, ext_seg = sct.extract_fname('data_RPI_crop_seg.nii.gz')
        image_seg = nibabel.load('data_RPI_crop_seg.nii.gz')
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('data_RPI_crop_seg.nii.gz')
        data_seg = image_seg.get_data()
        hdr_seg = image_seg.get_header()
        # List slices that contain non zero values
        z_centerline = [iz for iz in range(0, nz, 1) if data_seg[:, :, iz].any()]

        for k in range(0, 3):
            data_seg[:, :, z_centerline[-1] - k] = 0
            if z_centerline[0] + k < nz:
                data_seg[:, :, z_centerline[0] + k] = 0
        img_seg = nibabel.Nifti1Image(data_seg, None, hdr_seg)
        nibabel.save(img_seg, file_seg + '_mod' + ext_seg)

        # crop segmentation (but keep same dimension)
        # input:
        # - data_crop_denoised_seg.nii.gz
def generate_warping_field(im_dest,
                           x_trans,
                           y_trans,
                           theta_rot=None,
                           center_rotation=None,
                           matrix_def=None,
                           fname='warping_field.nii.gz'):
    from nibabel import load
    from math import cos, sin
    from numpy import matrix, zeros

    #Make sure image is in rpi format

    file_dest = load(im_dest)
    hdr_file_dest = file_dest.get_header()
    hdr_warp = hdr_file_dest.copy()

    # Get image dimensions
    print '\nGet image dimensions of destination image...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(im_dest)
    print '.. matrix size: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz)
    print '.. voxel size:  ' + str(px) + 'mm x ' + str(py) + 'mm x ' + str(
        pz) + 'mm'

    #Center of rotation
    if center_rotation is None:
        x_a = int(round(nx))  #int(round(nx/2))-200
        y_a = 0  #int(round(ny/2))
    else:
        x_a = center_rotation[0]
        y_a = center_rotation[1]

    # Calculate displacement for each voxel
    data_warp = zeros((nx, ny, nz, 1, 3))
    # matrix_pass = matrix([[1,0,-x_a],[0,1,-y_a],[0,0,1]])
    # matrix_pass_inv = matrix([[1,0,x_a],[0,1,y_a],[0,0,1]])
    if theta_rot is not None:
        for k in range(nz):
            for i in range(nx):
                for j in range(ny):
                    # matrix_rot = matrix([[cos(theta_rot[k]), -sin(theta_rot[k]),0],[sin(theta_rot[k]), cos(theta_rot[k]),0],[0,0,1]])
                    # a = array(matrix_pass_inv * matrix_rot * matrix_pass * matrix([i,j,1]).T)
                    # data_warp[i,j,k,0,:] = (a.T[0] - array([i,j,1]) + array([x_trans[k],y_trans[k],0])).tolist()

                    # data_warp[i, j, k, 0, 0] = (cos(theta_rot[k])-1) * (i - x_a) - sin(theta_rot[k]) * (j - y_a) - x_trans[k]
                    # data_warp[i, j, k, 0, 1] = -(sin(theta_rot[k]) * (i - x_a) + (cos(theta_rot[k])-1) * (j - y_a)) + y_trans[k]

                    data_warp[i, j, k, 0, 0] = (cos(theta_rot[k]) - 1) * (i - x_a) + sin(theta_rot[k]) * (j - y_a) - x_trans[k]  #+ sin(theta_rot[k]) * (int(round(nx/2))-x_a)
                    data_warp[i, j, k, 0, 1] = sin(theta_rot[k]) * (i - x_a) - (cos(theta_rot[k]) - 1) * (j - y_a) + y_trans[k]  #- sin(theta_rot[k]) * (int(round(nx/2))-x_a)
                    # data_warp[i, j, k, 0, 0] = (cos(theta_rot[k])-1) * (i + x_a) - sin(theta_rot[k]) * (j + y_a) - x_trans[k]
                    # data_warp[i, j, k, 0, 1] = -sin(theta_rot[k]) * (-i - x_a) - (cos(theta_rot[k])+1) * (j + y_a) + y_trans[k]
                    data_warp[i, j, k, 0, 2] = 0
    if theta_rot is None and matrix_def is None:
        for i in range(nx):
            for j in range(ny):
                for k in range(nz):
                    data_warp[i, j, k, 0, 0] = x_trans[k]
                    data_warp[i, j, k, 0, 1] = y_trans[k]
                    data_warp[i, j, k, 0, 2] = 0
    if theta_rot is None and matrix_def is not None:
        matrix_def_0 = [matrix_def[j][0][0] for j in range(len(matrix_def))]
        matrix_def_1 = [matrix_def[j][0][1] for j in range(len(matrix_def))]
        matrix_def_2 = [matrix_def[j][1][0] for j in range(len(matrix_def))]
        matrix_def_3 = [matrix_def[j][1][1] for j in range(len(matrix_def))]
        for i in range(nx):
            for j in range(ny):
                for k in range(nz):
                    data_warp[i, j, k, 0, 0] = (matrix_def_0[i] - 1) * i + matrix_def_1[i] * j + x_trans[k]
                    data_warp[i, j, k, 0, 1] = matrix_def_2[i] * i + (matrix_def_3[i] - 1) * j + y_trans[k]
                    data_warp[i, j, k, 0, 2] = 0

    hdr_warp.set_intent('vector', (), '')
    hdr_warp.set_data_dtype('float32')
    img = nibabel.Nifti1Image(data_warp, None, hdr_warp)
    nibabel.save(img, fname)
    print '\nDONE ! Warping field generated: ' + fname
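
As a usage sketch only (the file name and translation values below are made up), a pure per-slice translation field for a destination image in RPI orientation could be generated with:

import nibabel

# Hypothetical call: constant shift along x for every slice of dest_RPI.nii.gz.
fname_dest = 'dest_RPI.nii.gz'  # assumed to exist
nz_dest = nibabel.load(fname_dest).get_data().shape[2]
x_trans = [2.0] * nz_dest  # per-slice displacement along x (in the warp's units)
y_trans = [0.0] * nz_dest
generate_warping_field(fname_dest, x_trans, y_trans, fname='warp_translation.nii.gz')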
def main():

    # get path of the toolbox
    status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
    print path_sct

    # Initialization
    fsloutput = 'export FSLOUTPUTTYPE=NIFTI; ' # for faster processing, all outputs are in NIFTI
    fname_data = ''
    fname_bvecs = ''
    fname_schedule = path_sct+'/flirtsch/schedule_TxTy.sch'
    interp = param.interp
    remove_temp_files = param.remove_temp_files
    verbose = param.verbose
    start_time = time.time()

    # Parameters for debug mode
    if param.debug:
        fname_data = path_sct+'/testing/data/errsm_23/dmri/dmri.nii.gz'
        fname_bvecs = path_sct+'/testing/data/errsm_23/dmri/bvecs.txt'
        interp = 'trilinear'
        remove_temp_files = 0
        verbose = 1

    # Check input parameters
    try:
        opts, args = getopt.getopt(sys.argv[1:],'hb:i:v:s:')
    except getopt.GetoptError:
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ("-b"):
            fname_bvecs = arg
        elif opt in ("-i"):
            fname_data = arg
        elif opt in ('-s'):
            interp = str(arg)
        elif opt in ('-v'):
            verbose = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_data == '' or fname_bvecs == '':
        usage()

    # check existence of input files
    sct.check_file_exist(fname_data)
    sct.check_file_exist(fname_bvecs)

    # print arguments
    print '\nCheck parameters:'
    print '.. DWI data:             '+fname_data
    print '.. bvecs file:           '+fname_bvecs
    print ''

    # Get full path
    fname_data = os.path.abspath(fname_data)
    fname_bvecs = os.path.abspath(fname_bvecs)

    # Extract path, file and extension
    path_data, file_data, ext_data = sct.extract_fname(fname_data)

    # create temporary folder
    path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir '+path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    # Get size of data
    print '\nGet dimensions data...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_data)
    print '.. '+str(nx)+' x '+str(ny)+' x '+str(nz)+' x '+str(nt)

    # Open bvecs file
    print '\nOpen bvecs file...'
    bvecs = []
    with open(fname_bvecs) as f:
        for line in f:
            bvecs_new = map(float, line.split())
            bvecs.append(bvecs_new)

    # Check if bvecs file is nx3
    if not len(bvecs[0][:]) == 3:
        print '.. WARNING: bvecs file is 3xn instead of nx3. Consider using sct_dmri_transpose_bvecs.'
        print 'Transpose bvecs...'
        # transpose bvecs
        bvecs = zip(*bvecs)

    # Identify b=0 and DW images
    print '\nIdentify b=0 and DW images...'
    index_b0 = []
    index_dwi = []
    for it in xrange(0,nt):
        if math.sqrt(math.fsum([i**2 for i in bvecs[it]])) < 0.01:
            index_b0.append(it)
        else:
            index_dwi.append(it)
    n_b0 = len(index_b0)
    n_dwi = len(index_dwi)
    print '.. Index of b=0:'+str(index_b0)
    print '.. Index of DWI:'+str(index_dwi)

    #TODO: check if number of bvecs and nt match

    # Split into T dimension
    print '\nSplit along T dimension...'
    #cmd = fsloutput+'fslsplit tmp.data tmp.data_splitT'
    status, output = sct.run(fsloutput+'fslsplit '+fname_data+' tmp.data_splitT')

    # retrieve output names
    status, output = sct.run('ls tmp.data_splitT*.*')
    file_data_split = output.split()
    # Remove .nii extension
    file_data_split = [file_data_split[i].replace('.nii','') for i in xrange (0,len(file_data_split))]

    # Merge b=0 images
    print '\nMerge b=0...'
    file_b0 = 'tmp.b0'
    cmd = fsloutput+'fslmerge -t '+file_b0
    for it in xrange(0,n_b0):
        cmd += ' '+file_data_split[index_b0[it]]
    #print('>> '+cmd)
    status, output = sct.run(cmd)

    # Merge DWI images
    print '\nMerge DWI...'
    file_dwi = 'tmp.dwi'
    cmd = fsloutput+'fslmerge -t '+file_dwi
    for it in xrange(0,n_dwi):
        cmd += ' '+file_data_split[index_dwi[it]]
    status, output = sct.run(cmd)

    # Average b=0 images
    print '\nAverage b=0...'
    file_b0_mean = 'tmp.b0_mean'
    cmd = fsloutput+'fslmaths '+file_b0+' -Tmean '+file_b0_mean
    status, output = sct.run(cmd)

    # Average DWI images
    print '\nAverage DWI...'
    file_dwi_mean = 'tmp.dwi_mean'
    cmd = fsloutput+'fslmaths '+file_dwi+' -Tmean '+file_dwi_mean
    status, output = sct.run(cmd)



    # REGISTER DWI TO THE MEAN DWI  -->  output transfo Tdwi
    # ---------------------------------------------------------------------------------------

    # loop across DWI data
    print '\nRegister DWI data to '+file_dwi_mean+'...'
    for it in xrange(0,n_dwi):
        # estimate transformation matrix
        file_target = file_dwi_mean
        file_mat = 'tmp.mat_'+str(index_dwi[it]).zfill(4)
        cmd = fsloutput+'flirt -in '+file_data_split[index_dwi[it]]+' -ref '+file_target+' -omat '+file_mat+' -cost normcorr -schedule '+fname_schedule+' -interp trilinear -out '+file_data_split[index_dwi[it]]+'_moco'
        status, output = sct.run(cmd)

    # Merge corrected DWI images
    print '\nMerge corrected DWI...'
    file_dwi = 'tmp.dwi_moco'
    cmd = fsloutput+'fslmerge -t '+file_dwi
    for it in xrange(0,n_dwi):
        cmd += ' '+file_data_split[index_dwi[it]]+'_moco'
    status, output = sct.run(cmd)

    # Average corrected DWI
    print '\nAverage corrected DWI...'
    file_dwi_mean = 'tmp.dwi_moco_mean'
    cmd = fsloutput+'fslmaths '+file_dwi+' -Tmean '+file_dwi_mean
    status, output = sct.run(cmd)


    # REGISTER B=0 DATA TO THE FIRST B=0  --> output transfo Tb0
    # ---------------------------------------------------------------------------------------
    print '\nRegister b=0 data to the first b=0...'
    for it in xrange(0,n_b0):
        # estimate transformation matrix
        file_target = file_data_split[int(index_b0[0])]
        file_mat = 'tmp.mat_'+str(index_b0[it]).zfill(4)
        cmd = fsloutput+'flirt -in '+file_data_split[index_b0[it]]+' -ref '+file_target+' -omat '+file_mat+' -cost normcorr -forcescaling -2D -out '+file_data_split[index_b0[it]]+'_moco'
        status, output = sct.run(cmd)

    # Merge corrected b=0 images
    print '\nMerge corrected b=0...'
    cmd = fsloutput+'fslmerge -t tmp.b0_moco'
    for it in xrange(0,n_b0):
        cmd += ' '+file_data_split[index_b0[it]]+'_moco'
    status, output = sct.run(cmd)

    # Average corrected b=0
    print '\nAverage corrected b=0...'
    cmd = fsloutput+'fslmaths tmp.b0_moco -Tmean tmp.b0_moco_mean'
    status, output = sct.run(cmd)


    # REGISTER MEAN DWI TO THE MEAN B=0  --> output transfo Tdwi2b0
    # ---------------------------------------------------------------------------------------
    print '\nRegister mean DWI to the mean b=0...'
    cmd = fsloutput+'flirt -in tmp.dwi_moco_mean -ref tmp.b0_moco_mean -omat tmp.mat_dwi2b0 -cost mutualinfo -forcescaling -dof 12 -2D -out tmp.dwi_mean_moco_reg2b0'
    status, output = sct.run(cmd)


    # COMBINE TRANSFORMATIONS
    # ---------------------------------------------------------------------------------------
    print '\nCombine all transformations...'
    # USE FSL convert_xfm: convert_xfm -omat AtoC.mat -concat BtoC.mat AtoB.mat
    # For DWI
    print '\n.. For DWI:'
    for it in xrange(0,n_dwi):
        cmd = 'convert_xfm -omat tmp.mat_final_'+str(index_dwi[it]).zfill(4)+' -concat tmp.mat_dwi2b0 tmp.mat_'+str(index_dwi[it]).zfill(4)
        status, output = sct.run(cmd)
    # For b=0 (don't concat because there is just one mat file -- just rename it)
    print '\n.. For b=0:'
    for it in xrange(0,n_b0):
        cmd = 'cp tmp.mat_'+str(index_b0[it]).zfill(4)+' tmp.mat_final_'+str(index_b0[it]).zfill(4)
        status, output = sct.run(cmd)


    # APPLY TRANSFORMATIONS
    # ---------------------------------------------------------------------------------------
    ## Split original data into T dimension
    #print '\nSplit original data along T dimension...'
    #cmd = fsloutput+'fslsplit '+fname_data+' tmp.data_raw_splitT'
    #print('>> '+cmd)
    #status, output = commands.getstatusoutput(cmd)

    #print '\nApply transformations to original data...'
    #for it in xrange(0,nt):
    #    cmd = fsloutput+'flirt -in tmp.data_raw_splitT'+str(it).zfill(4)+' -ref tmp.data_raw_splitT'+index_b0[0].zfill(4)+' -applyxfm -init tmp.mat_final_'+str(it).zfill(4)+' -out tmp.data_raw_splitT'+str(it).zfill(4)+'_moco'
    #    print('>> '+cmd)
    #    status, output = commands.getstatusoutput(cmd)
    #
    ## Merge corrected data
    #print '\nMerge corrected data...'
    #cmd = fsloutput+'fslmerge -t tmp.data_raw_moco'
    #for it in xrange(0,it):
    #    cmd += ' tmp.data_raw_splitT'+str(it).zfill(4)+'_moco'
    #print('>> '+cmd)
    #status, output = commands.getstatusoutput(cmd)

    print '\nApply transformations...'
    for it in xrange(0,nt):
        # -paddingsize 3 prevents from having missing slices at the edge
        cmd = fsloutput+'flirt -in tmp.data_splitT'+str(it).zfill(4)+' -ref tmp.data_splitT'+str(index_b0[0]).zfill(4)+' -applyxfm -init tmp.mat_final_'+str(it).zfill(4)+' -out tmp.data_splitT'+str(it).zfill(4)+'_moco -paddingsize 3'+' -interp '+interp
        status, output = sct.run(cmd)

    # Merge corrected data
    print '\nMerge all corrected data...'
    cmd = fsloutput+'fslmerge -t tmp.data_moco'
    for it in xrange(0,nt):
        cmd += ' tmp.data_splitT'+str(it).zfill(4)+'_moco'
    status, output = sct.run(cmd)

    # Merge corrected DWI images
    print '\nMerge corrected DWI...'
    cmd = fsloutput+'fslmerge -t tmp.dwi_moco'
    for it in xrange(0,n_dwi):
        cmd += ' tmp.data_splitT'+str(index_dwi[it]).zfill(4)+'_moco'
    status, output = sct.run(cmd)

    # Average corrected DWI
    print '\nAverage corrected DWI...'
    cmd = fsloutput+'fslmaths tmp.dwi_moco -Tmean tmp.dwi_moco_mean'
    status, output = sct.run(cmd)

    # Merge corrected b=0 images
    print '\nMerge corrected b=0...'
    cmd = fsloutput+'fslmerge -t tmp.b0_moco'
    for it in xrange(0,n_b0):
        cmd += ' tmp.data_splitT'+str(index_b0[it]).zfill(4)+'_moco'
    status, output = sct.run(cmd)

    # Average corrected b=0
    print '\nAverage corrected b=0...'
    cmd = fsloutput+'fslmaths tmp.b0_moco -Tmean tmp.b0_moco_mean'
    status, output = sct.run(cmd)

    # Generate output files
    print('\nGenerate output files...')
    sct.generate_output_file('tmp.data_moco.nii',path_data,file_data+'_moco',ext_data)
    sct.generate_output_file('tmp.dwi_moco_mean.nii',path_data,'dwi_moco_mean',ext_data)
    sct.generate_output_file('tmp.b0_moco_mean.nii',path_data,'b0_moco_mean',ext_data)

    # come back to parent folder
    os.chdir('..')

    # Delete temporary files
    if remove_temp_files == 1:
        print '\nDelete temporary files...'
        sct.run('rm -rf '+path_tmp)

    # display elapsed time
    elapsed_time = time.time() - start_time
    print '\nFinished! Elapsed time: '+str(int(round(elapsed_time)))+'s'

    # to view results
    print '\nTo view results, type:'
    print 'fslview '+file_data+' '+file_data+'_moco &\n'
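
The b=0/DWI split used above (a volume is treated as b=0 when the norm of its bvec is below 0.01) can be isolated into a small helper; this sketch assumes the bvecs are already arranged as an n x 3 list:

import math

def split_b0_dwi(bvecs, threshold=0.01):
    # Sketch: classify each volume as b=0 or DW from the norm of its bvec.
    index_b0, index_dwi = [], []
    for it, bvec in enumerate(bvecs):
        if math.sqrt(math.fsum([i ** 2 for i in bvec])) < threshold:
            index_b0.append(it)
        else:
            index_dwi.append(it)
    return index_b0, index_dwi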
Example no. 29
    def getCenterline(self, type='', output_file_name=None, verbose=0):
        # Compute the centerline and save it into a image file of type "type"

        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(self.list_file[0])

        # Define output image (size matter)
        image_concatenation = self.list_image[0].copy()
        image_concatenation.data *= 0
        image_output = self.list_image[0].copy()
        image_output.data *= 0
        # Concatenate all files by addition
        for i in range(0, len(self.list_image)):
            for s in range(0, nz) :
                image_concatenation.data[:,:,s] = image_concatenation.data[:,:,s] + self.list_image[i].data[:,:,s] #* (1/len(self.list_image))
        print image_concatenation.data[:,:,414]

        # get center of mass of the centerline/segmentation
        sct.printv('\nGet center of mass of the concatenate file...')
        z_centerline = [iz for iz in range(0, nz, 1) if image_concatenation.data[:, :, iz].any()]
        nz_nonz = len(z_centerline)
        x_centerline = [0 for iz in range(0, nz_nonz, 1)]
        y_centerline = [0 for iz in range(0, nz_nonz, 1)]


        # Calculate centerline coordinates and create image of the centerline
        for iz in range(0, nz_nonz, 1):
            x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(image_concatenation.data[:, :, z_centerline[iz]])
        #x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)


        points = [[x_centerline[n], y_centerline[n], z_centerline[n]] for n in range(nz_nonz)]
        nurbs = NURBS(3, 1000, points, nbControl=None)
        P = nurbs.getCourbe3D()
        x_centerline_fit = P[0]
        y_centerline_fit = P[1]
        z_centerline_fit = P[2]

        if verbose == 1:
                import matplotlib.pyplot as plt

                #Creation of a vector x that takes into account the distance between the labels
                x_display = [0 for i in range(x_centerline_fit.shape[0])]
                y_display = [0 for i in range(y_centerline_fit.shape[0])]
                for i in range(0, nz_nonz, 1):
                    x_display[z_centerline[i]-z_centerline[0]] = x_centerline[i]
                    y_display[z_centerline[i]-z_centerline[0]] = y_centerline[i]

                plt.figure(1)
                plt.subplot(2,1,1)
                #plt.plot(z_centerline,x_centerline, 'ro')
                plt.plot(z_centerline_fit, x_display, 'ro')
                plt.plot(z_centerline_fit, x_centerline_fit)
                plt.xlabel("Z")
                plt.ylabel("X")
                plt.title("x and x_fit coordinates")

                plt.subplot(2,1,2)
                #plt.plot(z_centerline,y_centerline, 'ro')
                plt.plot(z_centerline_fit, y_display, 'ro')
                plt.plot(z_centerline_fit, y_centerline_fit)
                plt.xlabel("Z")
                plt.ylabel("Y")
                plt.title("y and y_fit coordinates")
                plt.show()


        for iz in range(0, z_centerline_fit.shape[0], 1):
            image_output.data[int(round(x_centerline_fit[iz])), int(round(y_centerline_fit[iz])), int(round(z_centerline_fit[iz]))] = 1

        #image_output.save(type)
        file_load = nibabel.load(self.list_file[0])
        data = file_load.get_data()
        hdr = file_load.get_header()

        print '\nWrite NIFTI volumes...'
        img = nibabel.Nifti1Image(image_output.data, None, hdr)
        if output_file_name is not None:
            file_name = output_file_name
        else:
            file_name = 'generated_centerline.nii.gz'
        nibabel.save(img,file_name)


        # to view results
        print '\nDone !'
        print '\nTo view results, type:'
        print 'fslview '+file_name+' &\n'
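
The NURBS class used above is not defined in this excerpt. As a rough stand-in for illustration (a simplification, not the method used here), the per-slice centers of mass can be smoothed with a low-order polynomial fit along z:

import numpy as np
from scipy import ndimage

def fit_centerline_polynomial(data, degree=3):
    # Sketch: per-slice center of mass of a binary mask, then polynomial fit along z.
    nz = data.shape[2]
    z_nonzero = [iz for iz in range(nz) if data[:, :, iz].any()]
    centers = [ndimage.measurements.center_of_mass(data[:, :, iz]) for iz in z_nonzero]
    x_cm = [c[0] for c in centers]
    y_cm = [c[1] for c in centers]
    x_fit = np.polyval(np.polyfit(z_nonzero, x_cm, degree), z_nonzero)
    y_fit = np.polyval(np.polyfit(z_nonzero, y_cm, degree), z_nonzero)
    return x_fit, y_fit, z_nonzero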
Example no. 30
            os.system("matlab_batcher.sh sct_get_centerline \"\'" +
                      name_anatomy_file + "\'\"")

            # Extract segmentation using propseg with spline-centerline
            sct.printv(
                'sct_propseg -i *_t2_crop.nii.gz -t t2 -init-centerline *t2_crop_centerline.nii'
            )
            os.system(
                'sct_propseg -i *_t2_crop.nii.gz -t t2 -init-centerline *t2_crop_centerline.nii'
            )

            # Erase 3 top and 3 bottom slices of the segmentation to avoid edge effects
            name_seg = glob('*t2_crop_seg.nii.gz')[0]
            path_seg, file_seg, ext_seg = sct.extract_fname(name_seg)
            image_seg = nibabel.load(name_seg)
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(name_seg)
            data_seg = image_seg.get_data()
            hdr_seg = image_seg.get_header()
            # List slices that contain non zero values
            z_centerline = [
                iz for iz in range(0, nz, 1) if data_seg[:, :, iz].any()
            ]
            for k in range(0, 3):
                data_seg[:, :, z_centerline[-1] - k] = 0
                if z_centerline[0] + k < nz:
                    data_seg[:, :, z_centerline[0] + k] = 0
            img_seg = nibabel.Nifti1Image(data_seg, None, hdr_seg)
            nibabel.save(img_seg, file_seg + '_mod' + ext_seg)

            # Add label files and preprocess data for template registration
            print '\nPreprocessing data from: ' + list_dir[
def register_images(im_input,
                    im_dest,
                    mask='',
                    paramreg=Paramreg(step='0',
                                      type='im',
                                      algo='Translation',
                                      metric='MI',
                                      iter='5',
                                      shrink='1',
                                      smooth='0',
                                      gradStep='0.5'),
                    remove_tmp_folder=1):

    path_i, root_i, ext_i = sct.extract_fname(im_input)
    path_d, root_d, ext_d = sct.extract_fname(im_dest)
    path_m, root_m, ext_m = sct.extract_fname(mask)

    # set metricSize
    if paramreg.metric == 'MI':
        metricSize = '32'  # corresponds to number of bins
    else:
        metricSize = '4'  # corresponds to radius (for CC, MeanSquares...)

    # initiate default parameters of antsRegistration transformation
    ants_registration_params = {
        'rigid': '',
        'affine': '',
        'compositeaffine': '',
        'similarity': '',
        'translation': '',
        'bspline': ',10',
        'gaussiandisplacementfield': ',3,0',
        'bsplinedisplacementfield': ',5,10',
        'syn': ',3,0',
        'bsplinesyn': ',3,32'
    }

    # Get image dimensions and retrieve nz
    print '\nGet image dimensions of destination image...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(im_dest)
    print '.. matrix size: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz)
    print '.. voxel size:  ' + str(px) + 'mm x ' + str(py) + 'mm x ' + str(
        pz) + 'mm'

    # Define x and y displacement as list
    x_displacement = [0 for i in range(nz)]
    y_displacement = [0 for i in range(nz)]
    theta_rotation = [0 for i in range(nz)]
    matrix_def = [0 for i in range(nz)]

    # create temporary folder
    print('\nCreate temporary folder...')
    path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
    sct.create_folder(path_tmp)
    print '\nCopy input data...'
    sct.run('cp ' + im_input + ' ' + path_tmp + '/' + root_i + ext_i)
    sct.run('cp ' + im_dest + ' ' + path_tmp + '/' + root_d + ext_d)
    if mask:
        sct.run('cp ' + mask + ' ' + path_tmp + '/mask.nii.gz')

    # go to temporary folder
    os.chdir(path_tmp)

    # Split input volume along z
    print '\nSplit input volume...'
    sct.run(sct.fsloutput + 'fslsplit ' + im_input + ' ' + root_i + '_z -z')
    #file_anat_split = ['tmp.anat_orient_z'+str(z).zfill(4) for z in range(0,nz,1)]

    # Split destination volume along z
    print '\nSplit destination volume...'
    sct.run(sct.fsloutput + 'fslsplit ' + im_dest + ' ' + root_d + '_z -z')
    #file_anat_split = ['tmp.anat_orient_z'+str(z).zfill(4) for z in range(0,nz,1)]

    # Split mask volume along z
    if mask:
        print '\nSplit mask volume...'
        sct.run(sct.fsloutput + 'fslsplit mask.nii.gz mask_z -z')
        #file_anat_split = ['tmp.anat_orient_z'+str(z).zfill(4) for z in range(0,nz,1)]

    im_dest_img = Image(im_dest)
    im_input_img = Image(im_input)
    coord_origin_dest = im_dest_img.transfo_pix2phys([[0, 0, 0]])
    coord_origin_input = im_input_img.transfo_pix2phys([[0, 0, 0]])
    coord_diff_origin_z = coord_origin_dest[0][2] - coord_origin_input[0][2]
    [[x_o, y_o, z_o]] = im_input_img.transfo_phys2pix([[0, 0, coord_diff_origin_z]])

    # loop across slices
    for i in range(nz):
        # set masking
        num = numerotation(i)
        num_2 = numerotation(int(num) + z_o)
        if mask:
            masking = '-x mask_z' + num + '.nii'
        else:
            masking = ''

        cmd = (
            'isct_antsRegistration '
            '--dimensionality 2 '
            '--transform ' + paramreg.algo + '[' + paramreg.gradStep +
            ants_registration_params[paramreg.algo.lower()] + '] '
            '--metric ' + paramreg.metric + '[' + root_d + '_z' + num +
            '.nii' + ',' + root_i + '_z' + num_2 + '.nii' + ',1,' +
            metricSize +
            '] '  #[fixedImage,movingImage,metricWeight +nb_of_bins (MI) or radius (other)
            '--convergence ' + paramreg.iter + ' '
            '--shrink-factors ' + paramreg.shrink + ' '
            '--smoothing-sigmas ' + paramreg.smooth + 'mm '
            #'--restrict-deformation 1x1x0 '    # how to restrict? should not restrict here, if transform is precised...?
            '--output [transform_' + num +
            '] '  #--> file.txt (contains Tx,Ty)    [outputTransformPrefix,<outputWarpedImage>,<outputInverseWarpedImage>]
            '--interpolation BSpline[3] ' + masking)

        try:
            sct.run(cmd)

            if paramreg.algo == 'Rigid' or paramreg.algo == 'Translation':
                f = 'transform_' + num + '0GenericAffine.mat'
                matfile = loadmat(f, struct_as_record=True)
                array_transfo = matfile['AffineTransform_double_2_2']
                if i == 20 or i == 40:
                    print i
                x_displacement[i] = -array_transfo[4][0]  #is it? or is it y?
                y_displacement[i] = array_transfo[5][0]
                theta_rotation[i] = asin(array_transfo[2])

            if paramreg.algo == 'Affine':
                f = 'transform_' + num + '0GenericAffine.mat'
                matfile = loadmat(f, struct_as_record=True)
                array_transfo = matfile['AffineTransform_double_2_2']
                x_displacement[i] = -array_transfo[4][0]  #is it? or is it y?
                y_displacement[i] = array_transfo[5][0]
                matrix_def[i] = [[array_transfo[0][0], array_transfo[1][0]],
                                 [array_transfo[2][0], array_transfo[3][0]]]  # how to know which one is which?

        except:
            if paramreg.algo == 'Rigid' or paramreg.algo == 'Translation':
                x_displacement[i] = x_displacement[i - 1]  #is it? or is it y?
                y_displacement[i] = y_displacement[i - 1]
                theta_rotation[i] = theta_rotation[i - 1]
            if paramreg.algo == 'Affine':
                x_displacement[i] = x_displacement[i - 1]
                y_displacement[i] = y_displacement[i - 1]
                matrix_def[i] = matrix_def[i - 1]

        # # get displacement form this slice and complete x and y displacement lists
        # with open('transform_'+num+'.csv') as f:
        #     reader = csv.reader(f)
        #     count = 0
        #     for line in reader:
        #         count += 1
        #         if count == 2:
        #             x_displacement[i] = line[0]
        #             y_displacement[i] = line[1]
        #             f.close()

        # # get matrix of transfo for a rigid transform   (issue: slicereg applies a rotation, i.e. the displacement is not homogeneous per slice)
        # # retrieving the displacement would not give a list but a warping field: better to retrieve the output matrix
        # # issue with smoothing the displacement per slice!!  the theta, tx, ty parameters can be smoothed instead
        # if paramreg.algo == 'Rigid' or paramreg.algo == 'Translation':
        #     f = 'transform_' +num+ '0GenericAffine.mat'
        #     matfile = loadmat(f, struct_as_record=True)
        #     array_transfo = matfile['AffineTransform_double_2_2']
        #     x_displacement[i] = -array_transfo[4][0]  #is it? or is it y?
        #     y_displacement[i] = array_transfo[5][0]
        #     theta_rotation[i] = acos(array_transfo[0])

        #TO DO: different treatment for other algo

    #Delete tmp folder
    os.chdir('../')
    if remove_tmp_folder:
        print('\nRemove temporary files...')
        sct.run('rm -rf ' + path_tmp)
    if paramreg.algo == 'Rigid':
        return x_displacement, y_displacement, theta_rotation  # check that the displacements are not inverted (x_disp = -x_disp...); theta is in radians
    if paramreg.algo == 'Translation':
        return x_displacement, y_displacement
    if paramreg.algo == 'Affine':
        return x_displacement, y_displacement, matrix_def
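
A possible chaining of the two functions defined in this file, shown as a sketch only (file names are placeholders and the parameters mirror the defaults above):

# Hypothetical example: slice-wise rigid registration, then conversion of the
# per-slice parameters into a warping field.
x_disp, y_disp, theta = register_images(
    'src.nii.gz', 'dest.nii.gz',
    paramreg=Paramreg(step='0', type='im', algo='Rigid', metric='MI',
                      iter='5', shrink='1', smooth='0', gradStep='0.5'))
generate_warping_field('dest.nii.gz', x_disp, y_disp, theta_rot=theta,
                       fname='warp_slicewise_rigid.nii.gz')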
def main():

    # get path of the toolbox
    status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
    print path_sct

    # Initialization
    fsloutput = 'export FSLOUTPUTTYPE=NIFTI; '  # for faster processing, all outputs are in NIFTI
    fname_data = ''
    fname_bvecs = ''
    fname_schedule = path_sct + '/flirtsch/schedule_TxTy.sch'
    interp = param.interp
    remove_temp_files = param.remove_temp_files
    verbose = param.verbose
    start_time = time.time()

    # Parameters for debug mode
    if param.debug:
        fname_data = path_sct + '/testing/data/errsm_23/dmri/dmri.nii.gz'
        fname_bvecs = path_sct + '/testing/data/errsm_23/dmri/bvecs.txt'
        interp = 'trilinear'
        remove_temp_files = 0
        verbose = 1

    # Check input parameters
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hb:i:v:s:')
    except getopt.GetoptError:
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ("-b"):
            fname_bvecs = arg
        elif opt in ("-i"):
            fname_data = arg
        elif opt in ('-s'):
            interp = str(arg)
        elif opt in ('-v'):
            verbose = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_data == '' or fname_bvecs == '':
        usage()

    # check existence of input files
    sct.check_file_exist(fname_data)
    sct.check_file_exist(fname_bvecs)

    # print arguments
    print '\nCheck parameters:'
    print '.. DWI data:             ' + fname_data
    print '.. bvecs file:           ' + fname_bvecs
    print ''

    # Get full path
    fname_data = os.path.abspath(fname_data)
    fname_bvecs = os.path.abspath(fname_bvecs)

    # Extract path, file and extension
    path_data, file_data, ext_data = sct.extract_fname(fname_data)

    # create temporary folder
    path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir ' + path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    # Get size of data
    print '\nGet dimensions data...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_data)
    print '.. ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt)

    # Open bvecs file
    print '\nOpen bvecs file...'
    bvecs = []
    with open(fname_bvecs) as f:
        for line in f:
            bvecs_new = map(float, line.split())
            bvecs.append(bvecs_new)

    # Check if bvecs file is nx3
    if not len(bvecs[0][:]) == 3:
        print '.. WARNING: bvecs file is 3xn instead of nx3. Consider using sct_dmri_transpose_bvecs.'
        print 'Transpose bvecs...'
        # transpose bvecs
        bvecs = zip(*bvecs)

    # Identify b=0 and DW images
    print '\nIdentify b=0 and DW images...'
    index_b0 = []
    index_dwi = []
    for it in xrange(0, nt):
        if math.sqrt(math.fsum([i**2 for i in bvecs[it]])) < 0.01:
            index_b0.append(it)
        else:
            index_dwi.append(it)
    n_b0 = len(index_b0)
    n_dwi = len(index_dwi)
    print '.. Index of b=0:' + str(index_b0)
    print '.. Index of DWI:' + str(index_dwi)

    #TODO: check if number of bvecs and nt match

    # Split into T dimension
    print '\nSplit along T dimension...'
    #cmd = fsloutput+'fslsplit tmp.data tmp.data_splitT'
    status, output = sct.run(fsloutput + 'fslsplit ' + fname_data +
                             ' tmp.data_splitT')

    # retrieve output names
    status, output = sct.run('ls tmp.data_splitT*.*')
    file_data_split = output.split()
    # Remove .nii extension
    file_data_split = [
        file_data_split[i].replace('.nii', '')
        for i in xrange(0, len(file_data_split))
    ]

    # Merge b=0 images
    print '\nMerge b=0...'
    file_b0 = 'tmp.b0'
    cmd = fsloutput + 'fslmerge -t ' + file_b0
    for it in xrange(0, n_b0):
        cmd += ' ' + file_data_split[index_b0[it]]
    #print('>> '+cmd)
    status, output = sct.run(cmd)

    # Merge DWI images
    print '\nMerge DWI...'
    file_dwi = 'tmp.dwi'
    cmd = fsloutput + 'fslmerge -t ' + file_dwi
    for it in xrange(0, n_dwi):
        cmd += ' ' + file_data_split[index_dwi[it]]
    status, output = sct.run(cmd)

    # Average b=0 images
    print '\nAverage b=0...'
    file_b0_mean = 'tmp.b0_mean'
    cmd = fsloutput + 'fslmaths ' + file_b0 + ' -Tmean ' + file_b0_mean
    status, output = sct.run(cmd)

    # Average DWI images
    print '\nAverage DWI...'
    file_dwi_mean = 'tmp.dwi_mean'
    cmd = fsloutput + 'fslmaths ' + file_dwi + ' -Tmean ' + file_dwi_mean
    status, output = sct.run(cmd)

    # REGISTER DWI TO THE MEAN DWI  -->  output transfo Tdwi
    # ---------------------------------------------------------------------------------------

    # loop across DWI data
    print '\nRegister DWI data to ' + file_dwi_mean + '...'
    for it in xrange(0, n_dwi):
        # estimate transformation matrix
        file_target = file_dwi_mean
        file_mat = 'tmp.mat_' + str(index_dwi[it]).zfill(4)
        cmd = fsloutput + 'flirt -in ' + file_data_split[index_dwi[
            it]] + ' -ref ' + file_target + ' -omat ' + file_mat + ' -cost normcorr -schedule ' + fname_schedule + ' -interp trilinear -out ' + file_data_split[
                index_dwi[it]] + '_moco'
        status, output = sct.run(cmd)

    # Merge corrected DWI images
    print '\nMerge corrected DWI...'
    file_dwi = 'tmp.dwi_moco'
    cmd = fsloutput + 'fslmerge -t ' + file_dwi
    for it in xrange(0, n_dwi):
        cmd += ' ' + file_data_split[index_dwi[it]] + '_moco'
    status, output = sct.run(cmd)

    # Average corrected DWI
    print '\nAverage corrected DWI...'
    file_dwi_mean = 'tmp.dwi_moco_mean'
    cmd = fsloutput + 'fslmaths ' + file_dwi + ' -Tmean ' + file_dwi_mean
    status, output = sct.run(cmd)

    # REGISTER B=0 DATA TO THE FIRST B=0  --> output transfo Tb0
    # ---------------------------------------------------------------------------------------
    print '\nRegister b=0 data to the first b=0...'
    for it in xrange(0, n_b0):
        # estimate transformation matrix
        file_target = file_data_split[int(index_b0[0])]
        file_mat = 'tmp.mat_' + str(index_b0[it]).zfill(4)
        cmd = fsloutput + 'flirt -in ' + file_data_split[index_b0[
            it]] + ' -ref ' + file_target + ' -omat ' + file_mat + ' -cost normcorr -forcescaling -2D -out ' + file_data_split[
                index_b0[it]] + '_moco'
        status, output = sct.run(cmd)

    # Merge corrected b=0 images
    print '\nMerge corrected b=0...'
    cmd = fsloutput + 'fslmerge -t tmp.b0_moco'
    for it in xrange(0, n_b0):
        cmd += ' ' + file_data_split[index_b0[it]] + '_moco'
    status, output = sct.run(cmd)

    # Average corrected b=0
    print '\nAverage corrected b=0...'
    cmd = fsloutput + 'fslmaths tmp.b0_moco -Tmean tmp.b0_moco_mean'
    status, output = sct.run(cmd)

    # REGISTER MEAN DWI TO THE MEAN B=0  --> output transfo Tdwi2b0
    # ---------------------------------------------------------------------------------------
    print '\nRegister mean DWI to the mean b=0...'
    cmd = fsloutput + 'flirt -in tmp.dwi_moco_mean -ref tmp.b0_moco_mean -omat tmp.mat_dwi2b0 -cost mutualinfo -forcescaling -dof 12 -2D -out tmp.dwi_mean_moco_reg2b0'
    status, output = sct.run(cmd)

    # COMBINE TRANSFORMATIONS
    # ---------------------------------------------------------------------------------------
    print '\nCombine all transformations...'
    # USE FSL convert_xfm: convert_xfm -omat AtoC.mat -concat BtoC.mat AtoB.mat
    # For DWI
    print '\n.. For DWI:'
    for it in xrange(0, n_dwi):
        cmd = 'convert_xfm -omat tmp.mat_final_' + str(
            index_dwi[it]).zfill(4) + ' -concat tmp.mat_dwi2b0 tmp.mat_' + str(
                index_dwi[it]).zfill(4)
        status, output = sct.run(cmd)
    # For b=0 (don't concat because there is just one mat file -- just rename it)
    print '\n.. For b=0:'
    for it in xrange(0, n_b0):
        cmd = 'cp tmp.mat_' + str(
            index_b0[it]).zfill(4) + ' tmp.mat_final_' + str(
                index_b0[it]).zfill(4)
        status, output = sct.run(cmd)

    # APPLY TRANSFORMATIONS
    # ---------------------------------------------------------------------------------------
    ## Split original data into T dimension
    #print '\nSplit original data along T dimension...'
    #cmd = fsloutput+'fslsplit '+fname_data+' tmp.data_raw_splitT'
    #print('>> '+cmd)
    #status, output = commands.getstatusoutput(cmd)

    #print '\nApply transformations to original data...'
    #for it in xrange(0,nt):
    #    cmd = fsloutput+'flirt -in tmp.data_raw_splitT'+str(it).zfill(4)+' -ref tmp.data_raw_splitT'+index_b0[0].zfill(4)+' -applyxfm -init tmp.mat_final_'+str(it).zfill(4)+' -out tmp.data_raw_splitT'+str(it).zfill(4)+'_moco'
    #    print('>> '+cmd)
    #    status, output = commands.getstatusoutput(cmd)
    #
    ## Merge corrected data
    #print '\nMerge corrected data...'
    #cmd = fsloutput+'fslmerge -t tmp.data_raw_moco'
    #for it in xrange(0,it):
    #    cmd += ' tmp.data_raw_splitT'+str(it).zfill(4)+'_moco'
    #print('>> '+cmd)
    #status, output = commands.getstatusoutput(cmd)

    print '\nApply transformations...'
    for it in xrange(0, nt):
        # -paddingsize 3 prevents from having missing slices at the edge
        cmd = fsloutput + 'flirt -in tmp.data_splitT' + str(it).zfill(
            4) + ' -ref tmp.data_splitT' + str(index_b0[0]).zfill(
                4) + ' -applyxfm -init tmp.mat_final_' + str(it).zfill(
                    4) + ' -out tmp.data_splitT' + str(it).zfill(
                        4) + '_moco -paddingsize 3' + ' -interp ' + interp
        status, output = sct.run(cmd)

    # Merge corrected data
    print '\nMerge all corrected data...'
    cmd = fsloutput + 'fslmerge -t tmp.data_moco'
    for it in xrange(0, nt):
        cmd += ' tmp.data_splitT' + str(it).zfill(4) + '_moco'
    status, output = sct.run(cmd)

    # Merge corrected DWI images
    print '\nMerge corrected DWI...'
    cmd = fsloutput + 'fslmerge -t tmp.dwi_moco'
    for it in xrange(0, n_dwi):
        cmd += ' tmp.data_splitT' + str(index_dwi[it]).zfill(4) + '_moco'
    status, output = sct.run(cmd)

    # Average corrected DWI
    print '\nAverage corrected DWI...'
    cmd = fsloutput + 'fslmaths tmp.dwi_moco -Tmean tmp.dwi_moco_mean'
    status, output = sct.run(cmd)

    # Merge corrected b=0 images
    print '\nMerge corrected b=0...'
    cmd = fsloutput + 'fslmerge -t tmp.b0_moco'
    for it in xrange(0, n_b0):
        cmd += ' tmp.data_splitT' + str(index_b0[it]).zfill(4) + '_moco'
    status, output = sct.run(cmd)

    # Average corrected b=0
    print '\nAverage corrected b=0...'
    cmd = fsloutput + 'fslmaths tmp.b0_moco -Tmean tmp.b0_moco_mean'
    status, output = sct.run(cmd)

    # Generate output files
    print('\nGenerate output files...')
    sct.generate_output_file('tmp.data_moco.nii', path_data,
                             file_data + '_moco', ext_data)
    sct.generate_output_file('tmp.dwi_moco_mean.nii', path_data,
                             'dwi_moco_mean', ext_data)
    sct.generate_output_file('tmp.b0_moco_mean.nii', path_data, 'b0_moco_mean',
                             ext_data)

    # come back to parent folder
    os.chdir('..')

    # Delete temporary files
    if remove_temp_files == 1:
        print '\nDelete temporary files...'
        sct.run('rm -rf ' + path_tmp)

    # display elapsed time
    elapsed_time = time.time() - start_time
    print '\nFinished! Elapsed time: ' + str(int(round(elapsed_time))) + 's'

    # to view results
    print '\nTo view results, type:'
    print 'fslview ' + file_data + ' ' + file_data + '_moco &\n'
Example no. 33
def curve_smoothing(file_seg_or_centerline, output_file_name,
                    number_mm):  # consider adding a parameter for the number of mm

    image_to_modify = Image(file_seg_or_centerline).copy()

    im_output = Image(file_seg_or_centerline).copy()
    im_output.data *= 0

    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(file_seg_or_centerline)
    # nx: number of pixels along x
    # px: size of one pixel (in mm)

    #Def of the window (hamming):  same size as the height of the image
    window_hamming = np.zeros(nz)
    for i in range(0, nz):
        window_hamming[i] = 0.54 - 0.46 * cos((2 * pi * i) / nz)

    window_rectangular = np.zeros(nz)
    for i in range(0, nz):
        if i in range(nz - 8, nz + 8):
            window_rectangular[i] = 1

    # Definition of the window (Hamming): 5 mm with 50 points
    window_hamming_local = np.zeros(10 * int(number_mm / pz))  # 5 mm = pz * (nb_pixels_for_5mm)
    for i in range(0, int(number_mm / pz)):
        for j in range(0, 10):
            point = i + j / 10.0
            print(point)
            window_hamming_local[i * 10 + j] = 0.54 - 0.46 * cos((2 * pi * point) / int(number_mm / pz))

    plt.figure(2)
    plt.plot(window_hamming_local[:])
    plt.show()

    # Definition of a vector "coordi" that gathers the result of the smoothing along z (refers to the coordinates along x or y)
    coordi_x = np.zeros(nz)  # y "fixed"
    coordi_y = np.zeros(nz)  # x "fixed"

    print("nz=", nz)
    print("pz=", pz, "int(", number_mm, "/pz)=", int(number_mm / pz))
    print(5 * 3.5, float(5 * 3.5))

    # Recompute the coordinates by applying the window to the neighbourhood
    X, Y, Z = np.nonzero((image_to_modify.data[:, :, :] > 0))

    print("Z[:]=", Z[:], "max(Z[:])=", max(Z[:]), "min(Z[:])=", min(Z[:]))

    plt.figure(1)
    plt.subplot(211)
    plt.plot(Z[:], X[:], 'ro')

    print("X.shape[0]=", X.shape[0], "Z.shape[0]=", Z.shape[0])
    if not X.any():
        print("The binary file is empty; no labels can be found.")
    for z in range(0, nz):  # z: position of the considered point (the one that receives the weighted sum)
        # Compute the position of the non-zero points of the image
        # for i in range(0, int(number_mm/pz)):   # i: offset between the weighted point and the point that receives the sum
        for i in range(-5, 5):
            coordi_x[z] += window_hamming_local[int((int(number_mm / pz) / 2.0) + i * 10) % (int(number_mm / pz) * 10)] * X[(z + i) % X.shape[0]]  # adjust the factor of 10 according to the number of mm
            #coordi_x[z] += X[(z+i)%X.shape[0]]/5.0

            coordi_y[z] += window_hamming_local[int((int(number_mm / pz) / 2.0) + i * 10) % (int(number_mm / pz) * 10)] * Y[(z + i) % Y.shape[0]]
        #print("coordi_x[z]=", coordi_x[z])

    plt.subplot(212)
    plt.plot(range(0, nz), coordi_x[:], 'ro')
    plt.show()
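
The windowed averaging above can be expressed more compactly with a normalized Hamming kernel and numpy.convolve; the sketch below is a simplified version of the same idea, not the original implementation:

import numpy as np

def smooth_coordinates(values, window_size):
    # Sketch: smooth a 1D array of per-slice coordinates with a normalized Hamming window.
    window = np.hamming(window_size)
    window /= window.sum()
    return np.convolve(values, window, mode='same')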
Example no. 34
def sct_moco_process_dmri(param, fname_data, fname_bvecs):
    
    fsloutput = 'export FSLOUTPUTTYPE=NIFTI; ' # for faster processing, all outputs are in NIFTI
    interp_final = param.interp

    dwi_group_size = param.dwi_group_size

    #path_tmp = param.path_tmp
    #path_script = param.path_script
    
    ## get path of the toolbox # TODO: no need to do that another time!
    #status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
    ## append path that contains scripts, to be able to load modules
    #sys.path.append(path_sct + '/scripts')
    #import sct_utils as sct
    
    # check existence of input files
    sct.check_file_exist(fname_data)
    sct.check_file_exist(fname_bvecs)
    
    # Get full path
    fname_data = os.path.abspath(fname_data)
    fname_bvecs = os.path.abspath(fname_bvecs)
    
    # Extract path, file and extension
    path_data, file_data, ext_data = sct.extract_fname(fname_data)
    
    file_b0 = 'b0'
    file_dwi = 'dwi'
    
    # Get size of data
    print '\nGet dimensions data...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_data)
    print '.. '+str(nx)+' x '+str(ny)+' x '+str(nz)+' x '+str(nt)

    # Open bvecs file
    print '\nOpen bvecs file...'
    bvecs = []
    with open(fname_bvecs) as f:
        for line in f:
            bvecs_new = map(float, line.split())
            bvecs.append(bvecs_new)
    
    # Check if bvecs file is nx3
    if not len(bvecs[0][:]) == 3:
        print '.. WARNING: bvecs file is 3xn instead of nx3. Consider using sct_dmri_transpose_bvecs.'
        print 'Transpose bvecs...'
        # transpose bvecs
        bvecs = zip(*bvecs)

    # Identify b=0 and DWI images
    print '\nIdentify b=0 and DWI images...'
    index_b0 = []
    index_dwi = []
    for it in xrange(0,nt):
        if math.sqrt(math.fsum([i**2 for i in bvecs[it]])) < 0.01:
            index_b0.append(it)
        else:
            index_dwi.append(it)
    n_b0 = len(index_b0)
    n_dwi = len(index_dwi)
    print '.. Index of b=0:'+str(index_b0)
    print '.. Index of DWI:'+str(index_dwi)

    # Split into T dimension
    print '\nSplit along T dimension...'
    status, output = sct.run(fsloutput + 'fslsplit ' + fname_data + ' data_splitT')
    numT = []
    for i in range(nt):
        if len(str(i))==1:
            numT.append('000' + str(i))
        elif len(str(i))==2:
            numT.append('00' + str(i))
        elif len(str(i))==3:
            numT.append('0' + str(i))
        else:
            numT.append(str(i))

    # Merge b=0 images
    print '\nMerge b=0...'
    fname_b0_merge = file_b0    #+ suffix_data
    cmd = fsloutput + 'fslmerge -t ' + fname_b0_merge
    for iT in range(n_b0):
        cmd = cmd + ' data_splitT' + numT[index_b0[iT]]
    status, output = sct.run(cmd)
    print '.. File created: ', fname_b0_merge

    # Average b=0 images
    print '\nAverage b=0...'
    fname_b0_mean = file_b0 + '_mean'
    cmd = fsloutput + 'fslmaths ' + fname_b0_merge + ' -Tmean ' + fname_b0_mean
    status, output = sct.run(cmd)

    # Number of DWI groups
    nb_groups = int(math.floor(n_dwi/dwi_group_size))
    
    # Generate groups indexes
    group_indexes = []
    for iGroup in range(nb_groups):
        group_indexes.append(index_dwi[(iGroup*dwi_group_size):((iGroup+1)*dwi_group_size)])

    # add the remaining images to the last DWI group
    # TODO: fix the thing below
    #nb_remaining = n_dwi%dwi_group_size # number of remaining images
    nb_remaining = n_dwi - dwi_group_size * nb_groups # number of remaining images
    if nb_remaining > 0:
        #if nb_remaining < 3: # TODO: WHY 3?
        #    #group_indexes[nb_groups-1].append(index_dwi[len(index_dwi)-nb_remaining:len(index_dwi)])
        #    group_indexes.append(index_dwi[len(index_dwi)-nb_remaining:len(index_dwi)])
        #else:
        nb_groups += 1
        group_indexes.append(index_dwi[len(index_dwi)-nb_remaining:len(index_dwi)])

    # Size of dwi groups                        #SUFFIX
    for iGroup in range(nb_groups):
        print '\nGroup ', str((iGroup+1)), ' of DW images'
    
        index_dwi_i = group_indexes[iGroup]
        nb_dwi_i = len(index_dwi_i)
        
        # Merge DWI images
        print '\nMerge DW images...'        
        fname_dwi_merge_i = file_dwi + '_' + str(iGroup)
        cmd = fsloutput + 'fslmerge -t ' + fname_dwi_merge_i
        for iT in range(nb_dwi_i):
            cmd = cmd + ' data_splitT' + numT[index_dwi_i[iT]]
        status, output = sct.run(cmd)

        # Average DWI images
        print '\nAverage DW images...'
        fname_dwi_mean = file_dwi + '_mean' + '_' + str(iGroup)
        cmd = fsloutput + 'fslmaths ' + fname_dwi_merge_i + ' -Tmean ' + fname_dwi_mean
        status, output = sct.run(cmd)

    # Merge DWI groups means
    print '\nMerging DW files...'
    fname_dwi_groups_means_merge = 'dwi_averaged_groups'
    cmd = fsloutput + 'fslmerge -t ' + fname_dwi_groups_means_merge
    for iGroup in range(nb_groups):
        cmd = cmd + ' ' + file_dwi + '_mean_' + str(iGroup)
    status, output = sct.run(cmd)

    # Average DWI images
    print '\nAveraging all DW images...'
    fname_dwi_mean = 'dwi_mean'
    cmd = fsloutput + 'fslmaths ' + fname_dwi_groups_means_merge + ' -Tmean ' + fname_dwi_mean
    status, output = sct.run(cmd)

    # Estimate moco on dwi groups
    print '\n------------------------------------------------------------------------------'
    print 'Estimating motion based on DW groups...'
    print '------------------------------------------------------------------------------\n'
    param.fname_data =  fname_dwi_groups_means_merge
    param.fname_target =  fname_dwi_mean
    param.todo = 'estimate_and_apply'
    param.mat_moco = 'dwigroups_moco.mat'
    param.interp = 'trilinear'
    sct_moco(param)

    #Copy registration matrix for every dwi based on dwi_averaged_groups
    print '\n------------------------------------------------------------------------------'
    print 'Copy registration matrix for every dwi based on dwi_averaged_groups matrix...'
    print '------------------------------------------------------------------------------\n'
    mat_final = 'mat_final/'
    if not os.path.exists(mat_final):
        os.makedirs(mat_final)
        
    for b0 in range(len(index_b0)):
        for i_Z in range(nz):
            cmd = 'cp dwigroups_moco.mat/' + 'mat.T0' + '_Z' + str(i_Z) + '.txt' + ' ' + mat_final + 'mat.T' + str(index_b0[b0]) + '_Z' + str(i_Z) + '.txt'
            status, output = sct.run(cmd)
    
    for iGroup in range(nb_groups):
        for dwi in range(len(group_indexes[iGroup])):
            for i_Z in range(nz):
                cmd = 'cp dwigroups_moco.mat/' + 'mat.T' + str(iGroup) + '_Z' + str(i_Z) + '.txt' + ' ' + mat_final + 'mat.T' + str(group_indexes[iGroup][dwi]) + '_Z' + str(i_Z) + '.txt'
                status, output = sct.run(cmd)

    #Apply moco on all dmri data
    print '\n\n\n------------------------------------------------------------------------------'
    print 'Apply moco on all dmri data...'
    print '------------------------------------------------------------------------------\n'
    param.fname_data =  fname_data
    param.fname_target = fname_data
    param.mat_final = mat_final
    param.todo = 'apply'
    param.interp = interp_final
    sct_moco(param)
def main():

    # Initialization
    fname_anat = ''
    fname_point = ''
    slice_gap = param.gap
    remove_tmp_files = param.remove_tmp_files
    gaussian_kernel = param.gaussian_kernel
    start_time = time.time()

    # get path of the toolbox
    status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
    path_sct = sct.slash_at_the_end(path_sct, 1)

    # Parameters for debug mode
    if param.debug == 1:
        sct.printv('\n*** WARNING: DEBUG MODE ON ***\n\t\t\tCurrent working directory: '+os.getcwd(), 'warning')
        status, path_sct_testing_data = commands.getstatusoutput('echo $SCT_TESTING_DATA_DIR')
        fname_anat = path_sct_testing_data+'/t2/t2.nii.gz'
        fname_point = path_sct_testing_data+'/t2/t2_centerline_init.nii.gz'
        slice_gap = 5

    else:
        # Check input param
        try:
            opts, args = getopt.getopt(sys.argv[1:],'hi:p:g:r:k:')
        except getopt.GetoptError as err:
            print str(err)
            usage()
        if not opts:
            usage()
        for opt, arg in opts:
            if opt == '-h':
                usage()
            elif opt in ('-i'):
                fname_anat = arg
            elif opt in ('-p'):
                fname_point = arg
            elif opt in ('-g'):
                slice_gap = int(arg)
            elif opt in ('-r'):
                remove_tmp_files = int(arg)
            elif opt in ('-k'):
                gaussian_kernel = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_anat == '' or fname_point == '':
        usage()

    # check existence of input files
    sct.check_file_exist(fname_anat)
    sct.check_file_exist(fname_point)

    # extract path/file/extension
    path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
    path_point, file_point, ext_point = sct.extract_fname(fname_point)

    # extract path of schedule file
    # TODO: include schedule file in sct
    # TODO: check existence of schedule file
    file_schedule = path_sct + param.schedule_file

    # Get input image orientation
    input_image_orientation = get_orientation(fname_anat)

    # Display arguments
    print '\nCheck input arguments...'
    print '  Anatomical image:     '+fname_anat
    print '  Orientation:          '+input_image_orientation
    print '  Point in spinal cord: '+fname_point
    print '  Slice gap:            '+str(slice_gap)
    print '  Gaussian kernel:      '+str(gaussian_kernel)
    print '  Degree of polynomial: '+str(param.deg_poly)

    # create temporary folder
    print('\nCreate temporary folder...')
    path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
    sct.create_folder(path_tmp)
    print '\nCopy input data...'
    sct.run('cp '+fname_anat+ ' '+path_tmp+'/tmp.anat'+ext_anat)
    sct.run('cp '+fname_point+ ' '+path_tmp+'/tmp.point'+ext_point)

    # go to temporary folder
    os.chdir(path_tmp)

    # convert to nii
    sct.run('fslchfiletype NIFTI tmp.anat')
    sct.run('fslchfiletype NIFTI tmp.point')

    # Reorient input anatomical volume into RPI orientation
    print '\nReorient input volume to RPI orientation...'
    #sct.run(sct.fsloutput + 'fslswapdim tmp.anat RL PA IS tmp.anat_orient')
    set_orientation('tmp.anat.nii', 'RPI', 'tmp.anat_orient.nii')
    # Reorient binary point into RPI orientation
    print '\nReorient binary point into RPI orientation...'
    #sct.run(sct.fsloutput + 'fslswapdim tmp.point RL PA IS tmp.point_orient')
    set_orientation('tmp.point.nii', 'RPI', 'tmp.point_orient.nii')

    # Get image dimensions
    print '\nGet image dimensions...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('tmp.anat_orient')
    print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
    print '.. voxel size:  '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'

    # Split input volume
    print '\nSplit input volume...'
    sct.run(sct.fsloutput + 'fslsplit tmp.anat_orient tmp.anat_orient_z -z')
    file_anat_split = ['tmp.anat_orient_z'+str(z).zfill(4) for z in range(0,nz,1)]

    # Get the coordinates of the input point
    print '\nGet the coordinates of the input point...'
    file = nibabel.load('tmp.point_orient.nii')
    data = file.get_data()
    x_init, y_init, z_init = (data > 0).nonzero()
    x_init = x_init[0]
    y_init = y_init[0]
    z_init = z_init[0]
    print '('+str(x_init)+', '+str(y_init)+', '+str(z_init)+')'

    # Extract the slice corresponding to z=z_init
    print '\nExtract the slice corresponding to z='+str(z_init)+'...'
    file_point_split = ['tmp.point_orient_z'+str(z).zfill(4) for z in range(0,nz,1)]
    sct.run(sct.fsloutput+'fslroi tmp.point_orient '+file_point_split[z_init]+' 0 -1 0 -1 '+str(z_init)+' 1')

    # Create gaussian mask from point
    print '\nCreate gaussian mask from point...'
    file_mask_split = ['tmp.mask_orient_z'+str(z).zfill(4) for z in range(0,nz,1)]
    sct.run(sct.fsloutput+'fslmaths '+file_point_split[z_init]+' -s '+str(gaussian_kernel)+' '+file_mask_split[z_init])

    # Obtain max value from mask
    print '\nFind maximum value from mask...'
    file = nibabel.load(file_mask_split[z_init]+'.nii')
    data = file.get_data()
    max_value_mask = numpy.max(data)
    print '..'+str(max_value_mask)

    # Normalize mask between 0 and 1
    print '\nNormalize mask between 0 and 1...'
    sct.run(sct.fsloutput+'fslmaths '+file_mask_split[z_init]+' -div '+str(max_value_mask)+' '+file_mask_split[z_init])

    ## Take the square of the mask
    #print '\nCalculate the square of the mask...'
    #sct.run(sct.fsloutput+'fslmaths '+file_mask_split[z_init]+' -mul '+file_mask_split[z_init]+' '+file_mask_split[z_init])

    # initialize variables
    file_mat = ['tmp.mat_z'+str(z).zfill(4) for z in range(0,nz,1)]
    file_mat_inv = ['tmp.mat_inv_z'+str(z).zfill(4) for z in range(0,nz,1)]
    file_mat_inv_cumul = ['tmp.mat_inv_cumul_z'+str(z).zfill(4) for z in range(0,nz,1)]

    # create identity matrix for initial transformation matrix
    fid = open(file_mat_inv_cumul[z_init], 'w')
    fid.write('%i %i %i %i\n' %(1, 0, 0, 0) )
    fid.write('%i %i %i %i\n' %(0, 1, 0, 0) )
    fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
    fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
    fid.close()
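    # The matrices are plain-text 4x4 affine transforms, as used by FSL flirt
    # (-init / -omat); the slice at z_init therefore starts from an identity (null) transformation.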

    # initialize centerline: give value corresponding to initial point
    x_centerline = [x_init]
    y_centerline = [y_init]
    z_centerline = [z_init]
    warning_count = 0

    # go up (1), then down (2) in reference to the binary point
    for iUpDown in range(1, 3):

        if iUpDown == 1:
            # z increases
            slice_gap_signed = slice_gap
        elif iUpDown == 2:
            # z decreases
            slice_gap_signed = -slice_gap
            # reverse centerline (because values will be appended at the end)
            x_centerline.reverse()
            y_centerline.reverse()
            z_centerline.reverse()

        # initialization before looping
        z_dest = z_init # point given by user
        z_src = z_dest + slice_gap_signed
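        # Propagation scheme: each new slice (z_src) is registered to the previous slice
        # (z_dest) with a normalized-correlation cost weighted by the Gaussian mask, the
        # inverse transformations are accumulated, and the mask is re-centered on the cord
        # by applying the inverse cumulative transform before moving to the next slice.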

        # continue looping if 0 < z < nz
        while 0 <= z_src and z_src <= nz-1:

            # print current z:
            print 'z='+str(z_src)+':'

            # estimate transformation
            sct.run(fsloutput+'flirt -in '+file_anat_split[z_src]+' -ref '+file_anat_split[z_dest]+' -schedule '+file_schedule+ ' -verbose 0 -omat '+file_mat[z_src]+' -cost normcorr -forcescaling -inweight '+file_mask_split[z_dest]+' -refweight '+file_mask_split[z_dest])

            # display transfo
            status, output = sct.run('cat '+file_mat[z_src])
            print output

            # check if transformation is bigger than 1.5x slice_gap
            tx = float(output.split()[3])
            ty = float(output.split()[7])
            norm_txy = numpy.linalg.norm([tx, ty],ord=2)
            if norm_txy > 1.5*slice_gap:
                print 'WARNING: Transformation is too large --> using previous one.'
                warning_count = warning_count + 1
                # if previous transformation exists, replace current one with previous one
                if os.path.isfile(file_mat[z_dest]):
                    sct.run('cp '+file_mat[z_dest]+' '+file_mat[z_src])

            # estimate inverse transformation matrix
            sct.run('convert_xfm -omat '+file_mat_inv[z_src]+' -inverse '+file_mat[z_src])

            # compute cumulative transformation
            sct.run('convert_xfm -omat '+file_mat_inv_cumul[z_src]+' -concat '+file_mat_inv[z_src]+' '+file_mat_inv_cumul[z_dest])

            # apply inverse cumulative transformation to initial gaussian mask (to put it in src space)
            sct.run(fsloutput+'flirt -in '+file_mask_split[z_init]+' -ref '+file_mask_split[z_init]+' -applyxfm -init '+file_mat_inv_cumul[z_src]+' -out '+file_mask_split[z_src])

            # open inverse cumulative transformation file and generate centerline
            fid = open(file_mat_inv_cumul[z_src])
            mat = fid.read().split()
            x_centerline.append(x_init + float(mat[3]))
            y_centerline.append(y_init + float(mat[7]))
            z_centerline.append(z_src)
            #z_index = z_index+1

            # define new z_dest (target slice) and new z_src (moving slice)
            z_dest = z_dest + slice_gap_signed
            z_src = z_src + slice_gap_signed


    # Reconstruct centerline
    # ====================================================================================================

    # reverse back centerline (because it's been reversed once, so now all values are in the right order)
    x_centerline.reverse()
    y_centerline.reverse()
    z_centerline.reverse()

    # fit centerline in the Z-X plane using polynomial function
    print '\nFit centerline in the Z-X plane using polynomial function...'
    coeffsx = numpy.polyfit(z_centerline, x_centerline, deg=param.deg_poly)
    polyx = numpy.poly1d(coeffsx)
    x_centerline_fit = numpy.polyval(polyx, z_centerline)
    # calculate RMSE
    rmse = numpy.linalg.norm(x_centerline_fit-x_centerline)/numpy.sqrt( len(x_centerline) )
    # calculate max absolute error
    max_abs = numpy.max( numpy.abs(x_centerline_fit-x_centerline) )
    print '.. RMSE (in mm): '+str(rmse*px)
    print '.. Maximum absolute error (in mm): '+str(max_abs*px)

    # fit centerline in the Z-Y plane using polynomial function
    print '\nFit centerline in the Z-Y plane using polynomial function...'
    coeffsy = numpy.polyfit(z_centerline, y_centerline, deg=param.deg_poly)
    polyy = numpy.poly1d(coeffsy)
    y_centerline_fit = numpy.polyval(polyy, z_centerline)
    # calculate RMSE
    rmse = numpy.linalg.norm(y_centerline_fit-y_centerline)/numpy.sqrt( len(y_centerline) )
    # calculate max absolute error
    max_abs = numpy.max( numpy.abs(y_centerline_fit-y_centerline) )
    print '.. RMSE (in mm): '+str(rmse*py)
    print '.. Maximum absolute error (in mm): '+str(max_abs*py)

    # display
    if param.debug == 1:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.plot(z_centerline,x_centerline,'.',z_centerline,x_centerline_fit,'r')
        plt.legend(['Data','Polynomial Fit'])
        plt.title('Z-X plane polynomial interpolation')
        plt.show()

        plt.figure()
        plt.plot(z_centerline,y_centerline,'.',z_centerline,y_centerline_fit,'r')
        plt.legend(['Data','Polynomial Fit'])
        plt.title('Z-Y plane polynomial interpolation')
        plt.show()

    # generate full range z-values for centerline
    z_centerline_full = [iz for iz in range(0, nz, 1)]

    # calculate X and Y values for the full centerline
    x_centerline_fit_full = numpy.polyval(polyx, z_centerline_full)
    y_centerline_fit_full = numpy.polyval(polyy, z_centerline_full)

    # Generate fitted transformation matrices and write centerline coordinates in text file
    print '\nGenerate fitted transformation matrices and write centerline coordinates in text file...'
    file_mat_inv_cumul_fit = ['tmp.mat_inv_cumul_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
    file_mat_cumul_fit = ['tmp.mat_cumul_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
    fid_centerline = open('tmp.centerline_coordinates.txt', 'w')
    for iz in range(0, nz, 1):
        # compute inverse cumulative fitted transformation matrix
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        fid.write('%i %i %i %f\n' %(1, 0, 0, x_centerline_fit_full[iz]-x_init) )
        fid.write('%i %i %i %f\n' %(0, 1, 0, y_centerline_fit_full[iz]-y_init) )
        fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
        fid.close()
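        # These matrices encode pure in-plane translations: the inverse moves the initial
        # point to the fitted centerline position of slice iz, while the forward matrix
        # (computed below) straightens the anatomical slice by bringing that position back
        # onto (x_init, y_init).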
        # compute forward cumulative fitted transformation matrix
        sct.run('convert_xfm -omat '+file_mat_cumul_fit[iz]+' -inverse '+file_mat_inv_cumul_fit[iz])
        # write centerline coordinates in x, y, z format
        fid_centerline.write('%f %f %f\n' %(x_centerline_fit_full[iz], y_centerline_fit_full[iz], z_centerline_full[iz]) )
    fid_centerline.close()


    # Prepare output data
    # ====================================================================================================

    # write inverse cumulative fitted transformation matrices
    for iz in range(0, nz, 1):
        # compute inverse cumulative fitted transformation matrix
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        fid.write('%i %i %i %f\n' %(1, 0, 0, x_centerline_fit_full[iz]-x_init) )
        fid.write('%i %i %i %f\n' %(0, 1, 0, y_centerline_fit_full[iz]-y_init) )
        fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
        fid.close()

    # write polynomial coefficients
    numpy.savetxt('tmp.centerline_polycoeffs_x.txt',coeffsx)
    numpy.savetxt('tmp.centerline_polycoeffs_y.txt',coeffsy)

    # apply transformations to data
    print '\nApply fitted transformation matrices...'
    file_anat_split_fit = ['tmp.anat_orient_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
    file_mask_split_fit = ['tmp.mask_orient_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
    file_point_split_fit = ['tmp.point_orient_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
    for iz in range(0, nz, 1):
        # forward cumulative transformation to data
        sct.run(fsloutput+'flirt -in '+file_anat_split[iz]+' -ref '+file_anat_split[iz]+' -applyxfm -init '+file_mat_cumul_fit[iz]+' -out '+file_anat_split_fit[iz])
        # inverse cumulative transformation to mask
        sct.run(fsloutput+'flirt -in '+file_mask_split[z_init]+' -ref '+file_mask_split[z_init]+' -applyxfm -init '+file_mat_inv_cumul_fit[iz]+' -out '+file_mask_split_fit[iz])
        # inverse cumulative transformation to point
        sct.run(fsloutput+'flirt -in '+file_point_split[z_init]+' -ref '+file_point_split[z_init]+' -applyxfm -init '+file_mat_inv_cumul_fit[iz]+' -out '+file_point_split_fit[iz]+' -interp nearestneighbour')

    # Merge into 4D volume
    print '\nMerge into 4D volume...'
    sct.run(fsloutput+'fslmerge -z tmp.anat_orient_fit tmp.anat_orient_fit_z*')
    sct.run(fsloutput+'fslmerge -z tmp.mask_orient_fit tmp.mask_orient_fit_z*')
    sct.run(fsloutput+'fslmerge -z tmp.point_orient_fit tmp.point_orient_fit_z*')

    # Copy header geometry from input data
    print '\nCopy header geometry from input data...'
    sct.run(fsloutput+'fslcpgeom tmp.anat_orient.nii tmp.anat_orient_fit.nii ')
    sct.run(fsloutput+'fslcpgeom tmp.anat_orient.nii tmp.mask_orient_fit.nii ')
    sct.run(fsloutput+'fslcpgeom tmp.anat_orient.nii tmp.point_orient_fit.nii ')

    # Reorient outputs into the initial orientation of the input image
    print '\nReorient the centerline into the initial orientation of the input image...'
    set_orientation('tmp.point_orient_fit.nii', input_image_orientation, 'tmp.point_orient_fit.nii')
    set_orientation('tmp.mask_orient_fit.nii', input_image_orientation, 'tmp.mask_orient_fit.nii')

    # Generate output file (in current folder)
    print '\nGenerate output file (in current folder)...'
    os.chdir('..')  # come back to parent folder
    #sct.generate_output_file('tmp.centerline_polycoeffs_x.txt','./','centerline_polycoeffs_x','.txt')
    #sct.generate_output_file('tmp.centerline_polycoeffs_y.txt','./','centerline_polycoeffs_y','.txt')
    #sct.generate_output_file('tmp.centerline_coordinates.txt','./','centerline_coordinates','.txt')
    #sct.generate_output_file('tmp.anat_orient.nii','./',file_anat+'_rpi',ext_anat)
    #sct.generate_output_file('tmp.anat_orient_fit.nii', file_anat+'_rpi_align'+ext_anat)
    #sct.generate_output_file('tmp.mask_orient_fit.nii', file_anat+'_mask'+ext_anat)
    fname_output_centerline = sct.generate_output_file(path_tmp+'/tmp.point_orient_fit.nii', file_anat+'_centerline'+ext_anat)

    # Delete temporary files
    if remove_tmp_files == 1:
        print '\nRemove temporary files...'
        sct.run('rm -rf '+path_tmp)

    # print number of warnings
    print '\nNumber of warnings: '+str(warning_count)+' (if >10, you should probably reduce the gap and/or increase the kernel size)'

    # display elapsed time
    elapsed_time = time.time() - start_time
    print '\nFinished! \n\tGenerated file: '+fname_output_centerline+'\n\tElapsed time: '+str(int(round(elapsed_time)))+'s\n'
def main():
    
    # Initialization
    fname_data = ''
    interp_factor = param.interp_factor
    remove_temp_files = param.remove_temp_files
    verbose = param.verbose
    suffix = param.suffix
    smoothing_sigma = param.smoothing_sigma

    # start timer
    start_time = time.time()
    
    # Parameters for debug mode
    if param.debug:
        fname_data = path_sct+'/testing/data/errsm_23/t2/t2_manual_segmentation.nii.gz'
        remove_temp_files = 0
        param.mask_size = 10

    # Check input parameters
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hi:v:r:s:')
    except getopt.GetoptError:
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ('-i'):
            fname_data = arg
        elif opt in ('-r'):
            remove_temp_files = int(arg)
        elif opt in ('-s'):
            smoothing_sigma = arg
        elif opt in ('-v'):
            verbose = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_data == '':
        usage()

    # print arguments
    print '\nCheck parameters:'
    print '  segmentation ........... '+fname_data
    print '  interp factor .......... '+str(interp_factor)
    print '  smoothing sigma ........ '+str(smoothing_sigma)

    # check existence of input files
    print('\nCheck existence of input files...')
    sct.check_file_exist(fname_data, verbose)

    # Extract path, file and extension
    path_data, file_data, ext_data = sct.extract_fname(fname_data)

    # create temporary folder
    print('\nCreate temporary folder...')
    path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir '+path_tmp)

    # copy files to temporary folder
    print('\nCopy files...')
    sct.run('c3d '+fname_data+' -o '+path_tmp+'/data.nii')

    # go to tmp folder
    os.chdir(path_tmp)

    # Get dimensions of data
    sct.printv('\nGet dimensions of data...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('data.nii')
    sct.printv('.. '+str(nx)+' x '+str(ny)+' x '+str(nz), verbose)

    # upsample data
    sct.printv('\nUpsample data...', verbose)
    sct.run('c3d data.nii -interpolation Linear -resample '+str(nx*interp_factor)+'x'+str(ny*interp_factor)+'x'+str(nz*interp_factor)+'vox -o data_up.nii', verbose)
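    # Note: upsampling before smoothing is presumably done to limit partial-volume and
    # staircase effects when smoothing the (binary) segmentation along the cord; the data
    # are downsampled back to the native grid afterwards.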

    # Smooth along centerline
    sct.printv('\nSmooth along centerline...', verbose)
    sct.run('sct_smooth_spinalcord.py -i data_up.nii -c data_up.nii'+' -s '+str(smoothing_sigma)+' -r '+str(remove_temp_files)+' -v '+str(verbose), verbose)

    # downsample data
    sct.printv('\nDownsample data...', verbose)
    sct.run('c3d data_up_smooth.nii -interpolation Linear -resample '+str(nx)+'x'+str(ny)+'x'+str(nz)+'vox -o data_up_smooth_down.nii', verbose)

    # come back to parent folder
    os.chdir('..')

    # Generate output files
    print('\nGenerate output files...')
    fname_out = sct.generate_output_file(path_tmp+'/data_up_smooth_down.nii', '', file_data+suffix, ext_data)

    # Delete temporary files
    if remove_temp_files == 1:
        print '\nRemove temporary files...'
        sct.run('rm -rf '+ path_tmp)

    # display elapsed time
    elapsed_time = time.time() - start_time
    print '\nFinished! Elapsed time: '+str(int(round(elapsed_time)))+'s'

    # to view results
    print '\nTo view results, type:'
    print 'fslview '+file_data+' '+file_data+suffix+' &\n'
Example #37
#!/usr/bin/env python

import os, sys, commands

# Get path of the toolbox
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
# Append path that contains scripts, to be able to load modules
sys.path.append(path_sct + '/scripts')

from msct_register_regularized import generate_warping_field
import sct_utils as sct

path = '/Users/tamag/Desktop/sliceregaffine/negativescaling/tmp.150717110850/tmp.150717110853'
fname_dest = 'src_reg_z0000.nii'

os.chdir(path)

print '\nGet image dimensions of destination image...'
nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_dest)

x_trans = [0 for i in range(nz)]
y_trans = [0 for i in range(nz)]

generate_warping_field(fname_dest,
                       x_trans=x_trans,
                       y_trans=y_trans,
                       fname='warp_null.nii.gz')
Example #38
def main():
    
    
    # get path of the toolbox
    status, path_sct = getstatusoutput('echo $SCT_DIR')
    #print path_sct


    #Initialization
    fname = ''
    landmark = ''
    verbose = param.verbose
    output_name = 'aligned.nii.gz'
    template_landmark = ''
    final_warp = param.final_warp
    compose = param.compose
    transfo = 'affine'
        
    try:
         opts, args = getopt.getopt(sys.argv[1:],'hi:l:o:R:t:w:c:v:')
    except getopt.GetoptError:
        usage()
    for opt, arg in opts :
        if opt == '-h':
            usage()
        elif opt in ("-i"):
            fname = arg
        elif opt in ("-l"):
            landmark = arg       
        elif opt in ("-o"):
            output_name = arg  
        elif opt in ("-R"):
            template_landmark = arg
        elif opt in ("-t"):
            transfo = arg    
        elif opt in ("-w"):
            final_warp = arg
        elif opt in ("-c"):
            compose = int(arg)                          
        elif opt in ('-v'):
            verbose = int(arg)
    
    # display usage if a mandatory argument is not provided
    if fname == '' or landmark == '' or template_landmark == '' :
        usage()
        
    if final_warp not in ['','spline','NN']:
        usage()
        
    if transfo not in ['affine','bspline','SyN']:
        usage()       
    
    # check existence of input files
    print '\nCheck if file exists ...'
    
    sct.check_file_exist(fname)
    sct.check_file_exist(landmark)
    sct.check_file_exist(template_landmark)
    
    
        
    # Display arguments
    print '\nCheck input arguments...'
    print '  Input volume ...................... '+fname
    print '  Verbose ........................... '+str(verbose)

    if transfo == 'affine':
        print 'Creating cross using input landmarks\n...'
        sct.run('sct_label_utils -i ' + landmark + ' -o ' + 'cross_native.nii.gz -t cross ' )
    
        print 'Creating cross using template landmarks\n...'
        sct.run('sct_label_utils -i ' + template_landmark + ' -o ' + 'cross_template.nii.gz -t cross ' )
    
        print 'Computing affine transformation between subject and destination landmarks\n...'
        os.system('isct_ANTSUseLandmarkImagesToGetAffineTransform cross_template.nii.gz cross_native.nii.gz affine n2t.txt')
        warping = 'n2t.txt'
    elif transfo == 'SyN':
        warping = 'warp_subject2template.nii.gz'
        tmp_name = 'tmp.'+time.strftime("%y%m%d%H%M%S")
        sct.run('mkdir '+tmp_name)
        tmp_abs_path = os.path.abspath(tmp_name)
        sct.run('cp ' + landmark + ' ' + tmp_abs_path)
        os.chdir(tmp_name)

        # sct.run('sct_label_utils -i '+landmark+' -t dist-inter')
        # sct.run('sct_label_utils -i '+template_landmark+' -t plan -o template_landmarks_plan.nii.gz -c 5')
        # sct.run('sct_crop_image -i template_landmarks_plan.nii.gz -o template_landmarks_plan_cropped.nii.gz -start 0.35,0.35 -end 0.65,0.65 -dim 0,1')
        # sct.run('sct_label_utils -i '+landmark+' -t plan -o landmarks_plan.nii.gz -c 5')
        # sct.run('sct_crop_image -i landmarks_plan.nii.gz -o landmarks_plan_cropped.nii.gz -start 0.35,0.35 -end 0.65,0.65 -dim 0,1')
        # sct.run('isct_antsRegistration --dimensionality 3 --transform SyN[0.5,3,0] --metric MeanSquares[template_landmarks_plan_cropped.nii.gz,landmarks_plan_cropped.nii.gz,1] --convergence 400x200 --shrink-factors 4x2 --smoothing-sigmas 4x2mm --restrict-deformation 0x0x1 --output [landmarks_reg,landmarks_reg.nii.gz] --interpolation NearestNeighbor --float')
        # sct.run('isct_c3d -mcs landmarks_reg0Warp.nii.gz -oo warp_vecx.nii.gz warp_vecy.nii.gz warp_vecz.nii.gz')
        # sct.run('isct_c3d warp_vecz.nii.gz -resample 200% -o warp_vecz_r.nii.gz')
        # sct.run('isct_c3d warp_vecz_r.nii.gz -smooth 0x0x3mm -o warp_vecz_r_sm.nii.gz')
        # sct.run('sct_crop_image -i warp_vecz_r_sm.nii.gz -o warp_vecz_r_sm_line.nii.gz -start 0.5,0.5 -end 0.5,0.5 -dim 0,1 -b 0')
        # sct.run('sct_label_utils -i warp_vecz_r_sm_line.nii.gz -t plan_ref -o warp_vecz_r_sm_line_extended.nii.gz -c 0 -r '+template_landmark)
        # sct.run('isct_c3d '+template_landmark+' warp_vecx.nii.gz -reslice-identity -o warp_vecx_res.nii.gz')
        # sct.run('isct_c3d '+template_landmark+' warp_vecy.nii.gz -reslice-identity -o warp_vecy_res.nii.gz')
        # sct.run('isct_c3d warp_vecx_res.nii.gz warp_vecy_res.nii.gz warp_vecz_r_sm_line_extended.nii.gz -omc 3 '+warping)  # no x?


        #new
        # put the subject's labels at the center of the image (in the xOy plane)
        import nibabel
        from copy import copy
        file_labels_input = nibabel.load(landmark)
        hdr_labels_input = file_labels_input.get_header()
        data_labels_input = file_labels_input.get_data()
        data_labels_middle = copy(data_labels_input)
        data_labels_middle *= 0
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(landmark)
        X,Y,Z = data_labels_input.nonzero()
        x_middle = int(round(nx/2.0))
        y_middle = int(round(ny/2.0))
        for i in range(len(Z)):
            data_labels_middle[x_middle, y_middle, Z[i]] = data_labels_input[X[i], Y[i], Z[i]]
        img = nibabel.Nifti1Image(data_labels_middle, None, hdr_labels_input)
        nibabel.save(img, 'labels_input_middle_xy.nii.gz')

        # put the template's labels at the center of the image (in the xOy plane)  # probably not necessary, as this is already done by the label-averaging step
        file_labels_template = nibabel.load(template_landmark)
        hdr_labels_template = file_labels_template.get_header()
        data_labels_template = file_labels_template.get_data()
        data_template_middle = copy(data_labels_template)
        data_template_middle *= 0

        x, y, z = data_labels_template.nonzero()
        for i in range(len(z)):
            data_template_middle[x_middle, y_middle, z[i]] = data_labels_template[x[i], y[i], z[i]]
        img_template = nibabel.Nifti1Image(data_template_middle, None, hdr_labels_template)
        nibabel.save(img_template, 'labels_template_middle_xy.nii.gz')
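        # Both label sets are collapsed onto the central (x, y) column of the volume, so
        # that the B-spline field estimated below essentially encodes only the z
        # (along-cord) displacement, which is then extracted and extended to the whole
        # volume.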


        #estimate Bspline transform to register to template
        sct.run('isct_ANTSUseLandmarkImagesToGetBSplineDisplacementField labels_template_middle_xy.nii.gz labels_input_middle_xy.nii.gz '+ warping+' 40*40*1 2 5 1')

        # select centerline of warping field according to z and extend it
        sct.run('isct_c3d -mcs '+warping+' -oo warp_vecx.nii.gz warp_vecy.nii.gz warp_vecz.nii.gz')
        #sct.run('isct_c3d warp_vecz.nii.gz -resample 200% -o warp_vecz_r.nii.gz')
        #sct.run('isct_c3d warp_vecz.nii.gz -smooth 0x0x3mm -o warp_vecz_r_sm.nii.gz')
        sct.run('sct_crop_image -i warp_vecz.nii.gz -o warp_vecz_r_sm_line.nii.gz -start 0.5,0.5 -end 0.5,0.5 -dim 0,1 -b 0')
        sct.run('sct_label_utils -i warp_vecz_r_sm_line.nii.gz -t plan_ref -o warp_vecz_r_sm_line_extended.nii.gz -r '+template_landmark)
        sct.run('isct_c3d '+template_landmark+' warp_vecx.nii.gz -reslice-identity -o warp_vecx_res.nii.gz')
        sct.run('isct_c3d '+template_landmark+' warp_vecy.nii.gz -reslice-identity -o warp_vecy_res.nii.gz')
        sct.run('isct_c3d warp_vecx_res.nii.gz warp_vecy_res.nii.gz warp_vecz_r_sm_line_extended.nii.gz -omc 3 '+warping)

        # check results
        #dilate first labels
        sct.run('fslmaths labels_input_middle_xy.nii.gz -dilF landmark_dilated.nii.gz') #new
        sct.run('sct_apply_transfo -i landmark_dilated.nii.gz -o label_moved.nii.gz -d labels_template_middle_xy.nii.gz -w '+warping+' -x nn')
        #undilate
        sct.run('sct_label_utils -i label_moved.nii.gz -t cubic-to-point -o label_moved_2point.nii.gz')
        sct.run('sct_label_utils -i labels_template_middle_xy.nii.gz -r label_moved_2point.nii.gz -o template_removed.nii.gz -t remove')
        #end new


        # check results
        #dilate first labels

        #sct.run('fslmaths '+landmark+' -dilF landmark_dilated.nii.gz') #old
        #sct.run('sct_apply_transfo -i landmark_dilated.nii.gz -o label_moved.nii.gz -d '+template_landmark+' -w '+warping+' -x nn') #old



        #undilate
        #sct.run('sct_label_utils -i label_moved.nii.gz -t cubic-to-point -o label_moved_2point.nii.gz') #old

        #sct.run('sct_label_utils -i '+template_landmark+' -r label_moved_2point.nii.gz -o template_removed.nii.gz -t remove') #old


        # # sct.run('sct_apply_transfo -i '+landmark+' -o label_moved.nii.gz -d '+template_landmark+' -w '+warping+' -x nn')
        # # sct.run('sct_label_utils -i '+template_landmark+' -r label_moved.nii.gz -o template_removed.nii.gz -t remove')
        # # status, output = sct.run('sct_label_utils -i label_moved.nii.gz -r template_removed.nii.gz -t MSE')

        status, output = sct.run('sct_label_utils -i label_moved_2point.nii.gz -r template_removed.nii.gz -t MSE')
        sct.printv(output,1,'info')
        remove_temp_files = True
        if os.path.isfile('error_log_label_moved.txt'):
            remove_temp_files = False
            with open('log.txt', 'a') as log_file:
                log_file.write('Error for '+fname+'\n')
        # Copy warping into parent folder
        sct.run('cp '+ warping+' ../'+warping)

        os.chdir('..')
        if remove_temp_files:
            sct.run('rm -rf '+tmp_name)



    # if transfo == 'bspline' :
    #     print 'Computing bspline transformation between subject and destination landmarks\n...'
    #     sct.run('isct_ANTSUseLandmarkImagesToGetBSplineDisplacementField cross_template.nii.gz cross_native.nii.gz warp_ntotemp.nii.gz 5x5x5 3 2 0')
    #     warping = 'warp_ntotemp.nii.gz'
        
    # if final_warp == '' :    
    #     print 'Apply transfo to input image\n...'
    #     sct.run('isct_antsApplyTransforms 3 ' + fname + ' ' + output_name + ' -r ' + template_landmark + ' -t ' + warping + ' -n Linear')
        
    # if final_warp == 'NN':
    #     print 'Apply transfo to input image\n...'
    #     sct.run('isct_antsApplyTransforms 3 ' + fname + ' ' + output_name + ' -r ' + template_landmark + ' -t ' + warping + ' -n NearestNeighbor')
    if final_warp == 'spline':
        print 'Apply transfo to input image\n...'
        sct.run('sct_apply_transfo -i ' + fname + ' -o ' + output_name + ' -d ' + template_landmark + ' -w ' + warping + ' -x spline')


    # Remove warping
    os.remove(warping)

    # if compose :
        
    #     print 'Computing affine transformation between subject and destination landmarks\n...'
    #     sct.run('isct_ANTSUseLandmarkImagesToGetAffineTransform cross_template.nii.gz cross_native.nii.gz affine n2t.txt')
    #     warping_affine = 'n2t.txt'
        
        
    #     print 'Apply transfo to input landmarks\n...'
    #     sct.run('isct_antsApplyTransforms 3 ' + cross_native + ' cross_affine.nii.gz -r ' + template_landmark + ' -t ' + warping_affine + ' -n NearestNeighbor')
        
    #     print 'Computing transfo between moved landmarks and template landmarks\n...'
    #     sct.run('isct_ANTSUseLandmarkImagesToGetBSplineDisplacementField cross_template.nii.gz cross_affine.nii.gz warp_affine2temp.nii.gz 5x5x5 3 2 0')
    #     warping_bspline = 'warp_affine2temp.nii.gz'
        
    #     print 'Composing transformations\n...'
    #     sct.run('isct_ComposeMultiTransform 3 warp_full.nii.gz -r ' + template_landmark + ' ' + warping_bspline + ' ' + warping_affine)
    #     warping_concat = 'warp_full.nii.gz'
        
    #     if final_warp == '' :    
    #         print 'Apply concat warp to input image\n...'
    #         sct.run('isct_antsApplyTransforms 3 ' + fname + ' ' + output_name + ' -r ' + template_landmark + ' -t ' + warping_concat + ' -n Linear')
        
    #     if final_warp == 'NN':
    #         print 'Apply concat warp to input image\n...'
    #         sct.run('isct_antsApplyTransforms 3 ' + fname + ' ' + output_name + ' -r ' + template_landmark + ' -t ' + warping_concat + ' -n NearestNeighbor')
        
    #     if final_warp == 'spline':
    #         print 'Apply concat warp to input image\n...'
    #         sct.run('isct_antsApplyTransforms 3 ' + fname + ' ' + output_name + ' -r ' + template_landmark + ' -t ' + warping_concat + ' -n BSpline[3]')
          
    
    
    print '\nFile created : ' + output_name
def main():

    #Initialization
    directory = ""
    fname_template = ''
    n_l = 0
    verbose = param.verbose

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hi:t:n:v:')
    except getopt.GetoptError:
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ("-i"):
            directory = arg
        elif opt in ("-t"):
            fname_template = arg
        elif opt in ('-n'):
            n_l = int(arg)
        elif opt in ('-v'):
            verbose = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_template == '' or directory == '':
        usage()

    # check existence of input files
    print '\nCheck if file exists ...\n'
    sct.check_file_exist(fname_template)
    sct.check_folder_exist(directory)

    path_template, file_template, ext_template = sct.extract_fname(
        fname_template)
    template_absolute_path = sct.get_absolute_path(fname_template)

    os.chdir(directory)

    n_i = len([
        name for name in os.listdir('.')
        if (os.path.isfile(name) and name.endswith(".nii.gz")
            and name != 'template_landmarks.nii.gz')
    ])  # number of landmark images

    average = zeros((n_i, n_l))
    compteur = 0

    for file in os.listdir('.'):
        if file.endswith(".nii.gz") and file != 'template_landmarks.nii.gz':
            print file
            img = nibabel.load(file)
            data = img.get_data()
            X, Y, Z = (data > 0).nonzero()
            Z = [Z[i] for i in Z.argsort()]
            Z.reverse()

            for i in xrange(n_l):
                if i < len(Z):
                    average[compteur][i] = Z[i]

            compteur = compteur + 1

    average = array([
        int(round(mean([average[average[:, i] > 0, i]]))) for i in xrange(n_l)
    ])
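    # 'average' now holds, for each landmark index, the z position averaged across
    # subjects (zeros, i.e. missing landmarks, are excluded) and rounded to the nearest
    # slice.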

    #print average

    print template_absolute_path
    print '\nGet dimensions of template...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(template_absolute_path)
    print '.. matrix size: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz)
    print '.. voxel size:  ' + str(px) + 'mm x ' + str(py) + 'mm x ' + str(
        pz) + 'mm'

    img = nibabel.load(template_absolute_path)
    data = img.get_data()
    hdr = img.get_header()
    data[:, :, :] = 0
    compteur = 1
    for i in average:
        print int(round(nx / 2.0)), int(round(ny / 2.0)), int(round(i)), int(
            round(compteur))
        data[int(round(nx / 2.0)),
             int(round(ny / 2.0)),
             int(round(i))] = int(round(compteur))
        compteur = compteur + 1

    print '\nSave volume ...'
    #hdr.set_data_dtype('float32') # set imagetype to uint8
    # save volume
    #data = data.astype(float32, copy =False)
    img = nibabel.Nifti1Image(data, None, hdr)
    file_name = 'template_landmarks.nii.gz'
    nibabel.save(img, file_name)
    print '\nFile created : ' + file_name
def main():
    
    #Initialization
    fname = ''
    verbose = param.verbose
        
    try:
         opts, args = getopt.getopt(sys.argv[1:],'hi:v:')
    except getopt.GetoptError:
        usage()
    for opt, arg in opts :
        if opt == '-h':
            usage()
        elif opt in ("-i"):
            fname = arg                      
        elif opt in ('-v'):
            verbose = int(arg)
    
    # display usage if a mandatory argument is not provided
    if fname == '' :
        usage()

    # check existence of input files
    print '\nCheck if file exists ...'

    sct.check_file_exist(fname)


    # Display arguments
    print '\nCheck input arguments...'
    print '  Input volume ...................... '+fname
    print '  Verbose ........................... '+str(verbose)


    # create temporary folder
    path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir '+path_tmp)

    fname = os.path.abspath(fname)
    path_data, file_data, ext_data = sct.extract_fname(fname)

    # copy files into tmp folder
    sct.run('cp '+fname+' '+path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)


    # Get size of data
    print '\nGet dimensions of template...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname)
    print '.. '+str(nx)+' x '+str(ny)+' x '+str(nz)+' x '+str(nt)

    # extract left side and right side
    sct.run('sct_crop_image -i '+fname+' -o left.nii.gz -dim 0 -start '+str(int(0))+' -end '+str(int(floor(nx/2)-1)))
    sct.run('sct_crop_image -i '+fname+' -o right.nii.gz -dim 0 -start '+str(int(floor(nx/2)))+' -end '+str(int(nx-1)))

    # create mirror right
    right = nibabel.load('right.nii.gz')
    data_right = right.get_data()
    hdr_right = right.get_header()

    nx_r, ny_r, nz_r, nt_r, px_r, py_r, pz_r, pt_r = sct.get_dimension('right.nii.gz')

    mirror_right = data_right*0
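    # The loop below mirrors the right half along the x axis (equivalent to
    # data_right[::-1, :, :]) so that it can be registered to the left half.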

    for i in xrange(nx_r):
        for j in xrange(ny_r):
            for k in xrange(nz_r):

                mirror_right[i,j,k] = data_right[(nx_r-1)-i,j,k]


    print '\nSave volume ...'

    img = nibabel.Nifti1Image(mirror_right, None, hdr_right)
    file_name = 'mirror_right.nii.gz'
    nibabel.save(img,file_name)

    # copy header of left to mirror right
    sct.run('fslcpgeom left.nii.gz mirror_right.nii.gz')



    # compute transfo from left to mirror right
    # MI metric: [fixed, moving]
    # Because this registration takes time, a pre-computed output is available on guillimin:
    # /home/django/jtouati/data/test_templateANTS/final_preprocessed/MI/test/tmp.141015123447
    cmd = 'isct_antsRegistration \
    --dimensionality 3 \
    --transform SyN[0.5,3,0] \
    --metric MI[mirror_right.nii.gz,left.nii.gz,1,32] \
    --convergence 50x20 \
    --shrink-factors 4x1 \
    --smoothing-sigmas 1x1mm \
    --restrict-deformation 1x1x0 \
    --output [l2r,l2r.nii.gz]'

    status, output = sct.run(cmd)
    if verbose:
        print output

    # outputs are: l2r0InverseWarp.nii.gz, l2r.nii.gz, l2r0Warp.nii.gz

    # separate the 2 warping fields along the 3 directions
    status, output = sct.run('isct_c3d -mcs l2r0Warp.nii.gz -oo l2rwarpx.nii.gz l2rwarpy.nii.gz l2rwarpz.nii.gz')
    status, output = sct.run('isct_c3d -mcs l2r0InverseWarp.nii.gz -oo l2rinvwarpx.nii.gz l2rinvwarpy.nii.gz l2rinvwarpz.nii.gz')
    print 'Loading ..'
    # load warping fields
    warpx = nibabel.load('l2rwarpx.nii.gz')
    data_warpx = warpx.get_data()
    hdr_warpx=warpx.get_header()

    warpy = nibabel.load('l2rwarpy.nii.gz')
    data_warpy = warpy.get_data()
    hdr_warpy=warpy.get_header()

    warpz = nibabel.load('l2rwarpz.nii.gz')
    data_warpz = warpz.get_data()
    hdr_warpz=warpz.get_header()

    invwarpx = nibabel.load('l2rinvwarpx.nii.gz')
    data_invwarpx = invwarpx.get_data()
    hdr_invwarpx=invwarpx.get_header()

    invwarpy = nibabel.load('l2rinvwarpy.nii.gz')
    data_invwarpy = invwarpy.get_data()
    hdr_invwarpy=invwarpy.get_header()

    invwarpz = nibabel.load('l2rinvwarpz.nii.gz')
    data_invwarpz = invwarpz.get_data()
    hdr_invwarpz=invwarpz.get_header()
    print 'Creating..'
    # create demi warping fields
    data_warpx = (data_warpx - data_warpx[::-1,:,:])/2
    data_warpy = (data_warpy + data_warpy[::-1,:,:])/2
    data_warpz = (data_warpz + data_warpz[::-1,:,:])/2
    data_invwarpx = (data_invwarpx - data_invwarpx[::-1,:,:])/2
    data_invwarpy = (data_invwarpy + data_invwarpy[::-1,:,:])/2
    data_invwarpz = (data_invwarpz + data_invwarpz[::-1,:,:])/2
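    # "Demi" (half) warping fields: the x component is anti-symmetrized (its sign flips
    # under the left-right mirroring) while the y and z components are symmetrized, so
    # that, presumably, each side is moved halfway towards the common symmetric shape.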
    print 'Saving ..'
    # save demi warping fields
    img = nibabel.Nifti1Image(data_warpx, None, hdr_warpx)
    file_name = 'warpx_demi.nii.gz'
    nibabel.save(img,file_name)

    img = nibabel.Nifti1Image(data_warpy, None, hdr_warpy)
    file_name = 'warpy_demi.nii.gz'
    nibabel.save(img,file_name)

    img = nibabel.Nifti1Image(data_warpz, None, hdr_warpz)
    file_name = 'warpz_demi.nii.gz'
    nibabel.save(img,file_name)

    img = nibabel.Nifti1Image(data_invwarpx, None, hdr_invwarpx)
    file_name = 'invwarpx_demi.nii.gz'
    nibabel.save(img,file_name)

    img = nibabel.Nifti1Image(data_invwarpy, None, hdr_invwarpy)
    file_name = 'invwarpy_demi.nii.gz'
    nibabel.save(img,file_name)

    img = nibabel.Nifti1Image(data_invwarpz, None, hdr_invwarpz)
    file_name = 'invwarpz_demi.nii.gz'
    nibabel.save(img,file_name)
    print 'Copy ..'
    # copy transform
    status,output = sct.run('isct_c3d l2rwarpx.nii.gz warpx_demi.nii.gz -copy-transform -o warpx_demi.nii.gz')
    status,output = sct.run('isct_c3d l2rwarpy.nii.gz warpy_demi.nii.gz -copy-transform -o warpy_demi.nii.gz')
    status,output = sct.run('isct_c3d l2rwarpz.nii.gz warpz_demi.nii.gz -copy-transform -o warpz_demi.nii.gz')
    status,output = sct.run('isct_c3d l2rinvwarpx.nii.gz invwarpx_demi.nii.gz -copy-transform -o invwarpx_demi.nii.gz')
    status,output = sct.run('isct_c3d l2rinvwarpy.nii.gz invwarpy_demi.nii.gz -copy-transform -o invwarpy_demi.nii.gz')
    status,output = sct.run('isct_c3d l2rinvwarpz.nii.gz invwarpz_demi.nii.gz -copy-transform -o invwarpz_demi.nii.gz')
    
    # combine warping fields
    print 'Combine ..'
    sct.run('isct_c3d warpx_demi.nii.gz warpy_demi.nii.gz warpz_demi.nii.gz -omc 3 warpl2r_demi.nii.gz')
    sct.run('isct_c3d invwarpx_demi.nii.gz invwarpy_demi.nii.gz invwarpz_demi.nii.gz -omc 3 invwarpl2r_demi.nii.gz')
    
    #warpl2r_demi.nii.gz invwarpl2r_demi.nii.gz
    
    # apply demi warping fields
    sct.run('sct_apply_transfo -i left.nii.gz -d left.nii.gz -w warpl2r_demi.nii.gz -o left_demi.nii.gz')
    sct.run('sct_apply_transfo -i mirror_right.nii.gz -d mirror_right.nii.gz -w invwarpl2r_demi.nii.gz -o mirror_right_demi.nii.gz')
    
    #unmirror right
    
    demi_right = nibabel.load('mirror_right_demi.nii.gz')
    data_demi_right = demi_right.get_data()
    hdr_demi_right = demi_right.get_header()
    
    nx_r, ny_r, nz_r, nt_r, px_r, py_r, pz_r, pt_r = sct.get_dimension('mirror_right_demi.nii.gz')
    
    unmirror_right = data_demi_right*0

    for i in xrange(nx_r):
        for j in xrange(ny_r):
            for k in xrange(nz_r):

                unmirror_right[i,j,k] = data_demi_right[(nx_r-1)-i,j,k]
    
    print '\nSave volume ...'
    
    img = nibabel.Nifti1Image(unmirror_right, None, hdr_right)
    file_name = 'un_mirror_right.nii.gz'
    nibabel.save(img,file_name)
    
    
    sct.run('fslmaths left_demi.nii.gz -add un_mirror_right.nii.gz symetrize_template.nii.gz')
Example #41
def do_preprocessing(contrast):

    # Loop across subjects
    for i in range(0,len(SUBJECTS_LIST)):
        subject = SUBJECTS_LIST[i][0]

        # Should check all inputs before starting the processing of the data

        # Create and go to output folder
        print '\nCreate -if not existing- and go to output folder '+ PATH_OUTPUT + '/subjects/'+subject+'/'+contrast
        if not os.path.isdir(PATH_OUTPUT + '/subjects/'+subject):
            os.makedirs(PATH_OUTPUT + '/subjects/'+subject)
        if not os.path.isdir(PATH_OUTPUT + '/subjects/'+subject+'/'+contrast):
            os.makedirs(PATH_OUTPUT + '/subjects/'+subject+'/'+contrast)
        os.chdir(PATH_OUTPUT + '/subjects/'+subject+'/'+contrast)

        # convert to nii
        print '\nChecking if dicoms have already been imported...'
        list_file = os.listdir(PATH_OUTPUT + '/subjects/'+subject+'/'+contrast)
        if 'data.nii.gz' not in list_file:
            print '\nImporting dicoms and converting to nii...'
            if contrast =='T1':
                sct.run('dcm2nii -o . -r N ' + SUBJECTS_LIST[i][1] + '/*.dcm')
            if contrast =='T2':
                sct.run('dcm2nii -o . -r N ' + SUBJECTS_LIST[i][2] + '/*.dcm')

            # change file name
            print '\nChanging file name to data.nii.gz...'
            sct.run('mv *.nii.gz data.nii.gz')

        # Convert to RPI
        # Input:
        # - data.nii.gz
        # Output:
        # - data_RPI.nii.gz
        print '\nConverting to RPI...'
        sct.run('sct_orientation -i data.nii.gz -s RPI')

        # Get info from txt file
        print '\nRecover info from text file: ' + PATH_INFO + '/' + contrast + '/' + subject + '/crop.txt'
        file_name = 'crop.txt'
        os.chdir(PATH_INFO + '/' + contrast + '/' + subject)

        file_results = open(PATH_INFO + '/' + contrast + '/' +subject+ '/' +file_name, 'r')
        ymin_anatomic = None
        ymax_anatomic = None
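        # crop.txt is expected to contain one comma-separated line:
        # zmin_anatomic, zmax_anatomic, zmin_seg, zmax_seg[, ymin_anatomic, ymax_anatomic]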
        for line in file_results:
            line_list = line.split(',')
            zmin_anatomic = line.split(',')[0]
            zmax_anatomic = line.split(',')[1]
            zmin_seg = line.split(',')[2]
            zmax_seg = line.split(',')[3]
            if len(line_list)==6:
                ymin_anatomic = line.split(',')[4]
                ymax_anatomic = line.split(',')[5]
        file_results.close()

        os.chdir(PATH_OUTPUT + '/subjects/'+subject+ '/' + contrast)

        # Crop image
        print '\nCropping image at L2-L3 and a little above brainstem...'
        if ymin_anatomic is None and ymax_anatomic is None:
            sct.run('sct_crop_image -i data_RPI.nii.gz -o data_RPI_crop.nii.gz -dim 2 -start ' + zmin_anatomic + ' -end ' + zmax_anatomic )
        else: sct.run('sct_crop_image -i data_RPI.nii.gz -o data_RPI_crop.nii.gz -dim 1,2 -start ' + ymin_anatomic +','+zmin_anatomic+ ' -end ' + ymax_anatomic+','+zmax_anatomic )

        # propseg
        # input:
        # - data_RPI_crop.nii.gz
        # - labels_propseg.nii.gz
        # output:
        # - data_RPI_crop_seg.nii.gz
        print '\nExtracting segmentation...'
        list_dir = os.listdir(PATH_INFO + '/' + contrast + '/'+subject)
        centerline_proseg = False
        for k in range(len(list_dir)):
            if list_dir[k] == 'centerline_propseg_RPI.nii.gz':
                centerline_proseg = True
        if centerline_proseg:
            if contrast == 'T1':
                sct.run('sct_propseg -i data_RPI_crop.nii.gz -t t1 -init-centerline ' + PATH_INFO + '/' + contrast + '/' + subject + '/centerline_propseg_RPI.nii.gz')
            if contrast == 'T2':
                sct.run('sct_propseg -i data_RPI_crop.nii.gz -t t2 -init-centerline ' + PATH_INFO + '/' + contrast + '/' + subject + '/centerline_propseg_RPI.nii.gz')
        else:
            if contrast == 'T1':
                sct.run('sct_propseg -i data_RPI_crop.nii.gz -t t1')
            if contrast == 'T2':
                sct.run('sct_propseg -i data_RPI_crop.nii.gz -t t2')

        # Erase 3 top and 3 bottom slices of the segmentation to avoid edge effects  (Done because propseg tends to diverge on edges)
        print '\nErasing 3 top and 3 bottom slices of the segmentation to avoid edge effects of propseg...'
        path_seg, file_seg, ext_seg = sct.extract_fname('data_RPI_crop_seg.nii.gz')
        image_seg = nibabel.load('data_RPI_crop_seg.nii.gz')
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('data_RPI_crop_seg.nii.gz')
        data_seg = image_seg.get_data()
        hdr_seg = image_seg.get_header()
        # List slices that contain non-zero values
        z_centerline = [iz for iz in range(0, nz, 1) if data_seg[:,:,iz].any() ]
        for k in range(0,3):
            data_seg[:,:,z_centerline[-1]-k] = 0
            if z_centerline[0]+k < nz:
                data_seg[:,:,z_centerline[0]+k] = 0
        img_seg = nibabel.Nifti1Image(data_seg, None, hdr_seg)
        nibabel.save(img_seg, file_seg + '_mod' + ext_seg)

        # crop segmentation (but keep same dimension)
        # input:
        # - data_crop_denoised_seg_mod.nii.gz
        # - crop.txt
        # output:
        # - data_crop_denoised_seg_mod_crop.nii.gz
        print '\nCropping segmentation...'
        if zmax_seg == 'max':
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('data_RPI_crop_seg.nii.gz')
            sct.run('sct_crop_image -i data_RPI_crop_seg_mod.nii.gz -o data_RPI_crop_seg_mod_crop.nii.gz -start ' + zmin_seg + ' -end ' + str(nz) + ' -dim 2 -b 0')
        else: sct.run('sct_crop_image -i data_RPI_crop_seg_mod.nii.gz -o data_RPI_crop_seg_mod_crop.nii.gz -start ' + zmin_seg + ' -end ' + zmax_seg + ' -dim 2 -b 0')

        # Concatenate segmentation and labels_updown if labels_updown is provided. If not, concatenate the segmentation and centerline_propseg_RPI.
        print '\nConcatenating segmentation and label files...'
        labels_updown = False
        list_file_info = os.listdir(PATH_INFO+ '/' + contrast + '/' + subject)
        for k in range(0,len(list_file_info)):
            if list_file_info[k] == 'labels_updown.nii.gz':
                labels_updown = True
        if not centerline_proseg and not labels_updown:
            print '\nERROR: No label file centerline_propseg_RPI.nii.gz or labels_updown.nii.gz in '+PATH_INFO+ '/' + contrast + '/' + subject +'. There must be at least one. Check '+ path_sct+'/dev/template_preprocessing/Readme.md for necessary inputs.'
            sys.exit(2)
        if labels_updown:
            sct.run('fslmaths data_RPI_crop_seg_mod_crop.nii.gz -add '+ PATH_INFO + '/' + contrast + '/' + subject + '/labels_updown.nii.gz seg_and_labels.nii.gz')
        else: sct.run('fslmaths data_RPI_crop_seg_mod_crop.nii.gz -add '+ PATH_INFO + '/' + contrast + '/' + subject + '/centerline_propseg_RPI.nii.gz seg_and_labels.nii.gz')


        # Creation of centerline from seg and labels for intensity normalization.
        print '\nExtracting centerline for intensity normalization...'
        sct.run('sct_get_centerline_from_labels -i seg_and_labels.nii.gz')

        # Normalize intensity using the centerline before straightening (to avoid problems near the brainstem caused by a bad centerline)
        print '\nNormalizing intensity...'
        sct.run('sct_normalize.py -i data_RPI_crop.nii.gz -c generated_centerline.nii.gz')

        # straighten image using the concatenation of the segmentation and the labels
        # function: sct_straighten_spinalcord (option: nurbs)
        # input:
        # - data_crop_normalized.nii.gz
        # output:
        # - warp_curve2straight.nii.gz
        # - data_RPI_crop_normalized_straight.nii.gz
        print '\nStraightening image using centerline...'
        cmd_straighten = ('sct_straighten_spinalcord -i data_RPI_crop_normalized.nii.gz -c ' + PATH_OUTPUT + '/subjects/' + subject + '/' + contrast + '/seg_and_labels.nii.gz -a nurbs -o data_RPI_crop_normalized_straight.nii.gz')
        sct.printv(cmd_straighten)
        os.system(cmd_straighten)

        # # # normalize intensity
        # print '\nNormalizing intensity of the straightened image...'
        # sct.run('sct_normalize.py -i data_RPI_crop_straight.nii.gz')

        # Crop labels_vertebral file
        print '\nCropping labels_vertebral file...'
        if ymin_anatomic is None and ymax_anatomic is None:
            sct.run('sct_crop_image -i '+PATH_INFO + '/' + contrast + '/' + subject+ '/labels_vertebral.nii.gz -o labels_vertebral_crop.nii.gz -start ' + zmin_anatomic + ' -end ' + zmax_anatomic + ' -dim 2')
        else: sct.run('sct_crop_image -i '+PATH_INFO + '/' + contrast + '/' + subject+ '/labels_vertebral.nii.gz -o labels_vertebral_crop.nii.gz -start ' + ymin_anatomic+','+zmin_anatomic + ' -end ' + ymax_anatomic+','+ zmax_anatomic + ' -dim 1,2')
        # Dilate labels from labels_vertebral file before straightening
        print '\nDilating labels from labels_vertebral file...'
        sct.run('fslmaths '+ PATH_OUTPUT + '/subjects/' + subject+ '/' + contrast + '/labels_vertebral_crop.nii.gz -dilF labels_vertebral_dilated.nii.gz')

        # apply straightening to labels_vertebral_dilated.nii.gz and to seg_and_labels.nii.gz
        # function: sct_apply_transfo
        # input:
        # - labels_vertebral_dilated.nii.gz
        # - warp_curve2straight.nii.gz
        # output:
        # - labels_vertebral_dilated_reg.nii.gz
        print '\nApplying straightening to labels_vertebral_dilated.nii.gz...'
        sct.run('sct_apply_transfo -i labels_vertebral_dilated.nii.gz -d data_RPI_crop_normalized_straight.nii.gz -w warp_curve2straight.nii.gz -x nn')

        # Select center of mass of labels volume due to past dilatation
        # REMOVE IF NOT REQUIRED
        print '\nSelecting center of mass of labels volume due to past dilatation...'
        sct.run('sct_label_utils -i labels_vertebral_dilated_reg.nii.gz -o labels_vertebral_dilated_reg_2point.nii.gz -t cubic-to-point')

        # Apply straightening to seg_and_labels.nii.gz
        print '\nApplying transfo to seg_and_labels.nii.gz ...'
        sct.run('sct_apply_transfo -i seg_and_labels.nii.gz -d data_RPI_crop_normalized_straight.nii.gz -w warp_curve2straight.nii.gz -x nn')

        # Calculate the extreme non-zero points of the straightened centerline file to crop the image one last time
        file = nibabel.load('seg_and_labels_reg.nii.gz')
        data_c = file.get_data()

        X,Y,Z = (data_c>0).nonzero()

        z_max = max(Z)

        z_min = min(Z)

        # Crop image one last time
        print '\nCrop image one last time and create cross to push into template space...'
        sct.run('sct_crop_image -i data_RPI_crop_normalized_straight.nii.gz -o data_RPI_crop_normalized_straight_crop.nii.gz -dim 2 -start '+ str(z_min)+' -end '+ str(z_max))

        # Crop labels_vertebral_reg.nii.gz
        print '\nCrop labels_vertebral_reg.nii.gz and use cross to push into template space...'
        sct.run('sct_crop_image -i labels_vertebral_dilated_reg_2point.nii.gz -o labels_vertebral_dilated_reg_2point_crop.nii.gz -dim 2 -start '+ str(z_min)+' -end '+ str(z_max))
Example #42
    def apply(self):
        # Initialization
        fname_src = self.input_filename  # source image (moving)
        fname_warp_list = self.warp_input  # list of warping fields
        fname_dest = self.output_filename  # destination image (fix)
        fname_src_reg = self.source_reg
        verbose = self.verbose
        remove_temp_files = self.remove_temp_files
        fsloutput = 'export FSLOUTPUTTYPE=NIFTI; '  # for faster processing, all outputs are in NIFTI
        crop_reference = self.crop  # if 1, set the image to zero everywhere outside the warping field; if 2, actually crop it

        # Parameters for debug mode
        if self.debug:
            print '\n*** WARNING: DEBUG MODE ON ***\n'
            # get path of the testing data
            status, path_sct_data = commands.getstatusoutput('echo $SCT_TESTING_DATA_DIR')
            fname_src = path_sct_data+'/template/MNI-Poly-AMU_T2.nii.gz'
            fname_warp_list = path_sct_data+'/t2/warp_template2anat.nii.gz'
            fname_dest = path_sct_data+'/t2/t2.nii.gz'
            verbose = 1

        interp = sct.get_interpolation('isct_antsApplyTransforms', self.interp)

        # Parse list of warping fields
        sct.printv('\nParse list of warping fields...', verbose)
        use_inverse = []
        fname_warp_list_invert = []
        # fname_warp_list = fname_warp_list.replace(' ', '')  # remove spaces
        # fname_warp_list = fname_warp_list.split(",")  # parse with comma
        for i in range(len(fname_warp_list)):
            # Check if inverse matrix is specified with '-' at the beginning of file name
            if fname_warp_list[i].find('-') == 0:
                use_inverse.append('-i ')
                fname_warp_list[i] = fname_warp_list[i][1:]  # remove '-'
            else:
                use_inverse.append('')
            sct.printv('  Transfo #'+str(i)+': '+use_inverse[i]+fname_warp_list[i], verbose)
            fname_warp_list_invert.append(use_inverse[i]+fname_warp_list[i])

        # need to check if last warping field is an affine transfo
        isLastAffine = False
        path_fname, file_fname, ext_fname = sct.extract_fname(fname_warp_list_invert[-1])
        if ext_fname in ['.txt','.mat']:
            isLastAffine = True

        # Check file existence
        sct.printv('\nCheck file existence...', verbose)
        sct.check_file_exist(fname_src, self.verbose)
        sct.check_file_exist(fname_dest, self.verbose)
        for i in range(len(fname_warp_list)):
            # check if file exist
            sct.check_file_exist(fname_warp_list[i], self.verbose)

        # check if destination file is 3d
        sct.check_if_3d(fname_dest)

        # N.B. Here we take the inverse of the warp list, because sct_WarpImageMultiTransform concatenates in the reverse order
        fname_warp_list_invert.reverse()

        # Extract path, file and extension
        path_src, file_src, ext_src = sct.extract_fname(fname_src)
        path_dest, file_dest, ext_dest = sct.extract_fname(fname_dest)

        # Get output folder and file name
        if fname_src_reg == '':
            path_out = ''  # output in user's current directory
            file_out = file_src+'_reg'
            ext_out = ext_src
            fname_out = path_out+file_out+ext_out
        else:
            fname_out = fname_src_reg

        # Get dimensions of data
        sct.printv('\nGet dimensions of data...', verbose)
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_src)
        sct.printv('  ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz)+ ' x ' + str(nt), verbose)

        # if 3d
        if nt == 1:
            # Apply transformation
            sct.printv('\nApply transformation...', verbose)
            sct.run('isct_antsApplyTransforms -d 3 -i '+fname_src+' -o '+fname_out+' -t '+' '.join(fname_warp_list_invert)+' -r '+fname_dest+interp, verbose)

        # if 4d, loop across the T dimension
        else:
            # create temporary folder
            sct.printv('\nCreate temporary folder...', verbose)
            path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1)
            sct.run('mkdir '+path_tmp, verbose)

            # Copying input data to tmp folder
            # NB: cannot use c3d here because c3d cannot convert 4D data.
            sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose)
            sct.run('cp '+fname_src+' '+path_tmp+'data'+ext_src, verbose)
            sct.run('cp '+fname_dest+' '+path_tmp+'dest'+ext_dest, verbose)
            for i,warp in enumerate(fname_warp_list_invert):
                sct.run('cp ' + warp + ' ' + path_tmp + warp, verbose)
            # go to tmp folder
            os.chdir(path_tmp)
            try:
                # convert to nii format
                sct.run('fslchfiletype NIFTI data', verbose)

                # split along T dimension
                sct.printv('\nSplit along T dimension...', verbose)
                sct.run(fsloutput+'fslsplit data data_T', verbose)
                # apply transfo
                sct.printv('\nApply transformation to each 3D volume...', verbose)
                for it in range(nt):
                    file_data_split = 'data_T'+str(it).zfill(4)+'.nii'
                    file_data_split_reg = 'data_reg_T'+str(it).zfill(4)+'.nii'
                    sct.run('isct_antsApplyTransforms -d 3 -i '+file_data_split+' -o '+file_data_split_reg+' -t '+' '.join(fname_warp_list_invert)+' -r dest'+ext_dest+interp, verbose)

                # Merge files back
                sct.printv('\nMerge file back...', verbose)
                #cmd = fsloutput+'fslmerge -t '+fname_out
                cmd = 'fslmerge -t '+fname_out
                for it in range(nt):
                    file_data_split_reg = 'data_reg_T'+str(it).zfill(4)+'.nii'
                    cmd = cmd+' '+file_data_split_reg
                sct.run(cmd, verbose)

            except Exception, e:
                raise e
            # Copy result to parent folder
            sct.run('cp ' + fname_out + ' ../' + fname_out)

            # come back to parent folder
            os.chdir('..')

            # Delete temporary folder if specified
            if int(remove_temp_files):
                sct.printv('\nRemove temporary files...', verbose)
                sct.run('rm -rf '+path_tmp, verbose)
def do_preprocessing(contrast):
    # Create folder to gather all labels_vertebral.nii.gz files
    if not os.path.isdir(PATH_OUTPUT + '/'+'labels_vertebral'):
        os.makedirs(PATH_OUTPUT + '/'+'labels_vertebral')

    # Loop across subjects
    for i in range(0,len(SUBJECTS_LIST)):
        subject = SUBJECTS_LIST[i][0]

        # Should check all inputs before starting the processing of the data

        # Create and go to output folder
        print '\nCreate -if not existing- and go to output folder '+ PATH_OUTPUT + '/subjects/'+subject+'/'+contrast
        if not os.path.isdir(PATH_OUTPUT + '/subjects/'+subject):
            os.makedirs(PATH_OUTPUT + '/subjects/'+subject)
        if not os.path.isdir(PATH_OUTPUT + '/subjects/'+subject+'/'+contrast):
            os.makedirs(PATH_OUTPUT + '/subjects/'+subject+'/'+contrast)
        os.chdir(PATH_OUTPUT + '/subjects/'+subject+'/'+contrast)

        # convert to nii
        print '\nChecking if dicoms have already been imported...'
        list_file = os.listdir(PATH_OUTPUT + '/subjects/'+subject+'/'+contrast)
        if 'data.nii.gz' not in list_file:
            print '\nImporting dicoms and converting to nii...'
            if contrast =='T1':
                sct.run('dcm2nii -o . -r N ' + SUBJECTS_LIST[i][1] + '/*.dcm')
            if contrast =='T2':
                sct.run('dcm2nii -o . -r N ' + SUBJECTS_LIST[i][2] + '/*.dcm')

            # change file name
            print '\nChanging file name to data.nii.gz...'
            sct.run('mv *.nii.gz data.nii.gz')

        # Convert to RPI
        # Input:
        # - data.nii.gz
        # - data_RPI.nii.gz
        print '\nConverting to RPI...'
        sct.run('sct_orientation -i data.nii.gz -s RPI')

        # Get info from txt file
        print '\nRecovering info from text file: ' + PATH_INFO + '/' + contrast + '/' + subject + '/crop.txt'
        file_name = 'crop.txt'
        os.chdir(PATH_INFO + '/' + contrast + '/' + subject)

        file_results = open(PATH_INFO + '/' + contrast + '/' +subject+ '/' +file_name, 'r')
        ymin_anatomic = None
        ymax_anatomic = None
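        # each line of crop.txt is comma-separated: zmin_anatomic,zmax_anatomic,zmin_seg,zmax_seg[,ymin_anatomic,ymax_anatomic]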
        for line in file_results:
            line_list = line.split(',')
            zmin_anatomic = line.split(',')[0]
            zmax_anatomic = line.split(',')[1]
            zmin_seg = line.split(',')[2]
            zmax_seg = line.split(',')[3]
            if len(line_list)==6:
                ymin_anatomic = line.split(',')[4]
                ymax_anatomic = line.split(',')[5]
        file_results.close()

        os.chdir(PATH_OUTPUT + '/subjects/'+subject+ '/' + contrast)

        # Crop image
        print '\nCropping image at L2-L3 and a little above brainstem...'
        if ymin_anatomic == None and ymax_anatomic == None:
            sct.run('sct_crop_image -i data_RPI.nii.gz -o data_RPI_crop.nii.gz -dim 2 -start ' + zmin_anatomic + ' -end ' + zmax_anatomic )
        else: sct.run('sct_crop_image -i data_RPI.nii.gz -o data_RPI_crop.nii.gz -dim 1,2 -start ' + ymin_anatomic +','+zmin_anatomic+ ' -end ' + ymax_anatomic+','+zmax_anatomic )

        # # propseg
        # # input:
        # # - data__crop_denoised.nii.gz
        # # - labels_propseg.nii.gz
        # # output:
        # # - data_crop_denoised_seg.nii.gz
        # print '\nExtracting segmentation...'
        # list_dir = os.listdir(PATH_INFO + '/' + contrast + '/'+subject)
        # centerline_proseg = False
        # for k in range(len(list_dir)):
        #     if list_dir[k] == 'centerline_propseg_RPI.nii.gz':
        #         centerline_proseg = True
        # if centerline_proseg == True:
        #     if contrast == 'T1':
        #         sct.run('sct_propseg -i data_RPI_crop.nii.gz -t t1 -init-centerline ' + PATH_INFO + '/' + contrast + '/' + subject + '/centerline_propseg_RPI.nii.gz')
        #     if contrast == 'T2':
        #         sct.run('sct_propseg -i data_RPI_crop.nii.gz -t t2 -init-centerline ' + PATH_INFO + '/' + contrast + '/' + subject + '/centerline_propseg_RPI.nii.gz')
        # else:
        #     if contrast == 'T1':
        #         sct.run('sct_propseg -i data_RPI_crop.nii.gz -t t1')
        #     if contrast == 'T2':
        #         sct.run('sct_propseg -i data_RPI_crop.nii.gz -t t2')
        #
        # # Erase 3 top and 3 bottom slices of the segmentation to avoid edge effects  (Done because propseg tends to diverge on edges)
        # print '\nErasing 3 top and 3 bottom slices of the segmentation to avoid edge effects of propseg...'
        # path_seg, file_seg, ext_seg = sct.extract_fname('data_RPI_crop_seg.nii.gz')
        # image_seg = nibabel.load('data_RPI_crop_seg.nii.gz')
        # nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('data_RPI_crop_seg.nii.gz')
        # data_seg = image_seg.get_data()
        # hdr_seg = image_seg.get_header()
        #    # List slices that contain non zero values
        # z_centerline = [iz for iz in range(0, nz, 1) if data_seg[:,:,iz].any() ]
        # for k in range(0,3):
        #     data_seg[:,:,z_centerline[-1]-k] = 0
        #     if z_centerline[0]+k < nz:
        #         data_seg[:,:,z_centerline[0]+k] = 0
        # img_seg = nibabel.Nifti1Image(data_seg, None, hdr_seg)
        # nibabel.save(img_seg, file_seg + '_mod' + ext_seg)
        #
        # # crop segmentation (but keep same dimension)
        # # input:
        # # - data_crop_denoised_seg.nii.gz
        # # - crop.txt
        # # output:
        # # - data_crop_denoised_seg_crop.nii.gz
        # print '\nCropping segmentation...'
        # if zmax_seg == 'max':
        #     nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('data_RPI_crop_seg.nii.gz')
        #     sct.printv('sct_crop_image -i data_RPI_crop_seg_mod.nii.gz -o data_RPI_crop_seg_mod_crop.nii.gz -start ' + zmin_seg + ' -end ' + str(nz) + ' -dim 2 -b 0')
        #     os.system('sct_crop_image -i data_RPI_crop_seg_mod.nii.gz -o data_RPI_crop_seg_mod_crop.nii.gz -start ' + zmin_seg + ' -end ' + str(nz) + ' -dim 2 -b 0')
        # else: sct.run('sct_crop_image -i data_RPI_crop_seg_mod.nii.gz -o data_RPI_crop_seg_mod_crop.nii.gz -start ' + zmin_seg + ' -end ' + zmax_seg + ' -dim 2 -b 0')
        #
        # # Concatenate segmentation and labels_updown if labels_updown is inputed. If not, it concatenates the segmentation and centerline_propseg_RPI.
        # print '\nConcatenating segmentation and label files...'
        # labels_updown = False
        # list_file_info = os.listdir(PATH_INFO+ '/' + contrast + '/' + subject)
        # for k in range(0,len(list_file_info)):
        #     if list_file_info[k] == 'labels_updown.nii.gz':
        #         labels_updown = True
        # if centerline_proseg == False and labels_updown == False:
        #     print '\nERROR: No label file centerline_propseg_RPI.nii.gz or labels_updown.nii.gz in '+PATH_INFO+ '/' + contrast + '/' + subject +'. There must be at least one. Check '+ path_sct+'/dev/template_preprocessing/Readme.md for necessary inputs.'
        #     sys.exit(2)
        # if labels_updown:
        #     sct.run('fslmaths data_RPI_crop_seg_mod_crop.nii.gz -add '+ PATH_INFO + '/' + contrast + '/' + subject + '/labels_updown.nii.gz seg_and_labels.nii.gz')
        # else: sct.run('fslmaths data_RPI_crop_seg_mod_crop.nii.gz -add '+ PATH_INFO + '/' + contrast + '/' + subject + '/centerline_propseg_RPI.nii.gz seg_and_labels.nii.gz')
        #
        #
        # # Add creation of centerline from seg and labels
        # print '\nExtracting centerline for intensity normalization...'
        # sct.run('sct_get_centerline_from_labels -i seg_and_labels.nii.gz')
        # # Add normalisation of intensity with centerline before straightening (pb of brainstem with bad centerline)
        # print '\nNormalizing intensity...'
        # sct.run('sct_normalize.py -i data_RPI_crop.nii.gz -c generated_centerline.nii.gz')



        # straighten image using the concatenation of the segmentation and the labels
        # function: sct_straighten_spinalcord (option: nurbs)
        # input:
        # - data_crop_denoised.nii.gz
        # - centerline.nii.gz
        # output:
        # - warp_curve2straight.nii.gz
        # - data_crop_denoised_straight.nii.gz
        print '\nStraightening image using centerline...'
        cmd_straighten = ('sct_straighten_spinalcord -i data_RPI_crop_normalized.nii.gz -c ' + PATH_OUTPUT + '/subjects/' + subject + '/' + contrast + '/seg_and_labels.nii.gz -a nurbs -o data_RPI_crop_normalized_straight.nii.gz')
        sct.printv(cmd_straighten)
        os.system(cmd_straighten)

        # # # normalize intensity
        # print '\nNormalizing intensity of the straightened image...'
        # sct.run('sct_normalize.py -i data_RPI_crop_straight.nii.gz')
        #
        # Crop labels_vertebral file
        print '\nCropping labels_vertebral file...'
        if ymin_anatomic == None and ymax_anatomic == None:
            sct.run('sct_crop_image -i '+PATH_INFO + '/' + contrast + '/' + subject+ '/labels_vertebral.nii.gz -o labels_vertebral_crop.nii.gz -start ' + zmin_anatomic + ' -end ' + zmax_anatomic + ' -dim 2')
        else: sct.run('sct_crop_image -i '+PATH_INFO + '/' + contrast + '/' + subject+ '/labels_vertebral.nii.gz -o labels_vertebral_crop.nii.gz -start ' + ymin_anatomic+','+zmin_anatomic + ' -end ' + ymax_anatomic+','+ zmax_anatomic + ' -dim 1,2')
        # Dilate labels from labels_vertebral file
        print '\nDilating labels from labels_vertebral file...'
        sct.run('fslmaths '+ PATH_OUTPUT + '/subjects/' + subject+ '/' + contrast + '/labels_vertebral_crop.nii.gz -dilF labels_vertebral_dilated.nii.gz')

        # apply straightening to labels_vertebral_dilated.nii.gz and to seg_and_labels.nii.gz
        # function: sct_apply_transfo
        # input:
        # - labels_vertebral_dilated.nii.gz
        # - warp_curve2straight.nii.gz
        # output:
        # - labels_vertebral_dilated_reg.nii.gz
        print '\nApplying straightening to labels_vertebral_dilated.nii.gz...'
        sct.run('sct_apply_transfo -i labels_vertebral_dilated.nii.gz -d data_RPI_crop_normalized_straight.nii.gz -w warp_curve2straight.nii.gz -x nn')

        # Select center of mass of the labels (undo the previous dilation)
        # REMOVE IF NOT REQUIRED
        print '\nSelecting center of mass of the labels (undoing previous dilation)...'
        sct.run('sct_label_utils -i labels_vertebral_dilated_reg.nii.gz -o labels_vertebral_dilated_reg_2point.nii.gz -t cubic-to-point')

        # Apply straightening to seg_and_labels.nii.gz
        print '\nApplying straightening to seg_and_labels.nii.gz...'
        sct.run('sct_apply_transfo -i seg_and_labels.nii.gz -d data_RPI_crop_normalized_straight.nii.gz -w warp_curve2straight.nii.gz -x nn')

        # Calculate the extreme non-zero points of the straightened centerline file
        file = nibabel.load('seg_and_labels_reg.nii.gz')
        data_c = file.get_data()
        hdr = file.get_header()
        # Get center of mass of the centerline
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('seg_and_labels_reg.nii.gz')
        z_centerline = [iz for iz in range(0, nz, 1) if data_c[:,:,iz].any() ]
        nz_nonz = len(z_centerline)
        x_centerline = [0 for iz in range(0, nz_nonz, 1)]
        y_centerline = [0 for iz in range(0, nz_nonz, 1)]
        for iz in xrange(len(z_centerline)):
           x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(array(data_c[:,:,z_centerline[iz]]))
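        # NB: the per-slice centers of mass computed above are not used below; only z_min/z_max are needed for the final crop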

        X,Y,Z = (data_c>0).nonzero()

        x_max,y_max = (data_c[:,:,max(Z)]).nonzero()
        x_max = x_max[0]
        y_max = y_max[0]
        z_max = max(Z)

        x_min,y_min = (data_c[:,:,min(Z)]).nonzero()
        x_min = x_min[0]
        y_min = y_min[0]
        z_min = min(Z)

        # Crop image one last time
        print '\nCropping image one last time and creating cross to push into template space...'
        sct.run('sct_crop_image -i data_RPI_crop_normalized_straight.nii.gz -o data_RPI_crop_normalized_straight_crop.nii.gz -dim 2 -start '+ str(z_min)+' -end '+ str(z_max))

        # Crop labels_vertebral_reg.nii.gz
        print '\nCropping labels_vertebral_reg.nii.gz and using cross to push into template space...'
        sct.run('sct_crop_image -i labels_vertebral_dilated_reg_2point.nii.gz -o labels_vertebral_dilated_reg_2point_crop.nii.gz -dim 2 -start '+ str(z_min)+' -end '+ str(z_max))
def main():

    # Initialization
    fname_src = ''  # source image (moving)
    fname_warp_list = ''  # list of warping fields
    fname_dest = ''  # destination image (fix)
    fname_src_reg = ''
    verbose = 1
    fsloutput = 'export FSLOUTPUTTYPE=NIFTI; '  # for faster processing, all outputs are in NIFTI
    crop_reference = 0 # if = 1, put 0 everywhere around warping field, if = 2, real crop

    # Parameters for debug mode
    if param.debug:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        # get path of the testing data
        status, path_sct_data = commands.getstatusoutput('echo $SCT_TESTING_DATA_DIR')
        fname_src = path_sct_data+'/template/MNI-Poly-AMU_T2.nii.gz'
        fname_warp_list = path_sct_data+'/t2/warp_template2anat.nii.gz'
        fname_dest = path_sct_data+'/t2/t2.nii.gz'
        verbose = 1
    else:
        # Check input parameters
        try:
            opts, args = getopt.getopt(sys.argv[1:], 'hi:d:o:v:w:x:c:')
        except getopt.GetoptError:
            usage()
        if not opts:
            usage()
        for opt, arg in opts:
            if opt == '-h':
                usage()
            elif opt in ('-i'):
                fname_src = arg
            elif opt in ('-d'):
                fname_dest = arg
            elif opt in ('-o'):
                fname_src_reg = arg
            elif opt in ('-x'):
                param.interp = arg
            elif opt in ('-v'):
                verbose = int(arg)
            elif opt in ('-w'):
                fname_warp_list = arg
            elif opt in ('-c'):
                crop_reference = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_src == '' or fname_warp_list == '' or fname_dest == '':
        usage()

    # get the right interpolation field depending on method
    interp = sct.get_interpolation('isct_antsApplyTransforms', param.interp)

    # Parse list of warping fields
    sct.printv('\nParse list of warping fields...', verbose)
    use_inverse = []
    fname_warp_list_invert = []
    fname_warp_list = fname_warp_list.replace(' ', '')  # remove spaces
    fname_warp_list = fname_warp_list.split(",")  # parse with comma
    for i in range(len(fname_warp_list)):
        # Check if inverse matrix is specified with '-' at the beginning of file name
        if fname_warp_list[i].find('-') == 0:
            use_inverse.append('-i ')
            fname_warp_list[i] = fname_warp_list[i][1:]  # remove '-'
        else:
            use_inverse.append('')
        sct.printv('  Transfo #'+str(i)+': '+use_inverse[i]+fname_warp_list[i], verbose)
        fname_warp_list_invert.append(use_inverse[i]+fname_warp_list[i])

    # need to check if last warping field is an affine transfo
    isLastAffine = False
    path_fname, file_fname, ext_fname = sct.extract_fname(fname_warp_list_invert[-1])
    if ext_fname in ['.txt','.mat']:
        isLastAffine = True

    # Check file existence
    sct.printv('\nCheck file existence...', verbose)
    sct.check_file_exist(fname_src)
    sct.check_file_exist(fname_dest)
    for i in range(len(fname_warp_list)):
        # check if file exist
        sct.check_file_exist(fname_warp_list[i])
    for i in range(len(fname_warp_list_invert)):
        sct.check_file_exist(fname_warp_list_invert[i])

    # check if destination file is 3d
    sct.check_if_3d(fname_dest)

    # N.B. Here we take the inverse of the warp list, because sct_WarpImageMultiTransform concatenates in the reverse order
    fname_warp_list_invert.reverse()

    # Extract path, file and extension
    # path_src, file_src, ext_src = sct.extract_fname(os.path.abspath(fname_src))
    # fname_dest = os.path.abspath(fname_dest)
    path_src, file_src, ext_src = sct.extract_fname(fname_src)

    # Get output folder and file name
    if fname_src_reg == '':
        path_out = ''  # output in user's current directory
        file_out = file_src+'_reg'
        ext_out = ext_src
        fname_out = path_out+file_out+ext_out
    else:
    #     path_out, file_out, ext_out = sct.extract_fname(fname_src_reg)
        fname_out = fname_src_reg

    # Get dimensions of data
    sct.printv('\nGet dimensions of data...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_src)
    sct.printv('  ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz)+ ' x ' + str(nt), verbose)

    # if 3d
    if nt == 1:
        # Apply transformation
        sct.printv('\nApply transformation...', verbose)
        sct.run('isct_antsApplyTransforms -d 3 -i '+fname_src+' -o '+fname_out+' -t '+' '.join(fname_warp_list_invert)+' -r '+fname_dest+interp, verbose)

    # if 4d, loop across the T dimension
    else:
        # create temporary folder
        sct.printv('\nCreate temporary folder...', verbose)
        path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1)
        sct.run('mkdir '+path_tmp, verbose)

        # Copying input data to tmp folder
        # NB: cannot use c3d here because c3d cannot convert 4D data.
        sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose)
        sct.run('cp '+fname_src+' '+path_tmp+'data'+ext_src, verbose)
        # go to tmp folder
        os.chdir(path_tmp)
        # convert to nii format
        sct.run('fslchfiletype NIFTI data', verbose)

        # split along T dimension
        sct.printv('\nSplit along T dimension...', verbose)
        sct.run(fsloutput+'fslsplit data data_T', verbose)
        # apply transfo
        sct.printv('\nApply transformation to each 3D volume...', verbose)
        for it in range(nt):
            file_data_split = 'data_T'+str(it).zfill(4)+'.nii'
            file_data_split_reg = 'data_reg_T'+str(it).zfill(4)+'.nii'
            sct.run('isct_antsApplyTransforms -d 3 -i '+file_data_split+' -o '+file_data_split_reg+' -t '+' '.join(fname_warp_list_invert)+' -r '+fname_dest+interp, verbose)
        # Merge files back
        sct.printv('\nMerge file back...', verbose)
        cmd = fsloutput+'fslmerge -t '+fname_out
        for it in range(nt):
            file_data_split_reg = 'data_reg_T'+str(it).zfill(4)+'.nii'
            cmd = cmd+' '+file_data_split_reg
        sct.run(cmd, param.verbose)
        # come back to parent folder
        os.chdir('..')

    # 2. crop the resulting image using dimensions from the warping field
    warping_field = fname_warp_list_invert[-1]
    # if last warping field is an affine transfo, we need to compute the space of the concatenate warping field:
    if isLastAffine:
        sct.printv('WARNING: the resulting image could have wrong apparent results. You should use an affine transformation as last transformation...',1,'warning')
    elif crop_reference == 1:
        sct.run('sct_crop_image -i '+fname_out+' -o '+fname_out+' -ref '+warping_field+' -b 0')
    elif crop_reference == 2:
        sct.run('sct_crop_image -i '+fname_out+' -o '+fname_out+' -ref '+warping_field)

    # display elapsed time
    sct.printv('\nDone! To view results, type:', verbose)
    sct.printv('fslview '+fname_dest+' '+fname_out+' &\n', verbose, 'info')
def main(segmentation_file=None,
         label_file=None,
         output_file_name=None,
         parameter="binary_centerline",
         remove_temp_files=1,
         verbose=0):

    #Process for a binary file as output:
    if parameter == "binary_centerline":

        # Binary_centerline: Process for only a segmentation file:
        if "-i" in arguments and "-l" not in arguments:
            # Extract path, file and extension
            segmentation_file = os.path.abspath(segmentation_file)
            path_data, file_data, ext_data = sct.extract_fname(
                segmentation_file)

            # create temporary folder
            path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
            sct.run('mkdir ' + path_tmp)

            # copy files into tmp folder
            sct.run('cp ' + segmentation_file + ' ' + path_tmp)

            # go to tmp folder
            os.chdir(path_tmp)

            # Change orientation of the input segmentation into RPI
            print '\nOrient segmentation image to RPI orientation...'
            fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data
            set_orientation(file_data + ext_data, 'RPI',
                            fname_segmentation_orient)

            # Extract orientation of the input segmentation
            orientation = get_orientation(file_data + ext_data)
            print '\nOrientation of segmentation image: ' + orientation

            # Get size of data
            print '\nGet dimensions data...'
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(
                fname_segmentation_orient)
            print '.. ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt)

            print '\nOpen segmentation volume...'
            file = nibabel.load(fname_segmentation_orient)
            data = file.get_data()
            hdr = file.get_header()

            # Extract min and max index in Z direction
            X, Y, Z = (data > 0).nonzero()
            min_z_index, max_z_index = min(Z), max(Z)
            x_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            y_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            z_centerline = [iz for iz in range(min_z_index, max_z_index + 1)]
            # Extract segmentation points and average per slice
            for iz in range(min_z_index, max_z_index + 1):
                x_seg, y_seg = (data[:, :, iz] > 0).nonzero()
                x_centerline[iz - min_z_index] = np.mean(x_seg)
                y_centerline[iz - min_z_index] = np.mean(y_seg)

            # serves no purpose
            for k in range(len(X)):
                data[X[k], Y[k], Z[k]] = 0

            print len(x_centerline)
            # Fit the centerline points with splines and return the new fitted coordinates
            #done with nurbs for now
            x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_centerline(
                x_centerline, y_centerline, z_centerline)
            # Create an image with the centerline
            for iz in range(min_z_index, max_z_index + 1):
                data[int(round(x_centerline_fit[iz - min_z_index])),
                     int(round(y_centerline_fit[iz - min_z_index])),
                     iz] = 1  # with nurbs fitting
                #data[round(x_centerline[iz-min_z_index]), round(y_centerline[iz-min_z_index]), iz] = 1             #without nurbs fitting

            # Write the centerline image in RPI orientation
            hdr.set_data_dtype('uint8')  # set imagetype to uint8
            print '\nWrite NIFTI volumes...'
            img = nibabel.Nifti1Image(data, None, hdr)
            if output_file_name != None:
                file_name = output_file_name
            else:
                file_name = file_data + '_centerline' + ext_data
            nibabel.save(img, 'tmp.centerline.nii')
            sct.generate_output_file('tmp.centerline.nii', file_name)

            del data

            # come back to parent folder
            os.chdir('..')

            # Change orientation of the output centerline into input orientation
            print '\nOrient centerline image to input orientation: ' + orientation
            set_orientation(path_tmp + '/' + file_name, orientation, file_name)

            # Remove temporary files
            if remove_temp_files:
                print('\nRemove temporary files...')
                sct.run('rm -rf ' + path_tmp)

            return file_name

        # Binary_centerline: Process for only a label file:
        if "-l" in arguments and "-i" not in arguments:
            file = os.path.abspath(label_file)
            path_data, file_data, ext_data = sct.extract_fname(file)

            file = nibabel.load(label_file)
            data = file.get_data()
            hdr = file.get_header()

            X, Y, Z = (data > 0).nonzero()
            Z_new = np.linspace(min(Z), max(Z), (max(Z) - min(Z) + 1))
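            # Z_new: one evenly spaced value per slice, from min(Z) to max(Z) inclusive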

            # sort X and Y arrays using Z
            X = [X[i] for i in Z[:].argsort()]
            Y = [Y[i] for i in Z[:].argsort()]
            Z = [Z[i] for i in Z[:].argsort()]

            #print X, Y, Z

            f1 = interpolate.UnivariateSpline(Z, X)
            f2 = interpolate.UnivariateSpline(Z, Y)

            X_fit = f1(Z_new)
            Y_fit = f2(Z_new)

            #print X_fit
            #print Y_fit

            if verbose == 1:
                import matplotlib.pyplot as plt

                plt.figure()
                plt.plot(Z_new, X_fit)
                plt.plot(Z, X, 'o', linestyle='None')
                plt.show()

                plt.figure()
                plt.plot(Z_new, Y_fit)
                plt.plot(Z, Y, 'o', linestyle='None')
                plt.show()

            data = data * 0
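            # the volume is zeroed so that only the fitted centerline voxels set below remain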

            for i in xrange(len(X_fit)):
                data[int(round(X_fit[i])), int(round(Y_fit[i])), int(Z_new[i])] = 1

            # Create NIFTI image
            print '\nSave volume ...'
            hdr.set_data_dtype('float32')  # set image type to float32
            img = nibabel.Nifti1Image(data, None, hdr)
            if output_file_name != None:
                file_name = output_file_name
            else:
                file_name = file_data + '_centerline' + ext_data
            # save volume
            nibabel.save(img, file_name)
            print '\nFile created : ' + file_name

            del data

        #### Binary_centerline: Process for a segmentation file and a label file:
        if "-l" in arguments and "-i" in arguments:

            ## Creation of a temporary file that will contain each centerline file of the process
            path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
            sct.run('mkdir ' + path_tmp)

            ##From label file create centerline image
            print '\nPROCESS PART 1: From label file create centerline image.'
            file_label = os.path.abspath(label_file)
            path_data_label, file_data_label, ext_data_label = sct.extract_fname(
                file_label)

            file_label = nibabel.load(label_file)

            #Copy label_file into temporary folder
            sct.run('cp ' + label_file + ' ' + path_tmp)

            data_label = file_label.get_data()
            hdr_label = file_label.get_header()

            if verbose == 1:
                from copy import copy
                data_label_to_show = copy(data_label)

            X, Y, Z = (data_label > 0).nonzero()
            Z_new = np.linspace(min(Z), max(Z), (max(Z) - min(Z) + 1))

            # sort X and Y arrays using Z
            X = [X[i] for i in Z[:].argsort()]
            Y = [Y[i] for i in Z[:].argsort()]
            Z = [Z[i] for i in Z[:].argsort()]

            #print X, Y, Z

            f1 = interpolate.UnivariateSpline(Z, X)
            f2 = interpolate.UnivariateSpline(Z, Y)

            X_fit = f1(Z_new)
            Y_fit = f2(Z_new)

            #print X_fit
            #print Y_fit

            if verbose == 1:
                import matplotlib.pyplot as plt

                plt.figure()
                plt.plot(Z_new, X_fit)
                plt.plot(Z, X, 'o', linestyle='None')
                plt.show()

                plt.figure()
                plt.plot(Z_new, Y_fit)
                plt.plot(Z, Y, 'o', linestyle='None')
                plt.show()

            data_label = data_label * 0

            for i in xrange(len(X_fit)):
                data_label[int(round(X_fit[i])), int(round(Y_fit[i])), int(Z_new[i])] = 1

            # Create NIFTI image
            print '\nSave volume ...'
            hdr_label.set_data_dtype('float32')  # set image type to float32
            img = nibabel.Nifti1Image(data_label, None, hdr_label)
            # save volume
            file_name_label = file_data_label + '_centerline' + ext_data_label
            nibabel.save(img, file_name_label)
            print '\nFile created : ' + file_name_label

            # copy files into tmp folder
            sct.run('cp ' + file_name_label + ' ' + path_tmp)
            # delete the file from the parent folder
            os.remove(file_name_label)
            del data_label

            ##From segmentation file create centerline image
            print '\nPROCESS PART 2: From segmentation file create centerline image.'
            # Extract path, file and extension
            segmentation_file = os.path.abspath(segmentation_file)
            path_data_seg, file_data_seg, ext_data_seg = sct.extract_fname(
                segmentation_file)

            # copy files into tmp folder
            sct.run('cp ' + segmentation_file + ' ' + path_tmp)

            # go to tmp folder
            os.chdir(path_tmp)

            # Change orientation of the input segmentation into RPI
            print '\nOrient segmentation image to RPI orientation...'
            fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data_seg
            set_orientation(file_data_seg + ext_data_seg, 'RPI',
                            fname_segmentation_orient)

            # Extract orientation of the input segmentation
            orientation = get_orientation(file_data_seg + ext_data_seg)
            print '\nOrientation of segmentation image: ' + orientation

            # Get size of data
            print '\nGet dimensions data...'
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(
                fname_segmentation_orient)
            print '.. ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt)

            print '\nOpen segmentation volume...'
            file_seg = nibabel.load(fname_segmentation_orient)
            data_seg = file_seg.get_data()
            hdr_seg = file_seg.get_header()

            if verbose == 1:
                data_seg_to_show = copy(data_seg)

            # Extract min and max index in Z direction
            X, Y, Z = (data_seg > 0).nonzero()
            min_z_index, max_z_index = min(Z), max(Z)
            x_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            y_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            z_centerline = [iz for iz in range(min_z_index, max_z_index + 1)]
            # Extract segmentation points and average per slice
            for iz in range(min_z_index, max_z_index + 1):
                x_seg, y_seg = (data_seg[:, :, iz] > 0).nonzero()
                x_centerline[iz - min_z_index] = np.mean(x_seg)
                y_centerline[iz - min_z_index] = np.mean(y_seg)
            for k in range(len(X)):
                data_seg[X[k], Y[k], Z[k]] = 0
            # Fit the centerline points with splines and return the new fitted coordinates
            #done with nurbs for now
            x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_centerline(
                x_centerline, y_centerline, z_centerline)

            # Create an image with the centerline
            for iz in range(min_z_index, max_z_index + 1):
                data_seg[int(round(x_centerline_fit[iz - min_z_index])),
                         int(round(y_centerline_fit[iz - min_z_index])), iz] = 1
            # Write the centerline image in RPI orientation
            hdr_seg.set_data_dtype('uint8')  # set imagetype to uint8
            print '\nWrite NIFTI volumes...'
            img = nibabel.Nifti1Image(data_seg, None, hdr_seg)
            nibabel.save(img, 'tmp.centerline.nii')
            file_name_seg = file_data_seg + '_centerline' + ext_data_seg
            sct.generate_output_file('tmp.centerline.nii',
                                     file_name_seg)  # problem here

            # copy files into parent folder
            #sct.run('cp '+file_name_seg+' ../')

            del data_seg

            # come back to parent folder
            #            os.chdir('..')

            # Change orientation of the output centerline into input orientation
            print '\nOrient centerline image to input orientation: ' + orientation
            set_orientation(file_name_seg, orientation, file_name_seg)

            print '\nRemoving overlap of the centerline obtained from the label file, if any:'

            ## Remove overlap from the centerline file obtained from the label file
            remove_overlap(file_name_label, file_name_seg,
                           "generated_centerline_without_overlap.nii.gz")

            ## Concatenation of the two centerline files
            print '\nConcatenation of the two centerline files:'
            if output_file_name != None:
                file_name = output_file_name
            else:
                file_name = 'centerline_total_from_label_and_seg'

            sct.run(
                'fslmaths generated_centerline_without_overlap.nii.gz -add ' +
                file_name_seg + ' ' + file_name)

            if verbose == 1:
                import matplotlib.pyplot as plt
                from scipy import ndimage

                #Get back concatenation of segmentation and labels before any processing
                data_concatenate = data_seg_to_show + data_label_to_show
                z_centerline = [
                    iz for iz in range(0, nz, 1)
                    if data_concatenate[:, :, iz].any()
                ]
                nz_nonz = len(z_centerline)
                x_centerline = [0 for iz in range(0, nz_nonz, 1)]
                y_centerline = [0 for iz in range(0, nz_nonz, 1)]

                # Calculate centerline coordinates and create image of the centerline
                for iz in range(0, nz_nonz, 1):
                    x_centerline[iz], y_centerline[
                        iz] = ndimage.measurements.center_of_mass(
                            data_concatenate[:, :, z_centerline[iz]])

                #Load file with resulting centerline
                file_centerline_fit = nibabel.load(file_name)
                data_centerline_fit = file_centerline_fit.get_data()

                z_centerline_fit = [
                    iz for iz in range(0, nz, 1)
                    if data_centerline_fit[:, :, iz].any()
                ]
                nz_nonz_fit = len(z_centerline_fit)
                x_centerline_fit_total = [0 for iz in range(0, nz_nonz_fit, 1)]
                y_centerline_fit_total = [0 for iz in range(0, nz_nonz_fit, 1)]

                #Convert to array
                x_centerline_fit_total = np.asarray(x_centerline_fit_total)
                y_centerline_fit_total = np.asarray(y_centerline_fit_total)
                #Calculate overlap between seg and label
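                # number of overlapping slices = len(label fit) + len(seg fit) - len(total fit)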
                length_overlap = X_fit.shape[0] + x_centerline_fit.shape[
                    0] - x_centerline_fit_total.shape[0]
                # The total fit is the concatenation of the two fits (minus the overlapping section)
                for i in range(x_centerline_fit.shape[0]):
                    x_centerline_fit_total[i] = x_centerline_fit[i]
                    y_centerline_fit_total[i] = y_centerline_fit[i]
                for i in range(X_fit.shape[0] - length_overlap):
                    x_centerline_fit_total[x_centerline_fit.shape[0] +
                                           i] = X_fit[i + length_overlap]
                    y_centerline_fit_total[x_centerline_fit.shape[0] +
                                           i] = Y_fit[i + length_overlap]
                    print x_centerline_fit.shape[0] + i

                #for iz in range(0, nz_nonz_fit, 1):
                #    x_centerline_fit[iz], y_centerline_fit[iz] = ndimage.measurements.center_of_mass(data_centerline_fit[:, :, z_centerline_fit[iz]])

                #Creation of a vector x that takes into account the distance between the labels
                #x_centerline_fit = np.asarray(x_centerline_fit)
                #y_centerline_fit = np.asarray(y_centerline_fit)
                x_display = [0 for i in range(x_centerline_fit_total.shape[0])]
                y_display = [0 for i in range(y_centerline_fit_total.shape[0])]

                for i in range(0, nz_nonz, 1):
                    x_display[z_centerline[i] -
                              z_centerline[0]] = x_centerline[i]
                    y_display[z_centerline[i] -
                              z_centerline[0]] = y_centerline[i]

                plt.figure(1)
                plt.subplot(2, 1, 1)
                plt.plot(z_centerline_fit, x_display, 'ro')
                plt.plot(z_centerline_fit, x_centerline_fit_total)
                plt.xlabel("Z")
                plt.ylabel("X")
                plt.title("x and x_fit coordinates")

                plt.subplot(2, 1, 2)
                plt.plot(z_centerline_fit, y_display, 'ro')
                plt.plot(z_centerline_fit, y_centerline_fit_total)
                plt.xlabel("Z")
                plt.ylabel("Y")
                plt.title("y and y_fit coordinates")
                plt.show()

                del data_concatenate, data_label_to_show, data_seg_to_show, data_centerline_fit

            # Copy result into parent folder
            sct.run('cp ' + file_name + ' ../')

            # Come back to parent folder
            os.chdir('..')

            # Remove temporary centerline files
            if remove_temp_files:
                print('\nRemove temporary files...')
                sct.run('rm -rf ' + path_tmp)

    # Process for a text file as output:
    if parameter == "text_file":
        print "\nText file process"
        #Process for only a segmentation file:
        if "-i" in arguments and "-l" not in arguments:

            # Extract path, file and extension
            segmentation_file = os.path.abspath(segmentation_file)
            path_data, file_data, ext_data = sct.extract_fname(
                segmentation_file)

            # create temporary folder
            path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
            sct.run('mkdir ' + path_tmp)

            # copy files into tmp folder
            sct.run('cp ' + segmentation_file + ' ' + path_tmp)

            # go to tmp folder
            os.chdir(path_tmp)

            # Change orientation of the input segmentation into RPI
            print '\nOrient segmentation image to RPI orientation...'
            fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data
            set_orientation(file_data + ext_data, 'RPI',
                            fname_segmentation_orient)

            # Extract orientation of the input segmentation
            orientation = get_orientation(file_data + ext_data)
            print '\nOrientation of segmentation image: ' + orientation

            # Get size of data
            print '\nGet dimensions data...'
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(
                fname_segmentation_orient)
            print '.. ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt)

            print '\nOpen segmentation volume...'
            file = nibabel.load(fname_segmentation_orient)
            data = file.get_data()
            hdr = file.get_header()

            # Extract min and max index in Z direction
            X, Y, Z = (data > 0).nonzero()
            min_z_index, max_z_index = min(Z), max(Z)
            x_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            y_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            z_centerline = [iz for iz in range(min_z_index, max_z_index + 1)]
            # Extract segmentation points and average per slice
            for iz in range(min_z_index, max_z_index + 1):
                x_seg, y_seg = (data[:, :, iz] > 0).nonzero()
                x_centerline[iz - min_z_index] = np.mean(x_seg)
                y_centerline[iz - min_z_index] = np.mean(y_seg)
            for k in range(len(X)):
                data[X[k], Y[k], Z[k]] = 0
            # Fit the centerline points with splines and return the new fitted coordinates
            x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_centerline(
                x_centerline, y_centerline, z_centerline)

            # Create output text file
            if output_file_name != None:
                file_name = output_file_name
            else:
                file_name = file_data + '_centerline' + '.txt'

            sct.printv('\nWrite text file...', verbose)
            #file_results = open("../"+file_name, 'w')
            file_results = open(file_name, 'w')
            for i in range(min_z_index, max_z_index + 1):
                file_results.write(
                    str(int(i)) + ' ' +
                    str(x_centerline_fit[i - min_z_index]) + ' ' +
                    str(y_centerline_fit[i - min_z_index]) + '\n')
            file_results.close()

            # Copy result into parent folder
            sct.run('cp ' + file_name + ' ../')

            del data

            # come back to parent folder
            os.chdir('..')

            # Remove temporary files
            if remove_temp_files:
                print('\nRemove temporary files...')
                sct.run('rm -rf ' + path_tmp)

            return file_name

        #Process for only a label file:
        if "-l" in arguments and "-i" not in arguments:
            file = os.path.abspath(label_file)
            path_data, file_data, ext_data = sct.extract_fname(file)

            file = nibabel.load(label_file)
            data = file.get_data()
            hdr = file.get_header()

            X, Y, Z = (data > 0).nonzero()
            Z_new = np.linspace(min(Z), max(Z), (max(Z) - min(Z) + 1))

            # sort X and Y arrays using Z
            X = [X[i] for i in Z[:].argsort()]
            Y = [Y[i] for i in Z[:].argsort()]
            Z = [Z[i] for i in Z[:].argsort()]

            #print X, Y, Z

            f1 = interpolate.UnivariateSpline(Z, X)
            f2 = interpolate.UnivariateSpline(Z, Y)

            X_fit = f1(Z_new)
            Y_fit = f2(Z_new)

            #print X_fit
            #print Y_fit

            if verbose == 1:
                import matplotlib.pyplot as plt

                plt.figure()
                plt.plot(Z_new, X_fit)
                plt.plot(Z, X, 'o', linestyle='None')
                plt.show()

                plt.figure()
                plt.plot(Z_new, Y_fit)
                plt.plot(Z, Y, 'o', linestyle='None')
                plt.show()

            data = data * 0

            for iz in xrange(len(X_fit)):
                data[int(round(X_fit[iz])), int(round(Y_fit[iz])), int(Z_new[iz])] = 1

            # Create output text file
            sct.printv('\nWrite text file...', verbose)
            if output_file_name != None:
                file_name = output_file_name
            else:
                file_name = file_data + '_centerline' + '.txt'
            file_results = open(file_name, 'w')
            min_z_index, max_z_index = min(Z), max(Z)
            for i in range(min_z_index, max_z_index + 1):
                file_results.write(
                    str(int(i)) + ' ' + str(X_fit[i - min_z_index]) + ' ' +
                    str(Y_fit[i - min_z_index]) + '\n')
            file_results.close()

            del data

        #Process for a segmentation file and a label file:
        if "-l" in arguments and "-i" in arguments:

            ## Creation of a temporary file that will contain each centerline file of the process
            path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
            sct.run('mkdir ' + path_tmp)

            ##From label file create centerline text file
            print '\nPROCESS PART 1: From label file create centerline text file.'
            file_label = os.path.abspath(label_file)
            path_data_label, file_data_label, ext_data_label = sct.extract_fname(
                file_label)

            file_label = nibabel.load(label_file)

            #Copy label_file into temporary folder
            sct.run('cp ' + label_file + ' ' + path_tmp)

            data_label = file_label.get_data()
            hdr_label = file_label.get_header()

            X, Y, Z = (data_label > 0).nonzero()
            Z_new = np.linspace(min(Z), max(Z), (max(Z) - min(Z) + 1))

            # sort X and Y arrays using Z
            X = [X[i] for i in Z[:].argsort()]
            Y = [Y[i] for i in Z[:].argsort()]
            Z = [Z[i] for i in Z[:].argsort()]

            #print X, Y, Z

            f1 = interpolate.UnivariateSpline(Z, X)
            f2 = interpolate.UnivariateSpline(Z, Y)

            X_fit = f1(Z_new)
            Y_fit = f2(Z_new)

            #print X_fit
            #print Y_fit

            if verbose == 1:
                import matplotlib.pyplot as plt

                plt.figure()
                plt.plot(Z_new, X_fit)
                plt.plot(Z, X, 'o', linestyle='None')
                plt.show()

                plt.figure()
                plt.plot(Z_new, Y_fit)
                plt.plot(Z, Y, 'o', linestyle='None')
                plt.show()

            data_label = data_label * 0

            for i in xrange(len(X_fit)):
                data_label[int(round(X_fit[i])), int(round(Y_fit[i])), int(Z_new[i])] = 1

            # Create output text file
            sct.printv('\nWrite text file...', verbose)
            file_name_label = file_data_label + '_centerline' + '.txt'
            file_results = open(path_tmp + '/' + file_name_label, 'w')
            min_z_index, max_z_index = min(Z), max(Z)
            for i in range(min_z_index, max_z_index + 1):
                file_results.write(
                    str(int(i)) + ' ' + str(X_fit[i - min_z_index]) + ' ' +
                    str(Y_fit[i - min_z_index]) + '\n')
            file_results.close()

            # copy files into tmp folder
            #sct.run('cp '+file_name_label+' '+path_tmp)

            del data_label

            ##From segmentation file create centerline text file
            print '\nPROCESS PART 2: From segmentation file create centerline text file.'
            # Extract path, file and extension
            segmentation_file = os.path.abspath(segmentation_file)
            path_data_seg, file_data_seg, ext_data_seg = sct.extract_fname(
                segmentation_file)

            # copy files into tmp folder
            sct.run('cp ' + segmentation_file + ' ' + path_tmp)

            # go to tmp folder
            os.chdir(path_tmp)

            # Change orientation of the input segmentation into RPI
            print '\nOrient segmentation image to RPI orientation...'
            fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data_seg
            set_orientation(file_data_seg + ext_data_seg, 'RPI',
                            fname_segmentation_orient)

            # Extract orientation of the input segmentation
            orientation = get_orientation(file_data_seg + ext_data_seg)
            print '\nOrientation of segmentation image: ' + orientation

            # Get size of data
            print '\nGet dimensions data...'
            nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(
                fname_segmentation_orient)
            print '.. ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt)

            print '\nOpen segmentation volume...'
            file_seg = nibabel.load(fname_segmentation_orient)
            data_seg = file_seg.get_data()
            hdr_seg = file_seg.get_header()

            # Extract min and max index in Z direction
            X, Y, Z = (data_seg > 0).nonzero()
            min_z_index, max_z_index = min(Z), max(Z)
            x_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            y_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
            z_centerline = [iz for iz in range(min_z_index, max_z_index + 1)]
            # Extract segmentation points and average per slice
            for iz in range(min_z_index, max_z_index + 1):
                x_seg, y_seg = (data_seg[:, :, iz] > 0).nonzero()
                x_centerline[iz - min_z_index] = np.mean(x_seg)
                y_centerline[iz - min_z_index] = np.mean(y_seg)
            for k in range(len(X)):
                data_seg[X[k], Y[k], Z[k]] = 0
            # Fit the centerline points with splines and return the new fitted coordinates
            #done with nurbs for now
            x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_centerline(
                x_centerline, y_centerline, z_centerline)

            # Create output text file
            file_name_seg = file_data_seg + '_centerline' + '.txt'
            sct.printv('\nWrite text file...', verbose)
            file_results = open(file_name_seg, 'w')
            for i in range(min_z_index, max_z_index + 1):
                file_results.write(
                    str(int(i)) + ' ' +
                    str(x_centerline_fit[i - min_z_index]) + ' ' +
                    str(y_centerline_fit[i - min_z_index]) + '\n')
            file_results.close()

            del data_seg

            print '\nRemoving overlap of the centerline obtained from the label file, if any:'

            ## Remove overlap from the centerline file obtained from the label file
            remove_overlap(file_name_label,
                           file_name_seg,
                           "generated_centerline_without_overlap1.txt",
                           parameter=1)

            ## Concatenation of the two centerline files
            print '\nConcatenation of the two centerline files:'
            if output_file_name != None:
                file_name = output_file_name
            else:
                file_name = 'centerline_total_from_label_and_seg.txt'

            f_output = open(file_name, "w")
            f_output.close()
            with open(file_name_seg, "r") as f_seg:
                with open("generated_centerline_without_overlap1.txt",
                          "r") as f:
                    with open(file_name, "w") as f_output:
                        data_line_seg = f_seg.readlines()
                        data_line = f.readlines()
                        for line in data_line_seg:
                            f_output.write(line)
                        for line in data_line:
                            f_output.write(line)

            # Copy result into parent folder
            sct.run('cp ' + file_name + ' ../')

            # Come back to parent folder
            os.chdir('..')

            # Remove temporary centerline files
            if remove_temp_files:
                print('\nRemove temporary files...')
                sct.run('rm -rf ' + path_tmp)
    if "-init-validation" in arguments:
        cmd += " -init-validation"
    if "-nbiter" in arguments:
        cmd += " -nbiter " + str(arguments["-nbiter"])
    if "-max-area" in arguments:
        cmd += " -max-area " + str(arguments["-max-area"])
    if "-max-deformation" in arguments:
        cmd += " -max-deformation " + str(arguments["-max-deformation"])
    if "-min-contrast" in arguments:
        cmd += " -min-contrast " + str(arguments["-min-contrast"])
    if "-d" in arguments:
        cmd += " -d " + str(arguments["-d"])

    # check if the input image is 3D. Otherwise the ITK image reader will split the 4D image into 3D volumes and only take the first one.
    from sct_utils import get_dimension
    nx, ny, nz, nt, px, py, pz, pt = get_dimension(input_filename)
    if nt > 1:
        sct.printv('ERROR: your input image needs to be 3D in order to be segmented.', 1, 'error')

    sct.run(cmd, verbose)

    sct.printv('\nDone! To view results, type:', verbose)
    # extracting output filename
    path_fname, file_fname, ext_fname = sct.extract_fname(input_filename)
    output_filename = file_fname+"_seg"+ext_fname

    if folder_output == ".":
        output_name = output_filename
    else:
        output_name = folder_output+"/"+output_filename
    sct.printv("fslview "+input_filename+" "+output_name+" -l Red -b 0,1 -t 0.7 &\n", verbose, 'info')
def main():

# Initialization
    fname_anat = ''
    fname_centerline = ''
    fwhm = param.fwhm
    width=param.width
    remove_temp_files = param.remove_temp_files
    start_time = time.time()
    verbose = param.verbose

    # extract path of the script
    path_script = os.path.dirname(__file__) + '/'

    # Parameters for debug mode
    if param.debug == 1:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        fname_anat = '/home/django/ibouchard/errsm_22_t2_cropped_rpi.nii.gz'
        fname_centerline = '/home/django/ibouchard//errsm_22_t2_cropped_centerline.nii.gz'
        fwhm=1
        width=20

    # Check input param
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hi:c:f:w:r:')
    except getopt.GetoptError as err:
        print str(err)
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ('-i'):
            fname_anat = arg
        elif opt in ('-c'):
            fname_centerline = arg
        elif opt in ('-f'):
            fwhm = int(arg)
        elif opt in ('-w'):
            width=int(arg)
        elif opt in ('-r'):
            remove_temp_files = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_anat == '' or fname_centerline == '':
        usage()

    # check existence of input files
    sct.check_file_exist(fname_anat)
    sct.check_file_exist(fname_centerline)

    # extract path/file/extension
    path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)

    # extract path/file/extension
    path_centerline, file_centerline, ext_centerline = sct.extract_fname(fname_centerline)

    # Display arguments
    print '\nCheck input arguments...'
    print '.. Anatomical image:           ' + fname_anat
    print '.. Centerline:                 ' + fname_centerline
    print '.. Full width at half maximum:  ' + str(fwhm)
    print '.. Width of the square window: ' + str(width)

    # create temporary folder
    sct.printv('\nCreate temporary folder...', verbose)
    path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1)
    sct.run('mkdir '+path_tmp, verbose)

    # Copying input data to tmp folder and convert to nii
    sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose)
    sct.run('cp '+fname_anat+' '+path_tmp+'data'+ext_anat, verbose)
    sct.run('cp '+fname_centerline+' '+path_tmp+'centerline'+ext_centerline, verbose)

    # go to tmp folder
    os.chdir(path_tmp)

    # convert to nii format
    convert('data'+ext_anat, 'data.nii')
    convert('centerline'+ext_centerline, 'centerline.nii')

    # # Get dimensions of data
    # sct.printv('\nGet dimensions of data...', param.verbose)
    # nx, ny, nz, nt, px, py, pz, pt = Image('data.nii').dim

    #
    # #Delete existing tmp file in the current folder to avoid problems
    # if os.path.isfile('tmp.anat.nii'):
    #     sct.run('rm tmp.anat.nii')
    # if os.path.isfile('tmp.centerline.nii'):
    #     sct.run('rm tmp.centerline.nii')
    #
    # # Convert to nii and delete nii.gz if still existing
    # print '\nCopy input data...'
    # sct.run('cp ' + fname_anat + ' tmp.anat'+ext_anat)
    # convert('data'+ext_data, 'data.nii')
    #
    # sct.run('fslchfiletype NIFTI tmp.anat')
    # if os.path.isfile('tmp.anat.nii.gz'):
    #     sct.run('rm tmp.anat.nii.gz')
    # print '.. Anatomical image copied'
    # sct.run('cp ' + fname_centerline + ' tmp.centerline'+ext_centerline)
    # sct.run('fslchfiletype NIFTI tmp.centerline')
    # if os.path.isfile('tmp.centerline.nii.gz'):
    #     sct.run('rm tmp.centerline.nii.gz')
    # print '.. Centerline image copied'


    # Open anatomical image
    #==========================================================================================
    # Reorient input anatomical volume into RL PA IS orientation
    print '\nReorient input volume to RL PA IS orientation...'
    sct.run(sct.fsloutput + 'fslswapdim tmp.anat RL PA IS tmp.anat_orient')


    print '\nGet dimensions of input anatomical image...'
    nx_a, ny_a, nz_a, nt_a, px_a, py_a, pz_a, pt_a = sct.get_dimension('tmp.anat_orient')
    #nx_a, ny_a, nz_a, nt_a, px_a, py_a, pz_a, pt_a = sct.get_dimension(fname_anat)
    print '.. matrix size: ' + str(nx_a) + ' x ' + str(ny_a) + ' x ' + str(nz_a)
    print '.. voxel size:  ' + str(px_a) + 'mm x ' + str(py_a) + 'mm x ' + str(pz_a) + 'mm'

    print '\nOpen anatomical volume...'
    file = nibabel.load('tmp.anat_orient.nii')
    #file = nibabel.load(fname_anat)
    data_anat = file.get_data()
    data_anat=np.array(data_anat)

    data_anat_smoothed=np.copy(data_anat)


    # Open centerline
    #==========================================================================================
    # Reorient binary point into RL PA IS orientation
    print '\nReorient centerline volume into RL PA IS orientation...'
    sct.run(sct.fsloutput + 'fslswapdim tmp.centerline RL PA IS tmp.centerline_orient')

    print '\nGet dimensions of input centerline...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('tmp.centerline_orient')
    #nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_centerline)
    print '.. matrix size: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz)
    print '.. voxel size:  ' + str(px) + 'mm x ' + str(py) + 'mm x ' + str(pz) + 'mm'

    print '\nOpen centerline volume...'
    file = nibabel.load('tmp.centerline_orient.nii')
    #file = nibabel.load(fname_centerline)
    data_centerline = file.get_data()

    #Loop across z and associate x,y coordinate with the point having maximum intensity
    x_centerline = [0 for iz in range(0, nz, 1)]
    y_centerline = [0 for iz in range(0, nz, 1)]
    z_centerline = [iz for iz in range(0, nz, 1)]
    for iz in range(0, nz, 1):
        x_centerline[iz], y_centerline[iz] = np.unravel_index(data_centerline[:, :, iz].argmax(),
                                                              data_centerline[:, :, iz].shape)
    del data_centerline


    # Fit polynomial function through centerline
    #==========================================================================================

    #Fit centerline in the Z-X plane using polynomial function
    print '\nFit centerline in the Z-X plane using polynomial function...'
    coeffsx = np.polyfit(z_centerline, x_centerline, deg=param.deg_poly)
    polyx = np.poly1d(coeffsx)
    x_centerline_fit = np.polyval(polyx, z_centerline)

    #Fit centerline in the Z-Y plane using polynomial function
    print '\nFit centerline in the Z-Y plane using polynomial function...'
    coeffsy = np.polyfit(z_centerline, y_centerline, deg=param.deg_poly)
    polyy = np.poly1d(coeffsy)
    y_centerline_fit = np.polyval(polyy, z_centerline)

    # Find tangent function of centerline along z
    #==========================================================================================

    # Find tangent to centerline in zx plane, along z
    print '\nFind tangent to centerline along z, in the Z-X plane...'
    poly_tangent_xz = np.polyder(polyx)
    tangent_xz = np.polyval(poly_tangent_xz, z_centerline)

    # Find tangent to centerline in zy plane, along z
    print '\nFind tangent to centerline along z, in the Z-Y plane...'
    poly_tangent_yz = np.polyder(polyy)
    tangent_yz = np.polyval(poly_tangent_yz, z_centerline)

    # Create a Gaussian kernel with the parameters given by the user
    #==========================================================================================
    print '\nGenerate a Gaussian kernel with the parameters given by the user...'

    # Convert the FWHM given by the user into a standard deviation (sigma) and find the size of the Gaussian kernel,
    # knowing that size_kernel = (6*sigma - 1) must be odd
    sigma = int(np.round((fwhm/pz_a)*(math.sqrt(1/(2*(math.log(2)))))))
    size_kernel= (np.round(6*sigma))
    if size_kernel%2==0:
        size_kernel=size_kernel-1


    #Create a 1D impulse array and apply a Gaussian filter to it. The result is a Gaussian kernel.
    kernel_temp = np.zeros(size_kernel)
    kernel_temp[math.ceil(size_kernel/2)] = 1
    kernel= ndimage.filters.gaussian_filter1d(kernel_temp, sigma, order=0)
    sum_kernel=np.sum(kernel)

    print '.. Full width at half maximum: ' + str(fwhm)
    print '.. Kernel size : '+str(size_kernel)
    print '.. Sigma (Standard deviation): ' + str(sigma)

    del kernel_temp
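
    # Worked example of the kernel sizing above (illustrative values, not from the script):
    # with fwhm = 1 mm and pz_a = 1 mm/voxel, sigma = round(1 * sqrt(1/(2*ln(2)))) = round(0.85) = 1,
    # and size_kernel = round(6*1) = 6 is even, so it is reduced to 5 (an odd kernel length, as required).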


    ## Smooth along the spinal cord
    ##==========================================================================================
    print '\nSmooth along the spinal cord...'


    print '\n Voxel position along z axis...'

    # Initialisations
    position=np.zeros(3)
    flag=np.zeros((nx_a,ny_a,nz_a))
    data_weight=np.ones((nx_a,ny_a,nz_a))
    smoothing_array=np.zeros(size_kernel)
    x_near=np.zeros(2)
    y_near=np.zeros(2)
    z_near=np.zeros(2)
    floor_position=np.zeros(3)
    ceil_position=np.zeros(3)
    position_d=np.zeros(3)

    #For every voxel along z axis,
    for iz in range(0,nz_a,1):

        print '.. '+str(iz+1)+ '/'+str(nz_a)

        # Determine the square area to smooth around the centerline
        xmin=x_centerline[iz]-int(width/2)
        xmax=x_centerline[iz]+int(width/2)
        ymin=y_centerline[iz]-int(width/2)
        ymax=y_centerline[iz]+int(width/2)

        #Find the angle between the tangent and the x axis in xz plane.
        theta_xz = -(math.atan(tangent_xz[iz]))

        #Find the angle between the tangent and the y axis in yz plane.
        theta_yz = -(math.atan(tangent_yz[iz]))

        #Construct a rotation matrix around the y axis.
        Rxz=np.zeros((3,3))
        Rxz[1,1]=1
        Rxz[0,0]=(math.cos(theta_xz))
        Rxz[2,0]=(math.sin(theta_xz))
        Rxz[0,2]=-(math.sin(theta_xz))
        Rxz[2,2]=(math.cos(theta_xz))

        #Construct a rotation matrix around the x axis.
        Ryz=np.zeros((3,3))
        Ryz[0,0]=1
        Ryz[1,1]=(math.cos(theta_yz))
        Ryz[1,2]=(math.sin(theta_yz))
        Ryz[2,1]=-(math.sin(theta_yz))
        Ryz[2,2]=(math.cos(theta_yz))
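
        # For reference, the assignments above build the rotation matrices (row by row; this
        # restates the code, it is not an extra computation):
        #   Rxz = [[ cos(theta_xz), 0, -sin(theta_xz)],    (rotation about the y axis)
        #          [ 0,             1,  0             ],
        #          [ sin(theta_xz), 0,  cos(theta_xz)]]
        #   Ryz = [[ 1,  0,              0            ],   (rotation about the x axis)
        #          [ 0,  cos(theta_yz),  sin(theta_yz)],
        #          [ 0, -sin(theta_yz),  cos(theta_yz)]]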


        #For every voxel in the given plane included in the square area
        for ix in range(xmin,xmax,1):
            for iy in range(ymin,ymax,1):

                #The area to smooth has the same height as the 1D mask length
                isize=0
                centerline_point=[np.copy(x_centerline[iz]), np.copy(y_centerline[iz]), np.copy(iz)]


                #For every voxel along the line orthogonal to the considered plane and included in the kernel.
                #(Here we fill a vector called smoothing_array, which has the same length as the kernel, is oriented in the direction of the centerline and contains interpolated intensity values.)
                for isize in range(0,size_kernel, 1):

                    #Find the position in the xy plane, before rotation
                    position = [ix, iy, iz+isize-(np.floor(size_kernel/2))]

                    #Find the position after rotation by multiplying the position centered on centerline point with rotation array around x and y axis.
                    new_position= np.dot((np.dot((np.subtract(np.copy(position),centerline_point)), Rxz)), Ryz) + centerline_point

                    #If the resulting voxel is out of image boundaries, pad the smoothing array with a zero
                    if (new_position[0]<0)or (new_position[1]<0)or(new_position[2]<0)or(new_position[0]>nx_a-1)or (new_position[1]>ny_a-1)or(new_position[2]>nz_a-1):
                        smoothing_array[isize]=0
                    #Otherwise, fill the smoothing array with the linear interpolation of the values around the current position
                    else:

                    # Trilinear interpolation
                    #==========================================================================================================================================
                    # Determine the coordinates in grid surrounding the position of the central voxel and perform a trilinear interpolation
                        x_near[0]=np.copy(np.floor(new_position[0]))
                        x_near[1]=np.copy(np.ceil(new_position[0]))
                        xd=(new_position[0]-x_near[0])
                        y_near[0]=np.copy(np.floor(new_position[1]))
                        y_near[1]=np.copy(np.ceil(new_position[1]))
                        yd=(new_position[1]-y_near[0])
                        z_near[0]=np.copy(np.floor(new_position[2]))
                        z_near[1]=np.copy(np.ceil(new_position[2]))
                        zd=(new_position[2]-z_near[0])

                        c00=((data_anat[x_near[0],y_near[0],z_near[0]])*(1-xd))+((data_anat[x_near[1],y_near[0],z_near[0]])*(xd))
                        c10=((data_anat[x_near[0],y_near[1],z_near[0]])*(1-xd))+((data_anat[x_near[1],y_near[1],z_near[0]])*(xd))
                        c01=((data_anat[x_near[0],y_near[0],z_near[1]])*(1-xd))+((data_anat[x_near[1],y_near[0],z_near[1]])*(xd))
                        c11=((data_anat[x_near[0],y_near[1],z_near[1]])*(1-xd))+((data_anat[x_near[1],y_near[1],z_near[1]])*(xd))

                        c0=c00*(1-yd)+c10*yd
                        c1=c01*(1-yd)+c11*yd

                        smoothing_array[isize]=c0*(1-zd)+c1*zd

                    #If the current position is in the z = iz plane (i.e., the central element of the kernel), save its coordinates in the variable central_position. (Otherwise, don't save it.)
                    if isize==(np.floor(size_kernel/2)):
                        central_position=np.copy(new_position)


                #If the central_position is out of boundaries, don't consider it anymore.
                if (central_position[0]<0)or (central_position[1]<0)or(central_position[2]<0)or(central_position[0]>nx_a-1)or (central_position[1]>ny_a-1)or(central_position[2]>nz_a-1):
                    continue

                else:
                    #Otherwise, perform the convolution of the smoothing_array and the kernel for the central voxel only (equivalent to element-wise multiply). Normalize the result.
                    result=((np.sum(np.copy(smoothing_array)*kernel))/sum_kernel)

                    # Determine the coordinates in grid surrounding the position of the central voxel
                    for i in range(0,3,1):
                        floor_position[i]=math.floor(central_position[i])
                        ceil_position[i]=math.ceil(central_position[i])
                        position_d[i]=central_position[i]-floor_position[i]



                    # Reverse trilinear interpolation
                    #==========================================================================================================================================
                    # Split the resulting intensity given by the convolution between the 8 voxels surrounding the point where the convolution is calculated (central_position).
                    # The array data_anat_smoothed is the volume of the anatomical image smoothed along the spinal cord.
                    # The array flag indicates whether the corresponding voxel of the anatomical image is inside the smoothing area around the spinal cord and whether it has already been updated.
                    # The default value of flag is 0. When it is set to 1, an operation has been performed on the corresponding voxel of the anatomical image; at that moment both the data_anat_smoothed and data_weight voxels are cleared to 0.
                    # The array data_weight represents the sum of the weights used to compute the intensity of every voxel. In a perfect case this sum would be 1, but because there is an angle between
                    # two adjacent planes the sum will be lower, so we need to normalize the result. The default value of data_weight is 1, but once an operation is performed on the corresponding voxel (flag=1), we accumulate the weights used.

                    if (flag[ceil_position[0],ceil_position[1],ceil_position[2]]==0):
                        data_anat_smoothed[ceil_position[0],ceil_position[1],ceil_position[2]]=0
                        data_weight[ceil_position[0],ceil_position[1],ceil_position[2]]=0
                        flag[ceil_position[0],ceil_position[1],ceil_position[2]]=1
                    weight=(position_d[0])*(position_d[1])*(position_d[2])
                    data_anat_smoothed[ceil_position[0],ceil_position[1],ceil_position[2]]=data_anat_smoothed[ceil_position[0],ceil_position[1],ceil_position[2]]+(weight*result)
                    data_weight[ceil_position[0],ceil_position[1],ceil_position[2]]=data_weight[ceil_position[0],ceil_position[1],ceil_position[2]]+(weight)

                    if (flag[floor_position[0],floor_position[1],floor_position[2]]==0):
                        data_anat_smoothed[floor_position[0],floor_position[1],floor_position[2]]=0
                        data_weight[floor_position[0],floor_position[1],floor_position[2]]=0
                        flag[floor_position[0],floor_position[1],floor_position[2]]=1
                    weight=(1-position_d[0])*(1-position_d[1])*(1-position_d[2])
                    data_anat_smoothed[floor_position[0],floor_position[1],floor_position[2]]=data_anat_smoothed[floor_position[0],floor_position[1],floor_position[2]]+(weight*result)
                    data_weight[floor_position[0],floor_position[1],floor_position[2]]=data_weight[floor_position[0],floor_position[1],floor_position[2]]+(weight)


                    if (flag[ceil_position[0],floor_position[1],floor_position[2]]==0):
                        data_anat_smoothed[ceil_position[0],floor_position[1],floor_position[2]]=0
                        data_weight[ceil_position[0],floor_position[1],floor_position[2]]=0
                        flag[ceil_position[0],floor_position[1],floor_position[2]]=1
                    weight=(position_d[0])*(1-position_d[1])*(1-position_d[2])
                    data_anat_smoothed[ceil_position[0],floor_position[1],floor_position[2]]=data_anat_smoothed[ceil_position[0],floor_position[1],floor_position[2]]+(weight*result)
                    data_weight[ceil_position[0],floor_position[1],floor_position[2]]=data_weight[ceil_position[0],floor_position[1],floor_position[2]]+(weight)

                    if (flag[ceil_position[0],ceil_position[1],floor_position[2]]==0):
                        data_anat_smoothed[ceil_position[0],ceil_position[1],floor_position[2]]=0
                        data_weight[ceil_position[0],ceil_position[1],floor_position[2]]=0
                        flag[ceil_position[0],ceil_position[1],floor_position[2]]=1
                    weight=(position_d[0])*(position_d[1])*(1-position_d[2])
                    data_anat_smoothed[ceil_position[0],ceil_position[1],floor_position[2]]=data_anat_smoothed[ceil_position[0],ceil_position[1],floor_position[2]]+(weight*result)
                    data_weight[ceil_position[0],ceil_position[1],floor_position[2]]=data_weight[ceil_position[0],ceil_position[1],floor_position[2]]+(weight)

                    if (flag[ceil_position[0],floor_position[1],ceil_position[2]]==0):
                        data_anat_smoothed[ceil_position[0],floor_position[1],ceil_position[2]]=0
                        data_weight[ceil_position[0],floor_position[1],ceil_position[2]]=0
                        flag[ceil_position[0],floor_position[1],ceil_position[2]]=1
                    weight=(position_d[0])*(1-position_d[1])*(position_d[2])
                    data_anat_smoothed[ceil_position[0],floor_position[1],ceil_position[2]]=data_anat_smoothed[ceil_position[0],floor_position[1],ceil_position[2]]+(weight*result)
                    data_weight[ceil_position[0],floor_position[1],ceil_position[2]]=data_weight[ceil_position[0],floor_position[1],ceil_position[2]]+(weight)

                    if (flag[floor_position[0],ceil_position[1],floor_position[2]]==0):
                        data_anat_smoothed[floor_position[0],ceil_position[1],floor_position[2]]=0
                        data_weight[floor_position[0],ceil_position[1],floor_position[2]]=0
                        flag[floor_position[0],ceil_position[1],floor_position[2]]=1
                    weight=(1-position_d[0])*(position_d[1])*(1-position_d[2])
                    data_anat_smoothed[floor_position[0],ceil_position[1],floor_position[2]]=data_anat_smoothed[floor_position[0],ceil_position[1],floor_position[2]]+(weight*result)
                    data_weight[floor_position[0],ceil_position[1],floor_position[2]]=data_weight[floor_position[0],ceil_position[1],floor_position[2]]+(weight)

                    if (flag[floor_position[0],ceil_position[1],ceil_position[2]]==0):
                        data_anat_smoothed[floor_position[0],ceil_position[1],ceil_position[2]]=0
                        data_weight[floor_position[0],ceil_position[1],ceil_position[2]]=0
                        flag[floor_position[0],ceil_position[1],ceil_position[2]]=1
                    weight=(1-position_d[0])*(position_d[1])*(position_d[2])
                    data_anat_smoothed[floor_position[0],ceil_position[1], ceil_position[2]]= data_anat_smoothed[floor_position[0],ceil_position[1], ceil_position[2]]+(weight*result)
                    data_weight[floor_position[0],ceil_position[1], ceil_position[2]]= data_weight[floor_position[0],ceil_position[1], ceil_position[2]]+(weight)

                    if (flag[floor_position[0],floor_position[1],ceil_position[2]]==0):
                        data_anat_smoothed[floor_position[0],floor_position[1],ceil_position[2]]=0
                        flag[floor_position[0],floor_position[1],ceil_position[2]]=1
                        data_weight[floor_position[0],floor_position[1],ceil_position[2]]=0
                    weight=(1-position_d[0])*(1-position_d[1])*(position_d[2])
                    data_anat_smoothed[floor_position[0],floor_position[1],ceil_position[2]]=data_anat_smoothed[floor_position[0],floor_position[1],ceil_position[2]]+(weight*result)
                    data_weight[floor_position[0],floor_position[1],ceil_position[2]]=data_weight[floor_position[0],floor_position[1],ceil_position[2]]+(weight)


    # Once the whole spinal cord has been covered along z, normalize the resulting image by the sum of the weights used to compute each voxel intensity
    data_anat_smoothed=data_anat_smoothed/data_weight



    #Generate output file
    #==========================================================================================

    # Write NIFTI volumes
    print '\nWrite NIFTI volumes...'
    if os.path.isfile('tmp.im_smoothed.nii'):
        sct.run('rm tmp.im_smoothed.nii')
    img = nibabel.Nifti1Image(data_anat_smoothed, None)
    nibabel.save(img, 'tmp.im_smoothed.nii')
    print '.. File created: tmp.im_smoothed.nii'

    #Copy header geometry from input data
    print '\nCopy header geometry from input data and reorient the volume...'
    sct.run(sct.fsloutput+'fslcpgeom tmp.anat_orient.nii tmp.im_smoothed.nii ')

    #Generate output file
    print '\nGenerate output file (in current folder)...'
    sct.generate_output_file('tmp.im_smoothed.nii','./',file_anat+'_smoothed',ext_anat)

    # Delete temporary files
    if remove_temp_files == 1:
        print '\nDelete temporary files...'
        sct.run('rm tmp.anat.nii')
        sct.run('rm tmp.centerline.nii')
        sct.run('rm tmp.anat_orient.nii')
        sct.run('rm tmp.centerline_orient.nii')


    #Display elapsed time
    elapsed_time = time.time() - start_time
    print '\nFinished!'
    print '.. '+str(int(round(elapsed_time)))+'s\n'
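

# A minimal, self-contained sketch (not part of the original script) of the trilinear interpolation
# performed inline in the smoothing loop above. The function and variable names ('volume',
# 'position') are illustrative; the code mirrors the c00/c10/c01/c11 -> c0/c1 -> result scheme used above.
def trilinear_interpolation_sketch(volume, position):
    import math  # local import to keep the sketch self-contained
    x, y, z = position
    x0, x1 = int(math.floor(x)), int(math.ceil(x))
    y0, y1 = int(math.floor(y)), int(math.ceil(y))
    z0, z1 = int(math.floor(z)), int(math.ceil(z))
    xd, yd, zd = x - x0, y - y0, z - z0
    # interpolate along x on the four edges of the surrounding cube
    c00 = volume[x0, y0, z0] * (1 - xd) + volume[x1, y0, z0] * xd
    c10 = volume[x0, y1, z0] * (1 - xd) + volume[x1, y1, z0] * xd
    c01 = volume[x0, y0, z1] * (1 - xd) + volume[x1, y0, z1] * xd
    c11 = volume[x0, y1, z1] * (1 - xd) + volume[x1, y1, z1] * xd
    # then along y, then along z
    c0 = c00 * (1 - yd) + c10 * yd
    c1 = c01 * (1 - yd) + c11 * yd
    return c0 * (1 - zd) + c1 * zd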
            sct.printv(
                'sct_propseg -i data_RPI_registered.nii.gz -t t1 -init-centerline '
                + PATH_INFO + '/' + subject + '/centerline_propseg_RPI.nii.gz')
            sct.run(
                'sct_propseg -i data_RPI_registered.nii.gz -t t1 -init-centerline '
                + PATH_INFO + '/' + subject + '/centerline_propseg_RPI.nii.gz')
        else:
            sct.printv('sct_propseg -i data_RPI_registered.nii.gz -t t1')
            sct.run('sct_propseg -i data_RPI_registered.nii.gz -t t1')

        # Erase 3 top and 3 bottom slices of the segmentation to avoid edge effects  (Done because propseg tends to diverge on edges)
        print '\nErasing 3 top and 3 bottom slices of the segmentation to avoid edge effects...'
        path_seg, file_seg, ext_seg = sct.extract_fname(
            'data_RPI_registered_seg.nii.gz')
        image_seg = nibabel.load('data_RPI_registered_seg.nii.gz')
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(
            'data_RPI_registered_seg.nii.gz')
        data_seg = image_seg.get_data()
        hdr_seg = image_seg.get_header()
        # List slices that contain non zero values
        z_centerline = [
            iz for iz in range(0, nz, 1) if data_seg[:, :, iz].any()
        ]

        for k in range(0, 3):
            data_seg[:, :, z_centerline[-1] - k] = 0
            if z_centerline[0] + k < nz:
                data_seg[:, :, z_centerline[0] + k] = 0
        img_seg = nibabel.Nifti1Image(data_seg, None, hdr_seg)
        nibabel.save(img_seg, file_seg + '_mod' + ext_seg)

        # crop segmentation along z(but keep same dimension)
def main():

    # Initialization
    fname_anat = ''
    fname_centerline = ''
    gapxy = param.gapxy
    gapz = param.gapz
    padding = param.padding
    centerline_fitting = param.fitting_method
    remove_temp_files = param.remove_temp_files
    verbose = param.verbose
    interpolation_warp = param.interpolation_warp

    # get path of the toolbox
    status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
    print path_sct
    # extract path of the script
    path_script = os.path.dirname(__file__) + '/'

    # Parameters for debug mode
    if param.debug == 1:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        # fname_anat = path_sct+'/testing/data/errsm_23/t2/t2.nii.gz'
        # fname_centerline = path_sct+'/testing/data/errsm_23/t2/t2_segmentation_PropSeg.nii.gz'
        fname_anat = '/home/django/jtouati/data/cover_z_slices/errsm13_t2.nii.gz'
        fname_centerline = '/home/django/jtouati/data/cover_z_slices/segmentation_centerline_binary.nii.gz'
        remove_temp_files = 0
        centerline_fitting = 'splines'
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        verbose = 2

    # Check input param
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hi:c:r:w:f:v:')
    except getopt.GetoptError as err:
        print str(err)
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ('-i'):
            fname_anat = arg
        elif opt in ('-c'):
            fname_centerline = arg
        elif opt in ('-r'):
            remove_temp_files = int(arg)
        elif opt in ('-w'):
            interpolation_warp = str(arg)
        elif opt in ('-f'):
            centerline_fitting = str(arg)
        elif opt in ('-v'):
            verbose = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_anat == '' or fname_centerline == '':
        usage()

    # Display usage if optional arguments are not correctly provided
    if centerline_fitting == '':
        centerline_fitting = 'splines'
    elif not centerline_fitting == '' and not centerline_fitting == 'splines' and not centerline_fitting == 'polynomial':
        print '\n \n -f argument is not valid \n \n'
        usage()

    # check existence of input files
    sct.check_file_exist(fname_anat)
    sct.check_file_exist(fname_centerline)

    # check interp method
    if interpolation_warp == 'spline':
        interpolation_warp_ants = '--use-BSpline'
    elif interpolation_warp == 'trilinear':
        interpolation_warp_ants = ''
    elif interpolation_warp == 'nearestneighbor':
        interpolation_warp_ants = '--use-NN'
    else:
        print '\nWARNING: Interpolation method not recognized. Using: ' + param.interpolation_warp
        interpolation_warp_ants = '--use-BSpline'

    # Display arguments
    print '\nCheck input arguments...'
    print '  Input volume ...................... ' + fname_anat
    print '  Centerline ........................ ' + fname_centerline
    print '  Centerline fitting option ......... ' + centerline_fitting
    print '  Final interpolation ............... ' + interpolation_warp
    print '  Verbose ........................... ' + str(verbose)
    print ''

    # if verbose 2, import matplotlib
    if verbose == 2:
        import matplotlib.pyplot as plt

    # Extract path/file/extension
    path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
    path_centerline, file_centerline, ext_centerline = sct.extract_fname(
        fname_centerline)

    # create temporary folder
    path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir ' + path_tmp)

    # copy files into tmp folder
    sct.run('cp ' + fname_anat + ' ' + path_tmp)
    sct.run('cp ' + fname_centerline + ' ' + path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    # Open centerline
    #==========================================================================================
    # Change orientation of the input centerline into RPI
    print '\nOrient centerline to RPI orientation...'
    fname_centerline_orient = 'tmp.centerline_rpi' + ext_centerline
    sct.run('sct_orientation -i ' + file_centerline + ext_centerline + ' -o ' +
            fname_centerline_orient + ' -orientation RPI')

    print '\nGet dimensions of input centerline...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_centerline_orient)
    print '.. matrix size: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz)
    print '.. voxel size:  ' + str(px) + 'mm x ' + str(py) + 'mm x ' + str(
        pz) + 'mm'

    print '\nOpen centerline volume...'
    file = nibabel.load(fname_centerline_orient)
    data = file.get_data()

    # loop across z and associate x,y coordinate with the point having maximum intensity
    x_centerline = [0 for iz in range(0, nz, 1)]
    y_centerline = [0 for iz in range(0, nz, 1)]
    z_centerline = [iz for iz in range(0, nz, 1)]
    x_centerline_deriv = [0 for iz in range(0, nz, 1)]
    y_centerline_deriv = [0 for iz in range(0, nz, 1)]
    z_centerline_deriv = [0 for iz in range(0, nz, 1)]

    # Two possible scenarios:
    # 1. The centerline is probabilistic: each slice contains voxels with the probability of containing the centerline [0:...:1].
    # We only take the maximum value of the image to approximate the centerline.
    # 2. The centerline/segmentation image contains many pixels per slice with values {0,1}.
    # We take all the points and approximate the centerline on all these points.
    #
    # x_seg_start, y_seg_start = (data[:,:,0]>0).nonzero()
    # x_seg_end, y_seg_end = (data[:,:,-1]>0).nonzero()
    # REMOVED: 2014-07-18
    # check if centerline covers all the image
    #    if len(x_seg_start)==0 or len(x_seg_end)==0:
    #        print '\nERROR: centerline/segmentation must cover all "z" slices of the input image.\n' \
    #              'To solve the problem, you need to crop the input image (you can use \'sct_crop_image\') and generate one' \
    #              'more time the spinal cord centerline/segmentation from this cropped image.\n'
    #        usage()
    #
    # X, Y, Z = ((data<1)*(data>0)).nonzero() # X is empty if binary image
    # if (len(X) > 0): # Scenario 1
    #     for iz in range(0, nz, 1):
    #         x_centerline[iz], y_centerline[iz] = numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape)
    # else: # Scenario 2
    #     for iz in range(0, nz, 1):
    #         print (data[:,:,iz]>0).nonzero()
    #         x_seg, y_seg = (data[:,:,iz]>0).nonzero()
    #         x_centerline[iz] = numpy.mean(x_seg)
    #         y_centerline[iz] = numpy.mean(y_seg)
    # # TODO: find a way to do the previous loop with this, which is more neat:
    # # [numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape) for iz in range(0,nz,1)]

    # get center of mass of the centerline/segmentation
    print '\nGet center of mass of the centerline/segmentation...'
    for iz in range(0, nz, 1):
        x_centerline[iz], y_centerline[
            iz] = ndimage.measurements.center_of_mass(
                numpy.array(data[:, :, iz]))
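
    # Note: for a binary slice, ndimage.measurements.center_of_mass returns the mean (row, col)
    # of the nonzero voxels; e.g. nonzero voxels at (10, 20) and (12, 22) give (11.0, 21.0).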

    #print len(x_centerline),len(y_centerline)
    #print len((numpy.array(x_centerline)>=0).nonzero()[0]),len((numpy.array(y_centerline)>=0).nonzero()[0])

    x_seg_start, y_seg_start = (data[:, :, 0] > 0).nonzero()
    x_seg_end, y_seg_end = (data[:, :, -1] > 0).nonzero()

    #check if centerline covers all the image
    if len(x_seg_start) == 0 or len(x_seg_end) == 0:
        sct.printv(
            '\nWARNING : the centerline/segmentation you gave does not cover all "z" slices of the input image. Results should be improved if you crop the input image (you can use \'sct_crop_image\') and generate a new spinalcord centerline/segmentation from this cropped image.\n',
            1, 'warning')
        # print '\nWARNING : the centerline/segmentation you gave does not cover all "z" slices of the input image.\n' \
        #       'Results should be improved if you crop the input image (you can use \'sct_crop_image\') and generate\n'\
        #       'a new spinalcord centerline/segmentation from this cropped image.\n'
        #print len((numpy.array(x_centerline)>=0).nonzero()[0]),len((numpy.array(y_centerline)>=0).nonzero()[0])
        min_centerline = min((numpy.array(x_centerline) >= 0).nonzero()[0])
        max_centerline = max((numpy.array(x_centerline) >= 0).nonzero()[0])
        z_centerline = z_centerline[(min_centerline):(max_centerline + 1)]
        #print len(z_centerline)
        nz = len(z_centerline)
        x_centerline = [x for x in x_centerline if not isnan(x)]
        y_centerline = [y for y in y_centerline if not isnan(y)]
        #print len(x_centerline),len(y_centerline)

    # clear variable
    del data

    # Fit the centerline points with the kind of curve given as argument of the script and return the new fitted coordinates
    if centerline_fitting == 'splines':
        x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = msct_smooth.b_spline_nurbs(
            x_centerline, y_centerline, z_centerline)
        #x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)
    elif centerline_fitting == 'polynomial':
        x_centerline_fit, y_centerline_fit, polyx, polyy = polynome_centerline(
            x_centerline, y_centerline, z_centerline)
        #numpy.interp([i for i in xrange(0,min_centerline+1)],
        #y_centerline_fit

    #print z_centerline

    if verbose == 2:
        # plot centerline
        ax = plt.subplot(1, 2, 1)
        plt.plot(x_centerline, z_centerline, 'b:', label='centerline')
        plt.plot(x_centerline_fit, z_centerline, 'r-', label='fit')
        plt.xlabel('x')
        plt.ylabel('z')
        ax = plt.subplot(1, 2, 2)
        plt.plot(y_centerline, z_centerline, 'b:', label='centerline')
        plt.plot(y_centerline_fit, z_centerline, 'r-', label='fit')
        plt.xlabel('y')
        plt.ylabel('z')
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles, labels)
        plt.show()

    # Get coordinates of landmarks along curved centerline
    #==========================================================================================
    print '\nGet coordinates of landmarks along curved centerline...'
    # landmarks are created along the curved centerline every z=gapz. They consist of a "cross" of size gapxy.
    # find derivative of polynomial
    step_z = int(round(nz / gapz))
    #iz_curved = [i for i in range (0, nz, gapz)]
    iz_curved = [(min(z_centerline) + i * step_z) for i in range(0, gapz)]
    iz_curved.append(max(z_centerline))
    #print iz_curved, len(iz_curved)
    n_iz_curved = len(iz_curved)
    #print n_iz_curved
    landmark_curved = [[[0 for i in range(0, 3)] for i in range(0, 5)]
                       for i in iz_curved]
    # print x_centerline_deriv,len(x_centerline_deriv)
    # landmark[a][b][c]
    #   a: index along z. E.g., the first cross with have index=0, the next index=1, and so on...
    #   b: index of element on the cross. I.e., 0: center of the cross, 1: +x, 2 -x, 3: +y, 4: -y
    #   c: dimension, i.e., 0: x, 1: y, 2: z
    # loop across index, which corresponds to iz (points along the centerline)

    if centerline_fitting == 'polynomial':
        for index in range(0, n_iz_curved, 1):
            # set coordinates for landmark at the center of the cross
            landmark_curved[index][0][0], landmark_curved[index][0][
                1], landmark_curved[index][0][2] = x_centerline_fit[
                    iz_curved[index]], y_centerline_fit[
                        iz_curved[index]], iz_curved[index]
            # set x and z coordinates for landmarks +x and -x
            landmark_curved[index][1][2], landmark_curved[index][1][
                0], landmark_curved[index][2][2], landmark_curved[index][2][
                    0] = get_points_perpendicular_to_curve(
                        polyx, polyx.deriv(), iz_curved[index], gapxy)
            # set y coordinate to y_centerline_fit[iz] for elements 1 and 2 of the cross
            for i in range(1, 3):
                landmark_curved[index][i][1] = y_centerline_fit[
                    iz_curved[index]]
            # set coordinates for landmarks +y and -y. Here, x coordinate is 0 (already initialized).
            landmark_curved[index][3][2], landmark_curved[index][3][
                1], landmark_curved[index][4][2], landmark_curved[index][4][
                    1] = get_points_perpendicular_to_curve(
                        polyy, polyy.deriv(), iz_curved[index], gapxy)
            # set x coordinate to x_centerline_fit[iz] for elements 3 and 4 of the cross
            for i in range(3, 5):
                landmark_curved[index][i][0] = x_centerline_fit[
                    iz_curved[index]]

    elif centerline_fitting == 'splines':
        for index in range(0, n_iz_curved, 1):
            # calculate d (ax+by+cz+d=0)
            # print iz_curved[index]
            a = x_centerline_deriv[iz_curved[index] - min(z_centerline)]
            b = y_centerline_deriv[iz_curved[index] - min(z_centerline)]
            c = z_centerline_deriv[iz_curved[index] - min(z_centerline)]
            x = x_centerline_fit[iz_curved[index] - min(z_centerline)]
            y = y_centerline_fit[iz_curved[index] - min(z_centerline)]
            z = iz_curved[index]
            d = -(a * x + b * y + c * z)
            #print a,b,c,d,x,y,z
            # set coordinates for landmark at the center of the cross
            landmark_curved[index][0][0], landmark_curved[index][0][
                1], landmark_curved[index][0][2] = x_centerline_fit[
                    iz_curved[index] - min(z_centerline)], y_centerline_fit[
                        iz_curved[index] - min(z_centerline)], iz_curved[index]

            # set y coordinate to y_centerline_fit[iz] for elements 1 and 2 of the cross
            for i in range(1, 3):
                landmark_curved[index][i][1] = y_centerline_fit[
                    iz_curved[index] - min(z_centerline)]

            # set x and z coordinates for landmarks +x and -x, forcing the landmark to be in the orthogonal plane and the landmark-to-curve distance to be gapxy
            x_n = Symbol('x_n')
            landmark_curved[index][2][0], landmark_curved[index][1][0] = solve(
                (x_n - x)**2 + ((-1 / c) * (a * x_n + b * y + d) - z)**2 -
                gapxy**2, x_n)  #x for -x and +x
            landmark_curved[index][1][2] = (-1 / c) * (
                a * landmark_curved[index][1][0] + b * y + d)  #z for +x
            landmark_curved[index][2][2] = (-1 / c) * (
                a * landmark_curved[index][2][0] + b * y + d)  #z for -x

            # set x coordinate to x_centerline_fit[iz] for elements 3 and 4 of the cross
            for i in range(3, 5):
                landmark_curved[index][i][0] = x_centerline_fit[
                    iz_curved[index] - min(z_centerline)]

            # set coordinates for landmarks +y and -y. Here, x coordinate is 0 (already initialized).
            y_n = Symbol('y_n')
            landmark_curved[index][4][1], landmark_curved[index][3][1] = solve(
                (y_n - y)**2 + ((-1 / c) * (a * x + b * y_n + d) - z)**2 -
                gapxy**2, y_n)  #y for -y and +y
            landmark_curved[index][3][2] = (-1 / c) * (
                a * x + b * landmark_curved[index][3][1] + d)  #z for +y
            landmark_curved[index][4][2] = (-1 / c) * (
                a * x + b * landmark_curved[index][4][1] + d)  #z for -y
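
            # Derivation of the constraint solved above (a restatement of the code, no new computation):
            # the plane orthogonal to the centerline at (x, y, z) has normal (a, b, c), i.e.
            # a*x_n + b*y_n + c*z_n + d = 0. Keeping y_n = y and substituting z_n = -(a*x_n + b*y + d)/c,
            # the condition (x_n - x)**2 + (z_n - z)**2 = gapxy**2 yields the two x positions of the
            # +x/-x landmarks; the same reasoning with x_n = x yields the +y/-y landmarks.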

#    #display
#    fig = plt.figure()
#    ax = fig.add_subplot(111, projection='3d')
#    ax.plot(x_centerline_fit, y_centerline_fit,z_centerline, 'g')
#    ax.plot(x_centerline, y_centerline,z_centerline, 'r')
#    ax.plot([landmark_curved[i][j][0] for i in range(0, n_iz_curved) for j in range(0, 5)], \
#           [landmark_curved[i][j][1] for i in range(0, n_iz_curved) for j in range(0, 5)], \
#           [landmark_curved[i][j][2] for i in range(0, n_iz_curved) for j in range(0, 5)], '.')
#    ax.set_xlabel('x')
#    ax.set_ylabel('y')
#    ax.set_zlabel('z')
#    plt.show()

# Get coordinates of landmarks along straight centerline
#==========================================================================================
    print '\nGet coordinates of landmarks along straight centerline...'
    landmark_straight = [[[0 for i in range(0, 3)] for i in range(0, 5)]
                         for i in iz_curved
                         ]  # same structure as landmark_curved

    # calculate the z indices corresponding to the Euclidean distance between two consecutive points on the curved centerline (approximation curve --> line)
    iz_straight = [(min(z_centerline) + 0) for i in range(0, gapz + 1)]
    #print iz_straight,len(iz_straight)
    for index in range(1, n_iz_curved, 1):
        # compute vector between two consecutive points on the curved centerline
        vector_centerline = [x_centerline_fit[iz_curved[index]-min(z_centerline)] - x_centerline_fit[iz_curved[index-1]-min(z_centerline)], \
                             y_centerline_fit[iz_curved[index]-min(z_centerline)] - y_centerline_fit[iz_curved[index-1]-min(z_centerline)], \
                             iz_curved[index] - iz_curved[index-1]]
        # compute norm of this vector
        norm_vector_centerline = numpy.linalg.norm(vector_centerline, ord=2)
        # round to closest integer value
        norm_vector_centerline_rounded = int(round(norm_vector_centerline, 0))
        # assign this value to the current z-coordinate on the straight centerline
        iz_straight[index] = iz_straight[index -
                                         1] + norm_vector_centerline_rounded
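
        # Worked example (illustrative numbers): if two consecutive cross centers differ by 1.2 voxels
        # in x, 0.5 in y and 20 slices in z, the straight gap is round(sqrt(1.2**2 + 0.5**2 + 20**2))
        # = round(20.04) = 20 slices.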

    # initialize x0 and y0 to be at the center of the FOV
    x0 = int(round(nx / 2))
    y0 = int(round(ny / 2))
    for index in range(0, n_iz_curved, 1):
        # set coordinates for landmark at the center of the cross
        landmark_straight[index][0][0], landmark_straight[index][0][
            1], landmark_straight[index][0][2] = x0, y0, iz_straight[index]
        # set x, y and z coordinates for landmarks +x
        landmark_straight[index][1][0], landmark_straight[index][1][
            1], landmark_straight[index][1][2] = x0 + gapxy, y0, iz_straight[
                index]
        # set x, y and z coordinates for landmarks -x
        landmark_straight[index][2][0], landmark_straight[index][2][
            1], landmark_straight[index][2][2] = x0 - gapxy, y0, iz_straight[
                index]
        # set x, y and z coordinates for landmarks +y
        landmark_straight[index][3][0], landmark_straight[index][3][
            1], landmark_straight[index][3][2] = x0, y0 + gapxy, iz_straight[
                index]
        # set x, y and z coordinates for landmarks -y
        landmark_straight[index][4][0], landmark_straight[index][4][
            1], landmark_straight[index][4][2] = x0, y0 - gapxy, iz_straight[
                index]

    # # display
    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')
    # #ax.plot(x_centerline_fit, y_centerline_fit,z_centerline, 'r')
    # ax.plot([landmark_straight[i][j][0] for i in range(0, n_iz_curved) for j in range(0, 5)], \
    #        [landmark_straight[i][j][1] for i in range(0, n_iz_curved) for j in range(0, 5)], \
    #        [landmark_straight[i][j][2] for i in range(0, n_iz_curved) for j in range(0, 5)], '.')
    # ax.set_xlabel('x')
    # ax.set_ylabel('y')
    # ax.set_zlabel('z')
    # plt.show()
    #

    # Create NIFTI volumes with landmarks
    #==========================================================================================
    # Pad input volume to deal with the fact that some landmarks on the curved centerline might be outside the FOV
    # N.B. IT IS VERY IMPORTANT TO PAD ALSO ALONG X and Y, OTHERWISE SOME LANDMARKS MIGHT GET OUT OF THE FOV!!!
    print '\nPad input volume to deal with the fact that some landmarks on the curved centerline might be outside the FOV...'
    sct.run('isct_c3d ' + fname_centerline_orient + ' -pad ' + str(padding) +
            'x' + str(padding) + 'x' + str(padding) + 'vox ' + str(padding) +
            'x' + str(padding) + 'x' + str(padding) +
            'vox 0 -o tmp.centerline_pad.nii.gz')

    # TODO: don't pad input volume: no need for that! instead, try to increase size of hdr when saving landmarks.

    # Open padded centerline for reading
    print '\nOpen padded centerline for reading...'
    file = nibabel.load('tmp.centerline_pad.nii.gz')
    data = file.get_data()
    hdr = file.get_header()

    # Create volumes containing curved and straight landmarks
    data_curved_landmarks = data * 0
    data_straight_landmarks = data * 0
    # initialize landmark value
    landmark_value = 1
    # Loop across cross index
    for index in range(0, n_iz_curved, 1):
        # loop across cross element index
        for i_element in range(0, 5, 1):
            # get x, y and z coordinates of curved landmark (rounded to closest integer)
            x, y, z = int(round(landmark_curved[index][i_element][0])), int(
                round(landmark_curved[index][i_element][1])), int(
                    round(landmark_curved[index][i_element][2]))
            # attribute landmark_value to the voxel and its neighbours
            data_curved_landmarks[x + padding - 1:x + padding + 2,
                                  y + padding - 1:y + padding + 2, z +
                                  padding - 1:z + padding + 2] = landmark_value
            # get x, y and z coordinates of straight landmark (rounded to closest integer)
            x, y, z = int(round(landmark_straight[index][i_element][0])), int(
                round(landmark_straight[index][i_element][1])), int(
                    round(landmark_straight[index][i_element][2]))
            # attribute landmark_value to the voxel and its neighbours
            data_straight_landmarks[x + padding - 1:x + padding + 2,
                                    y + padding - 1:y + padding + 2,
                                    z + padding - 1:z + padding +
                                    2] = landmark_value
            # increment landmark value
            landmark_value = landmark_value + 1

    # Write NIFTI volumes
    hdr.set_data_dtype(
        'uint32')  # set image type to uint32 #TODO: maybe use int32
    print '\nWrite NIFTI volumes...'
    img = nibabel.Nifti1Image(data_curved_landmarks, None, hdr)
    nibabel.save(img, 'tmp.landmarks_curved.nii.gz')
    print '.. File created: tmp.landmarks_curved.nii.gz'
    img = nibabel.Nifti1Image(data_straight_landmarks, None, hdr)
    nibabel.save(img, 'tmp.landmarks_straight.nii.gz')
    print '.. File created: tmp.landmarks_straight.nii.gz'

    # Estimate deformation field by pairing landmarks
    #==========================================================================================

    # Dilate landmarks (because nearest-neighbour interpolation will be used later, so some landmarks may "disappear" if they are single points)
    #print '\nDilate landmarks...'
    #sct.run(fsloutput+'fslmaths tmp.landmarks_curved.nii -kernel box 3x3x3 -dilD tmp.landmarks_curved_dilated -odt short')
    #sct.run(fsloutput+'fslmaths tmp.landmarks_straight.nii -kernel box 3x3x3 -dilD tmp.landmarks_straight_dilated -odt short')

    # Estimate rigid transformation
    print '\nEstimate rigid transformation between paired landmarks...'
    sct.run(
        'isct_ANTSUseLandmarkImagesToGetAffineTransform tmp.landmarks_straight.nii.gz tmp.landmarks_curved.nii.gz rigid tmp.curve2straight_rigid.txt'
    )

    # Apply rigid transformation
    print '\nApply rigid transformation to curved landmarks...'
    sct.run(
        'sct_WarpImageMultiTransform 3 tmp.landmarks_curved.nii.gz tmp.landmarks_curved_rigid.nii.gz -R tmp.landmarks_straight.nii.gz tmp.curve2straight_rigid.txt --use-NN'
    )

    # Estimate b-spline transformation curve --> straight
    print '\nEstimate b-spline transformation: curve --> straight...'
    sct.run(
        'isct_ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_straight.nii.gz tmp.landmarks_curved_rigid.nii.gz tmp.warp_curve2straight.nii.gz 5x5x5 3 2 0'
    )

    # Concatenate rigid and non-linear transformations...
    print '\nConcatenate rigid and non-linear transformations...'
    #sct.run('isct_ComposeMultiTransform 3 tmp.warp_rigid.nii -R tmp.landmarks_straight.nii tmp.warp.nii tmp.curve2straight_rigid.txt')
    # TODO: use sct.run() when the output of the following command is different from 0 (currently there seems to be a bug)
    cmd = 'isct_ComposeMultiTransform 3 tmp.curve2straight.nii.gz -R tmp.landmarks_straight.nii.gz tmp.warp_curve2straight.nii.gz tmp.curve2straight_rigid.txt'
    print('>> ' + cmd)
    commands.getstatusoutput(cmd)

    # Estimate b-spline transformation straight --> curve
    # TODO: invert warping field instead of estimating a new one
    print '\nEstimate b-spline transformation: straight --> curve...'
    sct.run(
        'isct_ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_curved_rigid.nii.gz tmp.landmarks_straight.nii.gz tmp.warp_straight2curve.nii.gz 5x5x5 3 2 0'
    )

    # Concatenate rigid and non-linear transformations...
    print '\nConcatenate rigid and non-linear transformations...'
    #sct.run('isct_ComposeMultiTransform 3 tmp.warp_rigid.nii -R tmp.landmarks_straight.nii tmp.warp.nii tmp.curve2straight_rigid.txt')
    # TODO: use sct.run() when the output of the following command is different from 0 (currently there seems to be a bug)
    cmd = 'isct_ComposeMultiTransform 3 tmp.straight2curve.nii.gz -R tmp.landmarks_straight.nii.gz -i tmp.curve2straight_rigid.txt tmp.warp_straight2curve.nii.gz'
    print('>> ' + cmd)
    commands.getstatusoutput(cmd)

    #print '\nPad input image...'
    #sct.run('isct_c3d '+fname_anat+' -pad '+str(padz)+'x'+str(padz)+'x'+str(padz)+'vox '+str(padz)+'x'+str(padz)+'x'+str(padz)+'vox 0 -o tmp.anat_pad.nii')

    # Unpad landmarks...
    # THIS WAS REMOVED ON 2014-06-03 because the output data was cropped at the edge, which caused landmarks to sometimes disappear
    # print '\nUnpad landmarks...'
    # sct.run('fslroi tmp.landmarks_straight.nii.gz tmp.landmarks_straight_crop.nii.gz '+str(padding)+' '+str(nx)+' '+str(padding)+' '+str(ny)+' '+str(padding)+' '+str(nz))

    # Apply deformation to input image
    print '\nApply transformation to input image...'
    sct.run('sct_WarpImageMultiTransform 3 ' + file_anat + ext_anat +
            ' tmp.anat_rigid_warp.nii.gz -R tmp.landmarks_straight.nii.gz ' +
            interpolation_warp + ' tmp.curve2straight.nii.gz')
    # sct.run('sct_WarpImageMultiTransform 3 '+fname_anat+' tmp.anat_rigid_warp.nii.gz -R tmp.landmarks_straight_crop.nii.gz '+interpolation_warp+ ' tmp.curve2straight.nii.gz')

    # come back to parent folder
    os.chdir('..')

    # Generate output file (in current folder)
    # TODO: do not uncompress the warping field, it is too time consuming!
    print '\nGenerate output file (in current folder)...'
    sct.generate_output_file(path_tmp + '/tmp.curve2straight.nii.gz', '',
                             'warp_curve2straight', '.nii.gz')  # warping field
    sct.generate_output_file(path_tmp + '/tmp.straight2curve.nii.gz', '',
                             'warp_straight2curve', '.nii.gz')  # warping field
    sct.generate_output_file(path_tmp + '/tmp.anat_rigid_warp.nii.gz', '',
                             file_anat + '_straight',
                             ext_anat)  # straightened anatomic

    # Remove temporary files
    if remove_temp_files == 1:
        print('\nRemove temporary files...')
        sct.run('rm -rf ' + path_tmp)

    print '\nDone!\n'
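

# A minimal sketch (not part of the original scripts) of the 5-point landmark "cross" layout used
# above: element 0 is the cross center, 1/2 are +x/-x, 3/4 are +y/-y, each an [x, y, z] triplet,
# matching the landmark[a][b][c] indexing described in the comments above (shown here for the
# straight case; the function and argument names are illustrative).
def make_straight_cross_sketch(x0, y0, z, gapxy):
    return [[x0, y0, z],
            [x0 + gapxy, y0, z],
            [x0 - gapxy, y0, z],
            [x0, y0 + gapxy, z],
            [x0, y0 - gapxy, z]]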
        os.chdir(PATH_OUTPUT + '/' + subject + '/' + 'T2')

        ## Create a cross after recropping (cross in landmark_native.nii.gz)
        #  Detect extrema: (same code as sct_detect_extrema except for the detection of the center of mass)

        # Apply transfo to seg_and_labels.nii.gz which replace the centerline file
        sct.run(
            'sct_apply_transfo -i seg_and_labels.nii.gz -d data_RPI_crop_denoised_straight.nii.gz -w warp_curve2straight.nii.gz -x nn'
        )

        file = nibabel.load('seg_and_labels_reg.nii.gz')
        data_c = file.get_data()
        hdr = file.get_header()

        # Get center of mass of the centerline (no centerline file anymore: replaced by seg_and_labels_reg.nii.gz)
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(
            'seg_and_labels_reg.nii.gz')
        z_centerline = [iz for iz in range(0, nz, 1) if data_c[:, :, iz].any()]
        nz_nonz = len(z_centerline)
        x_centerline = [0 for iz in range(0, nz_nonz, 1)]
        y_centerline = [0 for iz in range(0, nz_nonz, 1)]
        for iz in xrange(len(z_centerline)):
            x_centerline[iz], y_centerline[
                iz] = ndimage.measurements.center_of_mass(
                    array(data_c[:, :, z_centerline[iz]]))

        X, Y, Z = (data_c > 0).nonzero()

        x_max, y_max = (data_c[:, :, max(Z)]).nonzero()
        x_max = x_max[0]
        y_max = y_max[0]
        z_max = max(Z)
def main():

   #Initialization
   fname = ''
   fname_centerline = ''
   mean_intensity = param.mean_intensity
   verbose = param.verbose
   padding = param.padding
   window_length = param.window_length

   try:
        opts, args = getopt.getopt(sys.argv[1:],'hi:c:v:p:')
   except getopt.GetoptError:
       usage()
   for opt, arg in opts :
       if opt == '-h':
           usage()
       elif opt in ("-i"):
           fname = arg
       elif opt in ("-c"):
           fname_centerline = arg
       elif opt in ("-p"):
           window_length = int(arg)
       elif opt in ('-v'):
           verbose = int(arg)

   # display usage if a mandatory argument is not provided
   #if fname == '' or fname_centerline == '':
   if fname == '':
       usage()


   # check existence of input files
   print '\nCheck if file exists ...'
   sct.check_file_exist(fname)
   #sct.check_file_exist(fname_centerline)

   # Display arguments
   print '\nCheck input arguments...'
   print '  Input volume ...................... '+fname
   print '  Centerline ........................ '+fname_centerline
   print '  Verbose ........................... '+str(verbose)

   # Extract path, file and extension
   path_input, file_input, ext_input = sct.extract_fname(fname)


   sct.printv('\nOpen volume...',verbose)
   file = nibabel.load(fname)
   data = file.get_data()
   hdr = file.get_header()

   if fname_centerline != '':
   ## [Process 1] Command for extracting center of mass for each slice of the centerline file if provided
       sct.printv('\nOpen centerline...',verbose)
       print '\nGet dimensions of input centerline...'
       nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_centerline)
       print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
       print '.. voxel size:  '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'
       file_c = nibabel.load(fname_centerline)
       data_c = file_c.get_data()


       #X,Y,Z = (data_c>0).nonzero()

       #min_z_index, max_z_index = min(Z), max(Z)


       z_centerline = [iz for iz in range(0, nz, 1) if data_c[:,:,iz].any() ]
       nz_nonz = len(z_centerline)
       if nz_nonz==0 :
           print '\nERROR: Centerline is empty'
           sys.exit()
       x_centerline = [0 for iz in range(0, nz_nonz, 1)]
       y_centerline = [0 for iz in range(0, nz_nonz, 1)]
       #print("z_centerline", z_centerline,nz_nonz,len(x_centerline))
       print '\nGet center of mass of the centerline ...'
       for iz in xrange(len(z_centerline)):
           x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(np.array(data_c[:,:,z_centerline[iz]]))
   # end of Process 1

   ## [Process 2] Process for defining the middle vertical line as reference for normalizing the intensity of the image
   if fname_centerline == '':
       print '\nGet dimensions of input image...'
       nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname)
       print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
       print '.. voxel size:  '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'
       z_centerline = [iz for iz in range(0, nz, 1)]
       nz_nonz = len(z_centerline)
       x_middle = int(round(nx/2))
       y_middle = int(round(ny/2))
       x_centerline = [x_middle for iz in range(0, nz, 1)]
       y_centerline = [y_middle for iz in range(0, nz, 1)]
       # end of Process 2

   means = [0 for iz in range(0, nz_nonz, 1)]

   print '\nGet mean intensity along the centerline ...'
   for iz in xrange(len(z_centerline)):
       means[iz] = np.mean(data[(int(round(x_centerline[iz]))-padding):(int(round(x_centerline[iz]))+padding),
                                (int(round(y_centerline[iz]))-padding):(int(round(y_centerline[iz]))+padding),
                                z_centerline[iz]])


   # print('\nSmoothing results with spline...')
   # # Smoothing with scipy library (Julien Touati's code)
   # m =np.mean(means)
   # sigma = np.std(means)
   # smoothing_param = (((m + np.sqrt(2*m))*(sigma**2))+((m - np.sqrt(2*m))*(sigma**2)))/2
   # #Equivalent to : m*sigma**2
   # tck = splrep(z_centerline, means, s=smoothing_param)
   # means_smooth = splev(z_centerline, tck)

   # Smoothing with low-pass filter
   print '\nSmoothing with lowpass filter: butterworth order 5...'
   from msct_smooth import lowpass
   means_smooth = lowpass(means)
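
   # A minimal sketch (assumption, not the toolbox code) of what msct_smooth.lowpass is
   # expected to do: a zero-phase, 5th-order Butterworth low-pass applied with scipy.
   # The normalized cutoff below is illustrative only; 'lowpass_sketch' is a hypothetical
   # name and is not called anywhere.
   def lowpass_sketch(signal_1d, cutoff=0.1, order=5):
       from scipy.signal import butter, filtfilt
       b, a = butter(order, cutoff, btype='low')  # design the low-pass filter
       return filtfilt(b, a, signal_1d)  # forward-backward filtering: no phase shift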



   # #Smoothing with nurbs
   #points = [[means[n],0, z_centerline[n]] for n in range(len(z_centerline))]
   #nurbs = NURBS(3,1000,points)
   #P = nurbs.getCourbe3D()
   #means_smooth=P[0]  #size of means_smooth? should be bigger than len(z_centerline)

   # #Smoothing with hanning
   # print('\nSmoothing results with hanning windowing...')
   # means = np.asarray(means)
   # means_smooth = smoothing_window(means, window_len=window_length)
   # print means.shape[0], means_smooth.shape[0]

   if verbose :
       plt.figure()
       #plt.subplot(2,1,1)
       plt.plot(z_centerline,means, "ro")
       #plt.subplot(2,1,2)
       plt.plot(means_smooth)
       plt.title("Mean intensity: Type of window: hanning     Window_length= %d mm" % window_length)
       plt.show()
   print('\nNormalizing intensity along centerline...')


   # Define the extended mean intensity profile for all slices of the spinal cord
   means_smooth_extended = [0 for i in range(0, data.shape[2], 1)]
   for iz in range(len(z_centerline)):
       means_smooth_extended[z_centerline[iz]] = means_smooth[iz]


   X_means_smooth_extended = np.nonzero(means_smooth_extended)
   X_means_smooth_extended = np.transpose(X_means_smooth_extended)

   if len(X_means_smooth_extended) != 0:
        means_smooth_extended[0] = means_smooth_extended[X_means_smooth_extended[0]]
        means_smooth_extended[-1] = means_smooth_extended[X_means_smooth_extended[-1]]
         # Add two entries to X_means_smooth_extended: one at index 0 and one at the
         # last index, since both ends of means_smooth_extended are now non-zero
        X_means_smooth_extended = np.append(X_means_smooth_extended, len(means_smooth_extended)-1)
        X_means_smooth_extended = np.insert(X_means_smooth_extended, 0, 0)
        #linear interpolation
        count_zeros=0
        for i in range(1,len(means_smooth_extended)-1):
            if means_smooth_extended[i]==0:
                means_smooth_extended[i] = 0.5 * (means_smooth_extended[X_means_smooth_extended[i-1-count_zeros]] + means_smooth_extended[X_means_smooth_extended[i-count_zeros]]) # linear interpolation with closest non zero points
                 # redefine X_means_smooth_extended
                X_means_smooth_extended = np.nonzero(means_smooth_extended)
                X_means_smooth_extended = np.transpose(X_means_smooth_extended)


   #recurrence
   # count_zeros=0
   # for i in range(1,len(means_smooth_extended)-1):
   #     if means_smooth_extended[i]==0:
   #          means_smooth_extended[i] = 0.5*(means_smooth_extended[X_means_smooth_extended[i-1-count_zeros]] + means_smooth_extended[X_means_smooth_extended[i-count_zeros]])
   #          # redefine X_mask_extended
   #          X_mask_completed = np.nonzero(means_smooth_extended)
   #          X_mask_completed = np.transpose(X_mask_completed)
   #          #count_zeros += 1
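
   # A compact (hypothetical) equivalent of the extension step above: np.interp maps the
   # smoothed profile onto every slice index, interpolating linearly across gaps and
   # clamping to the end values outside the centerline range. Kept in a separate
   # variable so the logic above remains the one actually used.
   means_smooth_extended_alt = np.interp(np.arange(data.shape[2]), z_centerline, means_smooth)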
   if verbose :
       plt.figure()

       plt.subplot(2,1,1)
       plt.plot(z_centerline,means)
       plt.plot(z_centerline,means_smooth)
       plt.title("Mean intensity")

       plt.subplot(2,1,2)
       plt.plot(z_centerline,means)
       plt.plot(means_smooth_extended)
       plt.title("Extended mean intensity")

       plt.show()

   for i in range(data.shape[2]):
       data[:,:,i] = data[:,:,i] * (mean_intensity/means_smooth_extended[i])

   hdr.set_data_dtype('uint16') # set imagetype to uint16
   # save volume
   sct.printv('\nWrite NIFTI volumes...',verbose)
   data = data.astype(np.float32, copy =False)
   img = nibabel.Nifti1Image(data, None, hdr)
   output_name = file_input+'_normalized'+ext_input
   nibabel.save(img,output_name)
   sct.printv('\n.. File created:' + output_name,verbose)

   print('\nNormalizing overall intensity...')
   # sct.run('fslmaths ' + output_name + ' -inm ' + str(mean_intensity) + ' ' + output_name)

   # to view results
   print '\nDone !'
   print '\nTo view results, type:'
   print 'fslview '+output_name+' &\n'
def get_or_set_orientation():

    fsloutput = 'export FSLOUTPUTTYPE=NIFTI; '  # for faster processing, all outputs are in NIFTI

    # display usage if a mandatory argument is not provided
    if param.fname_data == '':
        sct.printv('ERROR: All mandatory arguments are not provided. See usage.', 1, 'error')

    # check existence of input files
    sct.printv('\ncheck existence of input files...', param.verbose)
    sct.check_file_exist(param.fname_data, param.verbose)

    # find what to do
    if param.orientation == '':
        todo = 'get_orientation'
    else:
        todo = 'set_orientation'
        # check if orientation is correct
        if check_orientation_input():
            sct.printv('\nERROR in '+os.path.basename(__file__)+': orientation is not recognized. Use one of the following orientation: '+param.list_of_correct_orientation+'\n', 1, 'error')
            sys.exit(2)

    # display input parameters
    sct.printv('\nInput parameters:', param.verbose)
    sct.printv('  data ..................'+param.fname_data, param.verbose)

    # Extract path/file/extension
    path_data, file_data, ext_data = sct.extract_fname(param.fname_data)
    if param.fname_out == '':
        # path_out, file_out, ext_out = '', file_data+'_'+param.orientation, ext_data
        fname_out = path_data+file_data+'_'+param.orientation+ext_data
    else:
        fname_out = param.fname_out

    # create temporary folder
    sct.printv('\nCreate temporary folder...', param.verbose)
    path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1)
    sct.run('mkdir '+path_tmp, param.verbose)

    # Copying input data to tmp folder and convert to nii
    # NB: cannot use c3d here because c3d cannot convert 4D data.
    sct.printv('\nCopying input data to tmp folder and convert to nii...', param.verbose)
    sct.run('cp '+param.fname_data+' '+path_tmp+'data'+ext_data, param.verbose)

    # go to tmp folder
    os.chdir(path_tmp)

    # convert to nii format
    sct.run('fslchfiletype NIFTI data', param.verbose)

    # Get dimensions of data
    sct.printv('\nGet dimensions of data...', param.verbose)
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('data.nii')
    sct.printv('  ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz)+ ' x ' + str(nt), param.verbose)

    # if 4d, loop across the data
    if nt == 1:
        if todo == 'set_orientation':
            # set orientation
            sct.printv('\nChange orientation...', param.verbose)
            set_orientation('data.nii', param.orientation, 'data_orient.nii')
        elif todo == 'get_orientation':
            # get orientation
            sct.printv('\nGet orientation...', param.verbose)
            sct.printv(get_orientation('data.nii'), 1)

    else:
        # split along T dimension
        sct.printv('\nSplit along T dimension...', param.verbose)
        sct.run(fsloutput+'fslsplit data data_T', param.verbose)

        if todo == 'set_orientation':
            # set orientation
            sct.printv('\nChange orientation...', param.verbose)
            for it in range(nt):
                file_data_split = 'data_T'+str(it).zfill(4)+'.nii'
                file_data_split_orient = 'data_orient_T'+str(it).zfill(4)+'.nii'
                set_orientation(file_data_split, param.orientation, file_data_split_orient)
            # Merge files back
            sct.printv('\nMerge file back...', param.verbose)
            cmd = fsloutput+'fslmerge -t data_orient'
            for it in range(nt):
                file_data_split_orient = 'data_orient_T'+str(it).zfill(4)+'.nii'
                cmd = cmd+' '+file_data_split_orient
            sct.run(cmd, param.verbose)

        elif todo == 'get_orientation':
            sct.printv('\nGet orientation...', param.verbose)
            sct.printv(get_orientation('data_T0000.nii'), 1)

    # come back to parent folder
    os.chdir('..')

    # Generate output files
    if todo == 'set_orientation':
        sct.printv('\nGenerate output files...', param.verbose)
        sct.generate_output_file(path_tmp+'data_orient.nii', fname_out)

    # Remove temporary files
    if param.remove_tmp_files == 1:
        sct.printv('\nRemove temporary files...', param.verbose)
        sct.run('rm -rf '+path_tmp, param.verbose)

    # to view results
    if todo == 'set_orientation':
        sct.printv('\nDone! To view results, type:', param.verbose)
        sct.printv('fslview '+fname_out+' &', param.verbose, 'code')
        print
def main():
    
    #Initialization
    fname = ''
    fname_template = ''
    verbose = param.verbose
    gapxy = param.gapxy
    cross = param.cross
    x = ''
    y = ''
    zmin = ''
    zmax = ''

    try:
         opts, args = getopt.getopt(sys.argv[1:],'hi:x:y:s:e:c:t:v:')
    except getopt.GetoptError:
        usage()
    for opt, arg in opts :
        if opt == '-h':
            usage()
        elif opt in ("-i"):
            fname = arg
        elif opt in ('-x'):
            x = int(arg)
        elif opt in ('-y'):
            y = int(arg)
        elif opt in ('-s'):
            zmin = int(arg)
        elif opt in ('-e'):
            zmax = int(arg)
        elif opt in ('-c'):
            cross = arg    
        elif opt in ("-t"):
            fname_template = arg

        elif opt in ('-v'):
            verbose = int(arg)
    
    # display usage if a mandatory argument is not provided
    if fname == '' and fname_template == '':
        usage()
    
    # check existence of input files
    print '\nCheck if file exists ...'
    
    if fname != '':
        sct.check_file_exist(fname)
    
    if fname_template != '':
        sct.check_file_exist(fname_template)
        
    if cross not in ['mm','voxel'] :
        usage()    
    
    # Display arguments
    print '\nCheck input arguments...'
    print '  Input volume ...................... '+fname
    print '  Template .......................... '+fname_template
    print '  Verbose ........................... '+str(verbose)
    
    if fname != '':
        print '\nGet dimensions of input...'
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname)
        print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
        print '.. voxel size:  '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'
    
        file = nibabel.load(fname)
        data = file.get_data()
        hdr = file.get_header()
    
        data = data*0

        list_opts = []
        for i in range(len(opts)):
            list_opts.append(opts[i][0])

        if cross == 'mm' :
            gapx = int(round(gapxy/px))
            gapy = int(round(gapxy/py))
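            # e.g. if gapxy were 4 mm and the in-plane resolution 0.5 mm, gapx = gapy = 8 voxels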

            if ("-s") in list_opts and ("-e") in list_opts and zmax < nz:
                data[x,y,zmin] = 1
                data[x,y,zmax] = 2
                data[x+gapx,y,zmax] = 3
                data[x-gapx,y,zmax] = 4
                data[x,y+gapy,zmax] = 5
                data[x,y-gapy,zmax] = 6
            else:
                data[x,y,0] = 1
                data[x,y,nz-1] = 2
                data[x+gapx,y,nz-1] = 3
                data[x-gapx,y,nz-1] = 4
                data[x,y+gapy,nz-1] = 5
                data[x,y-gapy,nz-1] = 6
    
        if cross == 'voxel' :
            gapxy = int(gapxy)

            if ("-s") in list_opts and ("-e") in list_opts and zmax < nz:
                data[x,y,zmin] = 1
                data[x,y,zmax] = 2
                data[x+gapx,y,zmax] = 3
                data[x-gapx,y,zmax] = 4
                data[x,y+gapy,zmax] = 5
                data[x,y-gapy,zmax] = 6
            else:
                data[x,y,0] = 1
                data[x,y,nz-1] = 2
                data[x+gapx,y,nz-1] = 3
                data[x-gapx,y,nz-1] = 4
                data[x,y+gapy,nz-1] = 5
                data[x,y-gapy,nz-1] = 6
    
        print '\nSave volume ...'
        hdr.set_data_dtype('float32') # set image type to float32
        # save volume
        #data = data.astype(float32, copy =False)
        img = nibabel.Nifti1Image(data, None, hdr)
        file_name = 'landmark_native.nii.gz'
        nibabel.save(img,file_name)
        print '\nFile created : ' + file_name
    
    if fname_template != '' :
    
        print '\nGet dimensions of template...'
        nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_template)
        print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
        print '.. voxel size:  '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'
        
        
        file_t = nibabel.load(fname_template)
        data_t = file_t.get_data()
        hdr_t = file_t.get_header()

        data_t = data_t*0
        
        if cross == 'mm':
            
            gapx = int(round(gapxy/px))
            gapy = int(round(gapxy/py))

            if ("-s") in list_opts and ("-e") in list_opts and zmax < nz:
                data_t[int(round(nx/2.0)),int(round(ny/2.0)),zmin] = 1
                data_t[int(round(nx/2.0)),int(round(ny/2.0)),zmax] = 2
                data_t[int(round(nx/2.0)) + gapx,int(round(ny/2.0)),zmax] = 3
                data_t[int(round(nx/2.0)) - gapx,int(round(ny/2.0)),zmax] = 4
                data_t[int(round(nx/2.0)),int(round(ny/2.0)) + gapy,zmax] = 5
                data_t[int(round(nx/2.0)),int(round(ny/2.0)) - gapy,zmax] = 6

            else:
                data_t[int(round(nx/2.0)),int(round(ny/2.0)),0] = 1
                data_t[int(round(nx/2.0)),int(round(ny/2.0)),nz-1] = 2
                data_t[int(round(nx/2.0)) + gapx,int(round(ny/2.0)),nz-1] = 3
                data_t[int(round(nx/2.0)) - gapx,int(round(ny/2.0)),nz-1] = 4
                data_t[int(round(nx/2.0)),int(round(ny/2.0)) + gapy,nz-1] = 5
                data_t[int(round(nx/2.0)),int(round(ny/2.0)) - gapy,nz-1] = 6
 
        if cross == 'voxel':
            
            gapxy = int(gapxy)

            # data_t[int(round(nx/2.0)),int(round(ny/2.0)),0] = 1
            # data_t[int(round(nx/2.0)),int(round(ny/2.0)),nz-1] = 2
            # data_t[int(round(nx/2.0)) + gapxy,int(round(ny/2.0)),nz-1] = 3
            # data_t[int(round(nx/2.0)) - gapxy,int(round(ny/2.0)),nz-1] = 4
            # data_t[int(round(nx/2.0)),int(round(ny/2.0)) + gapxy,nz-1] = 5
            # data_t[int(round(nx/2.0)),int(round(ny/2.0)) - gapxy,nz-1] = 6

            if ("-s") in list_opts and ("-e") in list_opts and zmax < nz:
                data_t[int(round(nx/2.0)),int(round(ny/2.0)),zmin] = 1
                data_t[int(round(nx/2.0)),int(round(ny/2.0)),zmax] = 2
                data_t[int(round(nx/2.0)) + gapxy,int(round(ny/2.0)),zmax] = 3
                data_t[int(round(nx/2.0)) - gapxy,int(round(ny/2.0)),zmax] = 4
                data_t[int(round(nx/2.0)),int(round(ny/2.0)) + gapxy,zmax] = 5
                data_t[int(round(nx/2.0)),int(round(ny/2.0)) - gapxy,zmax] = 6

            else:
                data_t[int(round(nx/2.0)),int(round(ny/2.0)),0] = 1
                data_t[int(round(nx/2.0)),int(round(ny/2.0)),nz-1] = 2
                data_t[int(round(nx/2.0)) + gapxy,int(round(ny/2.0)),nz-1] = 3
                data_t[int(round(nx/2.0)) - gapxy,int(round(ny/2.0)),nz-1] = 4
                data_t[int(round(nx/2.0)),int(round(ny/2.0)) + gapxy,nz-1] = 5
                data_t[int(round(nx/2.0)),int(round(ny/2.0)) - gapxy,nz-1] = 6
            

        print '\nSave volume ...'
        hdr_t.set_data_dtype('float32') # set image type to float32
        # save volume
        #data = data.astype(float32, copy =False)
        img_t = nibabel.Nifti1Image(data_t, None, hdr_t)
        file_name_t = 'template_landmarks.nii.gz'
        nibabel.save(img_t,file_name_t)
        print '\nFile created : ' + file_name_t
def main():
    
    # Initialization
    fname_anat = ''
    fname_centerline = ''
    centerline_fitting = 'polynome'
    remove_temp_files = param.remove_temp_files
    interp = param.interp
    degree_poly = param.deg_poly
    
    # extract path of the script
    path_script = os.path.dirname(__file__)+'/'
    
    # Parameters for debug mode
    if param.debug == 1:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        status, path_sct_data = commands.getstatusoutput('echo $SCT_TESTING_DATA_DIR')
        fname_anat = path_sct_data+'/t2/t2.nii.gz'
        fname_centerline = path_sct_data+'/t2/t2_seg.nii.gz'
    else:
        # Check input param
        try:
            opts, args = getopt.getopt(sys.argv[1:],'hi:c:r:d:f:s:')
        except getopt.GetoptError as err:
            print str(err)
            usage()
        if not opts:
            usage()
        for opt, arg in opts:
            if opt == '-h':
                usage()
            elif opt in ('-i'):
                fname_anat = arg
            elif opt in ('-c'):
                fname_centerline = arg
            elif opt in ('-r'):
                remove_temp_files = int(arg)
            elif opt in ('-d'):
                degree_poly = int(arg)
            elif opt in ('-f'):
                centerline_fitting = str(arg)
            elif opt in ('-s'):
                interp = str(arg)
    
    # display usage if a mandatory argument is not provided
    if fname_anat == '' or fname_centerline == '':
        usage()
    
    # check existence of input files
    sct.check_file_exist(fname_anat)
    sct.check_file_exist(fname_centerline)
    
    # extract path/file/extension
    path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
    
    # Display arguments
    print '\nCheck input arguments...'
    print '  Input volume ...................... '+fname_anat
    print '  Centerline ........................ '+fname_centerline
    print ''
    
    # Get input image orientation
    input_image_orientation = get_orientation(fname_anat)

    # Reorient input data into RL PA IS orientation
    set_orientation(fname_anat, 'RPI', 'tmp.anat_orient.nii')
    set_orientation(fname_centerline, 'RPI', 'tmp.centerline_orient.nii')

    # Open centerline
    #==========================================================================================
    print '\nGet dimensions of input centerline...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('tmp.centerline_orient.nii')
    print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
    print '.. voxel size:  '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'
    
    print '\nOpen centerline volume...'
    file = nibabel.load('tmp.centerline_orient.nii')
    data = file.get_data()

    X, Y, Z = (data>0).nonzero()
    min_z_index, max_z_index = min(Z), max(Z)
    
    
    # loop across z and associate x,y coordinate with the point having maximum intensity
    x_centerline = [0 for iz in range(min_z_index, max_z_index+1, 1)]
    y_centerline = [0 for iz in range(min_z_index, max_z_index+1, 1)]
    z_centerline = [iz for iz in range(min_z_index, max_z_index+1, 1)]

    # Two possible scenarios:
    # 1. the centerline is probabilistic: each slice contains voxels with the probability of containing the centerline [0:...:1]
    # We only take the maximum value of the image to approximate the centerline.
    # 2. The centerline/segmentation image contains many pixels per slice with values {0,1}.
    # We take all the points and approximate the centerline on all these points.

    X, Y, Z = ((data<1)*(data>0)).nonzero() # X is empty if binary image
    if (len(X) > 0): # Scenario 1
        for iz in range(min_z_index, max_z_index+1, 1):
            x_centerline[iz-min_z_index], y_centerline[iz-min_z_index] = numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape)
    else: # Scenario 2
        for iz in range(min_z_index, max_z_index+1, 1):
            x_seg, y_seg = (data[:,:,iz]>0).nonzero()
            if len(x_seg) > 0:
                x_centerline[iz-min_z_index] = numpy.mean(x_seg)
                y_centerline[iz-min_z_index] = numpy.mean(y_seg)

    # TODO: find a way to do the previous loop with this, which is more neat:
    # [numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape) for iz in range(0,nz,1)]
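    # A possible realization of that TODO (untested sketch), restricted to the slices that
    # actually contain the centerline so it matches the loop in scenario 1 above:
    # x_centerline, y_centerline = zip(*[numpy.unravel_index(data[:, :, iz].argmax(), data[:, :, iz].shape)
    #                                    for iz in range(min_z_index, max_z_index+1)])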
    
    # clear variable
    del data
    
    # Fit the centerline points with the kind of curve given as argument of the script and return the new smoothed coordinates
    if centerline_fitting == 'splines':
        try:
            x_centerline_fit, y_centerline_fit = b_spline_centerline(x_centerline,y_centerline,z_centerline)
        except ValueError:
            print "splines fitting doesn't work, trying with polynomial fitting...\n"
            x_centerline_fit, y_centerline_fit = polynome_centerline(x_centerline,y_centerline,z_centerline)
    elif centerline_fitting == 'polynome':
        x_centerline_fit, y_centerline_fit = polynome_centerline(x_centerline,y_centerline,z_centerline)

    #==========================================================================================
    # Split input volume
    print '\nSplit input volume...'
    sct.run(sct.fsloutput + 'fslsplit tmp.anat_orient.nii tmp.anat_z -z')
    file_anat_split = ['tmp.anat_z'+str(z).zfill(4) for z in range(0,nz,1)]

    # initialize variables
    file_mat_inv_cumul = ['tmp.mat_inv_cumul_z'+str(z).zfill(4) for z in range(0,nz,1)]
    z_init = min_z_index
    displacement_max_z_index = x_centerline_fit[z_init-min_z_index]-x_centerline_fit[max_z_index-min_z_index]

    # generate one transformation matrix per slice (plain-text files in FLIRT format)
    print '\nGenerate fitted transformation matrices...'
    file_mat_inv_cumul_fit = ['tmp.mat_inv_cumul_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
    for iz in range(min_z_index, max_z_index+1, 1):
        # compute inverse cumulative fitted transformation matrix
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        if (x_centerline[iz-min_z_index] == 0 and y_centerline[iz-min_z_index] == 0):
            displacement = 0
        else:
            displacement = x_centerline_fit[z_init-min_z_index]-x_centerline_fit[iz-min_z_index]
        fid.write('%i %i %i %f\n' %(1, 0, 0, displacement) )
        fid.write('%i %i %i %f\n' %(0, 1, 0, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
        fid.close()

    # complete the transformation matrices for the slices below and above the fitted range
    for iz in range(0, min_z_index, 1):
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        fid.write('%i %i %i %f\n' %(1, 0, 0, 0) )
        fid.write('%i %i %i %f\n' %(0, 1, 0, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
        fid.close()
    for iz in range(max_z_index+1, nz, 1):
        fid = open(file_mat_inv_cumul_fit[iz], 'w')
        fid.write('%i %i %i %f\n' %(1, 0, 0, displacement_max_z_index) )
        fid.write('%i %i %i %f\n' %(0, 1, 0, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 1, 0) )
        fid.write('%i %i %i %i\n' %(0, 0, 0, 1) )
        fid.close()
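    # Each of these text files is a 4x4 affine in FLIRT's plain-text format; only the
    # x-translation varies. A hypothetical numpy-based equivalent for a single matrix:
    #   mat = numpy.eye(4)
    #   mat[0, 3] = displacement
    #   numpy.savetxt(file_mat_inv_cumul_fit[iz], mat, fmt='%g')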

    # apply transformations to data
    print '\nApply fitted transformation matrices...'
    file_anat_split_fit = ['tmp.anat_orient_fit_z'+str(z).zfill(4) for z in range(0,nz,1)]
    for iz in range(0, nz, 1):
        # forward cumulative transformation to data
        sct.run(sct.fsloutput+'flirt -in '+file_anat_split[iz]+' -ref '+file_anat_split[iz]+' -applyxfm -init '+file_mat_inv_cumul_fit[iz]+' -out '+file_anat_split_fit[iz]+' -interp '+interp)

    # Merge into 4D volume
    print '\nMerge into 4D volume...'
    sct.run(sct.fsloutput+'fslmerge -z tmp.anat_orient_fit tmp.anat_orient_fit_z*')

    # Reorient data as it was before
    print '\nReorient data back into native orientation...'
    set_orientation('tmp.anat_orient_fit.nii', input_image_orientation, 'tmp.anat_orient_fit_reorient.nii')

    # Generate output file (in current folder)
    print '\nGenerate output file (in current folder)...'
    sct.generate_output_file('tmp.anat_orient_fit_reorient.nii', file_anat+'_flatten'+ext_anat)

    # Delete temporary files
    if remove_temp_files == 1:
        print '\nDelete temporary files...'
        sct.run('rm -rf tmp.*')

    # to view results
    print '\nDone! To view results, type:'
    print 'fslview '+file_anat+ext_anat+' '+file_anat+'_flatten'+ext_anat+' &\n'
def resample():

    dim = 4  # by default, will be adjusted later
    fsloutput = 'export FSLOUTPUTTYPE=NIFTI; '  # for faster processing, all outputs are in NIFTI
    ext = '.nii'

    # display usage if a mandatory argument is not provided
    if param.fname_data == '' or param.factor == '':
        sct.printv('\nERROR: All mandatory arguments are not provided. See usage (add -h).\n', 1, 'error')

    # check existence of input files
    sct.printv('\nCheck existence of input files...', param.verbose)
    sct.check_file_exist(param.fname_data, param.verbose)

    # extract resampling factor
    sct.printv('\nParse resampling factor...', param.verbose)
    factor_split = param.factor.split('x')
    factor = [float(factor_split[i]) for i in range(len(factor_split))]
    # check if it has three values
    if not len(factor) == 3:
        sct.printv('\nERROR: factor should have three dimensions. E.g., 2x2x1.\n', 1, 'error')
    else:
        fx, fy, fz = [float(factor_split[i]) for i in range(len(factor_split))]
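    # e.g. param.factor = '0.5x0.5x1' gives fx, fy, fz = 0.5, 0.5, 1.0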

    # display input parameters
    sct.printv('\nInput parameters:', param.verbose)
    sct.printv('  data ..................'+param.fname_data, param.verbose)
    sct.printv('  resampling factor .....'+param.factor, param.verbose)

    # Extract path/file/extension
    path_data, file_data, ext_data = sct.extract_fname(param.fname_data)
    path_out, file_out, ext_out = '', file_data, ext_data

    # create temporary folder
    sct.printv('\nCreate temporary folder...', param.verbose)
    path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1)
    sct.run('mkdir '+path_tmp, param.verbose)

    # Copying input data to tmp folder and convert to nii
    # NB: cannot use c3d here because c3d cannot convert 4D data.
    sct.printv('\nCopying input data to tmp folder and convert to nii...', param.verbose)
    sct.run('cp '+param.fname_data+' '+path_tmp+'data'+ext_data, param.verbose)

    # go to tmp folder
    os.chdir(path_tmp)

    # convert to nii format
    sct.run('fslchfiletype NIFTI data', param.verbose)

    # Get dimensions of data
    sct.printv('\nGet dimensions of data...', param.verbose)
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension('data.nii')
    sct.printv('  ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz)+ ' x ' + str(nt), param.verbose)
    if nt == 1:
        dim = 3
    if nz == 1:
        dim = 2
        sct.printv('\nERROR (sct_resample): Dimension of input data is different from 3 or 4. Exit program.\n', 1, 'error')

    # Calculate new dimensions
    sct.printv('\nCalculate new dimensions...', param.verbose)
    nx_new = int(round(nx*fx))
    ny_new = int(round(ny*fy))
    nz_new = int(round(nz*fz))
    sct.printv('  ' + str(nx_new) + ' x ' + str(ny_new) + ' x ' + str(nz_new)+ ' x ' + str(nt), param.verbose)

    # if dim=4, split data
    if dim == 4:
        # Split into T dimension
        sct.printv('\nSplit along T dimension...', param.verbose)
        status, output = sct.run(fsloutput+'fslsplit data data_T', param.verbose)
    elif dim == 3:
        # rename file to have compatible code with 4d
        status, output = sct.run('cp data.nii data_T0000.nii', param.verbose)

    for it in range(nt):
        # identify current volume
        file_data_splitT = 'data_T'+str(it).zfill(4)
        file_data_splitT_resample = file_data_splitT+'r'

        # resample volume
        sct.printv(('\nResample volume '+str((it+1))+'/'+str(nt)+':'), param.verbose)
        sct.run('isct_c3d '+file_data_splitT+ext+' -resample '+str(nx_new)+'x'+str(ny_new)+'x'+str(nz_new)+'vox -o '+file_data_splitT_resample+ext)

        # pad data (for ANTs)
        # # TODO: check if need to pad also for the estimate_and_apply
        # if program == 'ants' and todo == 'estimate' and slicewise == 0:
        #     sct.run('isct_c3d '+file_data_splitT_num[it]+' -pad 0x0x3vox 0x0x3vox 0 -o '+file_data_splitT_num[it]+'_pad.nii')
        #     file_data_splitT_num[it] = file_data_splitT_num[it]+'_pad'

    # merge data back along T
    file_data_resample = file_data+param.file_suffix
    sct.printv('\nMerge data back along T...', param.verbose)
    cmd = fsloutput + 'fslmerge -t ' + file_data_resample
    for it in range(nt):
        cmd = cmd + ' ' + 'data_T'+str(it).zfill(4)+'r'
    sct.run(cmd, param.verbose)

    # come back to parent folder
    os.chdir('..')

    # Generate output files
    sct.printv('\nGenerate output files...', param.verbose)
    if not param.fname_out:
        param.fname_out = path_out+file_out+param.file_suffix+ext_out
    sct.generate_output_file(path_tmp+file_data_resample+ext, param.fname_out)

    # Remove temporary files
    if param.remove_tmp_files == 1:
        print('\nRemove temporary files...')
        sct.run('rm -rf '+path_tmp, param.verbose)

    # to view results
    sct.printv('\nDone! To view results, type:', param.verbose)
    sct.printv('fslview '+param.fname_out+' &', param.verbose, 'info')
    print