def compute_ICBM152_centerline(dataset_info):
    """
    This function extracts the centerline from the ICBM152 brain template
    :param dataset_info: dictionary containing dataset information
    :return:
    """
    path_data = dataset_info['path_data']

    if not os.path.isdir(path_data + 'icbm152/'):
        download_data_template(path_data=path_data, name='icbm152', force=False)

    image_disks = Image(path_data + 'icbm152/mni_icbm152_t1_tal_nlin_sym_09c_disks_manual.nii.gz')
    coord = image_disks.getNonZeroCoordinates(sorting='z', reverse_coord=True)
    coord_physical = []
    for c in coord:
        if c.value <= 22 or c.value in [48, 49, 50, 51, 52]:  # 22 corresponds to L2
            c_p = image_disks.transfo_pix2phys([[c.x, c.y, c.z]])[0]
            c_p.append(c.value)
            coord_physical.append(c_p)

    x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
        path_data + 'icbm152/mni_icbm152_t1_centerline_manual.nii.gz', algo_fitting='nurbs', verbose=0,
        nurbs_pts_number=300, all_slices=False, phys_coordinates=True, remove_outliers=False)

    centerline = Centerline(x_centerline_fit, y_centerline_fit, z_centerline,
                            x_centerline_deriv, y_centerline_deriv, z_centerline_deriv)
    centerline.compute_vertebral_distribution(coord_physical, label_reference='PMG')
    return centerline
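# Illustrative sketch: compute_ICBM152_centerline() relies on Image.transfo_pix2phys() to convert
# the disc label voxels into physical coordinates before computing the vertebral distribution.
# The helper below shows the underlying operation with a plain 4x4 affine; the matrix values are
# made up for the example, the real affine comes from the ICBM152 NIfTI header.
def _sketch_pix2phys():
    import numpy as np
    affine = np.array([[1.0, 0.0, 0.0, -96.0],
                       [0.0, 1.0, 0.0, -132.0],
                       [0.0, 0.0, 1.0, -78.0],
                       [0.0, 0.0, 0.0, 1.0]])
    voxels = np.array([[98.0, 135.0, 80.0]])            # one disc label, voxel coordinates
    homogeneous = np.c_[voxels, np.ones(len(voxels))]   # append 1 for the affine product
    physical = np.dot(affine, homogeneous.T).T[:, :3]   # physical (x, y, z) in mm
    return physical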
def normalize_intensity_template(dataset_info, fname_template_centerline=None, contrast='t1', verbose=1):
    """
    This function normalizes the intensity of the image inside the spinal cord.
    :param dataset_info: dictionary containing dataset information
    :param fname_template_centerline: path to template centerline (binary image or npz)
    :param contrast: image contrast to process
    :param verbose: verbosity level
    :return:
    """
    path_data = dataset_info['path_data']
    list_subjects = dataset_info['subjects']
    path_template = dataset_info['path_template']

    average_intensity = []
    intensity_profiles = {}

    timer_profile = sct.Timer(len(list_subjects))
    timer_profile.start()

    # computing the intensity profile for each subject
    for subject_name in list_subjects:
        path_data_subject = path_data + subject_name + '/' + contrast + '/'
        if fname_template_centerline is None:
            fname_image = path_data_subject + contrast + '.nii.gz'
            fname_image_centerline = path_data_subject + contrast + dataset_info['suffix_centerline'] + '.nii.gz'
        else:
            fname_image = path_data_subject + contrast + '_straight.nii.gz'
            if fname_template_centerline.endswith('.npz'):
                fname_image_centerline = None
            else:
                fname_image_centerline = fname_template_centerline

        image = Image(fname_image)
        nx, ny, nz, nt, px, py, pz, pt = image.dim

        if fname_image_centerline is not None:
            # open centerline from template
            number_of_points_in_centerline = 4000
            x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
                fname_image_centerline, algo_fitting='nurbs', verbose=0,
                nurbs_pts_number=number_of_points_in_centerline,
                all_slices=False, phys_coordinates=True, remove_outliers=True)
            centerline_template = Centerline(x_centerline_fit, y_centerline_fit, z_centerline,
                                             x_centerline_deriv, y_centerline_deriv, z_centerline_deriv)
        else:
            centerline_template = Centerline(fname=fname_template_centerline)

        x, y, z, xd, yd, zd = centerline_template.average_coordinates_over_slices(image)

        # Compute intensity values
        z_values, intensities = [], []
        extend = 1  # this means the mean intensity of the slice will be calculated over a 3x3 square
        for i in range(len(z)):
            coord_z = image.transfo_phys2pix([[x[i], y[i], z[i]]])[0]
            z_values.append(coord_z[2])
            intensities.append(np.mean(image.data[coord_z[0] - extend - 1:coord_z[0] + extend,
                                                  coord_z[1] - extend - 1:coord_z[1] + extend,
                                                  coord_z[2]]))

        # for the slices that are not in the image, extend min and max values to cover the whole image
        min_z, max_z = min(z_values), max(z_values)
        intensities_temp = copy(intensities)
        z_values_temp = copy(z_values)
        for cz in range(nz):
            if cz not in z_values:
                z_values_temp.append(cz)
                if cz < min_z:
                    intensities_temp.append(intensities[z_values.index(min_z)])
                elif cz > max_z:
                    intensities_temp.append(intensities[z_values.index(max_z)])
                else:
                    print('error...', cz)
        intensities = intensities_temp
        z_values = z_values_temp

        # Preparing data for smoothing
        arr_int = [[z_values[i], intensities[i]] for i in range(len(z_values))]
        arr_int.sort(key=lambda x: x[0])  # and make sure it is ordered with z

        def smooth(x, window_len=11, window='hanning'):
            """smooth the data using a window with requested size.
            """
            if x.ndim != 1:
                raise ValueError("smooth only accepts 1 dimension arrays.")
            if x.size < window_len:
                raise ValueError("Input vector needs to be bigger than window size.")
            if window_len < 3:
                return x
            if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
                raise ValueError("Window should be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
            s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
            if window == 'flat':  # moving average
                w = np.ones(window_len, 'd')
            else:
                w = eval('np.' + window + '(window_len)')
            y = np.convolve(w / w.sum(), s, mode='same')
            return y[window_len - 1:-window_len + 1]

        # Smoothing
        intensities = [c[1] for c in arr_int]
        intensity_profile_smooth = smooth(np.array(intensities), window_len=50)
        average_intensity.append(np.mean(intensity_profile_smooth))
        intensity_profiles[subject_name] = intensity_profile_smooth

        if verbose == 2:
            import matplotlib.pyplot as plt
            plt.figure()
            plt.title(subject_name)
            plt.plot(intensities)
            plt.plot(intensity_profile_smooth)
            plt.show()

    # set the average image intensity over the entire dataset
    average_intensity = 1000.0

    # normalize the intensity of the image based on spinal cord
    for subject_name in list_subjects:
        path_data_subject = path_data + subject_name + '/' + contrast + '/'
        fname_image = path_data_subject + contrast + '_straight.nii.gz'

        image = Image(fname_image)
        nx, ny, nz, nt, px, py, pz, pt = image.dim
        image_image_new = image.copy()
        image_image_new.changeType(type='float32')
        for i in range(nz):
            image_image_new.data[:, :, i] *= average_intensity / intensity_profiles[subject_name][i]

        # Save intensity normalized template
        fname_image_normalized = sct.add_suffix(fname_image, '_norm')
        image_image_new.setFileName(fname_image_normalized)
        image_image_new.save()
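# Illustrative sketch: the last loop of normalize_intensity_template() rescales every axial slice
# by target / profile[z], where profile[z] is the smoothed mean cord intensity of that slice.
# The volume and profile below are synthetic, just to show the broadcasting of the per-slice factor.
def _sketch_slice_normalization():
    import numpy as np
    rng = np.random.RandomState(0)
    volume = rng.uniform(300.0, 600.0, size=(16, 16, 50)).astype(np.float32)
    profile = volume.mean(axis=(0, 1))     # stand-in for the smoothed cord intensity profile
    target_intensity = 1000.0
    normalized = volume * (target_intensity / profile)[np.newaxis, np.newaxis, :]
    return normalized                      # every slice mean is now ~1000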
def continuous_vertebral_levels(self):
    """
    This function transforms the vertebral levels file from the template into a continuous file.
    Instead of having integers representing the vertebral level on each slice, a continuous value
    represents the position of the slice in the vertebral level coordinate system.
    The image must be RPI.
    :return:
    """
    im_input = Image(self.image_input, self.verbose)
    im_output = Image(self.image_input, self.verbose)
    im_output.data *= 0

    # 1. extract vertebral levels from input image
    #     a. extract centerline
    #     b. for each slice, extract corresponding level
    nx, ny, nz, nt, px, py, pz, pt = im_input.dim
    from sct_straighten_spinalcord import smooth_centerline
    x_centerline_fit, y_centerline_fit, z_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(self.image_input, algo_fitting='nurbs', verbose=0)
    value_centerline = np.array([im_input.data[int(x_centerline_fit[it]), int(y_centerline_fit[it]), int(z_centerline_fit[it])]
                                 for it in range(len(z_centerline_fit))])

    # 2. compute distance for each vertebral level --> Di for i being the vertebral levels
    vertebral_levels = {}
    for slice_image, level in enumerate(value_centerline):
        if level not in vertebral_levels:
            vertebral_levels[level] = slice_image

    length_levels = {}
    for level in vertebral_levels:
        indexes_slice = np.where(value_centerline == level)
        length_levels[level] = np.sum([math.sqrt(((x_centerline_fit[indexes_slice[0][index_slice + 1]] - x_centerline_fit[indexes_slice[0][index_slice]]) * px) ** 2 +
                                                 ((y_centerline_fit[indexes_slice[0][index_slice + 1]] - y_centerline_fit[indexes_slice[0][index_slice]]) * py) ** 2 +
                                                 ((z_centerline_fit[indexes_slice[0][index_slice + 1]] - z_centerline_fit[indexes_slice[0][index_slice]]) * pz) ** 2)
                                       for index_slice in range(len(indexes_slice[0]) - 1)])

    # 2. for each slice:
    #     a. identify corresponding vertebral level --> i
    #     b. calculate distance of slice from upper vertebral level --> d
    #     c. compute relative distance in the vertebral level coordinate system --> d/Di
    continuous_values = {}
    for it, iz in enumerate(z_centerline_fit):
        level = value_centerline[it]
        indexes_slice = np.where(value_centerline == level)
        indexes_slice = indexes_slice[0][indexes_slice[0] >= it]
        distance_from_level = np.sum([math.sqrt(((x_centerline_fit[indexes_slice[index_slice + 1]] - x_centerline_fit[indexes_slice[index_slice]]) * px * px) ** 2 +
                                                ((y_centerline_fit[indexes_slice[index_slice + 1]] - y_centerline_fit[indexes_slice[index_slice]]) * py * py) ** 2 +
                                                ((z_centerline_fit[indexes_slice[index_slice + 1]] - z_centerline_fit[indexes_slice[index_slice]]) * pz * pz) ** 2)
                                      for index_slice in range(len(indexes_slice) - 1)])
        continuous_values[iz] = level + 2.0 * distance_from_level / float(length_levels[level])

    # 3. saving data
    # for each slice, get all non-zero pixels and replace with continuous values
    coordinates_input = self.image_input.getNonZeroCoordinates()
    im_output.changeType('float32')
    # for all points in input, find the value that has to be set up, depending on the vertebral level
    for i, coord in enumerate(coordinates_input):
        im_output.data[int(coord.x), int(coord.y), int(coord.z)] = continuous_values[coord.z]

    return im_output
def compute_properties_along_centerline(fname_seg_image, property_list, fname_disks_image=None, smooth_factor=5.0, interpolation_mode=0, remove_temp_files=1, verbose=1): # Check list of properties # If diameters is in the list, compute major and minor axis length and check orientation compute_diameters = False property_list_local = list(property_list) if 'diameters' in property_list_local: compute_diameters = True property_list_local.remove('diameters') property_list_local.append('major_axis_length') property_list_local.append('minor_axis_length') property_list_local.append('orientation') # TODO: make sure fname_segmentation and fname_disks are in the same space # create temporary folder and copying data sct.printv('\nCreate temporary folder...', verbose) path_tmp = sct.slash_at_the_end( 'tmp.' + time.strftime("%y%m%d%H%M%S") + '_' + str(randint(1, 1000000)), 1) sct.run('mkdir ' + path_tmp, verbose) sct.run('cp ' + fname_seg_image + ' ' + path_tmp) if fname_disks_image is not None: sct.run('cp ' + fname_disks_image + ' ' + path_tmp) # go to tmp folder os.chdir(path_tmp) fname_segmentation = os.path.abspath(fname_seg_image) path_data, file_data, ext_data = sct.extract_fname(fname_segmentation) # Change orientation of the input centerline into RPI sct.printv('\nOrient centerline to RPI orientation...', verbose) im_seg = Image(file_data + ext_data) fname_segmentation_orient = 'segmentation_rpi' + ext_data image = set_orientation(im_seg, 'RPI') image.setFileName(fname_segmentation_orient) image.save() # Initiating some variables nx, ny, nz, nt, px, py, pz, pt = image.dim resolution = 0.5 properties = {key: [] for key in property_list_local} properties['incremental_length'] = [] properties['distance_from_C1'] = [] properties['vertebral_level'] = [] properties['z_slice'] = [] # compute the spinal cord centerline based on the spinal cord segmentation number_of_points = 5 * nz x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline( fname_segmentation_orient, algo_fitting='nurbs', verbose=verbose, nurbs_pts_number=number_of_points, all_slices=False, phys_coordinates=True, remove_outliers=True) centerline = Centerline(x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv) # Compute vertebral distribution along centerline based on position of intervertebral disks if fname_disks_image is not None: fname_disks = os.path.abspath(fname_disks_image) path_data, file_data, ext_data = sct.extract_fname(fname_disks) im_disks = Image(file_data + ext_data) fname_disks_orient = 'disks_rpi' + ext_data image_disks = set_orientation(im_disks, 'RPI') image_disks.setFileName(fname_disks_orient) image_disks.save() image_disks = Image(fname_disks_orient) coord = image_disks.getNonZeroCoordinates(sorting='z', reverse_coord=True) coord_physical = [] for c in coord: c_p = image_disks.transfo_pix2phys([[c.x, c.y, c.z]])[0] c_p.append(c.value) coord_physical.append(c_p) centerline.compute_vertebral_distribution(coord_physical) sct.printv('Computing spinal cord shape along the spinal cord...') timer_properties = sct.Timer( number_of_iteration=centerline.number_of_points) timer_properties.start() # Extracting patches perpendicular to the spinal cord and computing spinal cord shape for index in range(centerline.number_of_points): # value_out = -5.0 value_out = 0.0 current_patch = centerline.extract_perpendicular_square( image, index, resolution=resolution, interpolation_mode=interpolation_mode, 
border='constant', cval=value_out) # check for pixels close to the spinal cord segmentation that are out of the image from skimage.morphology import dilation patch_zero = np.copy(current_patch) patch_zero[patch_zero == value_out] = 0.0 patch_borders = dilation(patch_zero) - patch_zero """ if np.count_nonzero(patch_borders + current_patch == value_out + 1.0) != 0: c = image.transfo_phys2pix([centerline.points[index]])[0] print 'WARNING: no patch for slice', c[2] timer_properties.add_iteration() continue """ sc_properties = properties2d(patch_zero, [resolution, resolution]) if sc_properties is not None: properties['incremental_length'].append( centerline.incremental_length[index]) if fname_disks_image is not None: properties['distance_from_C1'].append( centerline.dist_points[index]) properties['vertebral_level'].append( centerline.l_points[index]) properties['z_slice'].append( image.transfo_phys2pix([centerline.points[index]])[0][2]) for property_name in property_list_local: properties[property_name].append(sc_properties[property_name]) else: c = image.transfo_phys2pix([centerline.points[index]])[0] print 'WARNING: no properties for slice', c[2] timer_properties.add_iteration() timer_properties.stop() # Adding centerline to the properties for later use properties['centerline'] = centerline # We assume that the major axis is in the right-left direction # this script checks the orientation of the spinal cord and invert axis if necessary to make sure the major axis is right-left if compute_diameters: diameter_major = properties['major_axis_length'] diameter_minor = properties['minor_axis_length'] orientation = properties['orientation'] for i, orientation_item in enumerate(orientation): if -45.0 < orientation_item < 45.0: continue else: temp = diameter_minor[i] properties['minor_axis_length'][i] = diameter_major[i] properties['major_axis_length'][i] = temp properties['RL_diameter'] = properties['major_axis_length'] properties['AP_diameter'] = properties['minor_axis_length'] del properties['major_axis_length'] del properties['minor_axis_length'] # smooth the spinal cord shape with a gaussian kernel if required # TODO: not all properties can be smoothed if smooth_factor != 0.0: # smooth_factor is in mm import scipy window = scipy.signal.hann(smooth_factor / np.mean(centerline.progressive_length)) for property_name in property_list_local: properties[property_name] = scipy.signal.convolve( properties[property_name], window, mode='same') / np.sum(window) if compute_diameters: property_list_local.remove('major_axis_length') property_list_local.remove('minor_axis_length') property_list_local.append('RL_diameter') property_list_local.append('AP_diameter') property_list = property_list_local # Display properties on the referential space. 
Requires intervertebral disks if verbose == 2: x_increment = 'distance_from_C1' if fname_disks_image is None: x_increment = 'incremental_length' # Display the image and plot all contours found fig, axes = plt.subplots(len(property_list_local), sharex=True, sharey=False) for k, property_name in enumerate(property_list_local): axes[k].plot(properties[x_increment], properties[property_name]) axes[k].set_ylabel(property_name) if fname_disks_image is not None: properties[ 'distance_disk_from_C1'] = centerline.distance_from_C1label # distance between each disk and C1 (or first disk) xlabel_disks = [ centerline.convert_vertlabel2disklabel[label] for label in properties['distance_disk_from_C1'] ] xtick_disks = [ properties['distance_disk_from_C1'][label] for label in properties['distance_disk_from_C1'] ] plt.xticks(xtick_disks, xlabel_disks, rotation=30) else: axes[-1].set_xlabel('Position along the spinal cord (in mm)') plt.show() # Removing temporary folder os.chdir('..') shutil.rmtree(path_tmp, ignore_errors=True) return property_list, properties
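# Illustrative sketch: properties2d() is an SCT helper not shown in this file; the snippet below
# only illustrates the kind of 2D shape measures it returns (area, major/minor axis length,
# orientation) using skimage.measure.regionprops on a synthetic elliptical patch. The scaling by
# the patch resolution is an assumption of this sketch.
def _sketch_shape_properties_2d(resolution=0.5):
    import numpy as np
    from skimage.measure import label, regionprops
    patch = np.zeros((40, 40), dtype=np.uint8)
    yy, xx = np.ogrid[:40, :40]
    patch[((yy - 20) / 12.0) ** 2 + ((xx - 20) / 8.0) ** 2 <= 1.0] = 1   # ellipse-like cord section
    region = regionprops(label(patch))[0]
    return {'area': region.area * resolution ** 2,
            'major_axis_length': region.major_axis_length * resolution,
            'minor_axis_length': region.minor_axis_length * resolution,
            'orientation': np.degrees(region.orientation)}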
def continuous_vertebral_levels(self):
    """
    This function transforms the vertebral levels file from the template into a continuous file.
    Instead of having integers representing the vertebral level on each slice, a continuous value
    represents the position of the slice in the vertebral level coordinate system.
    The image must be RPI.
    :return:
    """
    im_input = Image(self.image_input, self.verbose)
    im_output = msct_image.zeros_like(self.image_input)

    # 1. extract vertebral levels from input image
    #     a. extract centerline
    #     b. for each slice, extract corresponding level
    nx, ny, nz, nt, px, py, pz, pt = im_input.dim
    from sct_straighten_spinalcord import smooth_centerline
    x_centerline_fit, y_centerline_fit, z_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(self.image_input, algo_fitting='nurbs', verbose=0)
    value_centerline = np.array([im_input.data[int(x_centerline_fit[it]), int(y_centerline_fit[it]), int(z_centerline_fit[it])]
                                 for it in range(len(z_centerline_fit))])

    # 2. compute distance for each vertebral level --> Di for i being the vertebral levels
    vertebral_levels = {}
    for slice_image, level in enumerate(value_centerline):
        if level not in vertebral_levels:
            vertebral_levels[level] = slice_image

    length_levels = {}
    for level in vertebral_levels:
        indexes_slice = np.where(value_centerline == level)
        length_levels[level] = np.sum([np.sqrt(((x_centerline_fit[indexes_slice[0][index_slice + 1]] - x_centerline_fit[indexes_slice[0][index_slice]]) * px) ** 2 +
                                               ((y_centerline_fit[indexes_slice[0][index_slice + 1]] - y_centerline_fit[indexes_slice[0][index_slice]]) * py) ** 2 +
                                               ((z_centerline_fit[indexes_slice[0][index_slice + 1]] - z_centerline_fit[indexes_slice[0][index_slice]]) * pz) ** 2)
                                       for index_slice in range(len(indexes_slice[0]) - 1)])

    # 2. for each slice:
    #     a. identify corresponding vertebral level --> i
    #     b. calculate distance of slice from upper vertebral level --> d
    #     c. compute relative distance in the vertebral level coordinate system --> d/Di
    continuous_values = {}
    for it, iz in enumerate(z_centerline_fit):
        level = value_centerline[it]
        indexes_slice = np.where(value_centerline == level)
        indexes_slice = indexes_slice[0][indexes_slice[0] >= it]
        distance_from_level = np.sum([np.sqrt(((x_centerline_fit[indexes_slice[index_slice + 1]] - x_centerline_fit[indexes_slice[index_slice]]) * px * px) ** 2 +
                                              ((y_centerline_fit[indexes_slice[index_slice + 1]] - y_centerline_fit[indexes_slice[index_slice]]) * py * py) ** 2 +
                                              ((z_centerline_fit[indexes_slice[index_slice + 1]] - z_centerline_fit[indexes_slice[index_slice]]) * pz * pz) ** 2)
                                      for index_slice in range(len(indexes_slice) - 1)])
        continuous_values[iz] = level + 2.0 * distance_from_level / float(length_levels[level])

    # 3. saving data
    # for each slice, get all non-zero pixels and replace with continuous values
    coordinates_input = self.image_input.getNonZeroCoordinates()
    im_output.change_type(np.float32)
    # for all points in input, find the value that has to be set up, depending on the vertebral level
    for i, coord in enumerate(coordinates_input):
        im_output.data[int(coord.x), int(coord.y), int(coord.z)] = continuous_values[coord.z]

    return im_output
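# Illustrative sketch: toy version of the continuous-level idea used above. Within one vertebral
# level, a slice is assigned level + (distance travelled inside the level) / (total level length).
# This is a synthetic 1D example with uniform slice spacing; the real code measures distances along
# the fitted 3D centerline and uses its own scaling convention.
def _sketch_continuous_levels():
    import numpy as np
    levels = np.array([3, 3, 3, 3, 4, 4, 4])     # integer level per slice
    slice_spacing = 1.0                          # mm, uniform here for simplicity
    continuous = np.empty(len(levels), dtype=float)
    for level in np.unique(levels):
        idx = np.where(levels == level)[0]
        length = slice_spacing * max(len(idx) - 1, 1)
        for k, i in enumerate(idx):
            continuous[i] = level + (slice_spacing * k) / length
    return continuous                            # e.g. 3.0, 3.33, 3.67, 4.0 for the first level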
def flatten_sagittal(im_anat, im_centerline, centerline_fitting, verbose):

    # re-orient to RPI
    orientation_native = im_anat.orientation
    im_anat.change_orientation("RPI")
    im_centerline.change_orientation("RPI")
    nx, ny, nz, nt, px, py, pz, pt = im_anat.dim

    # smooth centerline and return fitted coordinates in voxel space
    x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
        im_centerline, algo_fitting=centerline_fitting, type_window='hanning', window_length=50,
        nurbs_pts_number=3000, phys_coordinates=False, verbose=verbose, all_slices=True)

    # compute translation for each slice, such that the flattened centerline is centered in the medial plane (R-L) and
    # avoid discontinuity in slices where there is no centerline (in which case, simply copy the translation of the
    # closest Z).
    # first, get zmin and zmax spanned by the centerline (i.e. with non-zero values)
    indz_centerline = np.where([np.sum(im_centerline.data[:, :, iz]) for iz in range(nz)])[0]
    zmin, zmax = indz_centerline[0], indz_centerline[-1]
    # then, extend the centerline by copying values below zmin and above zmax
    x_centerline_extended = np.concatenate([np.ones(zmin) * x_centerline_fit[0],
                                            x_centerline_fit,
                                            np.ones(nz - zmax) * x_centerline_fit[-1]])

    # change type to float32 and scale between -1 and 1 as requested by img_as_float(). See #1790, #2069
    im_anat_flattened = msct_image.change_type(im_anat, np.float32)
    min_data, max_data = np.min(im_anat_flattened.data), np.max(im_anat_flattened.data)
    im_anat_flattened.data = 2 * im_anat_flattened.data / (max_data - min_data) - 1

    # loop across slices and apply translation
    for iz in range(nz):
        # compute translation along x (R-L)
        translation_x = x_centerline_extended[iz] - np.round(nx / 2.0)
        # apply transformation to 2D image with linear interpolation
        # tform = tf.SimilarityTransform(scale=1, rotation=0, translation=(translation_x, 0))
        tform = transform.SimilarityTransform(translation=(0, translation_x))
        # important to force input in float to scikit-image, because it will output float values
        img = img_as_float(im_anat_flattened.data[:, :, iz])
        img_reg = transform.warp(img, tform)
        im_anat_flattened.data[:, :, iz] = img_reg  # img_as_uint(img_reg)

    # change back to native orientation
    im_anat_flattened.change_orientation(orientation_native)
    return im_anat_flattened
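# Illustrative sketch: minimal example of the per-slice R-L re-centering performed above. A 2D
# slice is translated with scikit-image so that a chosen x position lands on the middle row of the
# array. The translation=(0, translation_x) ordering matches the call in flatten_sagittal(); the
# slice content here is synthetic.
def _sketch_recenter_slice():
    import numpy as np
    from skimage import transform
    from skimage.util import img_as_float
    slice_2d = np.zeros((64, 64), dtype=np.float32)
    slice_2d[40:45, 10:15] = 1.0                      # fake cord cross-section, off-center in x
    nx, x_cord = slice_2d.shape[0], 42.0
    translation_x = x_cord - np.round(nx / 2.0)
    tform = transform.SimilarityTransform(translation=(0, translation_x))
    warped = transform.warp(img_as_float(slice_2d), tform)
    rows = np.nonzero(warped.sum(axis=1) > 0.5)[0]
    return rows.mean()                                # ~32.0, i.e. the cord is now centered in x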
def angle_correction(self):

    # Empty arrays in which angle for each z slice will be stored
    self.angles = np.zeros(Image(self.fname_mask).dim[2])

    if self.fname_sc is not None:
        im_seg = Image(self.fname_sc)
        data_seg = im_seg.data
        X, Y, Z = (data_seg > 0).nonzero()
        min_z_index, max_z_index = min(Z), max(Z)

        # fit centerline, smooth it and return the first derivative (in physical space)
        x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
            self.fname_sc, algo_fitting='hanning', type_window='hanning', window_length=80,
            nurbs_pts_number=3000, phys_coordinates=True, verbose=self.verbose, all_slices=False)
        centerline = Centerline(x_centerline_fit, y_centerline_fit, z_centerline,
                                x_centerline_deriv, y_centerline_deriv, z_centerline_deriv)

        # average centerline coordinates over slices of the image
        x_centerline_deriv_rescorr, y_centerline_deriv_rescorr, z_centerline_deriv_rescorr = centerline.average_coordinates_over_slices(im_seg)[3:]

        # compute Z axis of the image, in physical coordinate
        axis_Z = im_seg.get_directions()[2]

        # for iz in xrange(min_z_index, max_z_index + 1):
        for zz in range(im_seg.dim[2]):
            if zz >= min_z_index and zz <= max_z_index:
                # in the case of problematic segmentation (e.g., non continuous segmentation often at the extremities),
                # display a warning but do not crash
                try:
                    # normalize the tangent vector to the centerline (i.e. its derivative)
                    tangent_vect = self._normalize(np.array([x_centerline_deriv_rescorr[zz],
                                                             y_centerline_deriv_rescorr[zz],
                                                             z_centerline_deriv_rescorr[zz]]))
                    # compute the angle between the normal vector of the plane and the vector z
                    self.angles[zz] = np.arccos(np.vdot(tangent_vect, axis_Z))
                except IndexError:
                    printv('WARNING: Your segmentation does not seem continuous, which could cause wrong estimations at '
                           'the problematic slices. Please check it, especially at the extremities.', type='warning')
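# Illustrative sketch: the angle stored per slice above is the angle between the normalized
# centerline tangent and the slice (z) axis; cos(angle) is typically what later corrects per-slice
# measures. The tangent values and the np.clip guard are additions of this sketch.
def _sketch_tangent_angle():
    import numpy as np
    tangent = np.array([0.1, -0.2, 1.0])             # made-up centerline derivative at one slice
    tangent = tangent / np.linalg.norm(tangent)
    axis_z = np.array([0.0, 0.0, 1.0])
    angle = np.arccos(np.clip(np.vdot(tangent, axis_z), -1.0, 1.0))
    return np.degrees(angle), np.cos(angle)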
def extract_centerline(fname_segmentation, remove_temp_files, name_output='', verbose=0,
                       algo_fitting='hanning', type_window='hanning', window_length=80):

    # Extract path, file and extension
    fname_segmentation = os.path.abspath(fname_segmentation)
    path_data, file_data, ext_data = sct.extract_fname(fname_segmentation)

    # create temporary folder
    path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir ' + path_tmp)

    # copy files into tmp folder
    sct.run('cp ' + fname_segmentation + ' ' + path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    # Change orientation of the input centerline into RPI
    sct.printv('\nOrient centerline to RPI orientation...', verbose)
    fname_segmentation_orient = 'segmentation_rpi' + ext_data
    set_orientation(file_data + ext_data, 'RPI', fname_segmentation_orient)

    # Get dimension
    sct.printv('\nGet dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_segmentation_orient).dim
    sct.printv('.. matrix size: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose)
    sct.printv('.. voxel size: ' + str(px) + 'mm x ' + str(py) + 'mm x ' + str(pz) + 'mm', verbose)

    # Extract orientation of the input segmentation
    orientation = get_orientation(file_data + ext_data)
    sct.printv('\nOrientation of segmentation image: ' + orientation, verbose)

    sct.printv('\nOpen segmentation volume...', verbose)
    file = nibabel.load(fname_segmentation_orient)
    data = file.get_data()
    hdr = file.get_header()

    # Extract min and max index in Z direction
    X, Y, Z = (data > 0).nonzero()
    min_z_index, max_z_index = min(Z), max(Z)
    x_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
    y_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
    z_centerline = [iz for iz in range(min_z_index, max_z_index + 1)]

    # Extract segmentation points and average per slice
    for iz in range(min_z_index, max_z_index + 1):
        x_seg, y_seg = (data[:, :, iz] > 0).nonzero()
        x_centerline[iz - min_z_index] = np.mean(x_seg)
        y_centerline[iz - min_z_index] = np.mean(y_seg)
    for k in range(len(X)):
        data[X[k], Y[k], Z[k]] = 0

    # extract centerline and smooth it
    x_centerline_fit, y_centerline_fit, z_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
        fname_segmentation_orient, type_window=type_window, window_length=window_length,
        algo_fitting=algo_fitting, verbose=verbose)

    if verbose == 2:
        import matplotlib.pyplot as plt

        # Creation of a vector x that takes into account the distance between the labels
        nz_nonz = len(z_centerline)
        x_display = [0 for i in range(x_centerline_fit.shape[0])]
        y_display = [0 for i in range(y_centerline_fit.shape[0])]
        for i in range(0, nz_nonz, 1):
            x_display[int(z_centerline[i] - z_centerline[0])] = x_centerline[i]
            y_display[int(z_centerline[i] - z_centerline[0])] = y_centerline[i]

        plt.figure(1)
        plt.subplot(2, 1, 1)
        plt.plot(z_centerline_fit, x_display, 'ro')
        plt.plot(z_centerline_fit, x_centerline_fit)
        plt.xlabel("Z")
        plt.ylabel("X")
        plt.title("x and x_fit coordinates")

        plt.subplot(2, 1, 2)
        plt.plot(z_centerline_fit, y_display, 'ro')
        plt.plot(z_centerline_fit, y_centerline_fit)
        plt.xlabel("Z")
        plt.ylabel("Y")
        plt.title("y and y_fit coordinates")
        plt.show()

    # Create an image with the centerline
    for iz in range(min_z_index, max_z_index + 1):
        # if index is out of bounds here for hanning: either the segmentation has holes or labels have been added to the file
        data[int(round(x_centerline_fit[iz - min_z_index])), int(round(y_centerline_fit[iz - min_z_index])), iz] = 1

    # Write the centerline image in RPI orientation
    hdr.set_data_dtype('uint8')  # set imagetype to uint8
    sct.printv('\nWrite NIFTI volumes...', verbose)
    img = nibabel.Nifti1Image(data, None, hdr)
    nibabel.save(img, 'centerline.nii.gz')

    # Define name if output name is not specified
    if name_output == 'csa_volume.nii.gz' or name_output == '':
        # sct.generate_output_file('centerline.nii.gz', file_data+'_centerline'+ext_data, verbose)
        name_output = file_data + '_centerline' + ext_data
    sct.generate_output_file('centerline.nii.gz', name_output, verbose)

    # create a txt file with the centerline
    path, rad_output, ext = sct.extract_fname(name_output)
    name_output_txt = rad_output + '.txt'
    sct.printv('\nWrite text file...', verbose)
    file_results = open(name_output_txt, 'w')
    for i in range(min_z_index, max_z_index + 1):
        file_results.write(str(int(i)) + ' ' + str(x_centerline_fit[i - min_z_index]) + ' ' + str(y_centerline_fit[i - min_z_index]) + '\n')
    file_results.close()

    # Copy result into parent folder
    sct.run('cp ' + name_output_txt + ' ../')

    del data

    # come back to parent folder
    os.chdir('..')

    # Change orientation of the output centerline into input orientation
    sct.printv('\nOrient centerline image to input orientation: ' + orientation, verbose)
    fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data
    set_orientation(path_tmp + '/' + name_output, orientation, name_output)

    # Remove temporary files
    if remove_temp_files:
        sct.printv('\nRemove temporary files...', verbose)
        sct.run('rm -rf ' + path_tmp, verbose)

    return name_output
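# Illustrative sketch: the end of extract_centerline() burns the fitted coordinates into a uint8
# volume and writes it with nibabel. Same idea below with synthetic coordinates and an identity
# affine (the real code reuses the header of the RPI-oriented segmentation).
def _sketch_write_centerline(fname_out='centerline_sketch.nii.gz'):
    import numpy as np
    import nibabel as nib
    nx, ny, nz = 32, 32, 20
    data = np.zeros((nx, ny, nz), dtype=np.uint8)
    x_fit = np.linspace(14.2, 17.8, nz)
    y_fit = np.linspace(15.0, 16.0, nz)
    for iz in range(nz):
        data[int(round(x_fit[iz])), int(round(y_fit[iz])), iz] = 1
    nib.save(nib.Nifti1Image(data, np.eye(4)), fname_out)
    return fname_out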
def main(fname_anat, fname_centerline, degree_poly, centerline_fitting, interp, remove_temp_files, verbose):

    # load input image
    im_anat = Image(fname_anat)
    nx, ny, nz, nt, px, py, pz, pt = im_anat.dim
    # re-orient to RPI
    orientation_native = im_anat.change_orientation('RPI')

    # load centerline
    im_centerline = Image(fname_centerline)
    im_centerline.change_orientation('RPI')

    # smooth centerline and return fitted coordinates in voxel space
    x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
        im_centerline, algo_fitting=centerline_fitting, type_window='hanning', window_length=50,
        nurbs_pts_number=3000, phys_coordinates=False, verbose=verbose, all_slices=True)

    # compute translation for each slice, such that the flattened centerline is centered in the medial plane (R-L) and
    # avoid discontinuity in slices where there is no centerline (in which case, simply copy the translation of the
    # closest Z).
    # first, get zmin and zmax spanned by the centerline (i.e. with non-zero values)
    indz_centerline = np.where([np.sum(im_centerline.data[:, :, iz]) for iz in range(nz)])[0]
    zmin, zmax = indz_centerline[0], indz_centerline[-1]
    # then, extend the centerline by padding values below zmin and above zmax
    x_centerline_extended = np.concatenate([np.ones(zmin) * x_centerline_fit[0],
                                            x_centerline_fit,
                                            np.ones(nz - zmax - 1) * x_centerline_fit[-1]])

    # loop across slices and apply translation
    im_anat_flattened = im_anat.copy()
    # change type to float32 because of subsequent conversion (img_as_float). See #1790
    im_anat_flattened.changeType('float32')
    for iz in range(nz):
        # compute translation along x (R-L)
        translation_x = x_centerline_extended[iz] - round(nx / 2.0)
        # apply transformation to 2D image with linear interpolation
        # tform = tf.SimilarityTransform(scale=1, rotation=0, translation=(translation_x, 0))
        tform = transform.SimilarityTransform(translation=(0, translation_x))
        # important to force input in float to scikit-image, because it will output float values
        img = img_as_float(im_anat.data[:, :, iz])
        img_reg = transform.warp(img, tform)
        im_anat_flattened.data[:, :, iz] = img_reg  # img_as_uint(img_reg)

    # change back to native orientation
    im_anat_flattened.change_orientation(orientation_native)
    # save output
    fname_out = sct.add_suffix(fname_anat, '_flatten')
    im_anat_flattened.setFileName(fname_out)
    im_anat_flattened.save()
    sct.display_viewer_syntax([fname_anat, fname_out])
def compute_properties_along_centerline(im_seg, smooth_factor=5.0, interpolation_mode=0, algo_fitting='hanning', window_length=50, size_patch=7, remove_temp_files=1, verbose=1): """ Compute shape property along spinal cord centerline. This algorithm computes the centerline, oversample it, extract 2D patch orthogonal to the centerline, compute the shape on the 2D patches, and finally undersample the shape information in order to match the input slice #. :param im_seg: Image of segmentation, already oriented in RPI :param smooth_factor: :param interpolation_mode: :param algo_fitting: :param window_length: :param remove_temp_files: :param verbose: :return: """ # TODO: put size_patch back to 20 (was put to 7 for debugging purpose) # List of properties to output (in the right order) property_list = ['area', 'equivalent_diameter', 'AP_diameter', 'RL_diameter', 'ratio_minor_major', 'eccentricity', 'solidity', 'orientation'] # Initiating some variables nx, ny, nz, nt, px, py, pz, pt = im_seg.dim # Extract min and max index in Z direction data_seg = im_seg.data X, Y, Z = (data_seg > 0).nonzero() min_z_index, max_z_index = min(Z), max(Z) # Define the resampling resolution. Here, we take the minimum of half the pixel size along X or Y in order to have # sufficient precision upon resampling. Since we want isotropic resamping, we take the min between the two dims. resolution = min(float(px) / 2, float(py) / 2) # resolution = 0.5 # Initialize 1d array with nan. Each element corresponds to a slice. properties = {key: np.full_like(np.empty(nz), np.nan, dtype=np.double) for key in property_list} # properties['incremental_length'] = np.full_like(np.empty(nz), np.nan, dtype=np.double) # properties['distance_from_C1'] = np.full_like(np.empty(nz), np.nan, dtype=np.double) # properties['vertebral_level'] = np.full_like(np.empty(nz), np.nan, dtype=np.double) # properties['z_slice'] = [] # compute the spinal cord centerline based on the spinal cord segmentation number_of_points = nz # 5 * nz x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = \ smooth_centerline(im_seg, algo_fitting=algo_fitting, window_length=window_length, verbose=verbose, nurbs_pts_number=number_of_points, all_slices=False, phys_coordinates=True, remove_outliers=True) centerline = Centerline(x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv) sct.printv('Computing spinal cord shape along the spinal cord...') with tqdm.tqdm(total=len(range(min_z_index, max_z_index))) as pbar: # Extracting patches perpendicular to the spinal cord and computing spinal cord shape i_centerline = 0 # index of the centerline() object for iz in range(min_z_index, max_z_index-1): # for index in range(centerline.number_of_points): Julien # value_out = -5.0 value_out = 0.0 # TODO: correct for angulation using the cosine. 
The current approach has 2 issues: # - the centerline is not homogeneously sampled along z (which is the reason it is oversampled) # - computationally expensive # - requires resampling to higher resolution --> to check: maybe required with cosine approach current_patch = centerline.extract_perpendicular_square(im_seg, i_centerline, size=size_patch, resolution=resolution, interpolation_mode=interpolation_mode, border='constant', cval=value_out) # check for pixels close to the spinal cord segmentation that are out of the image patch_zero = np.copy(current_patch) patch_zero[patch_zero == value_out] = 0.0 # patch_borders = dilation(patch_zero) - patch_zero """ if np.count_nonzero(patch_borders + current_patch == value_out + 1.0) != 0: c = image.transfo_phys2pix([centerline.points[index]])[0] print('WARNING: no patch for slice', c[2]) continue """ # compute shape properties on 2D patch sc_properties = properties2d(patch_zero, [resolution, resolution]) # assign AP and RL to minor or major axis, depending on the orientation sc_properties = assign_AP_and_RL_diameter(sc_properties) # loop across properties and assign values for function output if sc_properties is not None: # properties['incremental_length'][iz] = centerline.incremental_length[i_centerline] for property_name in property_list: properties[property_name][iz] = sc_properties[property_name] else: c = im_seg.transfo_phys2pix([centerline.points[i_centerline]])[0] sct.printv('WARNING: no properties for slice', c[2]) i_centerline += 1 pbar.update(1) # # smooth the spinal cord shape with a gaussian kernel if required # # TODO: remove this smoothing # if smooth_factor != 0.0: # smooth_factor is in mm # import scipy # window = scipy.signal.hann(smooth_factor / np.mean(centerline.progressive_length)) # for property_name in property_list: # properties[property_name] = scipy.signal.convolve(properties[property_name], window, mode='same') / np.sum(window) # extract all values for shape properties to be averaged across the oversampled centerline in order to match the # input slice # # sorting_values = [] # for label in properties['z_slice']: # if label not in sorting_values: # sorting_values.append(label) # prepare output # shape_output = dict() # for property_name in property_list: # shape_output[property_name] = [] # for label in sorting_values: # averaged_shape[property_name].append(np.mean( # [item for i, item in enumerate(properties[property_name]) if # properties['z_slice'][i] == label])) return properties
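# Illustrative sketch: assign_AP_and_RL_diameter() is an SCT helper that is not shown here; the
# function below only reproduces the convention used throughout this file. If the major ellipse
# axis lies within 45 degrees of the right-left direction, major maps to RL and minor to AP;
# otherwise the two are swapped.
def _sketch_assign_ap_rl(major_axis_length, minor_axis_length, orientation_deg):
    if -45.0 < orientation_deg < 45.0:
        rl_diameter, ap_diameter = major_axis_length, minor_axis_length
    else:
        rl_diameter, ap_diameter = minor_axis_length, major_axis_length
    return {'RL_diameter': rl_diameter, 'AP_diameter': ap_diameter}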
def execute(self): print 'Execution of the SCAD algorithm in '+str(os.getcwd()) original_name = self.input_image.file_name vesselness_file_name = "imageVesselNessFilter.nii.gz" raw_file_name = "raw.nii" self.setup_debug_folder() if self.debug: import matplotlib.pyplot as plt # import for debug purposes # create tmp and copy input path_tmp = self.create_temporary_path() conv.convert(self.input_image.absolutepath, path_tmp+raw_file_name) if self.vesselness_provided: sct.run('cp '+vesselness_file_name+' '+path_tmp+vesselness_file_name) os.chdir(path_tmp) # get input image information img = Image(raw_file_name) # save original orientation and change image to RPI self.raw_orientation = img.change_orientation() # get body symmetry if self.enable_symmetry: from msct_image import change_data_orientation sym = SymmetryDetector(raw_file_name, self.contrast, crop_xy=0) self.raw_symmetry = sym.execute() img.change_orientation(self.raw_orientation) self.output_debug_file(img, self.raw_symmetry, "body_symmetry") img.change_orientation() # vesselness filter if not self.vesselness_provided: sct.run('sct_vesselness -i '+raw_file_name+' -t ' + self._contrast+" -radius "+str(self.spinalcord_radius)) # load vesselness filter data and perform minimum path on it img = Image(vesselness_file_name) img.change_orientation() self.minimum_path_data, self.J1_min_path, self.J2_min_path = get_minimum_path(img.data, invert=1, debug=1) self.output_debug_file(img, self.minimum_path_data, "minimal_path") self.output_debug_file(img, self.J1_min_path, "J1_minimal_path") self.output_debug_file(img, self.J2_min_path, "J2_minimal_path") # Apply an exponent to the minimum path self.minimum_path_powered = np.power(self.minimum_path_data, self.minimum_path_exponent) self.output_debug_file(img, self.minimum_path_powered, "minimal_path_power_"+str(self.minimum_path_exponent)) # Saving in Image since smooth_minimal_path needs pixel dimensions img.data = self.minimum_path_powered # smooth resulting minimal path self.smoothed_min_path = smooth_minimal_path(img) self.output_debug_file(img, self.smoothed_min_path.data, "minimal_path_smooth") # normalise symmetry values between 0 and 1 if self.enable_symmetry: normalised_symmetry = normalize_array_histogram(self.raw_symmetry) self.output_debug_file(img, self.smoothed_min_path.data, "minimal_path_smooth") # multiply normalised symmetry data with the minimum path result from msct_image import change_data_orientation self.spine_detect_data = np.multiply(self.smoothed_min_path.data, change_data_orientation(np.power(normalised_symmetry, self.symmetry_exponent), self.raw_orientation, "RPI")) self.output_debug_file(img, self.spine_detect_data, "symmetry_x_min_path") # extract the centerline from the minimal path image self.centerline_with_outliers = get_centerline(self.spine_detect_data, self.spine_detect_data.shape) else: # extract the centerline from the minimal path image self.centerline_with_outliers = get_centerline(self.smoothed_min_path.data, self.smoothed_min_path.data.shape) self.output_debug_file(img, self.centerline_with_outliers, "centerline_with_outliers") # saving centerline with outliers to have img.data = self.centerline_with_outliers img.change_orientation() img.file_name = "centerline_with_outliers" img.save() # use a b-spline to smooth out the centerline x, y, z, dx, dy, dz = smooth_centerline("centerline_with_outliers.nii.gz") # save the centerline nx, ny, nz, nt, px, py, pz, pt = img.dim img.data = np.zeros((nx, ny, nz)) for i in range(0, np.size(x)-1): img.data[int(x[i]), 
int(y[i]), int(z[i])] = 1 self.output_debug_file(img, img.data, "centerline") img.change_orientation(self.raw_orientation) img.file_name = "centerline" img.save() # copy back centerline os.chdir('../') conv.convert(path_tmp+img.file_name+img.ext, self.output_filename) if self.rm_tmp_file == 1: import shutil shutil.rmtree(path_tmp)
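# Illustrative sketch: in the symmetry branch of execute() above, the symmetry map is normalized,
# raised to an exponent and multiplied voxel-wise with the smoothed minimum-path image before the
# centerline is extracted. Same arithmetic below on synthetic arrays (normalize_array_histogram is
# an SCT helper; plain min-max scaling is used here as a stand-in).
def _sketch_symmetry_weighting(symmetry_exponent=2):
    import numpy as np
    rng = np.random.RandomState(1)
    min_path = rng.rand(8, 8, 8)                 # stand-in for the smoothed minimum path
    symmetry = rng.rand(8, 8, 8)                 # stand-in for the body symmetry map
    sym_norm = (symmetry - symmetry.min()) / (symmetry.max() - symmetry.min())
    return min_path * sym_norm ** symmetry_exponent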
def compute_csa(fname_segmentation, name_method, volume_output, verbose, remove_temp_files, spline_smoothing, step, smoothing_param, figure_fit, name_output, slices, vert_levels, path_to_template, algo_fitting = 'hanning', type_window = 'hanning', window_length = 80): #param.algo_fitting = 'hanning' # Extract path, file and extension fname_segmentation = os.path.abspath(fname_segmentation) path_data, file_data, ext_data = sct.extract_fname(fname_segmentation) # create temporary folder sct.printv('\nCreate temporary folder...', verbose) path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1) sct.run('mkdir '+path_tmp, verbose) # Copying input data to tmp folder and convert to nii sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose) sct.run('isct_c3d '+fname_segmentation+' -o '+path_tmp+'segmentation.nii') # go to tmp folder os.chdir(path_tmp) # Change orientation of the input segmentation into RPI sct.printv('\nChange orientation of the input segmentation into RPI...', verbose) fname_segmentation_orient = set_orientation('segmentation.nii', 'RPI', 'segmentation_orient.nii') # Get size of data sct.printv('\nGet data dimensions...', verbose) nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_segmentation_orient) sct.printv(' ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose) # Open segmentation volume sct.printv('\nOpen segmentation volume...', verbose) file_seg = nibabel.load(fname_segmentation_orient) data_seg = file_seg.get_data() hdr_seg = file_seg.get_header() # # # Extract min and max index in Z direction X, Y, Z = (data_seg > 0).nonzero() # coords_seg = np.array([str([X[i], Y[i], Z[i]]) for i in xrange(0,len(Z))]) # don't know why but finding strings in array of array of strings is WAY faster than doing the same with integers min_z_index, max_z_index = min(Z), max(Z) Xp,Yp = (data_seg[:,:,0]>=0).nonzero() # X and Y range # # x_centerline = [0 for i in xrange(0,max_z_index-min_z_index+1)] # y_centerline = [0 for i in xrange(0,max_z_index-min_z_index+1)] # z_centerline = np.array([iz for iz in xrange(min_z_index, max_z_index+1)]) # # # Extract segmentation points and average per slice # for iz in xrange(min_z_index, max_z_index+1): # x_seg, y_seg = (data_seg[:,:,iz]>0).nonzero() # x_centerline[iz-min_z_index] = np.mean(x_seg) # y_centerline[iz-min_z_index] = np.mean(y_seg) # # # Fit the centerline points with spline and return the new fitted coordinates # x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline) # extract centerline and smooth it x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = smooth_centerline(fname_segmentation_orient, algo_fitting=algo_fitting, type_window=type_window, window_length=window_length, verbose = verbose) z_centerline_scaled = [x*pz for x in z_centerline] # # 3D plot of the fit # fig=plt.figure() # ax=Axes3D(fig) # ax.plot(x_centerline,y_centerline,z_centerline,zdir='z') # ax.plot(x_centerline_fit,y_centerline_fit,z_centerline,zdir='z') # plt.show() # Defining cartesian basis vectors x = np.array([1, 0, 0]) y = np.array([0, 1, 0]) z = np.array([0, 0, 1]) # Creating folder in which JPG files will be stored sct.printv('\nCreating folder in which JPG files will be stored...', verbose) sct.create_folder('JPG_Results') # Compute CSA sct.printv('\nCompute CSA...', verbose) # Empty arrays in which CSA for each z slice will be stored csa = [0.0 for i in 
xrange(0,max_z_index-min_z_index+1)] # sections_ortho_counting = [0 for i in xrange(0,max_z_index-min_z_index+1)] # sections_ortho_ellipse = [0 for i in xrange(0,max_z_index-min_z_index+1)] # sections_z_ellipse = [0 for i in xrange(0,max_z_index-min_z_index+1)] # sections_z_counting = [0 for i in xrange(0,max_z_index-min_z_index+1)] sct.printv('\nCross-Section Area:', verbose, 'bold') for iz in xrange(0, len(z_centerline)): # Equation of the the plane which is orthogonal to the spline at z=iz a = x_centerline_deriv[iz] b = y_centerline_deriv[iz] c = z_centerline_deriv[iz] #vector normal to the plane normal = normalize(np.array([a, b, c])) # angle between normal vector and z angle = np.arccos(np.dot(normal, z)) if name_method == 'counting_ortho_plane' or name_method == 'ellipse_ortho_plane': x_center = x_centerline_fit[iz] y_center = y_centerline_fit[iz] z_center = z_centerline[iz] # use of x in order to get orientation of each plane, basis_1 is in the plane ax+by+cz+d=0 basis_1 = normalize(np.cross(normal,x)) basis_2 = normalize(np.cross(normal,basis_1)) # maximum dimension of the tilted plane. Try multiply numerator by sqrt(2) ? max_diameter = (max([(max(X)-min(X))*px,(max(Y)-min(Y))*py]))/(np.cos(angle)) # Forcing the step to be the min of x and y scale (default value is 1 mm) step = min([px,py]) # discretized plane which will be filled with 0/1 plane_seg = np.zeros((int(max_diameter/step),int(max_diameter/step))) # how the plane will be skimmed through plane_grid = np.linspace(-int(max_diameter/2),int(max_diameter/2),int(max_diameter/step)) # we go through the plane for i_b1 in plane_grid : for i_b2 in plane_grid : point = np.array([x_center*px,y_center*py,z_center*pz]) + i_b1*basis_1 +i_b2*basis_2 # to which voxel belongs each point of the plane coord_voxel = str([ int(point[0]/px), int(point[1]/py), int(point[2]/pz)]) if (coord_voxel in coords_seg) is True : # if this voxel is 1 plane_seg[int((plane_grid==i_b1).nonzero()[0])][int((plane_grid==i_b2).nonzero()[0])] = 1 # number of voxels that are in the intersection of each plane and the nonzeros values of segmentation, times the area of one cell of the discretized plane if name_method == 'counting_ortho_plane': csa[iz] = len((plane_seg>0).nonzero()[0])*step*step # if verbose ==1 and name_method == 'counting_ortho_plane' : # print('Cross-Section Area : ' + str(csa[iz]) + ' mm^2') if name_method == 'ellipse_ortho_plane': # import scipy stuff from scipy.misc import imsave os.chdir('JPG_Results') imsave('plane_ortho_' + str(iz) + '.jpg', plane_seg) # Tresholded gradient image mag = edge_detection('plane_ortho_' + str(iz) + '.jpg') #Coordinates of the contour x_contour,y_contour = (mag>0).nonzero() x_contour = x_contour*step y_contour = y_contour*step #Fitting an ellipse fit = Ellipse_fit(x_contour,y_contour) # Semi-minor axis, semi-major axis a_ellipse, b_ellipse = ellipse_dim(fit) #Section = pi*a*b csa[iz] = a_ellipse*b_ellipse*np.pi # if verbose == 1 and name_method == 'ellipse_ortho_plane': # print('Cross-Section Area : ' + str(csa[iz]) + ' mm^2') # os.chdir('..') if name_method == 'counting_z_plane' or name_method == 'ellipse_z_plane': # getting the segmentation for each z plane x_seg, y_seg = (data_seg[:, :, iz+min_z_index] > 0).nonzero() seg = [[x_seg[i], y_seg[i]] for i in range(0, len(x_seg))] plane = np.zeros((max(Xp), max(Yp))) for i in seg: # filling the plane with 0 and 1 regarding to the segmentation plane[i[0] - 1][i[1] - 1] = data_seg[i[0] - 1, i[1] - 1, iz+min_z_index] if name_method == 'counting_z_plane': x, y = (plane > 
0.0).nonzero() len_x = len(x) for i in range(0, len_x): csa[iz] += plane[x[i], y[i]]*px*py csa[iz] *= np.cos(angle) # if verbose == 1 and name_method == 'counting_z_plane': # print('Cross-Section Area : ' + str(csa[iz]) + ' mm^2') if name_method == 'ellipse_z_plane': # import scipy stuff from scipy.misc import imsave os.chdir('JPG_Results') imsave('plane_z_' + str(iz) + '.jpg', plane) # Tresholded gradient image mag = edge_detection('plane_z_' + str(iz) + '.jpg') x_contour,y_contour = (mag>0).nonzero() x_contour = x_contour*px y_contour = y_contour*py # Fitting an ellipse fit = Ellipse_fit(x_contour,y_contour) a_ellipse, b_ellipse = ellipse_dim(fit) csa[iz] = a_ellipse*b_ellipse*np.pi*np.cos(angle) # if verbose == 1 and name_method == 'ellipse_z_plane': # print('Cross-Section Area : ' + str(csa[iz]) + ' mm^2') if spline_smoothing == 1: sct.printv('\nSmoothing results with spline...', verbose) tck = scipy.interpolate.splrep(z_centerline_scaled, csa, s=smoothing_param) csa_smooth = scipy.interpolate.splev(z_centerline_scaled, tck) if figure_fit == 1: import matplotlib.pyplot as plt plt.figure() plt.plot(z_centerline_scaled, csa) plt.plot(z_centerline_scaled, csa_smooth) plt.legend(['CSA values', 'Smoothed values'], 2) plt.savefig('Spline_fit.png') csa = csa_smooth # update variable # Create output text file sct.printv('\nWrite text file...', verbose) file_results = open('csa.txt', 'w') for i in range(min_z_index, max_z_index+1): file_results.write(str(int(i)) + ',' + str(csa[i-min_z_index])+'\n') # Display results sct.printv('z='+str(i-min_z_index)+': '+str(csa[i-min_z_index])+' mm^2', verbose, 'bold') file_results.close() # output volume of csa values if volume_output: sct.printv('\nCreate volume of CSA values...', verbose) # get orientation of the input data orientation = get_orientation('segmentation.nii') data_seg = data_seg.astype(np.float32, copy=False) # loop across slices for iz in range(min_z_index, max_z_index+1): # retrieve seg pixels x_seg, y_seg = (data_seg[:, :, iz] > 0).nonzero() seg = [[x_seg[i],y_seg[i]] for i in range(0, len(x_seg))] # loop across pixels in segmentation for i in seg: # replace value with csa value data_seg[i[0], i[1], iz] = csa[iz-min_z_index] # create header hdr_seg.set_data_dtype('float32') # set imagetype to uint8 # save volume img = nibabel.Nifti1Image(data_seg, None, hdr_seg) nibabel.save(img, 'csa_RPI.nii') # Change orientation of the output centerline into input orientation fname_csa_volume = set_orientation('csa_RPI.nii', orientation, 'csa_RPI_orient.nii') # come back to parent folder os.chdir('..') # Generate output files sct.printv('\nGenerate output files...', verbose) sct.generate_output_file(path_tmp+'csa.txt', path_data+param.fname_csa) # extension already included in param.fname_csa if volume_output: sct.generate_output_file(fname_csa_volume, path_data+name_output) # extension already included in name_output # average csa across vertebral levels or slices if asked (flag -z or -l) if slices or vert_levels: if vert_levels and not path_to_template: sct.printv('\nERROR: Path to template is missing. 
See usage.\n', 1, 'error') sys.exit(2) elif vert_levels and path_to_template: abs_path_to_template = os.path.abspath(path_to_template) # go to tmp folder os.chdir(path_tmp) # create temporary folder sct.printv('\nCreate temporary folder to average csa...', verbose) path_tmp_extract_metric = sct.slash_at_the_end('label_temp', 1) sct.run('mkdir '+path_tmp_extract_metric, verbose) # Copying output CSA volume in the temporary folder sct.printv('\nCopy data to tmp folder...', verbose) sct.run('cp '+fname_segmentation+' '+path_tmp_extract_metric) # create file info_label path_fname_seg, file_fname_seg, ext_fname_seg = sct.extract_fname(fname_segmentation) create_info_label('info_label.txt', path_tmp_extract_metric, file_fname_seg+ext_fname_seg) if slices: # average CSA os.system("sct_extract_metric -i "+path_data+name_output+" -f "+path_tmp_extract_metric+" -m wa -o "+sct.slash_at_the_end(path_data)+"mean_csa -z "+slices) if vert_levels: sct.run('cp -R '+abs_path_to_template+' .') # average CSA os.system("sct_extract_metric -i "+path_data+name_output+" -f "+path_tmp_extract_metric+" -m wa -o "+sct.slash_at_the_end(path_data)+"mean_csa -v "+vert_levels) os.chdir('..') # Remove temporary files print('\nRemove temporary folder used to average CSA...') sct.run('rm -rf '+path_tmp_extract_metric) # Remove temporary files if remove_temp_files: print('\nRemove temporary files...') sct.run('rm -rf '+path_tmp)
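# Illustrative sketch: toy version of the 'counting_z_plane' estimate used above. Sum the
# (possibly partial-volume) segmentation values of one axial slice, scale by the in-plane voxel
# area, and multiply by cos(angle) to account for the cord not being perpendicular to the slice.
# Voxel size and angle are made-up numbers.
def _sketch_csa_counting_z_plane():
    import numpy as np
    px, py = 0.8, 0.8                            # in-plane voxel size (mm)
    angle = np.radians(12.0)                     # angle between cord tangent and z axis
    slice_seg = np.zeros((20, 20))
    slice_seg[8:13, 9:14] = 1.0                  # 25 cord voxels
    return np.sum(slice_seg) * px * py * np.cos(angle)   # CSA in mm^2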
def get_crosses_coordinates(coordinates_input, gapxy=15, image_ref=None, dilate=False):
    from msct_types import Coordinate

    # if reference image is provided (segmentation), we draw the cross perpendicular to the centerline
    if image_ref is not None:
        # smooth centerline
        from sct_straighten_spinalcord import smooth_centerline
        x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(image_ref)

    # compute crosses
    cross_coordinates = []
    for coord in coordinates_input:
        if image_ref is None:
            from sct_straighten_spinalcord import compute_cross
            cross_coordinates_temp = compute_cross(coord, gapxy)
        else:
            from sct_straighten_spinalcord import compute_cross_centerline
            from numpy import where
            index_z = where(z_centerline == coord.z)
            deriv = Coordinate([x_centerline_deriv[index_z][0], y_centerline_deriv[index_z][0], z_centerline_deriv[index_z][0], 0.0])
            cross_coordinates_temp = compute_cross_centerline(coord, deriv, gapxy)

        for i, coord_cross in enumerate(cross_coordinates_temp):
            coord_cross.value = coord.value * 10 + i + 1

        # dilate cross to 3x3x3: replicate each cross point at its 26 neighbours, keeping its value
        if dilate:
            additional_coordinates = []
            for coord_temp in cross_coordinates_temp:
                for dx in [0.0, 1.0, -1.0]:
                    for dy in [0.0, 1.0, -1.0]:
                        for dz in [0.0, 1.0, -1.0]:
                            if dx == 0.0 and dy == 0.0 and dz == 0.0:
                                continue
                            additional_coordinates.append(
                                Coordinate([coord_temp.x + dx, coord_temp.y + dy, coord_temp.z + dz, coord_temp.value]))

            cross_coordinates_temp.extend(additional_coordinates)

        cross_coordinates.extend(cross_coordinates_temp)

    cross_coordinates = sorted(cross_coordinates, key=lambda obj: obj.value)
    return cross_coordinates
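# Illustrative sketch: the same 26-neighbour offset set used by the 3x3x3 dilation above can be
# generated with itertools.product, which makes the neighbourhood explicit.
def _sketch_dilation_offsets():
    from itertools import product
    offsets = [(dx, dy, dz) for dx, dy, dz in product((-1.0, 0.0, 1.0), repeat=3)
               if (dx, dy, dz) != (0.0, 0.0, 0.0)]
    return offsets                               # 26 offsets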
def compute_csa(segmentation, algo_fitting='hanning', type_window='hanning', window_length=80, angle_correction=True, use_phys_coord=True, remove_temp_files=1, verbose=1): """ Compute CSA. Note: segmentation can be binary or weighted for partial volume effect. :param segmentation: input segmentation. Could be either an Image or a file name. :param algo_fitting: :param type_window: :param window_length: :param angle_correction: :param use_phys_coord: :return metrics: Dict of class process_seg.Metric() """ # create temporary folder path_tmp = sct.tmp_create() # open image and save in temp folder im_seg = msct_image.Image(segmentation).save(path_tmp, ) # change orientation to RPI im_seg.change_orientation('RPI') nx, ny, nz, nt, px, py, pz, pt = im_seg.dim fname_seg = os.path.join(path_tmp, 'segmentation_RPI.nii.gz') im_seg.save(fname_seg) # Extract min and max index in Z direction data_seg = im_seg.data X, Y, Z = (data_seg > 0).nonzero() min_z_index, max_z_index = min(Z), max(Z) # if angle correction is required, get segmentation centerline # Note: even if angle_correction=0, we should run the code below so that z_centerline_voxel is defined (later used # with option -vert). See #1791 if use_phys_coord: # fit centerline, smooth it and return the first derivative (in physical space) x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = \ smooth_centerline(fname_seg, algo_fitting=algo_fitting, type_window=type_window, window_length=window_length, nurbs_pts_number=3000, phys_coordinates=True, verbose=verbose, all_slices=False) centerline = Centerline(x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv) # average centerline coordinates over slices of the image x_centerline_fit_rescorr, y_centerline_fit_rescorr, z_centerline_rescorr, x_centerline_deriv_rescorr, \ y_centerline_deriv_rescorr, z_centerline_deriv_rescorr = centerline.average_coordinates_over_slices(im_seg) # compute Z axis of the image, in physical coordinate axis_X, axis_Y, axis_Z = im_seg.get_directions() else: # fit centerline, smooth it and return the first derivative (in voxel space but FITTED coordinates) x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = \ smooth_centerline(fname_seg, algo_fitting=algo_fitting, type_window=type_window, window_length=window_length, nurbs_pts_number=3000, phys_coordinates=False, verbose=verbose, all_slices=True) # correct centerline fitted coordinates according to the data resolution x_centerline_fit_rescorr, y_centerline_fit_rescorr, z_centerline_rescorr, \ x_centerline_deriv_rescorr, y_centerline_deriv_rescorr, z_centerline_deriv_rescorr = \ x_centerline_fit * px, y_centerline_fit * py, z_centerline * pz, \ x_centerline_deriv * px, y_centerline_deriv * py, z_centerline_deriv * pz axis_Z = [0.0, 0.0, 1.0] # Compute CSA sct.printv('\nCompute CSA...', verbose) # Initialize 1d array with nan. Each element corresponds to a slice. csa = np.full_like(np.empty(nz), np.nan, dtype=np.double) angles = np.full_like(np.empty(nz), np.nan, dtype=np.double) for iz in range(min_z_index, max_z_index + 1): if angle_correction: # in the case of problematic segmentation (e.g., non continuous segmentation often at the extremities), # display a warning but do not crash try: # normalize the tangent vector to the centerline (i.e. 
its derivative) tangent_vect = normalize(np.array( [x_centerline_deriv_rescorr[iz - min_z_index], y_centerline_deriv_rescorr[iz - min_z_index], z_centerline_deriv_rescorr[iz - min_z_index]])) except IndexError: sct.printv( 'WARNING: Your segmentation does not seem continuous, which could cause wrong estimations at the ' 'problematic slices. Please check it, especially at the extremities.', type='warning') # compute the angle between the normal vector of the plane and the vector z angle = np.arccos(np.vdot(tangent_vect, axis_Z)) else: angle = 0.0 # compute the number of voxels, assuming the segmentation is coded for partial volume effect between 0 and 1. number_voxels = np.sum(data_seg[:, :, iz]) # compute CSA, by scaling with voxel size (in mm) and adjusting for oblique plane csa[iz] = number_voxels * px * py * np.cos(angle) angles[iz] = math.degrees(angle) # Remove temporary files if remove_temp_files: sct.printv('\nRemove temporary files...') sct.rmtree(path_tmp) # prepare output metrics = {'csa': Metric(data=csa, label='CSA [mm^2]'), 'angle': Metric(data=angles, label='Angle between cord axis and z [deg]')} return metrics
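# Illustration only (not toolbox code): a minimal sketch of the per-slice CSA formula used in
# compute_csa() above -- the (partial-volume-aware) voxel count times the in-plane voxel area,
# corrected by the cosine of the angle between the cord tangent and the slice normal. The helper
# name is made up, and taking the absolute value of the dot product is a choice of this sketch.
import numpy as np

def csa_slice(seg_slice, px, py, tangent=None):
    """CSA (mm^2) of one axial slice of a binary or soft segmentation."""
    n_vox = float(np.sum(seg_slice))  # partial-volume-aware voxel count
    if tangent is None:
        angle = 0.0
    else:
        t = np.asarray(tangent, dtype=float)
        t = t / np.linalg.norm(t)  # normalized tangent to the centerline
        angle = np.arccos(np.clip(abs(np.vdot(t, [0.0, 0.0, 1.0])), -1.0, 1.0))
    return n_vox * px * py * np.cos(angle)

# toy example: 3x3 block of ones in a 5x5 slice, 0.8 x 0.8 mm voxels, cord tilted in the y-z plane
patch = np.zeros((5, 5))
patch[1:4, 1:4] = 1
print(csa_slice(patch, 0.8, 0.8, tangent=[0.0, 0.2, 1.0]))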
def extract_centerline( fname_segmentation, remove_temp_files, verbose=0, algo_fitting="hanning", type_window="hanning", window_length=80 ): # Extract path, file and extension fname_segmentation = os.path.abspath(fname_segmentation) path_data, file_data, ext_data = sct.extract_fname(fname_segmentation) # create temporary folder path_tmp = "tmp." + time.strftime("%y%m%d%H%M%S") sct.run("mkdir " + path_tmp) # copy files into tmp folder sct.run("cp " + fname_segmentation + " " + path_tmp) # go to tmp folder os.chdir(path_tmp) # Change orientation of the input centerline into RPI sct.printv("\nOrient centerline to RPI orientation...", verbose) fname_segmentation_orient = "segmentation_rpi" + ext_data set_orientation(file_data + ext_data, "RPI", fname_segmentation_orient) # Get dimension sct.printv("\nGet dimensions...", verbose) nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_segmentation_orient) sct.printv(".. matrix size: " + str(nx) + " x " + str(ny) + " x " + str(nz), verbose) sct.printv(".. voxel size: " + str(px) + "mm x " + str(py) + "mm x " + str(pz) + "mm", verbose) # Extract orientation of the input segmentation orientation = get_orientation(file_data + ext_data) sct.printv("\nOrientation of segmentation image: " + orientation, verbose) sct.printv("\nOpen segmentation volume...", verbose) file = nibabel.load(fname_segmentation_orient) data = file.get_data() hdr = file.get_header() # Extract min and max index in Z direction X, Y, Z = (data > 0).nonzero() min_z_index, max_z_index = min(Z), max(Z) x_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)] y_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)] z_centerline = [iz for iz in range(min_z_index, max_z_index + 1)] # Extract segmentation points and average per slice for iz in range(min_z_index, max_z_index + 1): x_seg, y_seg = (data[:, :, iz] > 0).nonzero() x_centerline[iz - min_z_index] = np.mean(x_seg) y_centerline[iz - min_z_index] = np.mean(y_seg) for k in range(len(X)): data[X[k], Y[k], Z[k]] = 0 # extract centerline and smooth it x_centerline_fit, y_centerline_fit, z_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline( fname_segmentation_orient, type_window=type_window, window_length=window_length, algo_fitting=algo_fitting, verbose=verbose, ) if verbose == 2: import matplotlib.pyplot as plt # Creation of a vector x that takes into account the distance between the labels nz_nonz = len(z_centerline) x_display = [0 for i in range(x_centerline_fit.shape[0])] y_display = [0 for i in range(y_centerline_fit.shape[0])] for i in range(0, nz_nonz, 1): x_display[int(z_centerline[i] - z_centerline[0])] = x_centerline[i] y_display[int(z_centerline[i] - z_centerline[0])] = y_centerline[i] plt.figure(1) plt.subplot(2, 1, 1) plt.plot(z_centerline_fit, x_display, "ro") plt.plot(z_centerline_fit, x_centerline_fit) plt.xlabel("Z") plt.ylabel("X") plt.title("x and x_fit coordinates") plt.subplot(2, 1, 2) plt.plot(z_centerline_fit, y_display, "ro") plt.plot(z_centerline_fit, y_centerline_fit) plt.xlabel("Z") plt.ylabel("Y") plt.title("y and y_fit coordinates") plt.show() # Create an image with the centerline for iz in range(min_z_index, max_z_index + 1): data[ round(x_centerline_fit[iz - min_z_index]), round(y_centerline_fit[iz - min_z_index]), iz ] = ( 1 ) # if index is out of bounds here for hanning: either the segmentation has holes or labels have been added to the file # Write the centerline image in RPI orientation hdr.set_data_dtype("uint8") # set imagetype to 
uint8 sct.printv("\nWrite NIFTI volumes...", verbose) img = nibabel.Nifti1Image(data, None, hdr) nibabel.save(img, "centerline.nii.gz") sct.generate_output_file("centerline.nii.gz", file_data + "_centerline" + ext_data, verbose) # create a txt file with the centerline file_name = file_data + "_centerline" + ".txt" sct.printv("\nWrite text file...", verbose) file_results = open(file_name, "w") for i in range(min_z_index, max_z_index + 1): file_results.write( str(int(i)) + " " + str(x_centerline_fit[i - min_z_index]) + " " + str(y_centerline_fit[i - min_z_index]) + "\n" ) file_results.close() # Copy result into parent folder sct.run("cp " + file_name + " ../") del data # come back to parent folder os.chdir("..") # Change orientation of the output centerline into input orientation sct.printv("\nOrient centerline image to input orientation: " + orientation, verbose) fname_segmentation_orient = "tmp.segmentation_rpi" + ext_data set_orientation( path_tmp + "/" + file_data + "_centerline" + ext_data, orientation, file_data + "_centerline" + ext_data ) # Remove temporary files if remove_temp_files: sct.printv("\nRemove temporary files...", verbose) sct.run("rm -rf " + path_tmp, verbose) return file_data + "_centerline" + ext_data
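# Illustration only (not toolbox code): extract_centerline() above builds its raw centerline by
# averaging the segmentation coordinates slice by slice before smoothing. A minimal numpy sketch of
# that per-slice centre-of-mass step; the function name and toy data are made up.
import numpy as np

def centerline_from_mask(seg):
    """Per-slice centre of mass of a 3-D binary segmentation, returned as (x, y, z) tuples."""
    centerline = []
    for iz in np.unique(np.nonzero(seg)[2]):
        x_seg, y_seg = np.nonzero(seg[:, :, iz])
        centerline.append((float(x_seg.mean()), float(y_seg.mean()), int(iz)))
    return centerline

seg = np.zeros((10, 10, 5))
seg[4:7, 4:7, :] = 1  # a 3-voxel-thick "cord" running along z
print(centerline_from_mask(seg)[:2])  # [(5.0, 5.0, 0), (5.0, 5.0, 1)]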
def compute_length(fname_segmentation, remove_temp_files, verbose=0):
    from math import sqrt

    # Extract path, file and extension
    fname_segmentation = os.path.abspath(fname_segmentation)
    path_data, file_data, ext_data = sct.extract_fname(fname_segmentation)

    # create temporary folder and copy the input file into it
    path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir ' + path_tmp)
    sct.run('cp ' + fname_segmentation + ' ' + path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    # Change orientation of the input centerline into RPI
    sct.printv('\nOrient centerline to RPI orientation...', verbose)
    fname_segmentation_orient = 'segmentation_rpi' + ext_data
    set_orientation(file_data + ext_data, 'RPI', fname_segmentation_orient)

    # Get dimension
    sct.printv('\nGet dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_segmentation_orient).dim
    sct.printv('.. matrix size: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose)
    sct.printv('.. voxel size: ' + str(px) + 'mm x ' + str(py) + 'mm x ' + str(pz) + 'mm', verbose)

    # smooth segmentation/centerline
    x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = \
        smooth_centerline(fname_segmentation_orient, type_window='hanning', window_length=80,
                          algo_fitting='hanning', verbose=verbose)

    # compute length of centerline: sum of Euclidean distances between consecutive fitted points, scaled to mm
    result_length = 0.0
    for i in range(len(x_centerline_fit) - 1):
        result_length += sqrt(((x_centerline_fit[i + 1] - x_centerline_fit[i]) * px) ** 2 +
                              ((y_centerline_fit[i + 1] - y_centerline_fit[i]) * py) ** 2 +
                              ((z_centerline[i + 1] - z_centerline[i]) * pz) ** 2)

    # come back to parent folder and clean up
    os.chdir('..')
    if remove_temp_files:
        sct.printv('\nRemove temporary files...', verbose)
        sct.run('rm -rf ' + path_tmp)

    return result_length
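# Illustration only (not toolbox code): the length computed in compute_length() above is the sum of
# Euclidean distances between consecutive fitted centerline points after scaling voxel indices to mm.
# A vectorised numpy sketch of the same quantity; the function name and toy values are made up.
import numpy as np

def centerline_length_mm(x, y, z, px, py, pz):
    """Arc length (mm) of a centerline given voxel coordinates and voxel sizes."""
    dx = np.diff(np.asarray(x, dtype=float)) * px
    dy = np.diff(np.asarray(y, dtype=float)) * py
    dz = np.diff(np.asarray(z, dtype=float)) * pz
    return float(np.sum(np.sqrt(dx ** 2 + dy ** 2 + dz ** 2)))

# toy example: a straight vertical centerline spanning 11 slices with 2 mm slice thickness
print(centerline_length_mm([5] * 11, [5] * 11, range(11), 0.5, 0.5, 2.0))  # 20.0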
def run_main(): sct.start_stream_logger() parser = get_parser() args = sys.argv[1:] arguments = parser.parse(args) # Input filename fname_input_data = arguments["-i"] fname_data = os.path.abspath(fname_input_data) # Method used method = 'optic' if "-method" in arguments: method = arguments["-method"] # Contrast type contrast_type = '' if "-c" in arguments: contrast_type = arguments["-c"] if method == 'optic' and not contrast_type: # Contrast must be error = 'ERROR: -c is a mandatory argument when using Optic method.' sct.printv(error, type='error') return # Ga between slices interslice_gap = 10.0 if "-gap" in arguments: interslice_gap = float(arguments["-gap"]) # Output folder if "-ofolder" in arguments: folder_output = sct.slash_at_the_end(arguments["-ofolder"], slash=1) else: folder_output = './' # Remove temporary files remove_temp_files = True if "-r" in arguments: remove_temp_files = bool(int(arguments["-r"])) # Outputs a ROI file output_roi = False if "-roi" in arguments: output_roi = bool(int(arguments["-roi"])) # Verbosity verbose = 0 if "-v" in arguments: verbose = int(arguments["-v"]) if method == 'viewer': path_data, file_data, ext_data = sct.extract_fname(fname_data) # create temporary folder temp_folder = sct.TempFolder() temp_folder.copy_from(fname_data) temp_folder.chdir() # make sure image is in SAL orientation, as it is the orientation used by the viewer image_input = Image(fname_data) image_input_orientation = orientation(image_input, get=True, verbose=False) reoriented_image_filename = sct.add_suffix(file_data + ext_data, "_SAL") cmd_image = 'sct_image -i "%s" -o "%s" -setorient SAL -v 0' % ( fname_data, reoriented_image_filename) sct.run(cmd_image, verbose=False) # extract points manually using the viewer fname_points = viewer_centerline(image_fname=reoriented_image_filename, interslice_gap=interslice_gap, verbose=verbose) if fname_points is not None: image_points_RPI = sct.add_suffix(fname_points, "_RPI") cmd_image = 'sct_image -i "%s" -o "%s" -setorient RPI -v 0' % ( fname_points, image_points_RPI) sct.run(cmd_image, verbose=False) image_input_reoriented = Image(image_points_RPI) # fit centerline, smooth it and return the first derivative (in physical space) x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline( image_points_RPI, algo_fitting='nurbs', nurbs_pts_number=3000, phys_coordinates=True, verbose=verbose, all_slices=False) centerline = Centerline(x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv) # average centerline coordinates over slices of the image x_centerline_fit_rescorr, y_centerline_fit_rescorr, z_centerline_rescorr, x_centerline_deriv_rescorr, y_centerline_deriv_rescorr, z_centerline_deriv_rescorr = centerline.average_coordinates_over_slices( image_input_reoriented) # compute z_centerline in image coordinates for usage in vertebrae mapping voxel_coordinates = image_input_reoriented.transfo_phys2pix([[ x_centerline_fit_rescorr[i], y_centerline_fit_rescorr[i], z_centerline_rescorr[i] ] for i in range(len(z_centerline_rescorr))]) x_centerline_voxel = [coord[0] for coord in voxel_coordinates] y_centerline_voxel = [coord[1] for coord in voxel_coordinates] z_centerline_voxel = [coord[2] for coord in voxel_coordinates] # compute z_centerline in image coordinates with continuous precision voxel_coordinates = image_input_reoriented.transfo_phys2continuouspix( [[ x_centerline_fit_rescorr[i], y_centerline_fit_rescorr[i], 
z_centerline_rescorr[i] ] for i in range(len(z_centerline_rescorr))]) x_centerline_voxel_cont = [coord[0] for coord in voxel_coordinates] y_centerline_voxel_cont = [coord[1] for coord in voxel_coordinates] z_centerline_voxel_cont = [coord[2] for coord in voxel_coordinates] # Create an image with the centerline image_input_reoriented.data *= 0 min_z_index, max_z_index = int(round( min(z_centerline_voxel))), int(round(max(z_centerline_voxel))) for iz in range(min_z_index, max_z_index + 1): image_input_reoriented.data[ int(round(x_centerline_voxel[iz - min_z_index])), int(round(y_centerline_voxel[iz - min_z_index])), int( iz )] = 1 # if index is out of bounds here for hanning: either the segmentation has holes or labels have been added to the file # Write the centerline image sct.printv('\nWrite NIFTI volumes...', verbose) fname_centerline_oriented = file_data + '_centerline' + ext_data image_input_reoriented.setFileName(fname_centerline_oriented) image_input_reoriented.changeType('uint8') image_input_reoriented.save() sct.printv('\nSet to original orientation...', verbose) sct.run('sct_image -i ' + fname_centerline_oriented + ' -setorient ' + image_input_orientation + ' -o ' + fname_centerline_oriented) # create a txt file with the centerline fname_centerline_oriented_txt = file_data + '_centerline.txt' file_results = open(fname_centerline_oriented_txt, 'w') for i in range(min_z_index, max_z_index + 1): file_results.write( str(int(i)) + ' ' + str(round(x_centerline_voxel_cont[i - min_z_index], 2)) + ' ' + str(round(y_centerline_voxel_cont[i - min_z_index], 2)) + '\n') file_results.close() fname_centerline_oriented_roi = optic.centerline2roi( fname_image=fname_centerline_oriented, folder_output='./', verbose=verbose) # return to initial folder temp_folder.chdir_undo() # copy result to output folder shutil.copy(temp_folder.get_path() + fname_centerline_oriented, folder_output) shutil.copy(temp_folder.get_path() + fname_centerline_oriented_txt, folder_output) if output_roi: shutil.copy( temp_folder.get_path() + fname_centerline_oriented_roi, folder_output) centerline_filename = folder_output + fname_centerline_oriented else: centerline_filename = 'error' # delete temporary folder if remove_temp_files: temp_folder.cleanup() else: # condition on verbose when using OptiC if verbose == 1: verbose = 2 # OptiC models path_script = os.path.dirname(__file__) path_sct = os.path.dirname(path_script) optic_models_path = os.path.join(path_sct, 'data/optic_models', '{}_model'.format(contrast_type)) # Execute OptiC binary _, centerline_filename = optic.detect_centerline( image_fname=fname_data, contrast_type=contrast_type, optic_models_path=optic_models_path, folder_output=folder_output, remove_temp_files=remove_temp_files, output_roi=output_roi, verbose=verbose) sct.printv('\nDone! To view results, type:', verbose) sct.printv( "fslview " + fname_input_data + " " + centerline_filename + " -l Red -b 0,1 -t 0.7 &\n", verbose, 'info')
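# Illustration only (not toolbox code): the viewer branch above ends by rasterising the fitted
# centerline into an empty volume, one voxel per axial slice, before saving it as NIfTI. A minimal
# numpy sketch of that rasterisation; the shape and coordinates are made up.
import numpy as np

def centerline_to_volume(shape, x_vox, y_vox, z_vox):
    """Write one voxel per slice at the rounded centerline coordinates."""
    data = np.zeros(shape, dtype=np.uint8)
    for xi, yi, zi in zip(x_vox, y_vox, z_vox):
        data[int(round(xi)), int(round(yi)), int(round(zi))] = 1
    return data

vol = centerline_to_volume((64, 64, 16), [32.2] * 16, [30.7] * 16, range(16))
print(vol.sum())  # 16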
def compute_csa(fname_segmentation, name_method, volume_output, verbose, remove_temp_files, step, smoothing_param, figure_fit, name_output, slices, vert_levels, path_to_template, algo_fitting = 'hanning', type_window = 'hanning', window_length = 80): # Extract path, file and extension fname_segmentation = os.path.abspath(fname_segmentation) path_data, file_data, ext_data = sct.extract_fname(fname_segmentation) # create temporary folder sct.printv('\nCreate temporary folder...', verbose) path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1) sct.run('mkdir '+path_tmp, verbose) # Copying input data to tmp folder and convert to nii sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose) sct.run('isct_c3d '+fname_segmentation+' -o '+path_tmp+'segmentation.nii') # go to tmp folder os.chdir(path_tmp) # Change orientation of the input segmentation into RPI sct.printv('\nChange orientation of the input segmentation into RPI...', verbose) fname_segmentation_orient = set_orientation('segmentation.nii', 'RPI', 'segmentation_orient.nii') # Get size of data sct.printv('\nGet data dimensions...', verbose) nx, ny, nz, nt, px, py, pz, pt = Image(fname_segmentation_orient).dim sct.printv(' ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose) # Open segmentation volume sct.printv('\nOpen segmentation volume...', verbose) file_seg = nibabel.load(fname_segmentation_orient) data_seg = file_seg.get_data() hdr_seg = file_seg.get_header() # # Extract min and max index in Z direction X, Y, Z = (data_seg > 0).nonzero() min_z_index, max_z_index = min(Z), max(Z) # Xp, Yp = (data_seg[:, :, 0] >= 0).nonzero() # X and Y range # extract centerline and smooth it x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(fname_segmentation_orient, algo_fitting=algo_fitting, type_window=type_window, window_length=window_length, verbose=verbose) z_centerline_scaled = [x*pz for x in z_centerline] # Compute CSA sct.printv('\nCompute CSA...', verbose) # Empty arrays in which CSA for each z slice will be stored csa = np.zeros(max_z_index-min_z_index+1) # csa = [0.0 for i in xrange(0, max_z_index-min_z_index+1)] for iz in xrange(0, len(z_centerline)): # compute the vector normal to the plane normal = normalize(np.array([x_centerline_deriv[iz], y_centerline_deriv[iz], z_centerline_deriv[iz]])) # compute the angle between the normal vector of the plane and the vector z angle = np.arccos(np.dot(normal, [0, 0, 1])) # compute the number of voxels, assuming the segmentation is coded for partial volume effect between 0 and 1. number_voxels = sum(sum(data_seg[:, :, iz+min_z_index])) # compute CSA, by scaling with voxel size (in mm) and adjusting for oblique plane csa[iz] = number_voxels * px * py * np.cos(angle) if smoothing_param: from msct_smooth import smoothing_window sct.printv('\nSmooth CSA across slices...', verbose) sct.printv('.. 
Hanning window: '+str(smoothing_param)+' mm', verbose) csa_smooth = smoothing_window(csa, window_len=smoothing_param/pz, window='hanning', verbose=0) # display figure if verbose == 2: import matplotlib.pyplot as plt plt.figure() pltx, = plt.plot(z_centerline_scaled, csa, 'bo') pltx_fit, = plt.plot(z_centerline_scaled, csa_smooth, 'r', linewidth=2) plt.title("Cross-sectional area (CSA)") plt.xlabel('z (mm)') plt.ylabel('CSA (mm^2)') plt.legend([pltx, pltx_fit], ['Raw', 'Smoothed']) plt.show() # update variable csa = csa_smooth # Create output text file sct.printv('\nWrite text file...', verbose) file_results = open('csa.txt', 'w') for i in range(min_z_index, max_z_index+1): file_results.write(str(int(i)) + ',' + str(csa[i-min_z_index])+'\n') # Display results sct.printv('z='+str(i-min_z_index)+': '+str(csa[i-min_z_index])+' mm^2', verbose, 'bold') file_results.close() # output volume of csa values if volume_output: sct.printv('\nCreate volume of CSA values...', verbose) # get orientation of the input data orientation = get_orientation('segmentation.nii') data_seg = data_seg.astype(np.float32, copy=False) # loop across slices for iz in range(min_z_index, max_z_index+1): # retrieve seg pixels x_seg, y_seg = (data_seg[:, :, iz] > 0).nonzero() seg = [[x_seg[i],y_seg[i]] for i in range(0, len(x_seg))] # loop across pixels in segmentation for i in seg: # replace value with csa value data_seg[i[0], i[1], iz] = csa[iz-min_z_index] # create header hdr_seg.set_data_dtype('float32') # set imagetype to uint8 # save volume img = nibabel.Nifti1Image(data_seg, None, hdr_seg) nibabel.save(img, 'csa_RPI.nii') # Change orientation of the output centerline into input orientation fname_csa_volume = set_orientation('csa_RPI.nii', orientation, 'csa_RPI_orient.nii') # come back to parent folder os.chdir('..') # Generate output files sct.printv('\nGenerate output files...', verbose) from shutil import copyfile copyfile(path_tmp+'csa.txt', path_data+param.fname_csa) # sct.generate_output_file(path_tmp+'csa.txt', path_data+param.fname_csa) # extension already included in param.fname_csa if volume_output: sct.generate_output_file(fname_csa_volume, path_data+name_output) # extension already included in name_output # average csa across vertebral levels or slices if asked (flag -z or -l) if slices or vert_levels: if vert_levels and not path_to_template: sct.printv('\nERROR: Path to template is missing. 
See usage.\n', 1, 'error') sys.exit(2) elif vert_levels and path_to_template: abs_path_to_template = os.path.abspath(path_to_template) # go to tmp folder os.chdir(path_tmp) # create temporary folder sct.printv('\nCreate temporary folder to average csa...', verbose) path_tmp_extract_metric = sct.slash_at_the_end('label_temp', 1) sct.run('mkdir '+path_tmp_extract_metric, verbose) # Copying output CSA volume in the temporary folder sct.printv('\nCopy data to tmp folder...', verbose) sct.run('cp '+fname_segmentation+' '+path_tmp_extract_metric) # create file info_label path_fname_seg, file_fname_seg, ext_fname_seg = sct.extract_fname(fname_segmentation) create_info_label('info_label.txt', path_tmp_extract_metric, file_fname_seg+ext_fname_seg) # average CSA if slices: os.system("sct_extract_metric -i "+path_data+name_output+" -f "+path_tmp_extract_metric+" -m wa -o ../csa_mean.txt -z "+slices) if vert_levels: sct.run('cp -R '+abs_path_to_template+' .') os.system("sct_extract_metric -i "+path_data+name_output+" -f "+path_tmp_extract_metric+" -m wa -o ../csa_mean.txt -v "+vert_levels) os.chdir('..') # Remove temporary files print('\nRemove temporary folder used to average CSA...') sct.run('rm -rf '+path_tmp_extract_metric) # Remove temporary files if remove_temp_files: print('\nRemove temporary files...') sct.run('rm -rf '+path_tmp)
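# Illustration only (not msct_smooth.smoothing_window): the CSA profile above is smoothed across
# slices with a Hanning window whose length is given in mm and converted to slices. A generic,
# self-contained sketch of such smoothing, using edge reflection to limit boundary artefacts.
import numpy as np

def smooth_profile_hanning(values, window_len):
    """Smooth a 1-D profile (e.g. CSA per slice) with a normalised Hanning window."""
    window_len = max(3, int(window_len) | 1)  # force an odd window of at least 3 samples
    w = np.hanning(window_len)
    w /= w.sum()
    half = window_len // 2
    padded = np.pad(np.asarray(values, dtype=float), half, mode='reflect')
    return np.convolve(padded, w, mode='valid')

csa = np.array([70.0, 71.0, 90.0, 72.0, 73.0, 74.0])  # one noisy slice
print(np.round(smooth_profile_hanning(csa, 5), 1))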
def extract_centerline(fname_segmentation, remove_temp_files, verbose = 0, algo_fitting = 'hanning', type_window = 'hanning', window_length = 80): # Extract path, file and extension fname_segmentation = os.path.abspath(fname_segmentation) path_data, file_data, ext_data = sct.extract_fname(fname_segmentation) # create temporary folder sct.printv('\nCreate temporary folder...', verbose) path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S") + '_'+str(randint(1, 1000000)), 1) sct.run('mkdir '+path_tmp, verbose) # Copying input data to tmp folder sct.printv('\nCopying data to tmp folder...', verbose) sct.run('sct_convert -i '+fname_segmentation+' -o '+path_tmp+'segmentation.nii.gz', verbose) # go to tmp folder os.chdir(path_tmp) # Change orientation of the input centerline into RPI sct.printv('\nOrient centerline to RPI orientation...', verbose) # fname_segmentation_orient = 'segmentation_RPI.nii.gz' # BELOW DOES NOT WORK (JULIEN, 2015-10-17) # im_seg = Image(file_data+ext_data) # set_orientation(im_seg, 'RPI') # im_seg.setFileName(fname_segmentation_orient) # im_seg.save() sct.run('sct_image -i segmentation.nii.gz -setorient RPI -o segmentation_RPI.nii.gz', verbose) # Open segmentation volume sct.printv('\nOpen segmentation volume...', verbose) im_seg = Image('segmentation_RPI.nii.gz') data = im_seg.data # Get size of data sct.printv('\nGet data dimensions...', verbose) nx, ny, nz, nt, px, py, pz, pt = im_seg.dim sct.printv('.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz), verbose) sct.printv('.. voxel size: '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm', verbose) # # Get dimension # sct.printv('\nGet dimensions...', verbose) # nx, ny, nz, nt, px, py, pz, pt = im_seg.dim # # # Extract orientation of the input segmentation # orientation = get_orientation(im_seg) # sct.printv('\nOrientation of segmentation image: ' + orientation, verbose) # # sct.printv('\nOpen segmentation volume...', verbose) # data = im_seg.data # hdr = im_seg.hdr # Extract min and max index in Z direction X, Y, Z = (data>0).nonzero() min_z_index, max_z_index = min(Z), max(Z) x_centerline = [0 for i in range(0,max_z_index-min_z_index+1)] y_centerline = [0 for i in range(0,max_z_index-min_z_index+1)] z_centerline = [iz for iz in range(min_z_index, max_z_index+1)] # Extract segmentation points and average per slice for iz in range(min_z_index, max_z_index+1): x_seg, y_seg = (data[:,:,iz]>0).nonzero() x_centerline[iz-min_z_index] = np.mean(x_seg) y_centerline[iz-min_z_index] = np.mean(y_seg) for k in range(len(X)): data[X[k], Y[k], Z[k]] = 0 # extract centerline and smooth it x_centerline_fit, y_centerline_fit, z_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline('segmentation_RPI.nii.gz', type_window = type_window, window_length = window_length, algo_fitting = algo_fitting, verbose = verbose) if verbose == 2: import matplotlib.pyplot as plt #Creation of a vector x that takes into account the distance between the labels nz_nonz = len(z_centerline) x_display = [0 for i in range(x_centerline_fit.shape[0])] y_display = [0 for i in range(y_centerline_fit.shape[0])] for i in range(0, nz_nonz, 1): x_display[int(z_centerline[i]-z_centerline[0])] = x_centerline[i] y_display[int(z_centerline[i]-z_centerline[0])] = y_centerline[i] plt.figure(1) plt.subplot(2,1,1) plt.plot(z_centerline_fit, x_display, 'ro') plt.plot(z_centerline_fit, x_centerline_fit) plt.xlabel("Z") plt.ylabel("X") plt.title("x and x_fit coordinates") plt.subplot(2,1,2) plt.plot(z_centerline_fit, y_display, 
'ro') plt.plot(z_centerline_fit, y_centerline_fit) plt.xlabel("Z") plt.ylabel("Y") plt.title("y and y_fit coordinates") plt.show() # Create an image with the centerline for iz in range(min_z_index, max_z_index+1): data[round(x_centerline_fit[iz-min_z_index]), round(y_centerline_fit[iz-min_z_index]), iz] = 1 # if index is out of bounds here for hanning: either the segmentation has holes or labels have been added to the file # Write the centerline image in RPI orientation # hdr.set_data_dtype('uint8') # set imagetype to uint8 sct.printv('\nWrite NIFTI volumes...', verbose) im_seg.data = data im_seg.setFileName('centerline_RPI.nii.gz') im_seg.changeType('uint8') im_seg.save() sct.printv('\nSet to original orientation...', verbose) # get orientation of the input data im_seg_original = Image('segmentation.nii.gz') orientation = im_seg_original.orientation sct.run('sct_image -i centerline_RPI.nii.gz -setorient '+orientation+' -o centerline.nii.gz') # create a txt file with the centerline name_output_txt = 'centerline.txt' sct.printv('\nWrite text file...', verbose) file_results = open(name_output_txt, 'w') for i in range(min_z_index, max_z_index+1): file_results.write(str(int(i)) + ' ' + str(x_centerline_fit[i-min_z_index]) + ' ' + str(y_centerline_fit[i-min_z_index]) + '\n') file_results.close() # come back to parent folder os.chdir('..') # Generate output files sct.printv('\nGenerate output files...', verbose) sct.generate_output_file(path_tmp+'centerline.nii.gz', file_data+'_centerline.nii.gz') sct.generate_output_file(path_tmp+'centerline.txt', file_data+'_centerline.txt') # Remove temporary files if remove_temp_files: sct.printv('\nRemove temporary files...', verbose) sct.run('rm -rf '+path_tmp, verbose) return file_data+'_centerline.nii.gz'
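# Illustration only (not toolbox code): the centerline text file written above contains one line per
# slice, 'z x y' separated by spaces. A minimal sketch of a writer for that format; the file name and
# argument names are made up.
def write_centerline_txt(fname, z_indices, x_fit, y_fit):
    """Write a 'z x y' centerline text file, one slice per line."""
    with open(fname, 'w') as f:
        for i, z in enumerate(z_indices):
            f.write('{} {} {}\n'.format(int(z), x_fit[i], y_fit[i]))

# example: write_centerline_txt('centerline_example.txt', range(3), [12.1, 12.0, 11.9], [20.4, 20.5, 20.6])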
def execute(self): print 'Execution of the SCAD algorithm' vesselness_file_name = "imageVesselNessFilter.nii.gz" raw_file_name = "raw.nii" if self.debug: import matplotlib.pyplot as plt # import for debug purposes # create tmp and copy input path_tmp = sct.tmp_create() sct.tmp_copy_nifti(self.input_image.absolutepath, path_tmp, raw_file_name) if self.vesselness_provided: sct.run('cp '+vesselness_file_name+' '+path_tmp+vesselness_file_name) os.chdir(path_tmp) # get input image information img = Image(raw_file_name) # save original orientation and change image to RPI self.raw_orientation = img.change_orientation() # get body symmetry sym = SymmetryDetector(raw_file_name, self.contrast, crop_xy=1) self.raw_symmetry = sym.execute() # vesselness filter if not self.vesselness_provided: sct.run('sct_vesselness -i '+raw_file_name+' -t ' + self._contrast) # load vesselness filter data and perform minimum path on it img = Image(vesselness_file_name) raw_orientation = img.change_orientation() self.minimum_path_data, self.J1_min_path, self.J2_min_path = get_minimum_path(img.data, invert=1, debug=1, smooth_factor=1) # Apply an exponent to the minimum path self.minimum_path_powered = np.power(self.minimum_path_data, self.minimum_path_exponent) # Saving in Image since smooth_minimal_path needs pixel dimensions img.data = self.minimum_path_powered # smooth resulting minimal path self.smoothed_min_path = smooth_minimal_path(img) # normalise symmetry values between 0 and 1 normalised_symmetry = equalize_array_histogram(self.raw_symmetry) # multiply normalised symmetry data with the minimum path result self.spine_detect_data = np.multiply(self.smoothed_min_path.data, normalised_symmetry) # extract the centerline from the minimal path image centerline_with_outliers = get_centerline(self.spine_detect_data, self.spine_detect_data.shape) img.data = centerline_with_outliers img.change_orientation() img.file_name = "centerline_with_outliers" img.save() # use a b-spline to smooth out the centerline x, y, z, dx, dy, dz = smooth_centerline("centerline_with_outliers.nii.gz") # save the centerline centerline_dim = img.dim img.data = np.zeros(centerline_dim) for i in range(0, np.size(x)-1): img.data[int(x[i]), int(y[i]), int(z[i])] = 1 img.change_orientation(raw_orientation) img.file_name = "centerline" img.save() # copy back centerline os.chdir('../') sct.tmp_copy_nifti(path_tmp + 'centerline.nii.gz',self.input_image.path,self.input_image.file_name+'_centerline'+self.input_image.ext) if self.rm_tmp_file == 1: import shutil shutil.rmtree(path_tmp) if self.produce_output: self.produce_output_files()
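# Illustration only (not toolbox code): the SCAD pipeline above raises the minimum-path map to a power
# to sharpen it, rescales the symmetry map, and multiplies the two voxel-wise. A minimal sketch of that
# combination; min-max rescaling stands in for the toolbox's histogram equalisation, and the function
# name is made up.
import numpy as np

def combine_minpath_and_symmetry(min_path, symmetry, exponent=2):
    """Voxel-wise product of a sharpened minimum-path map and a rescaled symmetry map."""
    min_path = np.power(np.asarray(min_path, dtype=float), exponent)
    sym = np.asarray(symmetry, dtype=float)
    sym = (sym - sym.min()) / (sym.max() - sym.min() + 1e-12)  # rescale to [0, 1]
    return min_path * sym

print(combine_minpath_and_symmetry(np.random.rand(4, 4, 4), np.random.rand(4, 4, 4)).shape)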
def compute_csa(fname_segmentation, verbose, remove_temp_files, step, smoothing_param, figure_fit, file_csa_volume, slices, vert_levels, fname_vertebral_labeling='', algo_fitting = 'hanning', type_window = 'hanning', window_length = 80): # Extract path, file and extension fname_segmentation = os.path.abspath(fname_segmentation) path_data, file_data, ext_data = sct.extract_fname(fname_segmentation) # create temporary folder sct.printv('\nCreate temporary folder...', verbose) path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S") + '_'+str(randint(1, 1000000)), 1) sct.run('mkdir '+path_tmp, verbose) # Copying input data to tmp folder sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose) sct.run('sct_convert -i '+fname_segmentation+' -o '+path_tmp+'segmentation.nii.gz', verbose) # go to tmp folder os.chdir(path_tmp) # Change orientation of the input segmentation into RPI sct.printv('\nChange orientation to RPI...', verbose) sct.run('sct_image -i segmentation.nii.gz -setorient RPI -o segmentation_RPI.nii.gz', verbose) # Open segmentation volume sct.printv('\nOpen segmentation volume...', verbose) im_seg = Image('segmentation_RPI.nii.gz') data_seg = im_seg.data # hdr_seg = im_seg.hdr # Get size of data sct.printv('\nGet data dimensions...', verbose) nx, ny, nz, nt, px, py, pz, pt = im_seg.dim sct.printv(' ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose) # # Extract min and max index in Z direction X, Y, Z = (data_seg > 0).nonzero() min_z_index, max_z_index = min(Z), max(Z) # extract centerline and smooth it x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline('segmentation_RPI.nii.gz', algo_fitting=algo_fitting, type_window=type_window, window_length=window_length, verbose=verbose) z_centerline_scaled = [x*pz for x in z_centerline] # Compute CSA sct.printv('\nCompute CSA...', verbose) # Empty arrays in which CSA for each z slice will be stored csa = np.zeros(max_z_index-min_z_index+1) for iz in xrange(min_z_index, max_z_index+1): # compute the vector normal to the plane normal = normalize(np.array([x_centerline_deriv[iz-min_z_index], y_centerline_deriv[iz-min_z_index], z_centerline_deriv[iz-min_z_index]])) # compute the angle between the normal vector of the plane and the vector z angle = np.arccos(np.dot(normal, [0, 0, 1])) # compute the number of voxels, assuming the segmentation is coded for partial volume effect between 0 and 1. number_voxels = np.sum(data_seg[:, :, iz]) # compute CSA, by scaling with voxel size (in mm) and adjusting for oblique plane csa[iz-min_z_index] = number_voxels * px * py * np.cos(angle) sct.printv('\nSmooth CSA across slices...', verbose) if smoothing_param: from msct_smooth import smoothing_window sct.printv('.. Hanning window: '+str(smoothing_param)+' mm', verbose) csa_smooth = smoothing_window(csa, window_len=smoothing_param/pz, window='hanning', verbose=0) # display figure if verbose == 2: import matplotlib.pyplot as plt plt.figure() pltx, = plt.plot(z_centerline_scaled, csa, 'bo') pltx_fit, = plt.plot(z_centerline_scaled, csa_smooth, 'r', linewidth=2) plt.title("Cross-sectional area (CSA)") plt.xlabel('z (mm)') plt.ylabel('CSA (mm^2)') plt.legend([pltx, pltx_fit], ['Raw', 'Smoothed']) plt.show() # update variable csa = csa_smooth else: sct.printv('.. 
No smoothing!', verbose) # Create output text file sct.printv('\nWrite text file...', verbose) file_results = open('csa.txt', 'w') for i in range(min_z_index, max_z_index+1): file_results.write(str(int(i)) + ',' + str(csa[i-min_z_index])+'\n') # Display results sct.printv('z='+str(i-min_z_index)+': '+str(csa[i-min_z_index])+' mm^2', verbose, 'bold') file_results.close() # output volume of csa values sct.printv('\nCreate volume of CSA values...', verbose) data_csa = data_seg.astype(np.float32, copy=False) # loop across slices for iz in range(min_z_index, max_z_index+1): # retrieve seg pixels x_seg, y_seg = (data_csa[:, :, iz] > 0).nonzero() seg = [[x_seg[i],y_seg[i]] for i in range(0, len(x_seg))] # loop across pixels in segmentation for i in seg: # replace value with csa value data_csa[i[0], i[1], iz] = csa[iz-min_z_index] # replace data im_seg.data = data_csa # set original orientation # TODO: FIND ANOTHER WAY!! # im_seg.change_orientation(orientation) --> DOES NOT WORK! # set file name -- use .gz because faster to write im_seg.setFileName('csa_volume_RPI.nii.gz') im_seg.changeType('float32') # save volume im_seg.save() # get orientation of the input data im_seg_original = Image('segmentation.nii.gz') orientation = im_seg_original.orientation sct.run('sct_image -i csa_volume_RPI.nii.gz -setorient '+orientation+' -o '+file_csa_volume) # come back to parent folder os.chdir('..') # Generate output files sct.printv('\nGenerate output files...', verbose) copyfile(path_tmp+'csa.txt', path_data+param.fname_csa) # sct.generate_output_file(path_tmp+'csa.txt', path_data+param.fname_csa) # extension already included in param.fname_csa sct.generate_output_file(path_tmp+file_csa_volume, path_data+file_csa_volume) # extension already included in name_output # average csa across vertebral levels or slices if asked (flag -z or -l) if slices or vert_levels: from sct_extract_metric import save_metrics warning = '' if vert_levels and not fname_vertebral_labeling: sct.printv('\nERROR: Vertebral labeling file is missing. See usage.\n', 1, 'error') elif vert_levels and fname_vertebral_labeling: # from sct_extract_metric import get_slices_matching_with_vertebral_levels sct.printv('\tSelected vertebral levels... 
'+vert_levels) # convert the vertebral labeling file to RPI orientation im_vertebral_labeling = set_orientation(Image(fname_vertebral_labeling), 'RPI', fname_out=path_tmp+'vertebral_labeling_RPI.nii') # get the slices corresponding to the vertebral levels # slices, vert_levels_list, warning = get_slices_matching_with_vertebral_levels(data_seg, vert_levels, im_vertebral_labeling.data, 1) slices, vert_levels_list, warning = get_slices_matching_with_vertebral_levels_based_centerline(vert_levels, im_vertebral_labeling.data, x_centerline_fit, y_centerline_fit, z_centerline) elif not vert_levels: vert_levels_list = [] sct.printv('Average CSA across slices...', type='info') # parse the selected slices slices_lim = slices.strip().split(':') slices_list = range(int(slices_lim[0]), int(slices_lim[1])+1) CSA_for_selected_slices = [] # Read the file csa.txt and get the CSA for the selected slices with open(path_data+param.fname_csa) as openfile: for line in openfile: line_split = line.strip().split(',') if int(line_split[0]) in slices_list: CSA_for_selected_slices.append(float(line_split[1])) # average the CSA mean_CSA = np.mean(np.asarray(CSA_for_selected_slices)) std_CSA = np.std(np.asarray(CSA_for_selected_slices)) sct.printv('Mean CSA: '+str(mean_CSA)+' +/- '+str(std_CSA)+' mm^2', type='info') # write result into output file save_metrics([0], [file_data], slices, [mean_CSA], [std_CSA], path_data + 'csa_mean.txt', path_data+file_csa_volume, 'nb_voxels x px x py x cos(theta) slice-by-slice (in mm^3)', '', actual_vert=vert_levels_list, warning_vert_levels=warning) # compute volume between the selected slices sct.printv('Compute the volume in between the selected slices...', type='info') nb_vox = np.sum(data_seg[:, :, slices_list]) volume = nb_vox*px*py*pz sct.printv('Volume in between the selected slices: '+str(volume)+' mm^3', type='info') # write result into output file save_metrics([0], [file_data], slices, [volume], [np.nan], path_data + 'volume.txt', path_data+file_data, 'nb_voxels x px x py x pz (in mm^3)', '', actual_vert=vert_levels_list, warning_vert_levels=warning) # Remove temporary files if remove_temp_files: sct.printv('\nRemove temporary files...') sct.run('rm -rf '+path_tmp, error_exit='warning')
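# Illustration only (not toolbox code): a minimal sketch of the '-z' aggregation performed above,
# assuming csa is a full-length per-slice array (NaN outside the cord) and seg_data is the (possibly
# soft) segmentation volume. The function name and toy data are made up.
import numpy as np

def summarize_csa(csa, seg_data, z_min, z_max, px, py, pz):
    """Mean/std of CSA over slices z_min..z_max and segmentation volume within that range (mm^3)."""
    selected = np.asarray(csa[z_min:z_max + 1], dtype=float)
    mean_csa, std_csa = float(np.nanmean(selected)), float(np.nanstd(selected))
    nb_vox = float(np.sum(seg_data[:, :, z_min:z_max + 1]))  # partial-volume-aware voxel count
    volume = nb_vox * px * py * pz
    return mean_csa, std_csa, volume

seg = np.zeros((8, 8, 5))
seg[3:5, 3:5, :] = 1
csa = np.full(5, 4 * 0.5 * 0.5)
print(summarize_csa(csa, seg, 1, 3, 0.5, 0.5, 2.0))  # (1.0, 0.0, 6.0)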
def project_labels_on_spinalcord(fname_label, fname_seg): """ Project labels orthogonally on the spinal cord centerline. The algorithm works by finding the smallest distance between each label and the spinal cord center of mass. :param fname_label: file name of labels :param fname_seg: file name of cord segmentation (could also be of centerline) :return: file name of projected labels """ # build output name fname_label_projected = sct.add_suffix(fname_label, "_projected") # open labels and segmentation im_label = Image(fname_label) im_seg = Image(fname_seg) # orient to RPI native_orient = im_seg.change_orientation('RPI') im_label.change_orientation('RPI') # smooth centerline and return fitted coordinates in voxel space centerline_x, centerline_y, centerline_z, centerline_derivx, centerline_derivy, centerline_derivz = smooth_centerline( im_seg, algo_fitting="hanning", type_window="hanning", window_length=50, nurbs_pts_number=3000, phys_coordinates=False, all_slices=True) # convert pixel into physical coordinates centerline_xyz_transposed = [ im_seg.transfo_pix2phys( [[centerline_x[i], centerline_y[i], centerline_z[i]]])[0] for i in range(len(centerline_x)) ] # transpose list centerline_phys_x, centerline_phys_y, centerline_phys_z = map( list, map(None, *centerline_xyz_transposed)) # get center of mass of label labels = im_label.getCoordinatesAveragedByValue() # initialize image of projected labels. Note that we use the space of the seg (not label). im_label_projected = im_seg.copy() im_label_projected.data = np.zeros(im_label_projected.data.shape, dtype='uint8') # loop across label values for label in labels: # convert pixel into physical coordinates for the label label_phys_x, label_phys_y, label_phys_z = im_label.transfo_pix2phys( [[label.x, label.y, label.z]])[0] # calculate distance between label and each point of the centerline distance_centerline = [ np.linalg.norm([ centerline_phys_x[i] - label_phys_x, centerline_phys_y[i] - label_phys_y, centerline_phys_z[i] - label_phys_z ]) for i in range(len(centerline_x)) ] # get the index corresponding to the min distance ind_min_distance = np.argmin(distance_centerline) # get centerline coordinate (in physical space) [min_phy_x, min_phy_y, min_phy_z] = [ centerline_phys_x[ind_min_distance], centerline_phys_y[ind_min_distance], centerline_phys_z[ind_min_distance] ] # convert coordinate to voxel space minx, miny, minz = im_seg.transfo_phys2pix( [[min_phy_x, min_phy_y, min_phy_z]])[0] # use that index to assign projected label in the centerline im_label_projected.data[minx, miny, minz] = label.value # re-orient projected labels to native orientation and save im_label_projected.change_orientation( native_orient) # note: native_orient refers to im_seg (not im_label) im_label_projected.setFileName(fname_label_projected) im_label_projected.save() return fname_label_projected
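# Illustration only (not toolbox code): project_labels_on_spinalcord() above assigns each label to the
# centerline point with the smallest Euclidean distance in physical space. A minimal numpy sketch of
# that nearest-point projection; the function name and toy centerline are made up.
import numpy as np

def project_point_on_centerline(point_xyz, centerline_xyz):
    """Return (index, coordinates) of the centerline point closest to point_xyz."""
    centerline_xyz = np.asarray(centerline_xyz, dtype=float)
    distances = np.linalg.norm(centerline_xyz - np.asarray(point_xyz, dtype=float), axis=1)
    idx = int(np.argmin(distances))
    return idx, centerline_xyz[idx]

ctl = np.stack([np.zeros(50), np.zeros(50), np.linspace(0.0, 49.0, 50)], axis=1)
idx, proj = project_point_on_centerline([3.0, -2.0, 10.4], ctl)
print(idx)  # 10: the label projects onto the centerline point at z = 10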
def execute(self): print 'Execution of the SCAD algorithm in ' + str(os.getcwd()) original_name = self.input_image.file_name vesselness_file_name = "imageVesselNessFilter.nii.gz" raw_file_name = "raw.nii" self.setup_debug_folder() if self.debug: import matplotlib.pyplot as plt # import for debug purposes # create tmp and copy input path_tmp = self.create_temporary_path() conv.convert(self.input_image.absolutepath, path_tmp + raw_file_name) if self.vesselness_provided: sct.run('cp ' + vesselness_file_name + ' ' + path_tmp + vesselness_file_name) os.chdir(path_tmp) # get input image information img = Image(raw_file_name) # save original orientation and change image to RPI self.raw_orientation = img.change_orientation() # get body symmetry if self.enable_symmetry: from msct_image import change_data_orientation sym = SymmetryDetector(raw_file_name, self.contrast, crop_xy=0) self.raw_symmetry = sym.execute() img.change_orientation(self.raw_orientation) self.output_debug_file(img, self.raw_symmetry, "body_symmetry") img.change_orientation() # vesselness filter if not self.vesselness_provided: sct.run('isct_vesselness -i ' + raw_file_name + ' -t ' + self._contrast + " -radius " + str(self.spinalcord_radius)) # load vesselness filter data and perform minimum path on it img = Image(vesselness_file_name) self.output_debug_file(img, img.data, "Vesselness_Filter") img.change_orientation() self.minimum_path_data, self.J1_min_path, self.J2_min_path = get_minimum_path( img.data, invert=1, debug=1) self.output_debug_file(img, self.minimum_path_data, "minimal_path") self.output_debug_file(img, self.J1_min_path, "J1_minimal_path") self.output_debug_file(img, self.J2_min_path, "J2_minimal_path") # Apply an exponent to the minimum path self.minimum_path_powered = np.power(self.minimum_path_data, self.minimum_path_exponent) self.output_debug_file( img, self.minimum_path_powered, "minimal_path_power_" + str(self.minimum_path_exponent)) # Saving in Image since smooth_minimal_path needs pixel dimensions img.data = self.minimum_path_powered # smooth resulting minimal path self.smoothed_min_path = smooth_minimal_path(img) self.output_debug_file(img, self.smoothed_min_path.data, "minimal_path_smooth") # normalise symmetry values between 0 and 1 if self.enable_symmetry: normalised_symmetry = normalize_array_histogram(self.raw_symmetry) self.output_debug_file(img, self.smoothed_min_path.data, "minimal_path_smooth") # multiply normalised symmetry data with the minimum path result from msct_image import change_data_orientation self.spine_detect_data = np.multiply( self.smoothed_min_path.data, change_data_orientation( np.power(normalised_symmetry, self.symmetry_exponent), self.raw_orientation, "RPI")) self.output_debug_file(img, self.spine_detect_data, "symmetry_x_min_path") # extract the centerline from the minimal path image self.centerline_with_outliers = get_centerline( self.spine_detect_data, self.spine_detect_data.shape) else: # extract the centerline from the minimal path image self.centerline_with_outliers = get_centerline( self.smoothed_min_path.data, self.smoothed_min_path.data.shape) self.output_debug_file(img, self.centerline_with_outliers, "centerline_with_outliers") # saving centerline with outliers to have img.data = self.centerline_with_outliers img.change_orientation() img.file_name = "centerline_with_outliers" img.save() # use a b-spline to smooth out the centerline x, y, z, dx, dy, dz = smooth_centerline( "centerline_with_outliers.nii.gz") # save the centerline nx, ny, nz, nt, px, py, pz, pt = img.dim 
img.data = np.zeros((nx, ny, nz)) for i in range(0, np.size(x) - 1): img.data[int(x[i]), int(y[i]), int(z[i])] = 1 self.output_debug_file(img, img.data, "centerline") img.change_orientation(self.raw_orientation) img.file_name = "centerline" img.save() # copy back centerline os.chdir('../') conv.convert(path_tmp + img.file_name + img.ext, self.output_filename) if self.rm_tmp_file == 1: import shutil shutil.rmtree(path_tmp) print "To view the output with FSL :" sct.printv( "fslview " + self.input_image.absolutepath + " " + self.output_filename + " -l Red", self.verbose, "info")
def generate_centerline(dataset_info, contrast='t1', regenerate=False): """ This function generates spinal cord centerline from binary images (either an image of centerline or segmentation) :param dataset_info: dictionary containing dataset information :param contrast: {'t1', 't2'} :return list of centerline objects """ path_data = dataset_info['path_data'] list_subjects = dataset_info['subjects'] list_centerline = [] current_path = os.getcwd() timer_centerline = sct.Timer(len(list_subjects)) timer_centerline.start() for subject_name in list_subjects: path_data_subject = path_data + subject_name + '/' + contrast + '/' fname_image_centerline = path_data_subject + contrast + dataset_info['suffix_centerline'] + '.nii.gz' fname_image_disks = path_data_subject + contrast + dataset_info['suffix_disks'] + '.nii.gz' # go to output folder sct.printv('\nExtracting centerline from ' + path_data_subject) os.chdir(path_data_subject) fname_centerline = 'centerline' # if centerline exists, we load it, if not, we compute it if os.path.isfile(fname_centerline + '.npz') and not regenerate: centerline = Centerline(fname=path_data_subject + fname_centerline + '.npz') else: # extracting intervertebral disks im = Image(fname_image_disks) coord = im.getNonZeroCoordinates(sorting='z', reverse_coord=True) coord_physical = [] for c in coord: if c.value <= 22 or c.value in [48, 49, 50, 51, 52]: # 22 corresponds to L2 c_p = im.transfo_pix2phys([[c.x, c.y, c.z]])[0] c_p.append(c.value) coord_physical.append(c_p) # extracting centerline from binary image and create centerline object with vertebral distribution x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline( fname_image_centerline, algo_fitting='nurbs', verbose=0, nurbs_pts_number=4000, all_slices=False, phys_coordinates=True, remove_outliers=False) centerline = Centerline(x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv) centerline.compute_vertebral_distribution(coord_physical) centerline.save_centerline(fname_output=fname_centerline) list_centerline.append(centerline) timer_centerline.add_iteration() timer_centerline.stop() os.chdir(current_path) return list_centerline
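# Illustration only (not toolbox code): generate_centerline() above reuses a previously saved
# centerline (.npz) when available and recomputes it otherwise. A generic sketch of that caching
# pattern; the function name is made up and compute_fn is assumed to return a dict of numpy arrays.
import os
import numpy as np

def load_or_compute(fname_npz, compute_fn, regenerate=False):
    """Load cached arrays from fname_npz if present, otherwise compute and cache them."""
    if os.path.isfile(fname_npz) and not regenerate:
        with np.load(fname_npz) as f:
            return {key: f[key] for key in f.files}
    result = compute_fn()
    np.savez(fname_npz, **result)
    return result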
def extract_centerline(fname_segmentation, remove_temp_files, name_output='', verbose=0,
                       algo_fitting='hanning', type_window='hanning', window_length=80):
    # Extract path, file and extension
    fname_segmentation = os.path.abspath(fname_segmentation)
    path_data, file_data, ext_data = sct.extract_fname(fname_segmentation)

    # create temporary folder
    path_tmp = 'tmp.' + time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir ' + path_tmp)

    # copy files into tmp folder
    sct.run('cp ' + fname_segmentation + ' ' + path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    # Change orientation of the input centerline into RPI
    sct.printv('\nOrient centerline to RPI orientation...', verbose)
    fname_segmentation_orient = 'segmentation_rpi' + ext_data
    set_orientation(file_data + ext_data, 'RPI', fname_segmentation_orient)

    # Get dimensions
    sct.printv('\nGet dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_segmentation_orient).dim
    sct.printv('.. matrix size: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose)
    sct.printv('.. voxel size: ' + str(px) + 'mm x ' + str(py) + 'mm x ' + str(pz) + 'mm', verbose)

    # Extract orientation of the input segmentation
    orientation = get_orientation(file_data + ext_data)
    sct.printv('\nOrientation of segmentation image: ' + orientation, verbose)

    sct.printv('\nOpen segmentation volume...', verbose)
    file = nibabel.load(fname_segmentation_orient)
    data = file.get_data()
    hdr = file.get_header()

    # Extract min and max index in Z direction
    X, Y, Z = (data > 0).nonzero()
    min_z_index, max_z_index = min(Z), max(Z)
    x_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
    y_centerline = [0 for i in range(0, max_z_index - min_z_index + 1)]
    z_centerline = [iz for iz in range(min_z_index, max_z_index + 1)]

    # Extract segmentation points and average per slice
    for iz in range(min_z_index, max_z_index + 1):
        x_seg, y_seg = (data[:, :, iz] > 0).nonzero()
        x_centerline[iz - min_z_index] = np.mean(x_seg)
        y_centerline[iz - min_z_index] = np.mean(y_seg)
    for k in range(len(X)):
        data[X[k], Y[k], Z[k]] = 0

    # extract centerline and smooth it
    x_centerline_fit, y_centerline_fit, z_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
        fname_segmentation_orient, type_window=type_window, window_length=window_length,
        algo_fitting=algo_fitting, verbose=verbose)

    if verbose == 2:
        import matplotlib.pyplot as plt

        # Creation of a vector x that takes into account the distance between the labels
        nz_nonz = len(z_centerline)
        x_display = [0 for i in range(x_centerline_fit.shape[0])]
        y_display = [0 for i in range(y_centerline_fit.shape[0])]
        for i in range(0, nz_nonz, 1):
            x_display[int(z_centerline[i] - z_centerline[0])] = x_centerline[i]
            y_display[int(z_centerline[i] - z_centerline[0])] = y_centerline[i]

        plt.figure(1)
        plt.subplot(2, 1, 1)
        plt.plot(z_centerline_fit, x_display, 'ro')
        plt.plot(z_centerline_fit, x_centerline_fit)
        plt.xlabel("Z")
        plt.ylabel("X")
        plt.title("x and x_fit coordinates")

        plt.subplot(2, 1, 2)
        plt.plot(z_centerline_fit, y_display, 'ro')
        plt.plot(z_centerline_fit, y_centerline_fit)
        plt.xlabel("Z")
        plt.ylabel("Y")
        plt.title("y and y_fit coordinates")
        plt.show()

    # Create an image with the centerline
    for iz in range(min_z_index, max_z_index + 1):
        # if the index is out of bounds here (for 'hanning'), the segmentation either has holes or extra labels have been added to the file
        data[int(round(x_centerline_fit[iz - min_z_index])), int(round(y_centerline_fit[iz - min_z_index])), iz] = 1

    # Write the centerline image in RPI orientation
    hdr.set_data_dtype('uint8')  # set image type to uint8
    sct.printv('\nWrite NIFTI volumes...', verbose)
    img = nibabel.Nifti1Image(data, None, hdr)
    nibabel.save(img, 'centerline.nii.gz')

    # Define name if output name is not specified
    if name_output == 'csa_volume.nii.gz' or name_output == '':
        # sct.generate_output_file('centerline.nii.gz', file_data + '_centerline' + ext_data, verbose)
        name_output = file_data + '_centerline' + ext_data
    sct.generate_output_file('centerline.nii.gz', name_output, verbose)

    # create a txt file with the centerline
    path, rad_output, ext = sct.extract_fname(name_output)
    name_output_txt = rad_output + '.txt'
    sct.printv('\nWrite text file...', verbose)
    file_results = open(name_output_txt, 'w')
    for i in range(min_z_index, max_z_index + 1):
        file_results.write(str(int(i)) + ' ' + str(x_centerline_fit[i - min_z_index]) + ' ' + str(y_centerline_fit[i - min_z_index]) + '\n')
    file_results.close()

    # Copy result into parent folder
    sct.run('cp ' + name_output_txt + ' ../')

    del data

    # come back to parent folder
    os.chdir('..')

    # Change orientation of the output centerline into input orientation
    sct.printv('\nOrient centerline image to input orientation: ' + orientation, verbose)
    fname_segmentation_orient = 'tmp.segmentation_rpi' + ext_data
    set_orientation(path_tmp + '/' + name_output, orientation, name_output)

    # Remove temporary files
    if remove_temp_files:
        sct.printv('\nRemove temporary files...', verbose)
        sct.run('rm -rf ' + path_tmp, verbose)

    return name_output
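# The function above averages the segmentation coordinates slice by slice before smoothing them.
# The sketch below isolates that per-slice center-of-mass step on a toy numpy volume; it illustrates
# the idea only and is not part of the pipeline.
def _sketch_slicewise_center_of_mass():
    import numpy as np
    # toy binary "segmentation": a small square on every axial slice
    seg = np.zeros((20, 20, 10))
    seg[8:13, 9:14, :] = 1
    x_centerline, y_centerline, z_centerline = [], [], []
    for iz in range(seg.shape[2]):
        x_seg, y_seg = (seg[:, :, iz] > 0).nonzero()
        if x_seg.size:  # skip empty slices
            x_centerline.append(np.mean(x_seg))
            y_centerline.append(np.mean(y_seg))
            z_centerline.append(iz)
    return x_centerline, y_centerline, z_centerline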
def generate_initial_template_space(dataset_info, points_average_centerline, position_template_disks):
    """
    This function generates the initial template space, on which all images will be registered.
    :param dataset_info: dictionary containing dataset information
    :param points_average_centerline: list of points (x, y, z) of the average spinal cord and brainstem centerline
    :param position_template_disks: index of intervertebral disks along the template centerline
    :return:
    """
    # initializing variables
    path_template = dataset_info['path_template']
    x_size_of_template_space, y_size_of_template_space = 201, 201
    spacing = 0.5

    # creating template space
    size_template_z = int(abs(points_average_centerline[0][2] - points_average_centerline[-1][2]) / spacing) + 15
    template_space = Image([x_size_of_template_space, y_size_of_template_space, size_template_z])
    template_space.data = np.zeros((x_size_of_template_space, y_size_of_template_space, size_template_z))
    template_space.hdr.set_data_dtype('float32')
    origin = [points_average_centerline[-1][0] + x_size_of_template_space * spacing / 2.0,
              points_average_centerline[-1][1] - y_size_of_template_space * spacing / 2.0,
              (points_average_centerline[-1][2] - spacing)]
    template_space.hdr.as_analyze_map()['dim'] = [3.0, x_size_of_template_space, y_size_of_template_space, size_template_z, 1.0, 1.0, 1.0, 1.0]
    template_space.hdr.as_analyze_map()['qoffset_x'] = origin[0]
    template_space.hdr.as_analyze_map()['qoffset_y'] = origin[1]
    template_space.hdr.as_analyze_map()['qoffset_z'] = origin[2]
    template_space.hdr.as_analyze_map()['srow_x'][-1] = origin[0]
    template_space.hdr.as_analyze_map()['srow_y'][-1] = origin[1]
    template_space.hdr.as_analyze_map()['srow_z'][-1] = origin[2]
    template_space.hdr.as_analyze_map()['srow_x'][0] = -spacing
    template_space.hdr.as_analyze_map()['srow_y'][1] = spacing
    template_space.hdr.as_analyze_map()['srow_z'][2] = spacing
    template_space.hdr.set_sform(template_space.hdr.get_sform())
    template_space.hdr.set_qform(template_space.hdr.get_sform())
    template_space.setFileName(path_template + 'template_space.nii.gz')
    template_space.save(type='uint8')

    # generate template centerline as an image
    image_centerline = template_space.copy()
    for coord in points_average_centerline:
        coord_pix = image_centerline.transfo_phys2pix([coord])[0]
        if 0 <= coord_pix[0] < image_centerline.data.shape[0] and 0 <= coord_pix[1] < image_centerline.data.shape[1] and 0 <= coord_pix[2] < image_centerline.data.shape[2]:
            image_centerline.data[int(coord_pix[0]), int(coord_pix[1]), int(coord_pix[2])] = 1
    image_centerline.setFileName(path_template + 'template_centerline.nii.gz')
    image_centerline.save(type='float32')

    # generate template disks position
    coord_physical = []
    image_disks = template_space.copy()
    for disk in position_template_disks:
        label = labels_regions[disk]
        coord = position_template_disks[disk]
        coord_pix = image_disks.transfo_phys2pix([coord])[0]

        coord = coord.tolist()
        coord.append(label)
        coord_physical.append(coord)
        if 0 <= coord_pix[0] < image_disks.data.shape[0] and 0 <= coord_pix[1] < image_disks.data.shape[1] and 0 <= coord_pix[2] < image_disks.data.shape[2]:
            image_disks.data[int(coord_pix[0]), int(coord_pix[1]), int(coord_pix[2])] = label
        else:
            sct.printv(str(coord_pix))
            sct.printv('ERROR: the disk label ' + str(disk) + ' is not in the template image.')
    image_disks.setFileName(path_template + 'template_disks.nii.gz')
    image_disks.save(type='uint8')

    # generate template centerline as a npz file
    x_centerline_fit, y_centerline_fit, z_centerline, \
        x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
            path_template + 'template_centerline.nii.gz', algo_fitting='nurbs', verbose=0, nurbs_pts_number=4000,
            all_slices=False, phys_coordinates=True, remove_outliers=True)
    centerline_template = Centerline(x_centerline_fit, y_centerline_fit, z_centerline,
                                     x_centerline_deriv, y_centerline_deriv, z_centerline_deriv)
    centerline_template.compute_vertebral_distribution(coord_physical)
    centerline_template.save_centerline(fname_output=path_template + 'template_centerline')
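# generate_initial_template_space() defines the template geometry by writing the qoffset/srow header
# fields by hand. The sketch below expresses the same kind of 0.5 mm isotropic space as an explicit
# 4x4 affine passed to nibabel directly; the in-plane matrix size and the sign convention match the
# srow values above, but nz and the origin are placeholders, and this is not the code path used here.
def _sketch_template_space_with_nibabel(fname_out='template_space_sketch.nii.gz'):
    import numpy as np
    import nibabel as nib
    spacing = 0.5
    nx, ny, nz = 201, 201, 300                           # nz is arbitrary in this sketch
    origin = (50.0, -50.0, -75.0)                        # hypothetical qoffset_x/y/z
    affine = np.diag([-spacing, spacing, spacing, 1.0])  # -x spacing, +y/+z spacing, as in the srow fields above
    affine[:3, 3] = origin
    img = nib.Nifti1Image(np.zeros((nx, ny, nz), dtype=np.uint8), affine)
    nib.save(img, fname_out)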
def compute_csa(fname_segmentation, name_method, volume_output, verbose, remove_temp_files, step, smoothing_param,
                figure_fit, name_output, slices, vert_levels, path_to_template,
                algo_fitting='hanning', type_window='hanning', window_length=80):

    # Extract path, file and extension
    fname_segmentation = os.path.abspath(fname_segmentation)
    path_data, file_data, ext_data = sct.extract_fname(fname_segmentation)

    # create temporary folder
    sct.printv('\nCreate temporary folder...', verbose)
    path_tmp = sct.slash_at_the_end('tmp.' + time.strftime("%y%m%d%H%M%S"), 1)
    sct.run('mkdir ' + path_tmp, verbose)

    # Copy input data to tmp folder and convert to nii
    sct.printv('\nCopy input data to tmp folder and convert to nii...', verbose)
    sct.run('isct_c3d ' + fname_segmentation + ' -o ' + path_tmp + 'segmentation.nii')

    # go to tmp folder
    os.chdir(path_tmp)

    # Change orientation of the input segmentation into RPI
    sct.printv('\nChange orientation of the input segmentation into RPI...', verbose)
    fname_segmentation_orient = set_orientation('segmentation.nii', 'RPI', 'segmentation_orient.nii')

    # Get size of data
    sct.printv('\nGet data dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_segmentation_orient).dim
    sct.printv('  ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose)

    # Open segmentation volume
    sct.printv('\nOpen segmentation volume...', verbose)
    file_seg = nibabel.load(fname_segmentation_orient)
    data_seg = file_seg.get_data()
    hdr_seg = file_seg.get_header()

    # Extract min and max index in Z direction
    X, Y, Z = (data_seg > 0).nonzero()
    min_z_index, max_z_index = min(Z), max(Z)
    # Xp, Yp = (data_seg[:, :, 0] >= 0).nonzero()  # X and Y range

    # extract centerline and smooth it
    x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
        fname_segmentation_orient, algo_fitting=algo_fitting, type_window=type_window,
        window_length=window_length, verbose=verbose)
    z_centerline_scaled = [x * pz for x in z_centerline]

    # Compute CSA
    sct.printv('\nCompute CSA...', verbose)

    # Empty array in which CSA for each z slice will be stored
    csa = np.zeros(max_z_index - min_z_index + 1)
    # csa = [0.0 for i in xrange(0, max_z_index - min_z_index + 1)]

    for iz in xrange(0, len(z_centerline)):
        # compute the vector normal to the plane
        normal = normalize(np.array([x_centerline_deriv[iz], y_centerline_deriv[iz], z_centerline_deriv[iz]]))

        # compute the angle between the normal vector of the plane and the vector z
        angle = np.arccos(np.dot(normal, [0, 0, 1]))

        # compute the number of voxels, assuming the segmentation is coded for partial volume effect between 0 and 1
        number_voxels = np.sum(data_seg[:, :, iz + min_z_index])

        # compute CSA, by scaling with voxel size (in mm) and adjusting for oblique plane
        csa[iz] = number_voxels * px * py * np.cos(angle)

    if smoothing_param:
        from msct_smooth import smoothing_window
        sct.printv('\nSmooth CSA across slices...', verbose)
        sct.printv('.. Hanning window: ' + str(smoothing_param) + ' mm', verbose)
        csa_smooth = smoothing_window(csa, window_len=smoothing_param / pz, window='hanning', verbose=0)
        # display figure
        if verbose == 2:
            import matplotlib.pyplot as plt
            plt.figure()
            pltx, = plt.plot(z_centerline_scaled, csa, 'bo')
            pltx_fit, = plt.plot(z_centerline_scaled, csa_smooth, 'r', linewidth=2)
            plt.title("Cross-sectional area (CSA)")
            plt.xlabel('z (mm)')
            plt.ylabel('CSA (mm^2)')
            plt.legend([pltx, pltx_fit], ['Raw', 'Smoothed'])
            plt.show()
        # update variable
        csa = csa_smooth

    # Create output text file
    sct.printv('\nWrite text file...', verbose)
    file_results = open('csa.txt', 'w')
    for i in range(min_z_index, max_z_index + 1):
        file_results.write(str(int(i)) + ',' + str(csa[i - min_z_index]) + '\n')
        # Display results
        sct.printv('z=' + str(i - min_z_index) + ': ' + str(csa[i - min_z_index]) + ' mm^2', verbose, 'bold')
    file_results.close()

    # output volume of csa values
    if volume_output:
        sct.printv('\nCreate volume of CSA values...', verbose)
        # get orientation of the input data
        orientation = get_orientation('segmentation.nii')
        data_seg = data_seg.astype(np.float32, copy=False)
        # loop across slices
        for iz in range(min_z_index, max_z_index + 1):
            # retrieve seg pixels
            x_seg, y_seg = (data_seg[:, :, iz] > 0).nonzero()
            seg = [[x_seg[i], y_seg[i]] for i in range(0, len(x_seg))]
            # loop across pixels in segmentation
            for i in seg:
                # replace value with csa value
                data_seg[i[0], i[1], iz] = csa[iz - min_z_index]
        # create header
        hdr_seg.set_data_dtype('float32')  # set image type to float32
        # save volume
        img = nibabel.Nifti1Image(data_seg, None, hdr_seg)
        nibabel.save(img, 'csa_RPI.nii')

        # Change orientation of the output volume into input orientation
        fname_csa_volume = set_orientation('csa_RPI.nii', orientation, 'csa_RPI_orient.nii')

    # come back to parent folder
    os.chdir('..')

    # Generate output files
    sct.printv('\nGenerate output files...', verbose)
    from shutil import copyfile
    copyfile(path_tmp + 'csa.txt', path_data + param.fname_csa)
    # sct.generate_output_file(path_tmp + 'csa.txt', path_data + param.fname_csa)  # extension already included in param.fname_csa
    if volume_output:
        sct.generate_output_file(fname_csa_volume, path_data + name_output)  # extension already included in name_output

    # average csa across vertebral levels or slices if asked (flag -z or -l)
    if slices or vert_levels:

        if vert_levels and not path_to_template:
            sct.printv('\nERROR: Path to template is missing. See usage.\n', 1, 'error')
            sys.exit(2)
        elif vert_levels and path_to_template:
            abs_path_to_template = os.path.abspath(path_to_template)

        # go to tmp folder
        os.chdir(path_tmp)

        # create temporary folder
        sct.printv('\nCreate temporary folder to average csa...', verbose)
        path_tmp_extract_metric = sct.slash_at_the_end('label_temp', 1)
        sct.run('mkdir ' + path_tmp_extract_metric, verbose)

        # Copy the segmentation into the temporary folder
        sct.printv('\nCopy data to tmp folder...', verbose)
        sct.run('cp ' + fname_segmentation + ' ' + path_tmp_extract_metric)

        # create file info_label
        path_fname_seg, file_fname_seg, ext_fname_seg = sct.extract_fname(fname_segmentation)
        create_info_label('info_label.txt', path_tmp_extract_metric, file_fname_seg + ext_fname_seg)

        # average CSA
        if slices:
            os.system("sct_extract_metric -i " + path_data + name_output + " -f " + path_tmp_extract_metric + " -m wa -o ../csa_mean.txt -z " + slices)
        if vert_levels:
            sct.run('cp -R ' + abs_path_to_template + ' .')
            os.system("sct_extract_metric -i " + path_data + name_output + " -f " + path_tmp_extract_metric + " -m wa -o ../csa_mean.txt -v " + vert_levels)

        os.chdir('..')

        # Remove temporary files
        print('\nRemove temporary folder used to average CSA...')
        sct.run('rm -rf ' + path_tmp_extract_metric)

    # Remove temporary files
    if remove_temp_files:
        print('\nRemove temporary files...')
        sct.run('rm -rf ' + path_tmp)
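# In compute_csa(), each slice's area is the (partial-volume) voxel count scaled by the in-plane voxel
# area and corrected by the cosine of the angle between the centerline tangent and the z axis. The
# sketch below reproduces just that formula on toy numbers (illustrative values only).
def _sketch_csa_for_one_slice():
    import numpy as np
    px, py = 0.5, 0.5                          # in-plane voxel size (mm)
    number_voxels = 320.0                      # sum of the segmentation values on the slice
    tangent = np.array([0.1, 0.05, 1.0])       # centerline derivative at this slice
    normal = tangent / np.linalg.norm(tangent)
    angle = np.arccos(np.dot(normal, [0.0, 0.0, 1.0]))
    csa = number_voxels * px * py * np.cos(angle)  # mm^2
    return csa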
def vertebral_detection(fname, fname_seg, contrast):

    shift_AP = 14  # shift the centerline on the spine in mm (default: 17 mm)
    size_AP = 3  # mean around the centerline in the anterior-posterior direction in mm
    size_RL = 3  # mean around the centerline in the right-left direction in mm
    verbose = param.verbose

    if verbose:
        import matplotlib.pyplot as plt

    # open anatomical volume
    img = Image(fname)
    # orient to RPI
    img.change_orientation()
    # get dimension
    nx, ny, nz, nt, px, py, pz, pt = img.dim

    #==================================================
    # Compute intensity profile across vertebrae
    #==================================================

    shift_AP = shift_AP * py
    size_AP = size_AP * py
    size_RL = size_RL * px

    # orient segmentation to RPI
    run('sct_orientation -i ' + fname_seg + ' -s RPI')

    # smooth segmentation/centerline
    path_centerline, file_centerline, ext_centerline = extract_fname(fname_seg)
    x, y, z, Tx, Ty, Tz = smooth_centerline(path_centerline + file_centerline + '_RPI' + ext_centerline)

    # build intensity profile along the centerline
    I = np.zeros((len(y), 1))

    # mask where the intensity profile will be taken
    if verbose == 2:
        mat = img.copy()
        mat.data = np.zeros(mat.dim)

    for iz in range(len(z)):
        # define vector orthogonal to the cord in RL direction
        P1 = np.array([1, 0, -Tx[iz] / Tz[iz]])
        P1 = P1 / np.linalg.norm(P1)
        # define vector orthogonal to the cord in AP direction
        P2 = np.array([0, 1, -Ty[iz] / Tz[iz]])
        P2 = P2 / np.linalg.norm(P2)
        # define X and Y coordinates of the voxels to extract the intensity profile from
        indexRL = range(-np.int(round(size_RL)), np.int(round(size_RL)))
        indexAP = range(0, np.int(round(size_AP))) + np.array(shift_AP)
        # loop over coordinates of the perpendicular plane
        for i_RL in indexRL:
            for i_AP in indexAP:
                i_vect = np.round(np.array([x[iz], y[iz], z[iz]]) + P1 * i_RL + P2 * i_AP)
                i_vect = np.minimum(np.maximum(i_vect, 0), np.array([nx, ny, nz]) - 1)  # check that the index stays within the image dimensions
                I[iz] = I[iz] + img.data[i_vect[0], i_vect[1], i_vect[2]]

                # create a mask with this perpendicular plane
                if verbose == 2:
                    mat.data[i_vect[0], i_vect[1], i_vect[2]] = 1

    if verbose == 2:
        mat.file_name = 'mask'
        mat.save()

    # Detrend intensity
    start_centerline_y = y[0]
    X = np.where(I == 0)
    mask2 = np.ones((len(y), 1), dtype=bool)
    mask2[X, 0] = False

    # high-pass filtering to remove the low-frequency trend (the commented-out line is the earlier low-pass variant)
    import scipy.signal
    frequency = 2 / pz
    Wn = 0.1 / frequency
    N = 2  # order of the filter
    # b, a = scipy.signal.butter(N, Wn, btype='low', analog=False, output='ba')
    b, a = scipy.signal.iirfilter(N, Wn, rp=None, rs=None, btype='high', analog=False, ftype='bessel', output='ba')
    I_detrend = scipy.signal.filtfilt(b, a, I[:, 0], axis=-1, padtype='constant', padlen=None)
    I_detrend = I_detrend / (np.amax(I_detrend))

    #==================================================
    # step 1 : Find the first peak
    #==================================================
    if contrast == 't1':
        I_detrend2 = np.diff(I_detrend)
    elif contrast == 't2':
        space = np.linspace(-10 / pz, 10 / pz, round(21 / pz), endpoint=True)
        pattern = (np.sinc((space * pz) / 20)) ** 20
        I_corr = scipy.signal.correlate(-I_detrend.squeeze().squeeze() + 1, pattern, 'same')

        b, a = scipy.signal.iirfilter(N, Wn, rp=None, rs=None, btype='high', analog=False, ftype='bessel', output='ba')
        I_detrend2 = scipy.signal.filtfilt(b, a, I_corr, axis=-1, padtype='constant', padlen=None)

    I_detrend2[I_detrend2 < 0.2] = 0
    ind_locs = np.squeeze(scipy.signal.argrelextrema(I_detrend2, np.greater))

    # remove peaks that are too close to each other
    locsdiff = np.diff(z[ind_locs])
    ind = locsdiff > 10
    ind_locs = np.hstack((ind_locs[ind], ind_locs[-1]))
    locs = z[ind_locs]

    if verbose == 2:
        # x=0: most caudal, x=max: most rostral
        plt.figure()
        plt.plot(I_detrend2)
        plt.plot(ind_locs, I_detrend2[ind_locs], '+')
        plt.show()

    #=====================================================================================
    # step 2 : Cross correlation between the adjusted template and the intensity profile.
    #          Local moving of the template's peak from the first peak already found
    #=====================================================================================
    # For each loop, a peak is located at the most likely position and then a local adjustment is done.
    # The position of the next peak is calculated from previous positions.
    # TODO: use mean_distance
    mean_distance = [12.1600, 20.8300, 18.0000, 16.0000, 15.1667, 15.3333, 15.8333, 18.1667, 18.6667, 18.6667,
                     19.8333, 20.6667, 21.6667, 22.3333, 23.8333, 24.1667, 26.0000, 28.6667, 30.5000, 33.5000,
                     33.0000, 31.3330]

    # Creating pattern
    printv('\nFinding cross-correlation between the adjusted template and the intensity profile...', verbose)
    space = np.linspace(-10 / pz, 10 / pz, round(21 / pz), endpoint=True)
    pattern = (np.sinc((space * pz) / 20)) ** 20
    I_corr = scipy.signal.correlate(I_detrend2.squeeze().squeeze() + 1, pattern, 'same')

    # level_start = 1
    # if contrast == 'T1':
    #     mean_distance = mean_distance[level_start-1:len(mean_distance)]
    #     xmax_pattern = np.argmax(pattern)
    # else:
    #     mean_distance = mean_distance[level_start+1:len(mean_distance)]
    #     xmax_pattern = np.argmin(pattern)  # position of the peak in the pattern
    # pixend = len(pattern) - xmax_pattern  # number of pixels after the peak in the pattern
    #
    # mean_distance_new = mean_distance
    # mean_ratio = np.zeros(len(mean_distance))
    #
    # L = np.round(1.2*max(mean_distance)) - np.round(0.8*min(mean_distance))
    # corr_peak = np.zeros((L, len(mean_distance)))
    # corr_peak = np.nan  # for T2
    #
    # # loop on each peak
    # for i_peak in range(len(mean_distance)):
    #     scale_min = np.round(0.80*mean_distance_new[i_peak]) - xmax_pattern - pixend
    #     if scale_min < 0:
    #         scale_min = 0
    #
    #     scale_max = np.round(1.2*mean_distance_new[i_peak]) - xmax_pattern - pixend
    #     scale_peak = np.arange(scale_min, scale_max+1)
    #
    #     for i_scale in range(len(scale_peak)):
    #         template_resize_peak = np.concatenate([template_truncated, np.zeros(scale_peak[i_scale]), pattern])
    #         if len(I_detrend[:, 0]) > len(template_resize_peak):
    #             template_resize_peak1 = np.concatenate((template_resize_peak, np.zeros(len(I_detrend[:, 0])-len(template_resize_peak))))
    #
    #         # cross correlation
    #         corr_template = scipy.signal.correlate(I_detrend[:, 0], template_resize_peak)
    #
    #         if len(I_detrend[:, 0]) > len(template_resize_peak):
    #             val = np.dot(I_detrend[:, 0], template_resize_peak1.T)
    #         else:
    #             I_detrend_2 = np.concatenate((I_detrend[:, 0], np.zeros(len(template_resize_peak)-len(I_detrend[:, 0]))))
    #             val = np.dot(I_detrend_2, template_resize_peak.T)
    #         corr_peak[i_scale, i_peak] = val
    #
    #         if verbose:
    #             plt.xlim(0, len(I_detrend[:, 0]))
    #             plt.plot(I_detrend[:, 0])
    #             plt.plot(template_resize_peak)
    #             plt.show(block=False)
    #
    #             plt.plot(corr_peak[:, i_peak], marker='+', linestyle='None', color='r')
    #             plt.title('correlation value against the displacement of the peak (px)')
    #             plt.show(block=False)
    #
    #     max_peak = np.amax(corr_peak[:, i_peak])
    #     index_scale_peak = np.where(corr_peak[:, i_peak] == max_peak)
    #     good_scale_peak = scale_peak[index_scale_peak][0]
    #     Mcorr = Mcorr1
    #     Mcorr = np.resize(Mcorr, i_peak+2)
    #     Mcorr[i_peak+1] = np.amax(corr_peak[:, 0:(i_peak+1)])
    #     flag = 0
    #
    #     # If the correlation coefficient is too low, put the peak at the mean position
    #     if i_peak > 0:
    #         if (Mcorr[i_peak+1]-Mcorr[i_peak]) < 0.4*np.mean(Mcorr[1:i_peak+2]-Mcorr[0:i_peak+1]):
    #             test = i_peak
    #             template_resize_peak = np.concatenate((template_truncated, np.zeros(round(mean_distance[i_peak])-xmax_pattern-pixend), pattern))
    #             good_scale_peak = np.round(mean_distance[i_peak]) - xmax_pattern - pixend
    #             flag = 1
    #     if i_peak == 0:
    #         if (Mcorr[i_peak+1] - Mcorr[i_peak]) < 0.4*Mcorr[0]:
    #             template_resize_peak = np.concatenate((template_truncated, np.zeros(round(mean_distance[i_peak])-xmax_pattern-pixend), pattern))
    #             good_scale_peak = round(mean_distance[i_peak]) - xmax_pattern - pixend
    #             flag = 1
    #     if flag == 0:
    #         template_resize_peak = np.concatenate((template_truncated, np.zeros(good_scale_peak), pattern))
    #
    #     # update mean-distance by an adjustment ratio
    #     mean_distance_new[i_peak] = good_scale_peak + xmax_pattern + pixend
    #     mean_ratio[i_peak] = np.mean(mean_distance_new[:, 0:i_peak]/mean_distance[:, 0:i_peak])
    #
    #     template_truncated = template_resize_peak
    #
    #     if verbose:
    #         plt.plot(I_detrend[:, 0])
    #         plt.plot(template_truncated)
    #         plt.xlim(0, (len(I_detrend[:, 0])-1))
    #         plt.show()
    #
    # # finding the maxima of the adjusted template
    # minpeakvalue = 0.5
    # loc_disk = np.arange(len(template_truncated))
    # index_disk = []
    # for i in range(len(template_truncated)):
    #     if template_truncated[i] >= minpeakvalue:
    #         if i == 0:
    #             if template_truncated[i] < template_truncated[i+1]:
    #                 index_disk.append(i)
    #         elif i == (len(template_truncated)-1):
    #             if template_truncated[i] < template_truncated[i-1]:
    #                 index_disk.append(i)
    #         else:
    #             if template_truncated[i] < template_truncated[i+1]:
    #                 index_disk.append(i)
    #             elif template_truncated[i] < template_truncated[i-1]:
    #                 index_disk.append(i)
    #     else:
    #         index_disk.append(i)
    #
    # mask_disk = np.ones(len(template_truncated), dtype=bool)
    # mask_disk[index_disk] = False
    # loc_disk = loc_disk[mask_disk]
    # X1 = np.where(loc_disk > I_detrend.shape[0])
    # mask_disk1 = np.ones(len(loc_disk), dtype=bool)
    # mask_disk1[X1] = False
    # loc_disk = loc_disk[mask_disk1]
    # loc_disk = loc_disk + start_centerline_y - 1

    #=====================================================================
    # Step 3: Label segmentation
    #=====================================================================

    # # Project vertebral levels back to the centerline
    # centerline = Image(fname_seg)
    # raw_orientation = centerline.change_orientation()
    # centerline.data[:, :, :] = 0
    # for iz in range(locs[0]):
    #     centerline.data[np.round(x[iz]), np.round(y[iz]), iz] = 1
    # for i in range(len(locs)-1):
    #     for iz in range(locs[i], min(locs[i+1], len(z))):
    #         centerline.data[np.round(x[iz]), np.round(y[iz]), iz] = i+2
    # for iz in range(locs[-1], len(z)):
    #     centerline.data[np.round(x[iz]), np.round(y[iz]), iz] = i+3
    #
    # # centerline.change_orientation(raw_orientation)
    # centerline.file_name += '_labeled'
    # centerline.save()

    # Label segmentation with vertebral number
    # Method: loop across all voxels of the segmentation, project each voxel to the line passing through the vertebrae
    # (using minimum distance) and assign vertebral level.
    printv('\nLabel segmentation...', verbose)
    seg = Image(fname_seg)
    seg_raw_orientation = seg.change_orientation()
    # find all voxels belonging to segmentation
    x_seg, y_seg, z_seg = np.where(seg.data)
    # loop across voxels in segmentation
    for ivox in range(len(x_seg)):
        # get voxel coordinate
        vox_coord = np.array([x_seg[ivox], y_seg[ivox], z_seg[ivox]])
        # find closest point to the curved line passing through the vertebrae
        for iplane in range(len(locs)):
            ind = np.where(z == locs[iplane])
            vox_vector = vox_coord - np.hstack((x[ind], y[ind], z[ind]))
            normal2plane_vector = np.hstack((Tx[ind], Ty[ind], Tz[ind]))  # Tx, Ty and Tz are the derivatives of the centerline

            # if the voxel is above the plane --> assign the number of the plane
            if np.dot(vox_vector, normal2plane_vector) > 0:
                seg.data[vox_coord[0], vox_coord[1], vox_coord[2]] = iplane + 2
            else:  # if the voxel is below the plane --> move to the next voxel
                break

    seg.change_orientation(seg_raw_orientation)
    seg.file_name += '_labeled'
    seg.save()

    return locs
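# The labeling loop above decides whether a voxel lies above or below the plane of each detected disk
# by taking the dot product between (voxel - centerline point) and the centerline tangent at that disk.
# The sketch below shows that test in isolation with made-up coordinates (illustration only).
def _sketch_plane_side_test():
    import numpy as np
    disk_point = np.array([10.0, 12.0, 40.0])   # centerline point at a detected disk
    tangent = np.array([0.0, 0.1, 1.0])         # centerline derivative at that disk
    voxel = np.array([11.0, 12.0, 43.0])
    above_plane = np.dot(voxel - disk_point, tangent) > 0
    return above_plane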
def extract_centerline(segmentation, verbose=0, algo_fitting='hanning', type_window='hanning', window_length=5,
                       use_phys_coord=True, file_out='centerline'):
    """
    Extract centerline from a binary or weighted segmentation by computing the center of mass slicewise.
    :param segmentation: input segmentation. Could be either an Image or a file name.
    :param verbose:
    :param algo_fitting:
    :param type_window:
    :param window_length:
    :param use_phys_coord: TODO: Explain the pros/cons of use_phys_coord.
    :param file_out:
    :return: None
    """
    # TODO: output continuous centerline (and add in unit test)
    # TODO: centerline coordinate should have the same orientation as the input image
    # TODO: no need for unnecessary i/o. Everything could be done in RAM

    # Create temp folder
    path_tmp = sct.tmp_create()

    # Open segmentation volume
    im_seg = msct_image.Image(segmentation)
    # im_seg.change_orientation('RPI', generate_path=True)
    native_orientation = im_seg.orientation
    im_seg.change_orientation("RPI", generate_path=True).save(path_tmp, mutable=True)
    fname_tmp_seg = im_seg.absolutepath

    # extract centerline and smooth it
    if use_phys_coord:
        # fit centerline, smooth it and return the first derivative (in physical space)
        x_centerline_fit, y_centerline_fit, z_centerline, \
            x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
                fname_tmp_seg, algo_fitting=algo_fitting, type_window=type_window, window_length=window_length,
                nurbs_pts_number=3000, phys_coordinates=True, verbose=verbose, all_slices=False)
        centerline = Centerline(x_centerline_fit, y_centerline_fit, z_centerline,
                                x_centerline_deriv, y_centerline_deriv, z_centerline_deriv)

        # average centerline coordinates over slices of the image (floating point)
        x_centerline_fit_rescorr, y_centerline_fit_rescorr, z_centerline_rescorr, \
            x_centerline_deriv_rescorr, y_centerline_deriv_rescorr, z_centerline_deriv_rescorr = \
            centerline.average_coordinates_over_slices(im_seg)

        # compute z_centerline in image coordinates (discrete)
        voxel_coordinates = im_seg.transfo_phys2pix(
            [[x_centerline_fit_rescorr[i], y_centerline_fit_rescorr[i], z_centerline_rescorr[i]]
             for i in range(len(z_centerline_rescorr))])
        x_centerline_voxel = [coord[0] for coord in voxel_coordinates]
        y_centerline_voxel = [coord[1] for coord in voxel_coordinates]
        z_centerline_voxel = [coord[2] for coord in voxel_coordinates]

    else:
        # fit centerline, smooth it and return the first derivative (in voxel space but FITTED coordinates)
        x_centerline_voxel, y_centerline_voxel, z_centerline_voxel, \
            x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
                'segmentation_RPI.nii.gz', algo_fitting=algo_fitting, type_window=type_window,
                window_length=window_length, nurbs_pts_number=3000, phys_coordinates=False, verbose=verbose,
                all_slices=True)

    if verbose == 2:
        # TODO: code below does not work
        import matplotlib.pyplot as plt

        # Creation of a vector x that takes into account the distance between the labels
        nz_nonz = len(z_centerline_voxel)
        x_display = [0 for i in range(x_centerline_voxel.shape[0])]
        y_display = [0 for i in range(y_centerline_voxel.shape[0])]
        for i in range(0, nz_nonz, 1):
            x_display[int(z_centerline_voxel[i] - z_centerline_voxel[0])] = x_centerline[i]
            y_display[int(z_centerline_voxel[i] - z_centerline_voxel[0])] = y_centerline[i]

        plt.figure(1)
        plt.subplot(2, 1, 1)
        plt.plot(z_centerline_voxel, x_display, 'ro')
        plt.plot(z_centerline_voxel, x_centerline_voxel)
        plt.xlabel("Z")
        plt.ylabel("X")
        plt.title("x and x_fit coordinates")

        plt.subplot(2, 1, 2)
        plt.plot(z_centerline_voxel, y_display, 'ro')
        plt.plot(z_centerline_voxel, y_centerline_voxel)
        plt.xlabel("Z")
        plt.ylabel("Y")
        plt.title("y and y_fit coordinates")
        plt.show()

    # Create an image with the centerline
    # TODO: write the center of mass, not the discrete image coordinate (issue #1938)
    im_centerline = im_seg.copy()
    data_centerline = im_centerline.data * 0
    # Find z-boundaries above which and below which there are no non-null slices
    min_z_index, max_z_index = int(round(min(z_centerline_voxel))), int(round(max(z_centerline_voxel)))
    # loop across slices and set centerline pixel to value=1
    for iz in range(min_z_index, max_z_index + 1):
        data_centerline[int(round(x_centerline_voxel[iz - min_z_index])),
                        int(round(y_centerline_voxel[iz - min_z_index])),
                        int(iz)] = 1
    # assign data to centerline image
    im_centerline.data = data_centerline
    # reorient centerline to native orientation
    im_centerline.change_orientation(native_orientation)
    # save nifti volume
    fname_centerline = file_out + '.nii.gz'
    im_centerline.save(fname_centerline, dtype='uint8')
    # display stuff
    # sct.display_viewer_syntax([fname_segmentation, fname_centerline], colormaps=['gray', 'green'])

    # output csv with centerline coordinates
    fname_centerline_csv = file_out + '.csv'
    f_csv = open(fname_centerline_csv, 'w')
    f_csv.write('x,y,z\n')  # csv header
    for i in range(min_z_index, max_z_index + 1):
        f_csv.write("%d,%d,%d\n" % (int(i),
                                    x_centerline_voxel[i - min_z_index],
                                    y_centerline_voxel[i - min_z_index]))
    f_csv.close()
    # TODO: display open syntax for csv

    # create a .roi file
    fname_roi_centerline = optic.centerline2roi(fname_image=fname_centerline, folder_output='./', verbose=verbose)
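# With algo_fitting='hanning', the per-slice centerline coordinates are smoothed with a Hanning window.
# The sketch below shows a basic padded Hanning convolution on a noisy 1D signal; the actual
# smooth_centerline() adds fitting, padding choices and derivative computation on top of this, so treat
# it only as an illustration of the window itself.
def _sketch_hanning_smoothing(window_len=11):
    import numpy as np
    x = np.linspace(0, 1, 50) + 0.05 * np.random.randn(50)   # noisy per-slice coordinate
    w = np.hanning(window_len)
    # reflect-pad the ends so the output keeps the same length after trimming
    x_padded = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
    x_smooth = np.convolve(w / w.sum(), x_padded, mode='valid')
    return x_smooth[window_len // 2:-(window_len // 2)]       # same length as x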