def plan_ref(self):
    """
    Generate a plane in the reference space for each label present in the input image.
    """
    image_output = msct_image.zeros_like(Image(self.image_ref))
    image_input_neg = msct_image.zeros_like(Image(self.image_input))
    image_input_pos = msct_image.zeros_like(Image(self.image_input))

    X, Y, Z = (self.image_input.data < 0).nonzero()
    for i in range(len(X)):
        image_input_neg.data[X[i], Y[i], Z[i]] = -self.image_input.data[X[i], Y[i], Z[i]]  # in order to apply getNonZeroCoordinates

    X_pos, Y_pos, Z_pos = (self.image_input.data > 0).nonzero()
    for i in range(len(X_pos)):
        image_input_pos.data[X_pos[i], Y_pos[i], Z_pos[i]] = self.image_input.data[X_pos[i], Y_pos[i], Z_pos[i]]

    coordinates_input_neg = image_input_neg.getNonZeroCoordinates()
    coordinates_input_pos = image_input_pos.getNonZeroCoordinates()

    image_output.change_type('float32')
    for coord in coordinates_input_neg:
        image_output.data[:, :, int(coord.z)] = -coord.value  # problem: takes the int value of coord.value
    for coord in coordinates_input_pos:
        image_output.data[:, :, int(coord.z)] = coord.value

    return image_output
def custom_brain(ifolder, ofolder):
    ifname_dct = {}
    for roi in BRAIN_DCT:
        ifname_dct[roi] = os.path.join(ifolder, BRAIN_DCT[roi])

    # save each ROI with the brainstem portion zeroed out
    for roi in ifname_dct:
        ofname = os.path.join(ofolder, 'brain_' + roi + '.nii.gz')
        i_im = Image(ifname_dct[roi])
        o_im = zeros_like(i_im)
        o_im.data = i_im.data
        del i_im
        o_im.data[:, :, :BRAMSTEM_ZTOP + 1] = 0.0
        o_im.save(ofname)
        del o_im

    # binary union of all ROIs, again with the brainstem portion zeroed out
    sum_roi_im = sum([Image(ifname_dct[roi]).data for roi in ifname_dct])
    i_im = Image(ifname_dct[roi])  # any ROI works here: only the geometry/header is reused
    o_im = zeros_like(i_im)
    del i_im
    o_im.data[sum_roi_im > 0.0] = 1.0
    o_im.data[:, :, :BRAMSTEM_ZTOP + 1] = 0.0
    o_im.save(os.path.join(ofolder, 'brain.nii.gz'))
    del o_im
def _call_viewer_centerline(im_data, interslice_gap=20.0):
    # TODO: _call_viewer_centerline should not be "internal" anymore, i.e., remove the "_"
    """
    Call the Qt viewer for manually selecting labels.

    :param im_data: Image to display in the viewer
    :param interslice_gap: superior-inferior gap (in mm) between two consecutive labels
    :return: Image() of labels.
    """
    from spinalcordtoolbox.gui.base import AnatomicalParams
    from spinalcordtoolbox.gui.centerline import launch_centerline_dialog

    if not isinstance(im_data, Image):
        raise ValueError("Expecting an image")

    # Get the number of slices along the (IS) axis
    im_tmp = im_data.copy().change_orientation('RPI')
    _, _, nz, _, _, _, pz, _ = im_tmp.dim
    del im_tmp

    params = AnatomicalParams()
    # setting maximum number of points to a reasonable value
    params.num_points = np.ceil(nz * pz / interslice_gap) + 2
    params.interval_in_mm = interslice_gap
    params.starting_slice = 'top'

    im_mask_viewer = zeros_like(im_data)
    launch_centerline_dialog(im_data, im_mask_viewer, params)

    return im_mask_viewer
def segment_2d_slices(image, seg_model, binary_seg=True, threshold=0.5):
    """Apply seg_model on 2D slices of a cropped Image.

    Inputs:
        image - Image to be segmented
        seg_model - 2D segmentation model
        binary_seg - whether the segmentation is binary or partial
        threshold - threshold for binary segmentation
    Returns:
        seg_crop - output segmentation as an Image
    """
    cropped_seg = zeros_like(image)
    cropped_seg_data = np.zeros(image.data.shape)

    data_norm = image.data
    # no previous center of mass is available for the slice-wise post-processing
    x_cOm, y_cOm = None, None
    for z in range(data_norm.shape[2]):
        pred_seg = seg_model.predict(np.expand_dims(np.expand_dims(data_norm[:, :, z], -1), 0),
                                     batch_size=BATCH_SIZE)[0, :, :, 0]
        if binary_seg:
            pred_seg_th = (pred_seg > threshold).astype(int)
            pred_seg_pp = post_processing_slice_wise(pred_seg_th, x_cOm, y_cOm)
        else:
            pred_seg_pp = pred_seg
        cropped_seg_data[:, :, z] = pred_seg_pp
    cropped_seg.data = cropped_seg_data

    return cropped_seg
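
# A minimal usage sketch for segment_2d_slices (not part of the original source).
# The model path and file names below are hypothetical placeholders; it assumes a
# full Keras model was saved (not just weights) with a geometry matching the slices.
def _demo_segment_2d_slices(model_fname='seg_model.h5', fname_crop='t2_crop.nii.gz'):
    from keras.models import load_model
    seg_model = load_model(model_fname)  # hypothetical trained 2D model
    im_crop = Image(fname_crop)          # cropped input image, shape (nx, ny, nz)
    seg = segment_2d_slices(im_crop, seg_model, binary_seg=True, threshold=0.5)
    seg.save('t2_crop_seg.nii.gz')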
def segment_2d(model_fname, contrast_type, input_size, im_in):
    """
    Segment data using 2D convolutions.

    :return: seg_crop.data: ndarray float32: Output prediction
    """
    seg_model = nn_architecture_seg(height=input_size[0],
                                    width=input_size[1],
                                    depth=2 if contrast_type != 't2' else 3,
                                    features=32,
                                    batchnorm=False,
                                    dropout=0.0)
    seg_model.load_weights(model_fname)

    seg_crop = zeros_like(im_in, dtype=np.float32)

    data_norm = im_in.data
    # TODO: use sct_progress_bar
    for zz in range(im_in.dim[2]):
        # 2D CNN prediction
        pred_seg = seg_model.predict(np.expand_dims(np.expand_dims(data_norm[:, :, zz], -1), 0),
                                     batch_size=BATCH_SIZE)[0, :, :, 0]
        seg_crop.data[:, :, zz] = pred_seg

    return seg_crop.data
def _call_viewer_centerline(im_data, interslice_gap=20.0):
    # TODO: _call_viewer_centerline should not be "internal" anymore, i.e., remove the "_"
    """
    Call the Qt viewer for manually selecting labels.

    :param im_data: Image to display in the viewer
    :param interslice_gap: superior-inferior gap (in mm) between two consecutive labels
    :return: Image() of labels.
    """
    from spinalcordtoolbox.gui.base import AnatomicalParams
    from spinalcordtoolbox.gui.centerline import launch_centerline_dialog

    if not isinstance(im_data, Image):
        raise ValueError("Expecting an image")

    # Get the number of slices along the (IS) axis
    im_tmp = im_data.copy().change_orientation('RPI')
    _, _, nz, _, _, _, pz, _ = im_tmp.dim
    del im_tmp

    params = AnatomicalParams()
    # setting maximum number of points to a reasonable value
    params.num_points = np.ceil(nz * pz / interslice_gap) + 2
    params.interval_in_mm = interslice_gap
    # Start at the top slice minus one in cases where the first slice is almost zero,
    # due to gradient non-linearity correction:
    # https://forum.spinalcordmri.org/t/centerline-viewer-enhancements-sct-v5-0-1/605/4?u=jcohenadad
    params.starting_slice = 'top_minus_one'

    im_mask_viewer = zeros_like(im_data)
    launch_centerline_dialog(im_data, im_mask_viewer, params)

    return im_mask_viewer
def continuous_vertebral_levels(self):
    """
    This function transforms the vertebral levels file from the template into a continuous file.
    Instead of an integer representing the vertebral level on each slice, a continuous value
    represents the position of the slice within the vertebral level coordinate system.
    The image must be RPI.

    :return: image with continuous vertebral levels
    """
    im_input = Image(self.image_input, self.verbose)
    im_output = msct_image.zeros_like(self.image_input)

    # 1. extract vertebral levels from input image
    #   a. extract centerline
    #   b. for each slice, extract corresponding level
    nx, ny, nz, nt, px, py, pz, pt = im_input.dim
    from spinalcordtoolbox.centerline.core import get_centerline
    _, arr_ctl, _ = get_centerline(self.image_input, algo_fitting='bspline')
    x_centerline_fit, y_centerline_fit, z_centerline = arr_ctl
    value_centerline = np.array(
        [im_input.data[int(x_centerline_fit[it]), int(y_centerline_fit[it]), int(z_centerline[it])]
         for it in range(len(z_centerline))])

    # 2. compute distance for each vertebral level --> Di for i being the vertebral levels
    vertebral_levels = {}
    for slice_image, level in enumerate(value_centerline):
        if level not in vertebral_levels:
            vertebral_levels[level] = slice_image

    length_levels = {}
    for level in vertebral_levels:
        indexes_slice = np.where(value_centerline == level)
        length_levels[level] = np.sum(
            [np.sqrt(((x_centerline_fit[indexes_slice[0][index_slice + 1]] - x_centerline_fit[indexes_slice[0][index_slice]]) * px) ** 2 +
                     ((y_centerline_fit[indexes_slice[0][index_slice + 1]] - y_centerline_fit[indexes_slice[0][index_slice]]) * py) ** 2 +
                     ((z_centerline[indexes_slice[0][index_slice + 1]] - z_centerline[indexes_slice[0][index_slice]]) * pz) ** 2)
             for index_slice in range(len(indexes_slice[0]) - 1)])

    # 2. for each slice:
    #   a. identify corresponding vertebral level --> i
    #   b. calculate distance of slice from upper vertebral level --> d
    #   c. compute relative distance in the vertebral level coordinate system --> d/Di
    continuous_values = {}
    for it, iz in enumerate(z_centerline):
        level = value_centerline[it]
        indexes_slice = np.where(value_centerline == level)
        indexes_slice = indexes_slice[0][indexes_slice[0] >= it]
        # physical distance along the centerline (same formula as for length_levels)
        distance_from_level = np.sum(
            [np.sqrt(((x_centerline_fit[indexes_slice[index_slice + 1]] - x_centerline_fit[indexes_slice[index_slice]]) * px) ** 2 +
                     ((y_centerline_fit[indexes_slice[index_slice + 1]] - y_centerline_fit[indexes_slice[index_slice]]) * py) ** 2 +
                     ((z_centerline[indexes_slice[index_slice + 1]] - z_centerline[indexes_slice[index_slice]]) * pz) ** 2)
             for index_slice in range(len(indexes_slice) - 1)])
        continuous_values[iz] = level + 2.0 * distance_from_level / float(length_levels[level])

    # 3. saving data
    # for each slice, get all non-zero pixels and replace with continuous values
    coordinates_input = self.image_input.getNonZeroCoordinates()
    im_output.change_type(np.float32)
    # for all points in input, find the value that has to be set up, depending on the vertebral level
    for i, coord in enumerate(coordinates_input):
        im_output.data[int(coord.x), int(coord.y), int(coord.z)] = continuous_values[coord.z]

    return im_output
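
# Illustrative sketch (not part of the original source): the per-level lengths above
# are plain Euclidean distances in physical space. Self-contained version of the formula,
# with hypothetical voxel sizes and centerline points:
def _demo_physical_length():
    import numpy as np
    px, py, pz = 0.5, 0.5, 1.0  # hypothetical voxel sizes in mm
    # three consecutive centerline points (voxel coordinates)
    x = np.array([10.0, 10.2, 10.5])
    y = np.array([20.0, 20.1, 20.1])
    z = np.array([5.0, 6.0, 7.0])
    length = np.sum(np.sqrt(((x[1:] - x[:-1]) * px) ** 2 +
                            ((y[1:] - y[:-1]) * py) ** 2 +
                            ((z[1:] - z[:-1]) * pz) ** 2))
    return length  # ~2.02 mm for these points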
def remove_or_keep_labels(self, labels, action):
    """
    Keep or remove labels from self.image_input

    :param labels: list(int): labels to keep or remove
    :param action: str: 'remove': remove specified labels (i.e. set to zero), 'keep': keep specified labels and remove the others
    """
    if action == 'keep':
        image_output = msct_image.zeros_like(self.image_input)
    elif action == 'remove':
        image_output = self.image_input.copy()

    coordinates_input = self.image_input.getNonZeroCoordinates()

    for labelNumber in labels:
        isInLabels = False
        # note: if several voxels share the same value, only the last one found is acted upon
        for coord in coordinates_input:
            if labelNumber == coord.value:
                new_coord = coord
                isInLabels = True
        if isInLabels:
            if action == 'keep':
                image_output.data[int(new_coord.x), int(new_coord.y), int(new_coord.z)] = new_coord.value
            elif action == 'remove':
                image_output.data[int(new_coord.x), int(new_coord.y), int(new_coord.z)] = 0.0
        else:
            sct.printv("WARNING: Label " + str(float(labelNumber)) + " not found in input image.", type='warning')

    return image_output
def _call_viewer_centerline(fname_in, interslice_gap=20.0):
    from spinalcordtoolbox.gui.base import AnatomicalParams
    from spinalcordtoolbox.gui.centerline import launch_centerline_dialog

    im_data = Image(fname_in)

    # Get the number of slices along the (IS) axis
    im_tmp = msct_image.change_orientation(im_data, 'RPI')
    _, _, nz, _, _, _, pz, _ = im_tmp.dim
    del im_tmp

    params = AnatomicalParams()
    # setting maximum number of points to a reasonable value
    params.num_points = np.ceil(nz * pz / interslice_gap) + 2
    params.interval_in_mm = interslice_gap
    params.starting_slice = 'top'

    im_mask_viewer = msct_image.zeros_like(im_data)
    controller = launch_centerline_dialog(im_data, im_mask_viewer, params)
    fname_labels_viewer = sct.add_suffix(fname_in, '_viewer')

    if not controller.saved:
        sct.log.error('The viewer has been closed before entering all manual points. Please try again.')
        sys.exit(1)
    # save labels
    controller.as_niftii(fname_labels_viewer)

    return fname_labels_viewer
def remove_label(self, symmetry=False):
    """
    Compare two label images and remove any label in the input image that is not present
    in the reference image. The symmetry option also removes labels from the reference
    image that are not in the input image.
    """
    image_output = msct_image.zeros_like(self.image_input)

    result_coord_input, result_coord_ref = self.remove_label_coord(
        self.image_input.getNonZeroCoordinates(coordValue=True),
        self.image_ref.getNonZeroCoordinates(coordValue=True),
        symmetry)

    for coord in result_coord_input:
        image_output.data[int(coord.x), int(coord.y), int(coord.z)] = int(np.round(coord.value))

    if symmetry:
        image_output_ref = Image(self.image_ref, verbose=self.verbose)
        for coord in result_coord_ref:
            image_output_ref.data[int(coord.x), int(coord.y), int(coord.z)] = int(np.round(coord.value))
        image_output_ref.absolutepath = self.fname_output[1]
        image_output_ref.save('minimize_int')

        self.fname_output = self.fname_output[0]

    return image_output
def test_segment_2d():
    from keras import backend as K
    K.set_image_data_format("channels_last")  # Set to channels_first in test_deepseg_lesion.test_segment()

    contrast_test = 't2'
    model_path = os.path.join(sct.__sct_dir__, 'data', 'deepseg_sc_models', '{}_sc.h5'.format(contrast_test))

    # install: sct_download_data -d sct_testing_data
    fname_t2 = os.path.join(sct.__sct_dir__, 'sct_testing_data/t2/t2.nii.gz')
    fname_t2_seg = os.path.join(sct.__sct_dir__, 'sct_testing_data/t2/t2_seg.nii.gz')

    img, gt = _preprocess_segment(fname_t2, fname_t2_seg, contrast_test)

    seg = deepseg_sc.segment_2d(model_fname=model_path, contrast_type=contrast_test, input_size=(64, 64), im_in=img)
    seg_im = msct_image.zeros_like(img)
    seg_im.data = seg

    assert msct_image.compute_dice(seg_im, gt) > 0.80
def segment_2d(model_fname, contrast_type, input_size, im_in):
    """Segment data using 2D convolutions."""
    seg_model = nn_architecture_seg(height=input_size[0],
                                    width=input_size[1],
                                    depth=2 if contrast_type != 't2' else 3,
                                    features=32,
                                    batchnorm=False,
                                    dropout=0.0)
    seg_model.load_weights(model_fname)

    seg_crop = zeros_like(im_in, dtype=np.uint8)

    data_norm = im_in.data
    x_cOm, y_cOm = None, None
    for zz in range(im_in.dim[2]):
        pred_seg = seg_model.predict(np.expand_dims(np.expand_dims(data_norm[:, :, zz], -1), 0),
                                     batch_size=BATCH_SIZE)[0, :, :, 0]
        pred_seg_th = (pred_seg > 0.5).astype(int)
        pred_seg_pp = post_processing_slice_wise(pred_seg_th, x_cOm, y_cOm)
        seg_crop.data[:, :, zz] = pred_seg_pp

        if 1 in pred_seg_pp:
            x_cOm, y_cOm = center_of_mass(pred_seg_pp)
            x_cOm, y_cOm = np.round(x_cOm), np.round(y_cOm)

    return seg_crop.data
def create_label_along_segmentation(self):
    """
    Create an image with labels defined along the spinal cord segmentation (or centerline).
    Example: object_define=ProcessLabels(fname_segmentation, coordinates=[coord_1, coord_2, coord_i]),
    where coord_i='z,value'. If z=-1, then use z=nz/2 (i.e. center of FOV in superior-inferior direction)

    Returns
    -------
    image_output: Image object with labels.
    """
    image_output = msct_image.zeros_like(self.image_input)

    # loop across labels
    for i, coord in enumerate(self.coordinates):
        # split coord string
        list_coord = coord.split(',')
        # convert to int() and assign to variable
        z, value = [int(el) for el in list_coord]
        # if z=-1, replace with nz/2
        if z == -1:
            z = int(np.round(image_output.dim[2] / 2.0))
        # get center of mass of segmentation at given z
        x, y = ndimage.measurements.center_of_mass(np.array(self.image_input.data[:, :, z]))
        # round values to make indices
        x, y = int(np.round(x)), int(np.round(y))
        # display info
        sct.printv('Label #' + str(i) + ': ' + str(x) + ',' + str(y) + ',' + str(z) + ' --> ' + str(value), 1)
        if len(image_output.data.shape) == 3:
            image_output.data[x, y, z] = value
        elif len(image_output.data.shape) == 2:
            assert str(z) == '0', "ERROR: 2D coordinates should have a Z value of 0. Z coordinate is: " + str(z)
            image_output.data[x, y] = value

    return image_output
def create_label(self, add=False):
    """
    Create an image with labels listed by the user.
    This method works only if the user inserted correct coordinates.
    self.coordinates is a list of coordinates (class in msct_types).
    A Coordinate contains x, y, z and value.
    If only one label is to be added, coordinates must be completed with '[]'
    Examples:
        For one label: object_define=ProcessLabels(fname_label, coordinates=[coordi])
        where coordi is a 'Coordinate' object from msct_types
        For two labels: object_define=ProcessLabels(fname_label, coordinates=[coordi1, coordi2])
        where coordi1 and coordi2 are 'Coordinate' objects from msct_types
    """
    image_output = self.image_input.copy() if add else msct_image.zeros_like(self.image_input)

    # loop across labels
    for i, coord in enumerate(self.coordinates):
        if len(image_output.data.shape) == 3:
            image_output.data[int(coord.x), int(coord.y), int(coord.z)] = coord.value
        elif len(image_output.data.shape) == 2:
            assert str(coord.z) == '0', "ERROR: 2D coordinates should have a Z value of 0. Z coordinate is: " + str(coord.z)
            image_output.data[int(coord.x), int(coord.y)] = coord.value
        else:
            sct.printv('ERROR: Data should be 2D or 3D. Current shape is: ' + str(image_output.data.shape), 1, 'error')
        # display info
        sct.printv('Label #' + str(i) + ': ' + str(coord.x) + ',' + str(coord.y) + ',' + str(coord.z) + ' --> ' + str(coord.value), 1)

    return image_output
def cubic_to_point(self):
    """
    Calculate the center of mass of each group of labels and return an image of the same
    size with a single label per group, placed at the center of mass of that group.
    It is to be used after applying a homothetic warping field to a label file, as the
    labels will be dilated.

    Be careful: this algorithm computes the center of mass of voxels with the same value.
    If two groups of voxels with the same value are present but separated in space, the
    center of mass of the two groups together will be computed.

    :return: image_output
    """
    # 0. Initialization of output image
    output_image = msct_image.zeros_like(self.image_input)

    # 1. Extraction of coordinates from all non-null voxels in the image. Coordinates are sorted by value.
    coordinates = self.image_input.getNonZeroCoordinates(sorting='value')

    # 2. Separate all coordinates into groups by value
    groups = dict()
    for coord in coordinates:
        if coord.value in groups:
            groups[coord.value].append(coord)
        else:
            groups[coord.value] = [coord]

    # 3. Compute the center of mass of each group of voxels and write them into the output image
    for value, list_coord in groups.items():
        center_of_mass = sum(list_coord) / float(len(list_coord))
        sct.printv("Value = " + str(center_of_mass.value) + " : ("
                   + str(center_of_mass.x) + ", " + str(center_of_mass.y) + ", " + str(center_of_mass.z)
                   + ") --> (" + str(np.round(center_of_mass.x)) + ", " + str(np.round(center_of_mass.y))
                   + ", " + str(np.round(center_of_mass.z)) + ")", verbose=self.verbose)
        output_image.data[int(np.round(center_of_mass.x)),
                          int(np.round(center_of_mass.y)),
                          int(np.round(center_of_mass.z))] = center_of_mass.value

    return output_image
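
# Illustrative sketch (not part of the original source): the same grouping idea
# with plain numpy, using scipy's center_of_mass on a binary mask per label value:
def _demo_cubic_to_point():
    import numpy as np
    from scipy.ndimage import center_of_mass
    data = np.zeros((5, 5, 5))
    data[1:3, 1:3, 2] = 4  # a dilated label with value 4
    out = np.zeros_like(data)
    for value in np.unique(data[data > 0]):
        x, y, z = center_of_mass(data == value)
        out[int(round(x)), int(round(y)), int(round(z))] = value
    return out  # a single voxel with value 4 at (2, 2, 2) after rounding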
def increment_z_inverse(img: Image) -> Image:
    """
    Take all non-zero values, sort them along the inverse z direction, and attribute the
    values 1, 2, 3, etc.

    :param img: source image
    :returns: image with non-zero values sorted along inverse z
    """
    og_orientation = img.orientation
    if og_orientation != "RPI":
        img.change_orientation("RPI")

    out = zeros_like(img)
    coordinates_input = img.getNonZeroCoordinates(sorting='z', reverse_coord=True)

    # assign increasing values (1, 2, 3, ...) from the most superior label downward
    for i, (x, y, z, _) in enumerate(coordinates_input):
        out.data[int(x), int(y), int(z)] = i + 1

    if out.orientation != og_orientation:
        out.change_orientation(og_orientation)
        img.change_orientation(og_orientation)

    return out
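
# Illustrative usage sketch (not part of the original source), assuming `img` is an
# RPI image with at least 41 slices along z; the voxel indices are arbitrary examples:
def _demo_increment_z_inverse(img: Image) -> Image:
    labels = zeros_like(img)
    labels.data[5, 5, 40] = 7  # the original values do not matter,
    labels.data[5, 5, 10] = 3  # only the z ordering does
    out = increment_z_inverse(labels)
    # out.data[5, 5, 40] == 1 (most superior), out.data[5, 5, 10] == 2
    return out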
def custom_brainstem(ifolder, ofolder, thr):
    cst_r_ifile = os.path.join(ifolder, BRAINSTEM_DCT['CST_R'])
    cst_l_ifile = os.path.join(ifolder, BRAINSTEM_DCT['CST_L'])

    cst_r_ofile = os.path.join(ofolder, 'brainstem_CST_R.nii.gz')
    cst_l_ofile = os.path.join(ofolder, 'brainstem_CST_L.nii.gz')
    cst_ofile = os.path.join(ofolder, 'brainstem_CST.nii.gz')

    cst_r_im, cst_l_im = Image(cst_r_ifile), Image(cst_l_ifile)
    cst_im = zeros_like(cst_r_im)

    # keep only the brainstem portion of the tracts
    cst_r_im.data[:, :, BRAMSTEM_ZTOP + 1:] = 0.
    cst_l_im.data[:, :, BRAMSTEM_ZTOP + 1:] = 0.
    cst_r_im.data[:, :, :BRAMSTEM_ZBOT] = 0.
    cst_l_im.data[:, :, :BRAMSTEM_ZBOT] = 0.

    # binarize each side at the given threshold
    cst_r_im.data[cst_r_im.data > thr] = 1.0
    cst_r_im.data[cst_r_im.data <= thr] = 0.0
    cst_l_im.data[cst_l_im.data > thr] = 1.0
    cst_l_im.data[cst_l_im.data <= thr] = 0.0

    # merge left and right into a single binary mask
    cst_im.data = cst_r_im.data + cst_l_im.data
    cst_im.data[cst_im.data > 0.0] = 1.0

    cst_r_im.save(cst_r_ofile)
    cst_l_im.save(cst_l_ofile)
    cst_im.save(cst_ofile)

    del cst_r_im, cst_l_im, cst_im
def add_mask(fname_new, fname_out):
    img_new, img_in = Image(fname_new), Image(fname_out)
    img_out = zeros_like(img_in)
    img_out.data = img_new.data + img_in.data
    del img_new, img_in
    img_out.save(fname_out)
    del img_out
def launch_sagittal_viewer(img: Image, labels: Sequence[int], msg: str,
                           previous_points: Sequence[Coordinate] = None,
                           output_img: Image = None) -> Image:
    from spinalcordtoolbox.gui import base
    from spinalcordtoolbox.gui.sagittal import launch_sagittal_dialog

    params = base.AnatomicalParams()
    params.vertebraes = labels
    params.input_file_name = img.absolutepath
    if output_img is not None:
        params.output_file_name = output_img.absolutepath
    else:
        params.output_file_name = img.absolutepath
    params.subtitle = msg

    if previous_points is not None:
        params.message_warn = 'Please select the label you want to add \nor correct in the list below before clicking \non the image'

    out = zeros_like(img, dtype='uint8')
    out.absolutepath = params.output_file_name
    launch_sagittal_dialog(img, out, params, previous_points)

    return out
def test_label_vertebrae():
    a = fake_3dimage_sct2()
    expected = zeros_like(a)
    expected.data[0, 0, 0] = 111

    b = sct_labels.label_vertebrae(a, [111])

    diff = b.data == expected.data
    assert diff.all()
def segment_3d(model_fname, contrast_type, im):
    """Perform segmentation with 3D convolutions."""
    from spinalcordtoolbox.deepseg_sc.cnn_models_3d import load_trained_model
    dct_patch_3d = {'t2': {'size': (48, 48, 48), 'mean': 871.309, 'std': 557.916},
                    't2_ax': {'size': (48, 48, 48), 'mean': 835.592, 'std': 528.386},
                    't2s': {'size': (48, 48, 48), 'mean': 1011.31, 'std': 678.985}}

    # load 3d model
    seg_model = load_trained_model(model_fname)

    out_data = np.zeros(im.data.shape)

    # segment the spinal cord
    z_patch_size = dct_patch_3d[contrast_type]['size'][2]
    z_step_keep = list(range(0, im.data.shape[2], z_patch_size))
    for zz in z_step_keep:
        if zz == z_step_keep[-1]:  # deal with instances where im.data.shape[2] % z_patch_size != 0
            patch_im = np.zeros(dct_patch_3d[contrast_type]['size'])
            z_patch_extracted = im.data.shape[2] - zz
            patch_im[:, :, :z_patch_extracted] = im.data[:, :, zz:]
        else:
            z_patch_extracted = z_patch_size
            patch_im = im.data[:, :, zz:z_patch_size + zz]

        if np.any(patch_im):  # Check that the patch is not empty, which could occur after a brain detection.
            patch_norm = _normalize_data(patch_im, dct_patch_3d[contrast_type]['mean'], dct_patch_3d[contrast_type]['std'])
            patch_pred_proba = seg_model.predict(np.expand_dims(np.expand_dims(patch_norm, 0), 0), batch_size=BATCH_SIZE)
            pred_seg_th = (patch_pred_proba > 0.1).astype(int)[0, 0, :, :, :]

            if zz == z_step_keep[-1]:
                out_data[:, :, zz:] = pred_seg_th[:, :, :z_patch_extracted]
            else:
                out_data[:, :, zz:z_patch_size + zz] = pred_seg_th

    out = msct_image.zeros_like(im, dtype=np.uint8)
    out.data = out_data
    return out.copy()
def create_labels_along_segmentation(img: Image, labels: Sequence[Tuple[int, int]]) -> Image:
    """
    Create an image with labels defined along the spinal cord segmentation (or centerline).
    Input image does **not** need to be RPI (re-orientation is done within this function).

    :param img: source segmentation
    :param labels: list of label tuples as (z_value, label_value)
    :returns: labeled segmentation (Image)
    """
    og_orientation = img.orientation
    if og_orientation != "RPI":
        img.change_orientation("RPI")

    out = zeros_like(img)

    for idx_label, label in enumerate(labels):
        z, value = label

        # update z based on native image orientation (z should represent superior-inferior axis)
        coord = Coordinate([z, z, z])  # since we don't know which dimension corresponds to the superior-inferior
        # axis, we put z in all dimensions (we don't care about x and y here)
        _, _, z_rpi = coord.permute(img, 'RPI')

        # if z=-1, replace with nz/2
        if z == -1:
            z_rpi = int(np.round(out.dim[2] / 2.0))

        # get center of mass of segmentation at given z
        x, y = ndimage.measurements.center_of_mass(np.array(img.data[:, :, z_rpi]))

        # round values to make indices
        x, y = int(np.round(x)), int(np.round(y))

        # display info
        logger.debug(f"Label # {idx_label}: {x}, {y}, {z_rpi} --> {value}")

        if len(out.data.shape) == 3:
            out.data[x, y, z_rpi] = value
        elif len(out.data.shape) == 2:
            if z != 0:
                raise ValueError(f"2D coordinates should have a Z value of 0! Current value: {coord.z}")
            out.data[x, y] = value

    if out.orientation != og_orientation:
        out.change_orientation(og_orientation)
        img.change_orientation(og_orientation)

    return out
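
# Illustrative usage sketch (not part of the original source): placing two disc-style
# labels on a cord segmentation. 'seg.nii.gz' is a hypothetical binary cord mask.
def _demo_create_labels_along_segmentation():
    seg = Image('seg.nii.gz')
    # value 3 at slice z=50, value 4 at the mid-FOV slice (using the z=-1 convention)
    labeled = create_labels_along_segmentation(seg, [(50, 3), (-1, 4)])
    labeled.save('seg_labeled.nii.gz')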
def run_main(args):
    ifolder = args.i
    fname_top = os.path.join(ifolder, 'template_label_top_spinal_levels.nii.gz')
    fname_mid = os.path.join(ifolder, 'template_label_spinal_levels.nii.gz')
    fname_continuous = os.path.join(ifolder, 'template_centerline_spinal_levels.nii.gz')
    fname_ctrl = os.path.join(ifolder, 'template_centerline.nii.gz')

    if all([os.path.isfile(f) for f in [fname_top, fname_ctrl]]):
        im_top, im_ctrl = Image(fname_top), Image(fname_ctrl)
        im_mid, im_continuous = zeros_like(im_top), zeros_like(im_top)

        # get z coordinate of the labels at the top of each level
        z_top_dct = get_label_z(im_top.data)
        lb_lst = list(z_top_dct.keys())
        del im_top

        # fill im_continuous and im_mid
        for lb in lb_lst:
            if (lb + 1) in lb_lst:
                z_min_lb, z_max_lb = z_top_dct[lb + 1] + 1, z_top_dct[lb]
                im_continuous.data[:, :, z_min_lb:z_max_lb + 1] = lb * im_ctrl.data[:, :, z_min_lb:z_max_lb + 1]
                z_mid_lb = z_min_lb + int(round((z_max_lb - z_min_lb) * 1.0 / 2))
                im_mid.data[:, :, z_mid_lb] = lb * im_ctrl.data[:, :, z_mid_lb]

        # top section
        min_lb = min(lb_lst)
        zmin_top_lb = z_top_dct[min_lb]
        im_continuous.data[:, :, zmin_top_lb + 1:] = (min_lb - 1) * im_ctrl.data[:, :, zmin_top_lb + 1:]

        # save outputs
        im_continuous.save(fname_continuous)
        im_mid.save(fname_mid)
        del im_ctrl, im_continuous, im_mid
def project_labels_on_spinalcord(fname_label, fname_seg, param_centerline):
    """
    Project labels orthogonally on the spinal cord centerline. The algorithm works by finding the smallest distance
    between each label and the spinal cord center of mass.

    :param fname_label: file name of labels
    :param fname_seg: file name of cord segmentation (could also be of centerline)
    :param param_centerline: parameters for centerline fitting
    :return: file name of projected labels
    """
    # build output name
    fname_label_projected = sct.add_suffix(fname_label, "_projected")
    # open labels and segmentation
    im_label = Image(fname_label).change_orientation("RPI")
    im_seg = Image(fname_seg)
    native_orient = im_seg.orientation
    im_seg.change_orientation("RPI")

    # smooth centerline and return fitted coordinates in voxel space
    _, arr_ctl, _, _ = get_centerline(im_seg, param_centerline)
    x_centerline_fit, y_centerline_fit, z_centerline = arr_ctl
    # convert pixel into physical coordinates
    centerline_xyz_transposed = \
        [im_seg.transfo_pix2phys([[x_centerline_fit[i], y_centerline_fit[i], z_centerline[i]]])[0]
         for i in range(len(x_centerline_fit))]
    # transpose list
    centerline_phys_x = [i[0] for i in centerline_xyz_transposed]
    centerline_phys_y = [i[1] for i in centerline_xyz_transposed]
    centerline_phys_z = [i[2] for i in centerline_xyz_transposed]
    # get center of mass of label
    labels = im_label.getCoordinatesAveragedByValue()
    # initialize image of projected labels. Note that we use the space of the seg (not label).
    im_label_projected = msct_image.zeros_like(im_seg, dtype=np.uint8)

    # loop across label values
    for label in labels:
        # convert pixel into physical coordinates for the label
        label_phys_x, label_phys_y, label_phys_z = im_label.transfo_pix2phys([[label.x, label.y, label.z]])[0]
        # calculate distance between label and each point of the centerline
        distance_centerline = [np.linalg.norm([centerline_phys_x[i] - label_phys_x,
                                               centerline_phys_y[i] - label_phys_y,
                                               centerline_phys_z[i] - label_phys_z])
                               for i in range(len(x_centerline_fit))]
        # get the index corresponding to the min distance
        ind_min_distance = np.argmin(distance_centerline)
        # get centerline coordinate (in physical space)
        [min_phy_x, min_phy_y, min_phy_z] = [centerline_phys_x[ind_min_distance],
                                             centerline_phys_y[ind_min_distance],
                                             centerline_phys_z[ind_min_distance]]
        # convert coordinate to voxel space
        minx, miny, minz = im_seg.transfo_phys2pix([[min_phy_x, min_phy_y, min_phy_z]])[0]
        # use that index to assign projected label in the centerline
        im_label_projected.data[minx, miny, minz] = label.value

    # re-orient projected labels to native orientation and save
    im_label_projected.change_orientation(native_orient).save(fname_label_projected)
    return fname_label_projected
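
# Illustrative sketch (not part of the original source): the projection step above is
# a nearest-neighbour search in physical space. A minimal self-contained version:
def _demo_nearest_centerline_point():
    import numpy as np
    centerline = np.array([[0.0, 0.0, z] for z in range(10)])  # 10 points along z (mm)
    label = np.array([1.0, 1.0, 4.2])                          # a label off the centerline
    distances = np.linalg.norm(centerline - label, axis=1)
    ind = int(np.argmin(distances))
    return centerline[ind]  # [0., 0., 4.] -- the closest centerline point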
def mask_CST(fname_LFM, fname_LFM_CST, mask_lst):
    img_lfm = Image(fname_LFM)
    img_cst = zeros_like(img_lfm)
    img_cst.data = img_lfm.data
    del img_lfm

    # build a single binary mask from the union of the CST masks
    cst_mask_data = np.sum([Image(mask_fname).data for mask_fname in mask_lst], axis=0)
    cst_mask_data = (cst_mask_data > 0.0).astype(np.int_)

    # zero out everything outside the mask
    img_cst.data[np.where(cst_mask_data == 0.0)] = 0.0
    img_cst.save(fname_LFM_CST)
def project_labels_on_spinalcord(fname_label, fname_seg):
    """
    Project labels orthogonally on the spinal cord centerline. The algorithm works by finding the smallest distance
    between each label and the spinal cord center of mass.

    :param fname_label: file name of labels
    :param fname_seg: file name of cord segmentation (could also be of centerline)
    :return: file name of projected labels
    """
    # build output name
    fname_label_projected = sct.add_suffix(fname_label, "_projected")
    # open labels and segmentation
    im_label = Image(fname_label).change_orientation("RPI")
    im_seg = Image(fname_seg)
    native_orient = im_seg.orientation
    im_seg.change_orientation("RPI")

    # smooth centerline and return fitted coordinates in voxel space
    _, arr_ctl, _ = get_centerline(im_seg, algo_fitting='bspline')
    x_centerline_fit, y_centerline_fit, z_centerline = arr_ctl
    # convert pixel into physical coordinates
    centerline_xyz_transposed = \
        [im_seg.transfo_pix2phys([[x_centerline_fit[i], y_centerline_fit[i], z_centerline[i]]])[0]
         for i in range(len(x_centerline_fit))]
    # transpose list
    centerline_phys_x = [i[0] for i in centerline_xyz_transposed]
    centerline_phys_y = [i[1] for i in centerline_xyz_transposed]
    centerline_phys_z = [i[2] for i in centerline_xyz_transposed]
    # get center of mass of label
    labels = im_label.getCoordinatesAveragedByValue()
    # initialize image of projected labels. Note that we use the space of the seg (not label).
    im_label_projected = msct_image.zeros_like(im_seg, dtype=np.uint8)

    # loop across label values
    for label in labels:
        # convert pixel into physical coordinates for the label
        label_phys_x, label_phys_y, label_phys_z = im_label.transfo_pix2phys([[label.x, label.y, label.z]])[0]
        # calculate distance between label and each point of the centerline
        distance_centerline = [np.linalg.norm([centerline_phys_x[i] - label_phys_x,
                                               centerline_phys_y[i] - label_phys_y,
                                               centerline_phys_z[i] - label_phys_z])
                               for i in range(len(x_centerline_fit))]
        # get the index corresponding to the min distance
        ind_min_distance = np.argmin(distance_centerline)
        # get centerline coordinate (in physical space)
        [min_phy_x, min_phy_y, min_phy_z] = [centerline_phys_x[ind_min_distance],
                                             centerline_phys_y[ind_min_distance],
                                             centerline_phys_z[ind_min_distance]]
        # convert coordinate to voxel space
        minx, miny, minz = im_seg.transfo_phys2pix([[min_phy_x, min_phy_y, min_phy_z]])[0]
        # use that index to assign projected label in the centerline
        im_label_projected.data[minx, miny, minz] = label.value

    # re-orient projected labels to native orientation and save
    im_label_projected.change_orientation(native_orient).save(fname_label_projected)
    return fname_label_projected
def test_create_labels_empty(test_image):
    a = test_image.copy()
    expected = zeros_like(a)

    labels = [Coordinate(l) for l in [[0, 0, 0, 7], [0, 1, 2, 5]]]
    expected.data[0, 0, 0] = 7
    expected.data[0, 1, 2] = 5

    b = sct_labels.create_labels_empty(a, labels)

    diff = b.data == expected.data
    assert diff.all()
def create_labels_empty(img: Image, coordinates: Sequence[Coordinate]) -> Image:
    """
    Create an empty image with labels listed by the user.
    This method works only if the user inserted correct coordinates.
    If only one label is to be added, coordinates must be completed with '[]'

    :param img: source image
    :param coordinates: list of Coordinate objects (see spinalcordtoolbox.types)
    :returns: empty image with labels
    """
    out = _add_labels(zeros_like(img), coordinates)
    return out
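
# Illustrative usage sketch (not part of the original source): creating two point
# labels in an empty copy of a reference image. 't2.nii.gz' is a placeholder name.
def _demo_create_labels_empty():
    ref = Image('t2.nii.gz')
    coords = [Coordinate([10, 20, 30, 4]), Coordinate([10, 20, 45, 5])]  # x, y, z, value
    labels = create_labels_empty(ref, coords)
    labels.save('t2_labels.nii.gz')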
def plan(self, width, offset=0, gap=1):
    """
    Create a plane of thickness="width" and change its value with an offset and a gap between labels.
    """
    image_output = msct_image.zeros_like(self.image_input)
    coordinates_input = self.image_input.getNonZeroCoordinates()

    # for each label, create a slab of thickness 2*width centered on the label's z position
    for coord in coordinates_input:
        image_output.data[:, :, int(coord.z) - width:int(coord.z) + width] = offset + gap * coord.value

    return image_output
def uncrop_image(ref_in, data_crop, x_crop_lst, y_crop_lst, z_crop_lst):
    """
    Reconstruct the data from the cropped segmentation.
    """
    seg_unCrop = zeros_like(ref_in, dtype=np.float32)
    crop_size_x, crop_size_y = data_crop.shape[:2]

    for i_z, zz in enumerate(z_crop_lst):
        pred_seg = data_crop[:, :, zz]
        x_start, y_start = int(x_crop_lst[i_z]), int(y_crop_lst[i_z])
        x_end = x_start + crop_size_x if x_start + crop_size_x < seg_unCrop.dim[0] else seg_unCrop.dim[0]
        y_end = y_start + crop_size_y if y_start + crop_size_y < seg_unCrop.dim[1] else seg_unCrop.dim[1]
        seg_unCrop.data[x_start:x_end, y_start:y_end, zz] = pred_seg[0:x_end - x_start, 0:y_end - y_start]

    return seg_unCrop
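
# Illustrative sketch (not part of the original source): the uncropping above pastes
# each 2D slice back at its recorded (x, y) offset, clamping at the image border.
# The core paste logic in plain numpy:
def _demo_paste_with_clamp():
    import numpy as np
    full = np.zeros((10, 10))
    patch = np.ones((4, 4))
    x_start, y_start = 8, 3  # patch partially hangs off the x border
    x_end = min(x_start + patch.shape[0], full.shape[0])
    y_end = min(y_start + patch.shape[1], full.shape[1])
    full[x_start:x_end, y_start:y_end] = patch[:x_end - x_start, :y_end - y_start]
    return full  # only the 2x4 in-bounds portion of the patch is written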
def launch_sagittal_viewer(self, labels):
    from spinalcordtoolbox.gui import base
    from spinalcordtoolbox.gui.sagittal import launch_sagittal_dialog

    params = base.AnatomicalParams()
    params.vertebraes = labels
    params.input_file_name = self.image_input.absolutepath
    params.output_file_name = self.fname_output
    params.subtitle = self.msg

    output = msct_image.zeros_like(self.image_input)
    output.absolutepath = self.fname_output
    launch_sagittal_dialog(self.image_input, output, params)

    return output
def generate_mask_pmj(self):
    """Output the PMJ mask."""
    if self.pa_coord != -1:  # If PMJ has been detected
        im = Image(''.join(sct.extract_fname(self.fname_im)[1:]))  # image in PIR orientation
        im_mask = msct_image.zeros_like(im)

        im_mask.data[self.pa_coord, self.is_coord, self.rl_coord] = 50  # voxel with value = 50

        im_mask.change_orientation(self.orientation_im).save(self.fname_out)

        x_pmj, y_pmj, z_pmj = np.where(im_mask.data == 50)
        sct.printv('\tx_pmj = ' + str(x_pmj[0]), self.verbose, 'info')
        sct.printv('\ty_pmj = ' + str(y_pmj[0]), self.verbose, 'info')
        sct.printv('\tz_pmj = ' + str(z_pmj[0]), self.verbose, 'info')
def increment_z_inverse(self):
    """
    Take all non-zero values, sort them along the inverse z direction, and attribute the
    values 1, 2, 3, etc. This function assumes RPI orientation.
    """
    image_output = msct_image.zeros_like(self.image_input)
    coordinates_input = self.image_input.getNonZeroCoordinates(sorting='z', reverse_coord=True)

    # assign increasing values (1, 2, 3, ...) from the most superior label downward
    for i, coord in enumerate(coordinates_input):
        image_output.data[int(coord.x), int(coord.y), int(coord.z)] = i + 1

    return image_output
def uncrop_image(ref_in, data_crop, x_crop_lst, y_crop_lst, z_crop_lst):
    """Reconstruct the data from the cropped segmentation."""
    seg_unCrop = zeros_like(ref_in, dtype=np.uint8)
    crop_size_x, crop_size_y = data_crop.shape[:2]

    for i_z, zz in enumerate(z_crop_lst):
        pred_seg = data_crop[:, :, zz]
        x_start, y_start = int(x_crop_lst[i_z]), int(y_crop_lst[i_z])
        x_end = x_start + crop_size_x if x_start + crop_size_x < seg_unCrop.dim[0] else seg_unCrop.dim[0]
        y_end = y_start + crop_size_y if y_start + crop_size_y < seg_unCrop.dim[1] else seg_unCrop.dim[1]
        seg_unCrop.data[x_start:x_end, y_start:y_end, zz] = pred_seg[0:x_end - x_start, 0:y_end - y_start]

    return seg_unCrop
def compute_texture(self):
    offset = int(self.param_glcm.distance)
    sct.printv('\nCompute texture metrics...', self.param.verbose, 'normal')

    # open image and re-orient it to RPI if needed
    im_tmp = Image(self.param.fname_im)
    if self.orientation_im != self.orientation_extraction:
        im_tmp.change_orientation(self.orientation_extraction)

    dct_metric = {}
    for m in self.metric_lst:
        im_2save = msct_image.zeros_like(im_tmp, dtype='float64')
        dct_metric[m] = im_2save

    with tqdm.tqdm() as pbar:
        for im_z, seg_z, zz in zip(self.dct_im_seg['im'], self.dct_im_seg['seg'], range(len(self.dct_im_seg['im']))):
            for xx in range(im_z.shape[0]):
                for yy in range(im_z.shape[1]):
                    if not seg_z[xx, yy]:
                        continue
                    if xx < offset or yy < offset:
                        continue
                    if xx > (im_z.shape[0] - offset - 1) or yy > (im_z.shape[1] - offset - 1):
                        continue  # check that the whole glcm_window is in the axial_slice
                    if False in np.unique(seg_z[xx - offset: xx + offset + 1, yy - offset: yy + offset + 1]):
                        continue  # check that the whole glcm_window is in the mask of the axial_slice

                    glcm_window = im_z[xx - offset: xx + offset + 1, yy - offset: yy + offset + 1]
                    glcm_window = glcm_window.astype(np.uint8)

                    dct_glcm = {}
                    for a in self.param_glcm.angle.split(','):  # compute the GLCM for self.param_glcm.distance and for each self.param_glcm.angle
                        dct_glcm[a] = greycomatrix(glcm_window,
                                                   [self.param_glcm.distance], [np.radians(int(a))],
                                                   symmetric=self.param_glcm.symmetric,
                                                   normed=self.param_glcm.normed)

                    for m in self.metric_lst:  # compute the GLCM property (m.split('_')[0]) of the voxel xx,yy,zz
                        dct_metric[m].data[xx, yy, zz] = greycoprops(dct_glcm[m.split('_')[2]], m.split('_')[0])[0][0]

            pbar.set_postfix(pos="{}/{}".format(zz, len(self.dct_im_seg["im"])))
            pbar.update(1)

    for m in self.metric_lst:
        fname_out = sct.add_suffix(''.join(sct.extract_fname(self.param.fname_im)[1:]), '_' + m)
        dct_metric[m].save(fname_out)
        self.fname_metric_lst[m] = fname_out
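
# Illustrative sketch (not part of the original source): a single GLCM metric on one
# small window, using scikit-image. Older releases spell these greycomatrix/greycoprops;
# newer ones use graycomatrix/graycoprops.
def _demo_glcm_contrast():
    import numpy as np
    from skimage.feature import greycomatrix, greycoprops
    window = np.array([[0, 0, 1, 1],
                       [0, 0, 1, 1],
                       [0, 2, 2, 2],
                       [2, 2, 3, 3]], dtype=np.uint8)
    # co-occurrence matrix for distance 1, angle 0 (horizontal neighbors)
    glcm = greycomatrix(window, distances=[1], angles=[0], symmetric=False, normed=True)
    return greycoprops(glcm, 'contrast')[0][0]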
def labelize_from_disks(self):
    """
    Create an image with regions labeled depending on values from reference.
    Typically, the user inputs a segmentation image and labels with disc positions, and
    this function produces a segmentation image with vertebral levels labeled.
    Labels are assumed to be non-zero and incremented from top to bottom, assuming
    a RPI orientation.
    """
    image_output = msct_image.zeros_like(self.image_input)
    coordinates_input = self.image_input.getNonZeroCoordinates()
    coordinates_ref = self.image_ref.getNonZeroCoordinates(sorting='value')

    # for all points in input, find the value that has to be set up, depending on the vertebral level
    for i, coord in enumerate(coordinates_input):
        for j in range(0, len(coordinates_ref) - 1):
            if coordinates_ref[j + 1].z < coord.z <= coordinates_ref[j].z:
                image_output.data[int(coord.x), int(coord.y), int(coord.z)] = coordinates_ref[j].value

    return image_output
def segment_3d(model_fname, contrast_type, im_in):
    """Perform segmentation with 3D convolutions."""
    from spinalcordtoolbox.deepseg_sc.cnn_models_3d import load_trained_model
    dct_patch_sc_3d = {'t2': {'size': (64, 64, 48), 'mean': 65.8562, 'std': 59.7999},
                       't2s': {'size': (96, 96, 48), 'mean': 87.0212, 'std': 64.425},
                       't1': {'size': (64, 64, 48), 'mean': 88.5001, 'std': 66.275}}

    # load 3d model
    seg_model = load_trained_model(model_fname)

    out = zeros_like(im_in, dtype=np.uint8)

    # segment the spinal cord
    z_patch_size = dct_patch_sc_3d[contrast_type]['size'][2]
    z_step_keep = list(range(0, im_in.data.shape[2], z_patch_size))
    for zz in z_step_keep:
        if zz == z_step_keep[-1]:  # deal with instances where im_in.data.shape[2] % z_patch_size != 0
            patch_im = np.zeros(dct_patch_sc_3d[contrast_type]['size'])
            z_patch_extracted = im_in.data.shape[2] - zz
            patch_im[:, :, :z_patch_extracted] = im_in.data[:, :, zz:]
        else:
            z_patch_extracted = z_patch_size
            patch_im = im_in.data[:, :, zz:z_patch_size + zz]

        if np.any(patch_im):  # Check that the patch is not empty, which could occur after a brain detection.
            patch_norm = \
                _normalize_data(patch_im, dct_patch_sc_3d[contrast_type]['mean'], dct_patch_sc_3d[contrast_type]['std'])
            patch_pred_proba = \
                seg_model.predict(np.expand_dims(np.expand_dims(patch_norm, 0), 0), batch_size=BATCH_SIZE)
            pred_seg_th = (patch_pred_proba > 0.5).astype(int)[0, 0, :, :, :]

            # slice-wise post-processing, tracking the center of mass from slice to slice
            x_cOm, y_cOm = None, None
            for zz_pp in range(z_patch_size):
                pred_seg_pp = post_processing_slice_wise(pred_seg_th[:, :, zz_pp], x_cOm, y_cOm)
                pred_seg_th[:, :, zz_pp] = pred_seg_pp
                x_cOm, y_cOm = center_of_mass(pred_seg_pp)
                x_cOm, y_cOm = np.round(x_cOm), np.round(y_cOm)

            if zz == z_step_keep[-1]:
                out.data[:, :, zz:] = pred_seg_th[:, :, :z_patch_extracted]
            else:
                out.data[:, :, zz:z_patch_size + zz] = pred_seg_th

    return out.data
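
# Illustrative sketch (not part of the original source): the z-patching logic above
# splits a volume into fixed-size chunks along z and zero-pads the last chunk.
# The same traversal in isolation, with hypothetical sizes:
def _demo_z_patching(nz=100, z_patch_size=48):
    starts = list(range(0, nz, z_patch_size))  # [0, 48, 96]
    spans = []
    for zz in starts:
        # the last patch only covers the remaining slices
        extracted = nz - zz if zz == starts[-1] else z_patch_size
        spans.append((zz, extracted))
    return spans  # [(0, 48), (48, 48), (96, 4)]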
def test_integrity(param_test):
    """
    Test integrity of function
    """
    # open ground truth
    im_seg_manual = Image(param_test.fname_gt).change_orientation("RPI")

    # Compute center of mass of the SC seg on each axial slice.
    center_of_mass_x_y_z_lst = [[int(center_of_mass(im_seg_manual.data[:, :, zz])[0]),
                                 int(center_of_mass(im_seg_manual.data[:, :, zz])[1]),
                                 zz] for zz in range(im_seg_manual.dim[2])]

    im_ctr_manual = msct_image.zeros_like(im_seg_manual)
    for x_y_z in center_of_mass_x_y_z_lst:
        im_ctr_manual.data[x_y_z[0], x_y_z[1], x_y_z[2]] = 1

    # open output segmentation
    path_in, file_in, _ = sct.extract_fname(param_test.file_input)
    file_ctr = os.path.join(param_test.path_data, 't2s', sct.add_suffix(param_test.file_input, '_centerline'))
    im_ctr = Image(file_ctr).change_orientation("RPI")

    # compute MSE between generated ctr and ctr from database
    mse_detection = compute_mse(im_ctr, im_ctr_manual)

    param_test.output += 'Computed MSE: ' + str(mse_detection)
    param_test.output += 'MSE threshold (if computed MSE higher: fail): ' + str(param_test.mse_threshold)
    if mse_detection > param_test.mse_threshold:
        param_test.status = 99
        param_test.output += '--> FAILED'
    else:
        param_test.output += '--> PASSED'

    # update Panda structure
    param_test.results['mse_detection'] = mse_detection

    return param_test
def create_label_along_segmentation(self):
    """
    Create an image with labels defined along the spinal cord segmentation (or centerline).
    Input image does **not** need to be RPI (re-orientation is done within this function).
    Example: object_define=ProcessLabels(fname_segmentation, coordinates=[coord_1, coord_2, coord_i]),
    where coord_i='z,value'. If z=-1, then use z=nz/2 (i.e. center of FOV in superior-inferior direction)

    :return: image with labels, in the native orientation of the input
    """
    # reorient input image to RPI
    im_rpi = self.image_input.copy().change_orientation('RPI')
    im_output_rpi = zeros_like(im_rpi)

    # loop across labels
    for ilabel, coord in enumerate(self.coordinates):
        # split coord string
        list_coord = coord.split(',')
        # convert to int() and assign to variable
        z, value = [int(el) for el in list_coord]
        # update z based on native image orientation (z should represent superior-inferior axis)
        coord = Coordinate([z, z, z])  # since we don't know which dimension corresponds to the superior-inferior
        # axis, we put z in all dimensions (we don't care about x and y here)
        _, _, z_rpi = coord.permute(self.image_input, 'RPI')
        # if z=-1, replace with nz/2
        if z == -1:
            z_rpi = int(np.round(im_output_rpi.dim[2] / 2.0))
        # get center of mass of segmentation at given z
        x, y = ndimage.measurements.center_of_mass(np.array(im_rpi.data[:, :, z_rpi]))
        # round values to make indices
        x, y = int(np.round(x)), int(np.round(y))
        # display info
        sct.printv('Label #' + str(ilabel) + ': ' + str(x) + ',' + str(y) + ',' + str(z_rpi) + ' --> ' + str(value), 1)
        if len(im_output_rpi.data.shape) == 3:
            im_output_rpi.data[x, y, z_rpi] = value
        elif len(im_output_rpi.data.shape) == 2:
            assert str(z) == '0', "ERROR: 2D coordinates should have a Z value of 0. Z coordinate is: " + str(z)
            im_output_rpi.data[x, y] = value

    # change orientation back to native
    return im_output_rpi.change_orientation(self.image_input.orientation)
def detect_c2c3(nii_im, nii_seg, contrast, nb_sag_avg=7.0, verbose=1):
    """
    Detect the posterior edge of the C2-C3 disc.

    :param nii_im: input image
    :param nii_seg: spinal cord segmentation
    :param contrast: contrast type, used to select the detection model
    :param nb_sag_avg: thickness (in mm) of the sagittal slab averaged around the mid-sagittal slice
    :param verbose: verbosity level
    :return: Image with a single voxel (value 3) at the detected C2-C3 location
    """
    # path to the C2-C3 disc detector
    path_model = os.path.join(sct.__data_dir__, 'c2c3_disc_models', '{}_model'.format(contrast))

    orientation_init = nii_im.orientation
    z_seg_max = np.max(np.where(nii_seg.change_orientation('PIR').data)[1])

    # Flatten sagittal
    nii_im = flatten_sagittal(nii_im, nii_seg, verbose=verbose)
    nii_seg_flat = flatten_sagittal(nii_seg, nii_seg, verbose=verbose)

    # create temporary folder with intermediate results
    logger.info("Creating temporary folder...")
    tmp_folder = sct.TempFolder()
    tmp_folder.chdir()

    # Extract mid-slice
    nii_im.change_orientation('PIR')
    nii_seg_flat.change_orientation('PIR')
    mid_RL = int(np.rint(nii_im.dim[2] * 1.0 / 2))
    nb_sag_avg_half = int(nb_sag_avg / 2 / nii_im.dim[6])
    midSlice = np.mean(nii_im.data[:, :, mid_RL - nb_sag_avg_half:mid_RL + nb_sag_avg_half + 1], 2)  # average ~7 mm of sagittal slices
    midSlice_seg = nii_seg_flat.data[:, :, mid_RL]
    nii_midSlice = msct_image.zeros_like(nii_im)
    nii_midSlice.data = midSlice
    nii_midSlice.save('data_midSlice.nii')

    # Run detection
    logger.info('Run C2-C3 detector...')
    os.environ["FSLOUTPUTTYPE"] = "NIFTI_PAIR"
    cmd_detection = 'isct_spine_detect -ctype=dpdt "%s" "%s" "%s"' % \
                    (path_model, 'data_midSlice', 'data_midSlice_pred')
    # The command below will fail, but we don't care because it will output an image
    # (prediction), which we will use later on.
    s, o = sct.run(cmd_detection, verbose=0, is_sct_binary=True, raise_exception=False)
    pred = nib.load('data_midSlice_pred_svm.hdr').get_data()
    if verbose >= 2:
        # save the prediction data before post-processing
        nii_pred_before_postPro = nii_midSlice.copy()
        nii_pred_before_postPro.data = pred  # 2D data with PIR orientation, mid-sagittal slice of the original data
        nii_pred_before_postPro.save("pred_midSlice_before_postPro.nii.gz")

    # Create mask along centerline
    midSlice_mask = np.zeros(midSlice_seg.shape)
    mask_halfSize = int(np.rint(25.0 / nii_midSlice.dim[4]))
    for z in range(midSlice_mask.shape[1]):
        row = midSlice_seg[:, z]  # 2D data with PI orientation, mid-sagittal slice of the original data
        if np.any(row > 0):
            med_y = int(np.rint(np.median(np.where(row > 0))))
            midSlice_mask[med_y - mask_halfSize:med_y + mask_halfSize, z] = 1
    if verbose >= 2:
        # save the created mask
        nii_postPro_mask = nii_midSlice.copy()
        nii_postPro_mask.data = midSlice_mask
        nii_postPro_mask.save("mask_midSlice.nii.gz")

    # mask prediction
    pred[midSlice_mask == 0] = 0
    pred[:, z_seg_max:] = 0  # mask above SC segmentation
    if verbose >= 2:
        # save the prediction data after post-processing
        nii_pred_after_postPro = nii_midSlice.copy()
        nii_pred_after_postPro.data = pred
        nii_pred_after_postPro.save("pred_midSlice_after_postPro.nii.gz")

    # assign label to voxel
    nii_c2c3 = zeros_like(nii_seg_flat)  # 3D data with PIR orientation
    if np.any(pred > 0):
        logger.info('C2-C3 detected...')
        pred_bin = (pred > 0).astype(np.int_)
        coord_max = np.where(pred == np.max(pred))
        pa_c2c3, is_c2c3 = coord_max[0][0], coord_max[1][0]
        nii_seg.change_orientation('PIR')
        rl_c2c3 = int(np.rint(center_of_mass(np.array(nii_seg.data[:, is_c2c3, :]))[1]))
        nii_c2c3.data[pa_c2c3, is_c2c3, rl_c2c3] = 3
    else:
        logger.warning('C2-C3 not detected...')

    # remove temporary files
    tmp_folder.chdir_undo()
    if verbose < 2:
        logger.info("Remove temporary files...")
        tmp_folder.cleanup()

    nii_c2c3.change_orientation(orientation_init)

    return nii_c2c3
def propseg(img_input, options_dict):
    """
    :param img_input: source image, to be segmented
    :param options_dict: arguments as dictionary
    :return: segmented Image
    """
    arguments = options_dict
    fname_input_data = img_input.absolutepath
    fname_data = os.path.abspath(fname_input_data)
    contrast_type = arguments["-c"]
    contrast_type_conversion = {'t1': 't1', 't2': 't2', 't2s': 't2', 'dwi': 't1'}
    contrast_type_propseg = contrast_type_conversion[contrast_type]

    # Starting building the command
    cmd = ['isct_propseg', '-t', contrast_type_propseg]

    if "-ofolder" in arguments:
        folder_output = arguments["-ofolder"]
    else:
        folder_output = './'
    cmd += ['-o', folder_output]
    if not os.path.isdir(folder_output) and os.path.exists(folder_output):
        logger.error("output directory %s is not a valid directory" % folder_output)
    if not os.path.exists(folder_output):
        os.makedirs(folder_output)

    if "-down" in arguments:
        cmd += ["-down", str(arguments["-down"])]
    if "-up" in arguments:
        cmd += ["-up", str(arguments["-up"])]

    remove_temp_files = 1
    if "-r" in arguments:
        remove_temp_files = int(arguments["-r"])

    verbose = int(arguments.get('-v'))
    sct.init_sct(log_level=verbose, update=True)  # Update log level
    # Update for propseg binary
    if verbose > 0:
        cmd += ["-verbose"]

    # Output options
    if "-mesh" in arguments:
        cmd += ["-mesh"]
    if "-centerline-binary" in arguments:
        cmd += ["-centerline-binary"]
    if "-CSF" in arguments:
        cmd += ["-CSF"]
    if "-centerline-coord" in arguments:
        cmd += ["-centerline-coord"]
    if "-cross" in arguments:
        cmd += ["-cross"]
    if "-init-tube" in arguments:
        cmd += ["-init-tube"]
    if "-low-resolution-mesh" in arguments:
        cmd += ["-low-resolution-mesh"]
    if "-detect-nii" in arguments:
        cmd += ["-detect-nii"]
    if "-detect-png" in arguments:
        cmd += ["-detect-png"]

    # Helping options
    use_viewer = None
    use_optic = True  # enabled by default
    init_option = None
    rescale_header = arguments["-rescale"]
    if "-init" in arguments:
        init_option = float(arguments["-init"])
        if init_option < 0:
            sct.printv('Command-line usage error: ' + str(init_option) + " is not a valid value for '-init'", 1, 'error')
            sys.exit(1)
    if "-init-centerline" in arguments:
        if str(arguments["-init-centerline"]) == "viewer":
            use_viewer = "centerline"
        elif str(arguments["-init-centerline"]) == "hough":
            use_optic = False
        else:
            if rescale_header != 1:
                fname_labels_viewer = func_rescale_header(str(arguments["-init-centerline"]), rescale_header, verbose=verbose)
            else:
                fname_labels_viewer = str(arguments["-init-centerline"])
            cmd += ["-init-centerline", fname_labels_viewer]
            use_optic = False
    if "-init-mask" in arguments:
        if str(arguments["-init-mask"]) == "viewer":
            use_viewer = "mask"
        else:
            if rescale_header != 1:
                fname_labels_viewer = func_rescale_header(str(arguments["-init-mask"]), rescale_header)
            else:
                fname_labels_viewer = str(arguments["-init-mask"])
            cmd += ["-init-mask", fname_labels_viewer]
            use_optic = False
    if "-mask-correction" in arguments:
        cmd += ["-mask-correction", str(arguments["-mask-correction"])]
    if "-radius" in arguments:
        cmd += ["-radius", str(arguments["-radius"])]
    if "-detect-n" in arguments:
        cmd += ["-detect-n", str(arguments["-detect-n"])]
    if "-detect-gap" in arguments:
        cmd += ["-detect-gap", str(arguments["-detect-gap"])]
    if "-init-validation" in arguments:
        cmd += ["-init-validation"]
    if "-nbiter" in arguments:
        cmd += ["-nbiter", str(arguments["-nbiter"])]
    if "-max-area" in arguments:
        cmd += ["-max-area", str(arguments["-max-area"])]
    if "-max-deformation" in arguments:
        cmd += ["-max-deformation", str(arguments["-max-deformation"])]
    if "-min-contrast" in arguments:
        cmd += ["-min-contrast", str(arguments["-min-contrast"])]
    if "-d" in arguments:
        cmd += ["-d", str(arguments["-d"])]
    if "-distance-search" in arguments:
        cmd += ["-dsearch", str(arguments["-distance-search"])]
    if "-alpha" in arguments:
        cmd += ["-alpha", str(arguments["-alpha"])]

    # check if input image is in 3D. Otherwise itk image reader will cut the 4D image in 3D volumes and only take the first one.
    image_input = Image(fname_data)
    image_input_rpi = image_input.copy().change_orientation('RPI')
    nx, ny, nz, nt, px, py, pz, pt = image_input_rpi.dim
    if nt > 1:
        sct.printv('ERROR: your input image needs to be 3D in order to be segmented.', 1, 'error')

    path_data, file_data, ext_data = sct.extract_fname(fname_data)
    path_tmp = sct.tmp_create(basename="label_vertebrae", verbose=verbose)

    # rescale header (see issue #1406)
    if rescale_header != 1:
        fname_data_propseg = func_rescale_header(fname_data, rescale_header)
    else:
        fname_data_propseg = fname_data

    # add to command
    cmd += ['-i', fname_data_propseg]

    # if centerline or mask is asked using viewer
    if use_viewer:
        from spinalcordtoolbox.gui.base import AnatomicalParams
        from spinalcordtoolbox.gui.centerline import launch_centerline_dialog

        params = AnatomicalParams()
        if use_viewer == 'mask':
            params.num_points = 3
            params.interval_in_mm = 15  # superior-inferior interval between two consecutive labels
            params.starting_slice = 'midfovminusinterval'
        if use_viewer == 'centerline':
            # setting maximum number of points to a reasonable value
            params.num_points = 20
            params.interval_in_mm = 30
            params.starting_slice = 'top'
        im_data = Image(fname_data_propseg)

        im_mask_viewer = msct_image.zeros_like(im_data)
        # im_mask_viewer.absolutepath = sct.add_suffix(fname_data_propseg, '_labels_viewer')
        controller = launch_centerline_dialog(im_data, im_mask_viewer, params)
        fname_labels_viewer = sct.add_suffix(fname_data_propseg, '_labels_viewer')

        if not controller.saved:
            sct.printv('The viewer has been closed before entering all manual points. Please try again.', 1, 'error')
            sys.exit(1)
        # save labels
        controller.as_niftii(fname_labels_viewer)

        # add mask filename to parameters string
        if use_viewer == "centerline":
            cmd += ["-init-centerline", fname_labels_viewer]
        elif use_viewer == "mask":
            cmd += ["-init-mask", fname_labels_viewer]

    # If using OptiC
    elif use_optic:
        image_centerline = optic.detect_centerline(image_input, contrast_type, verbose)
        fname_centerline_optic = os.path.join(path_tmp, 'centerline_optic.nii.gz')
        image_centerline.save(fname_centerline_optic)
        cmd += ["-init-centerline", fname_centerline_optic]

    if init_option is not None:
        if init_option > 1:
            init_option /= (nz - 1)
        cmd += ['-init', str(init_option)]

    # enabling centerline extraction by default (needed by check_and_correct_segmentation() )
    cmd += ['-centerline-binary']

    # run propseg
    status, output = sct.run(cmd, verbose, raise_exception=False, is_sct_binary=True)

    # check status is not 0
    if not status == 0:
        sct.printv('Automatic cord detection failed. Please initialize using -init-centerline or -init-mask (see help)',
                   1, 'error')
        sys.exit(1)

    # build output filenames
    fname_seg = os.path.join(folder_output, os.path.basename(sct.add_suffix(fname_data, "_seg")))
    fname_centerline = os.path.join(folder_output, os.path.basename(sct.add_suffix(fname_data, "_centerline")))
    # in case the header was rescaled, update the output file names by removing the "_rescaled"
    if rescale_header != 1:
        sct.mv(os.path.join(folder_output, sct.add_suffix(os.path.basename(fname_data_propseg), "_seg")),
               fname_seg)
        sct.mv(os.path.join(folder_output, sct.add_suffix(os.path.basename(fname_data_propseg), "_centerline")),
               fname_centerline)
        # if the viewer was used, copy the labelled points to the output folder (they will then be scaled back)
        if use_viewer:
            fname_labels_viewer_new = os.path.join(folder_output, os.path.basename(sct.add_suffix(fname_data, "_labels_viewer")))
            sct.copy(fname_labels_viewer, fname_labels_viewer_new)
            # update variable (used later)
            fname_labels_viewer = fname_labels_viewer_new

    # check consistency of segmentation
    if arguments["-correct-seg"] == "1":
        check_and_correct_segmentation(fname_seg, fname_centerline, folder_output=folder_output,
                                       threshold_distance=3.0, remove_temp_files=remove_temp_files, verbose=verbose)

    # copy header from input to segmentation to make sure qform is the same
    sct.printv("Copy header input --> output(s) to make sure qform is the same.", verbose)
    list_fname = [fname_seg, fname_centerline]
    if use_viewer:
        list_fname.append(fname_labels_viewer)
    for fname in list_fname:
        im = Image(fname)
        im.header = image_input.header
        im.save(dtype='int8')  # they are all binary masks, hence fine to save as int8

    return Image(fname_seg)