def use_viewer_to_define_labels(fname_data, first_label, nb_of_slices_to_mean):
    from sct_viewer import ClickViewerGroundTruth
    from msct_image import Image
    import sct_image

    image_input = Image(fname_data)

    image_input_orientation = sct_image.orientation(image_input,
                                                    get=True,
                                                    verbose=False)
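    # the viewer expects SAL-oriented data: write a reoriented temporary copy and keep the original orientation so the labels can be restored later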
    reoriented_image_filename = 'reoriented_image_source.nii.gz'
    path_tmp_viewer = sct.tmp_create(verbose=False)
    cmd_image = 'sct_image -i "%s" -o "%s" -setorient SAL -v 0' % (
        fname_data, reoriented_image_filename)
    sct.run(cmd_image, verbose=False)

    im_input_SAL = prepare_input_image_for_viewer(fname_data)
    viewer = ClickViewerGroundTruth(im_input_SAL,
                                    first_label,
                                    orientation_subplot=['sag', 'ax'])
    set_viewer_parameters(viewer, nb_of_slices_to_mean)

    mask_points = viewer.start()
    if not mask_points and viewer.closed:
        mask_points = viewer.list_points_useful_notation
    make_labels_image_from_list_points(mask_points, reoriented_image_filename,
                                       image_input_orientation)
Example no. 2
def prepare(list_images):
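    # reorient each input image to SAL (the orientation used by the viewer) and record its original orientation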
    from sct_image import orientation
    fname_images, orientation_images = [], []
    for fname_im in list_images:
        orientation_images.append(orientation(Image(fname_im), get=True, verbose=False))
        path_fname, file_fname, ext_fname = sct.extract_fname(fname_im)
        reoriented_image_filename = 'tmp.' + sct.add_suffix(file_fname + ext_fname, "_SAL")
        sct.run('sct_image -i ' + fname_im + ' -o ' + reoriented_image_filename + ' -setorient SAL -v 0', verbose=False)
        fname_images.append(reoriented_image_filename)
    return fname_images, orientation_images
Example no. 3
def check_and_correct_segmentation(fname_segmentation,
                                   fname_centerline,
                                   folder_output='',
                                   threshold_distance=5.0,
                                   remove_temp_files=1,
                                   verbose=0):
    """
    This function takes the outputs of isct_propseg (centerline and segmentation) and check if the centerline of the
    segmentation is coherent with the centerline provided by the isct_propseg, especially on the edges (related
    to issue #1074).
    Args:
        fname_segmentation: filename of binary segmentation
        fname_centerline: filename of binary centerline
        threshold_distance: threshold, in mm, beyond which centerlines are not coherent
        verbose:

    Returns: None
    """
    sct.printv('\nCheck consistency of segmentation...', verbose)
    # creating a temporary folder in which all temporary files will be placed and deleted afterwards
    path_tmp = sct.tmp_create(basename="propseg", verbose=verbose)
    from sct_convert import convert
    convert(fname_segmentation,
            os.path.join(path_tmp, "tmp.segmentation.nii.gz"),
            squeeze_data=False,
            verbose=0)
    convert(fname_centerline,
            os.path.join(path_tmp, "tmp.centerline.nii.gz"),
            squeeze_data=False,
            verbose=0)
    fname_seg_absolute = os.path.abspath(fname_segmentation)

    # go to tmp folder
    curdir = os.getcwd()
    os.chdir(path_tmp)

    # convert segmentation image to RPI
    im_input = Image('tmp.segmentation.nii.gz')
    image_input_orientation = orientation(im_input, get=True, verbose=False)

    sct_image.main(
        "-i tmp.segmentation.nii.gz -setorient RPI -o tmp.segmentation_RPI.nii.gz -v 0"
        .split())
    sct_image.main(
        "-i tmp.centerline.nii.gz -setorient RPI -o tmp.centerline_RPI.nii.gz -v 0"
        .split())

    # go through segmentation image, and compare with centerline from propseg
    im_seg = Image('tmp.segmentation_RPI.nii.gz')
    im_centerline = Image('tmp.centerline_RPI.nii.gz')

    # Get size of data
    sct.printv('\nGet data dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = im_seg.dim

    # extraction of centerline provided by isct_propseg and computation of center of mass for each slice
    # the centerline is defined as the center of the tubular mesh output by propseg.
    centerline, key_centerline = {}, []
    for i in range(nz):
        slice = im_centerline.data[:, :, i]
        if np.any(slice):
            x_centerline, y_centerline = ndi.measurements.center_of_mass(slice)
            centerline[str(i)] = [x_centerline, y_centerline]
            key_centerline.append(i)

    minz_centerline = np.min(key_centerline)
    maxz_centerline = np.max(key_centerline)
    mid_slice = int((minz_centerline + maxz_centerline) / 2)  # slice index at the middle of the detected centerline

    # for each slice of the segmentation, check if only one object is present. If not, remove the slice from segmentation.
    # If only one object (the spinal cord) is present in the slice, check if its center of mass is close to the centerline of isct_propseg.
    slices_to_remove = [False] * nz  # flag that decides if the slice must be removed
    for i in range(minz_centerline, maxz_centerline + 1):
        # extraction of slice
        slice = im_seg.data[:, :, i]
        distance = -1
        label_objects, nb_labels = ndi.label(slice)  # count binary objects in the slice
        if nb_labels > 1:  # if there is more than one object in the slice, the slice is removed from the segmentation
            slices_to_remove[i] = True
        elif nb_labels == 1:  # check if the centerline is coherent with the one from isct_propseg
            x_centerline, y_centerline = ndi.measurements.center_of_mass(slice)
            slice_nearest_coord = min(key_centerline, key=lambda x: abs(x - i))
            coord_nearest_coord = centerline[str(slice_nearest_coord)]
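            # 3D Euclidean distance, in mm, between this slice's center of mass and the nearest propseg centerline point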
            distance = np.sqrt(((x_centerline - coord_nearest_coord[0]) * px) ** 2 +
                               ((y_centerline - coord_nearest_coord[1]) * py) ** 2 +
                               ((i - slice_nearest_coord) * pz) ** 2)

            if distance >= threshold_distance:  # threshold must be adjusted, default is 5 mm
                slices_to_remove[i] = True

    # Scan the removal flags to keep a single continuous segment of segmentation
    # Method:
    # starting from the mid-centerline slice (in both directions), once a slice flagged for removal is encountered,
    # all following slices in that direction are flagged as well
    slice_to_change = False
    for i in range(mid_slice, nz):
        if slice_to_change:
            slices_to_remove[i] = True
        elif slices_to_remove[i]:
            slice_to_change = True

    slice_to_change = False
    for i in range(mid_slice, 0, -1):
        if slice_to_change:
            slices_to_remove[i] = True
        elif slices_to_remove[i]:
            slice_to_change = True

    for i in range(0, nz):
        # remove the slice
        if slices_to_remove[i]:
            im_seg.data[:, :, i] *= 0

    # saving the image
    im_seg.setFileName('tmp.segmentation_RPI_c.nii.gz')
    im_seg.save()

    # replacing old segmentation with the corrected one
    sct_image.main(
        '-i tmp.segmentation_RPI_c.nii.gz -setorient {} -o {} -v 0'.format(
            image_input_orientation, fname_seg_absolute).split())

    os.chdir(curdir)

    # display information about how much of the segmentation has been corrected

    # remove temporary files
    if remove_temp_files:
        # sct.printv("\nRemove temporary files...", verbose)
        sct.rmtree(path_tmp)
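
A minimal usage sketch of the function above; the filenames are placeholders, not files shipped with SCT:

check_and_correct_segmentation('t2_seg.nii.gz', 't2_centerline.nii.gz',  # hypothetical propseg outputs
                               folder_output='.', threshold_distance=5.0,
                               remove_temp_files=1, verbose=1)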
Example no. 4
def main():

    # initialization
    fname_mask = ''

    # Get parser info
    parser = get_parser()
    arguments = parser.parse(sys.argv[1:])
    fname_data = arguments['-i']
    fname_mask = arguments['-m']
    vert_label_fname = arguments["-vertfile"]
    vert_levels = arguments["-vert"]
    slices_of_interest = arguments["-z"]
    index_vol = arguments['-vol']
    method = arguments["-method"]
    remove_temp_files = int(arguments['-r'])
    verbose = int(arguments['-v'])

    # Check if data are in RPI
    input_im = Image(fname_data)
    input_orient = get_orientation(input_im)

    # If orientation is not RPI, change to RPI
    if input_orient != 'RPI':
        sct.printv(
            '\nCreate temporary folder to change the orientation of the NIFTI files into RPI...',
            verbose)
        path_tmp = sct.tmp_create()
        # change orientation and load data
        sct.printv('\nChange input image orientation and load it...', verbose)
        input_im_rpi = orientation(input_im,
                                   ori='RPI',
                                   set=True,
                                   fname_out=os.path.join(
                                       path_tmp, "input_RPI.nii"))
        input_data = input_im_rpi.data
        # Do the same for the mask
        sct.printv('\nChange mask orientation and load it...', verbose)
        mask_im_rpi = orientation(Image(fname_mask),
                                  ori='RPI',
                                  set=True,
                                  fname_out=os.path.join(
                                      path_tmp, "mask_RPI.nii"))
        mask_data = mask_im_rpi.data
        # Do the same for vertebral labeling if present
        if vert_levels != 'None':
            sct.printv(
                '\nChange vertebral labeling file orientation and load it...',
                verbose)
            vert_label_im_rpi = orientation(Image(vert_label_fname),
                                            ori='RPI',
                                            set=True,
                                            fname_out=os.path.join(
                                                path_tmp,
                                                "vert_labeling_RPI.nii"))
            vert_labeling_data = vert_label_im_rpi.data
        # Remove the temporary folder used to change the NIFTI files orientation into RPI
        if remove_temp_files:
            sct.printv('\nRemove the temporary folder...', verbose)
            sct.rmtree(path_tmp, True)
    else:
        # Load data
        sct.printv('\nLoad data...', verbose)
        input_data = input_im.data
        mask_data = Image(fname_mask).data
        if vert_levels != 'None':
            vert_labeling_data = Image(vert_label_fname).data
    sct.printv('\tDone.', verbose)

    # Get slices corresponding to vertebral levels
    if vert_levels != 'None':
        from sct_extract_metric import get_slices_matching_with_vertebral_levels
        slices_of_interest, actual_vert_levels, warning_vert_levels = get_slices_matching_with_vertebral_levels(
            mask_data, vert_levels, vert_labeling_data, verbose)

    # Remove slices that were not selected
    if slices_of_interest == 'None':
        slices_of_interest = '0:' + str(mask_data.shape[2] - 1)
    slices_boundary = slices_of_interest.split(':')
    slices_of_interest_list = range(int(slices_boundary[0]),
                                    int(slices_boundary[1]) + 1)
    # Crop
    input_data = input_data[:, :, slices_of_interest_list, :]
    mask_data = mask_data[:, :, slices_of_interest_list]

    # if user selected all slices (-vol -1), then assign index_vol
    if index_vol[0] == -1:
        index_vol = range(0, input_data.shape[3], 1)

    # Get signal and noise
    indexes_roi = np.where(mask_data == 1)
    if method == 'mult':
        # get voxels in ROI to obtain a (x*y*z)*t 2D matrix
        input_data_in_roi = input_data[indexes_roi]
        # compute the signal as the mean across ROI voxels and time, and the noise as the ROI-average of the temporal STD
        signal = np.mean(input_data_in_roi[:, index_vol])
        std_input_temporal = np.std(input_data_in_roi[:, index_vol], 1)
        noise = np.mean(std_input_temporal)
    elif method == 'diff':
        # if user did not select two volumes, then exit with error
        if not len(index_vol) == 2:
            sct.printv(
                'ERROR: ' + str(len(index_vol)) +
                ' volumes were specified. Method "diff" should be used with exactly two volumes.',
                1, 'error')
        data_1 = input_data[:, :, :, index_vol[0]]
        data_2 = input_data[:, :, :, index_vol[1]]
        # compute voxel-average of voxelwise sum
        signal = np.mean(np.add(data_1[indexes_roi], data_2[indexes_roi]))
        # compute voxel-STD of voxelwise subtraction, multiplied by sqrt(2) as described in equation 7 of Dietrich et al.
        noise = np.std(np.subtract(data_1[indexes_roi],
                                   data_2[indexes_roi])) * np.sqrt(2)

    # compute SNR
    SNR = signal / noise

    # Display result
    sct.printv('\nSNR_' + method + ' = ' + str(SNR) + '\n', type='info')
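
The two SNR estimators above follow Dietrich et al.; a minimal NumPy sketch of both on a toy ROI time series (all names below are illustrative, not part of the script):

import numpy as np

rng = np.random.default_rng(0)
roi_ts = 100 + rng.normal(0, 5, size=(50, 2))  # toy (n_voxels, n_volumes) matrix restricted to the ROI

# 'mult': signal = mean over ROI voxels and time, noise = ROI-average of the temporal STD
snr_mult = np.mean(roi_ts) / np.mean(np.std(roi_ts, axis=1))

# 'diff' (exactly two volumes): signal = mean of the sum image, noise = STD of the difference image * sqrt(2)
snr_diff = np.mean(roi_ts[:, 0] + roi_ts[:, 1]) / (np.std(roi_ts[:, 0] - roi_ts[:, 1]) * np.sqrt(2))
print(snr_mult, snr_diff)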
Example no. 5
def run_main():
    sct.start_stream_logger()
    parser = get_parser()
    args = sys.argv[1:]
    arguments = parser.parse(args)

    # Input filename
    fname_input_data = arguments["-i"]
    fname_data = os.path.abspath(fname_input_data)

    # Method used
    method = 'optic'
    if "-method" in arguments:
        method = arguments["-method"]

    # Contrast type
    contrast_type = ''
    if "-c" in arguments:
        contrast_type = arguments["-c"]
    if method == 'optic' and not contrast_type:
        # Contrast type is mandatory when using the OptiC method
        error = 'ERROR: -c is a mandatory argument when using Optic method.'
        sct.printv(error, type='error')
        return

    # Gap between slices
    interslice_gap = 10.0
    if "-gap" in arguments:
        interslice_gap = float(arguments["-gap"])

    # Output folder
    if "-ofolder" in arguments:
        folder_output = sct.slash_at_the_end(arguments["-ofolder"], slash=1)
    else:
        folder_output = './'

    # Remove temporary files
    remove_temp_files = True
    if "-r" in arguments:
        remove_temp_files = bool(int(arguments["-r"]))

    # Outputs a ROI file
    output_roi = False
    if "-roi" in arguments:
        output_roi = bool(int(arguments["-roi"]))

    # Verbosity
    verbose = 0
    if "-v" in arguments:
        verbose = int(arguments["-v"])

    if method == 'viewer':
        path_data, file_data, ext_data = sct.extract_fname(fname_data)

        # create temporary folder
        temp_folder = sct.TempFolder()
        temp_folder.copy_from(fname_data)
        temp_folder.chdir()

        # make sure image is in SAL orientation, as it is the orientation used by the viewer
        image_input = Image(fname_data)
        image_input_orientation = orientation(image_input,
                                              get=True,
                                              verbose=False)
        reoriented_image_filename = sct.add_suffix(file_data + ext_data,
                                                   "_SAL")
        cmd_image = 'sct_image -i "%s" -o "%s" -setorient SAL -v 0' % (
            fname_data, reoriented_image_filename)
        sct.run(cmd_image, verbose=False)

        # extract points manually using the viewer
        fname_points = viewer_centerline(image_fname=reoriented_image_filename,
                                         interslice_gap=interslice_gap,
                                         verbose=verbose)

        if fname_points is not None:
            image_points_RPI = sct.add_suffix(fname_points, "_RPI")
            cmd_image = 'sct_image -i "%s" -o "%s" -setorient RPI -v 0' % (
                fname_points, image_points_RPI)
            sct.run(cmd_image, verbose=False)

            image_input_reoriented = Image(image_points_RPI)

            # fit centerline, smooth it and return the first derivative (in physical space)
            x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
                image_points_RPI,
                algo_fitting='nurbs',
                nurbs_pts_number=3000,
                phys_coordinates=True,
                verbose=verbose,
                all_slices=False)
            centerline = Centerline(x_centerline_fit, y_centerline_fit,
                                    z_centerline, x_centerline_deriv,
                                    y_centerline_deriv, z_centerline_deriv)

            # average centerline coordinates over slices of the image
            x_centerline_fit_rescorr, y_centerline_fit_rescorr, z_centerline_rescorr, x_centerline_deriv_rescorr, y_centerline_deriv_rescorr, z_centerline_deriv_rescorr = centerline.average_coordinates_over_slices(
                image_input_reoriented)

            # compute z_centerline in image coordinates for usage in vertebrae mapping
            voxel_coordinates = image_input_reoriented.transfo_phys2pix([[
                x_centerline_fit_rescorr[i], y_centerline_fit_rescorr[i],
                z_centerline_rescorr[i]
            ] for i in range(len(z_centerline_rescorr))])
            x_centerline_voxel = [coord[0] for coord in voxel_coordinates]
            y_centerline_voxel = [coord[1] for coord in voxel_coordinates]
            z_centerline_voxel = [coord[2] for coord in voxel_coordinates]

            # compute z_centerline in image coordinates with continuous precision
            voxel_coordinates = image_input_reoriented.transfo_phys2continuouspix(
                [[
                    x_centerline_fit_rescorr[i], y_centerline_fit_rescorr[i],
                    z_centerline_rescorr[i]
                ] for i in range(len(z_centerline_rescorr))])
            x_centerline_voxel_cont = [coord[0] for coord in voxel_coordinates]
            y_centerline_voxel_cont = [coord[1] for coord in voxel_coordinates]
            z_centerline_voxel_cont = [coord[2] for coord in voxel_coordinates]

            # Create an image with the centerline
            image_input_reoriented.data *= 0
            min_z_index, max_z_index = int(round(min(z_centerline_voxel))), int(round(max(z_centerline_voxel)))
            for iz in range(min_z_index, max_z_index + 1):
                # if index is out of bounds here for hanning: either the segmentation has holes or labels have been added to the file
                image_input_reoriented.data[int(round(x_centerline_voxel[iz - min_z_index])),
                                            int(round(y_centerline_voxel[iz - min_z_index])),
                                            int(iz)] = 1

            # Write the centerline image
            sct.printv('\nWrite NIFTI volumes...', verbose)
            fname_centerline_oriented = file_data + '_centerline' + ext_data
            image_input_reoriented.setFileName(fname_centerline_oriented)
            image_input_reoriented.changeType('uint8')
            image_input_reoriented.save()

            sct.printv('\nSet to original orientation...', verbose)
            sct.run('sct_image -i ' + fname_centerline_oriented +
                    ' -setorient ' + image_input_orientation + ' -o ' +
                    fname_centerline_oriented)

            # create a txt file with the centerline
            fname_centerline_oriented_txt = file_data + '_centerline.txt'
            file_results = open(fname_centerline_oriented_txt, 'w')
            for i in range(min_z_index, max_z_index + 1):
                file_results.write(
                    str(int(i)) + ' ' +
                    str(round(x_centerline_voxel_cont[i - min_z_index], 2)) +
                    ' ' +
                    str(round(y_centerline_voxel_cont[i - min_z_index], 2)) +
                    '\n')
            file_results.close()

            fname_centerline_oriented_roi = optic.centerline2roi(
                fname_image=fname_centerline_oriented,
                folder_output='./',
                verbose=verbose)

            # return to initial folder
            temp_folder.chdir_undo()

            # copy result to output folder
            shutil.copy(temp_folder.get_path() + fname_centerline_oriented,
                        folder_output)
            shutil.copy(temp_folder.get_path() + fname_centerline_oriented_txt,
                        folder_output)
            if output_roi:
                shutil.copy(
                    temp_folder.get_path() + fname_centerline_oriented_roi,
                    folder_output)
            centerline_filename = folder_output + fname_centerline_oriented

        else:
            centerline_filename = 'error'

        # delete temporary folder
        if remove_temp_files:
            temp_folder.cleanup()

    else:
        # condition on verbose when using OptiC
        if verbose == 1:
            verbose = 2

        # OptiC models
        path_script = os.path.dirname(__file__)
        path_sct = os.path.dirname(path_script)
        optic_models_path = os.path.join(path_sct, 'data/optic_models',
                                         '{}_model'.format(contrast_type))

        # Execute OptiC binary
        _, centerline_filename = optic.detect_centerline(
            image_fname=fname_data,
            contrast_type=contrast_type,
            optic_models_path=optic_models_path,
            folder_output=folder_output,
            remove_temp_files=remove_temp_files,
            output_roi=output_roi,
            verbose=verbose)

    sct.printv('\nDone! To view results, type:', verbose)
    sct.printv(
        "fslview " + fname_input_data + " " + centerline_filename +
        " -l Red -b 0,1 -t 0.7 &\n", verbose, 'info')
Example no. 6
def detect_centerline(image_fname,
                      contrast_type,
                      optic_models_path,
                      folder_output,
                      remove_temp_files=False,
                      init_option=None,
                      output_roi=False,
                      verbose=0):
    """This method will use the OptiC to detect the centerline.

    :param image_fname: The input image filename.
    :param init_option: Axial slice where the propagation starts.
    :param contrast_type: The contrast type.
    :param optic_models_path: The path with the Optic model files.
    :param folder_output: The OptiC output folder.
    :param remove_temp_files: Remove the temporary created files.
    :param verbose: Adjusts the verbosity of the logging.

    :returns: The OptiC output filename.
    """

    image_input = Image(image_fname)
    path_data, file_data, ext_data = sct.extract_fname(image_fname)

    sct.printv('Detecting the spinal cord using OptiC', verbose=verbose)
    image_input_orientation = orientation(image_input, get=True, verbose=False)

    temp_folder = sct.TempFolder()
    temp_folder.copy_from(image_fname)
    curdir = os.getcwd()
    temp_folder.chdir()

    # convert image data type to 16-bit integer, as required by opencv (backend in OptiC); intensities are rescaled to the uint16 range below
    image_int_filename = sct.add_suffix(file_data + ext_data, "_int16")
    img = Image(image_fname)
    img_int16 = img.copy()

    # rescale intensity
    min_out = np.iinfo('uint16').min
    max_out = np.iinfo('uint16').max
    min_in = np.nanmin(img.data)
    max_in = np.nanmax(img.data)
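    # map the input intensity range [min_in, max_in] onto the full uint16 range before casting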
    data_rescaled = img.data * (max_out - min_out) / (max_in - min_in)
    img_int16.data = data_rescaled - (data_rescaled.min() - min_out)

    # change data type
    img_int16.changeType('uint16')
    img_int16.setFileName(image_int_filename)
    img_int16.save()
    del img, img_int16

    # reorient the input image to RPI + convert to .nii
    reoriented_image_filename = sct.add_suffix(image_int_filename, "_RPI")
    img_filename = ''.join(sct.extract_fname(reoriented_image_filename)[:2])
    reoriented_image_filename_nii = img_filename + '.nii'
    cmd_reorient = 'sct_image -i "%s" -o "%s" -setorient RPI -v 0' % \
                (image_int_filename, reoriented_image_filename_nii)
    sct.run(cmd_reorient, verbose=0)

    image_rpi_init = Image(reoriented_image_filename_nii)
    nxr, nyr, nzr, ntr, pxr, pyr, pzr, ptr = image_rpi_init.dim
    if init_option is not None:
        if init_option > 1:
            init_option /= (nzr - 1)

    # call the OptiC method to generate the spinal cord centerline
    optic_input = img_filename
    optic_filename = img_filename + '_optic'

    os.environ["FSLOUTPUTTYPE"] = "NIFTI_PAIR"
    cmd_optic = 'isct_spine_detect -ctype=dpdt -lambda=1 "%s" "%s" "%s"' % \
                (optic_models_path, optic_input, optic_filename)
    sct.run(cmd_optic, verbose=0)

    # convert .img and .hdr files to .nii.gz
    optic_hdr_filename = img_filename + '_optic_ctr.hdr'
    centerline_optic_RPI_filename = sct.add_suffix(file_data + ext_data,
                                                   "_centerline_optic_RPI")
    img = nib.load(optic_hdr_filename)
    nib.save(img, centerline_optic_RPI_filename)

    # reorient the output image to initial orientation
    centerline_optic_filename = sct.add_suffix(file_data + ext_data,
                                               "_centerline_optic")
    cmd_reorient = 'sct_image -i "%s" -o "%s" -setorient "%s" -v 0' % \
                   (centerline_optic_RPI_filename,
                    centerline_optic_filename,
                    image_input_orientation)
    sct.run(cmd_reorient, verbose=0)

    # copy centerline to parent folder
    folder_output_from_temp = folder_output
    if not os.path.isabs(folder_output):
        folder_output_from_temp = os.path.join(curdir, folder_output)

    sct.printv('Copy output to ' + folder_output, verbose=0)
    sct.copy(centerline_optic_filename, folder_output_from_temp)

    if output_roi:
        fname_roi_centerline = centerline2roi(
            fname_image=centerline_optic_RPI_filename,
            folder_output=folder_output_from_temp,
            verbose=verbose)

        # Note: the .roi file is defined in RPI orientation. To be used, it must be applied on the original image with
        # a RPI orientation. For this reason, this script also outputs the input image in RPI orientation
        sct.copy(reoriented_image_filename_nii, folder_output_from_temp)

    # return to initial folder
    temp_folder.chdir_undo()

    # delete temporary folder
    if remove_temp_files:
        temp_folder.cleanup()

    return init_option, os.path.join(folder_output, centerline_optic_filename)
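
A minimal usage sketch of detect_centerline; the image path and model folder are placeholders:

_, fname_ctr = detect_centerline(image_fname='t2.nii.gz',  # hypothetical input image
                                 contrast_type='t2',
                                 optic_models_path='/path/to/optic_models/t2_model',  # placeholder model folder
                                 folder_output='.',
                                 remove_temp_files=True,
                                 verbose=1)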
Example no. 7
    # check that the input image is 3D; otherwise the ITK image reader will split the 4D image into 3D volumes and only take the first one.
    from msct_image import Image
    image_input = Image(fname_data)
    nx, ny, nz, nt, px, py, pz, pt = image_input.dim
    if nt > 1:
        sct.printv(
            'ERROR: your input image needs to be 3D in order to be segmented.',
            1, 'error')

    path_data, file_data, ext_data = sct.extract_fname(fname_data)

    # if the centerline or mask is requested via the viewer
    if use_viewer:
        # make sure image is in SAL orientation, as it is the orientation used by PropSeg
        image_input_orientation = orientation(image_input,
                                              get=True,
                                              verbose=False)
        reoriented_image_filename = 'tmp.' + sct.add_suffix(
            file_data + ext_data, "_SAL")
        path_tmp_viewer = sct.tmp_create(verbose=verbose)
        cmd_image = 'sct_image -i "%s" -o "%s" -setorient SAL -v 0' % (
            fname_data, os.path.join(path_tmp_viewer,
                                     reoriented_image_filename))
        sct.run(cmd_image, verbose=False)

        from sct_viewer import ClickViewerPropseg
        image_input_reoriented = Image(path_tmp_viewer +
                                       reoriented_image_filename)
        viewer = ClickViewerPropseg(image_input_reoriented)
        if use_viewer == "mask":
            viewer.input_type = 'mask'
Example no. 8
def check_and_correct_segmentation(fname_segmentation, fname_centerline, threshold_distance=5.0, remove_temp_files=1, verbose=0):
    """
    This function takes the outputs of isct_propseg (centerline and segmentation) and check if the centerline of the
    segmentation is coherent with the centerline provided by the isct_propseg, especially on the edges (related
    to issue #1074).
    Args:
        fname_segmentation: filename of binary segmentation
        fname_centerline: filename of binary centerline
        threshold_distance: threshold, in mm, beyond which centerlines are not coherent
        verbose:

    Returns: None
    """

    # creating a temporary folder in which all temporary files will be placed and deleted afterwards
    path_tmp = sct.tmp_create(verbose=verbose)
    shutil.copy(fname_segmentation, path_tmp + 'tmp.segmentation.nii.gz')
    shutil.copy(fname_centerline, path_tmp + 'tmp.centerline.nii.gz')

    # go to tmp folder
    os.chdir(path_tmp)

    # convert segmentation image to RPI
    im_input = Image('tmp.segmentation.nii.gz')
    image_input_orientation = orientation(im_input, get=True, verbose=False)
    sct.run('sct_image -i tmp.segmentation.nii.gz -setorient RPI -o tmp.segmentation_RPI.nii.gz', verbose)
    sct.run('sct_image -i tmp.centerline.nii.gz -setorient RPI -o tmp.centerline_RPI.nii.gz', verbose)

    # go through segmentation image, and compare with centerline from propseg
    im_seg = Image('tmp.segmentation_RPI.nii.gz')
    im_centerline = Image('tmp.centerline_RPI.nii.gz')

    # Get size of data
    sct.printv('\nGet data dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = im_seg.dim

    # extraction of centerline provided by isct_propseg and computation of center of mass for each slice
    # the centerline is defined as the center of the tubular mesh output by propseg.
    centerline, key_centerline = {}, []
    for i in range(nz):
        slice = im_centerline.data[:, :, i]
        if np.any(slice):
            x_centerline, y_centerline = ndi.measurements.center_of_mass(slice)
            centerline[str(i)] = [x_centerline, y_centerline]
            key_centerline.append(i)

    # for each slice of the segmentation, check if only one object is present. If not, remove the slice from segmentation.
    # If only one object (the spinal cord) is present in the slice, check if its center of mass is close to the centerline of isct_propseg.
    slices_to_remove = [False] * nz  # flag that decides if the slice must be removed
    for i in range(nz):
        # extraction of slice
        slice = im_seg.data[:, :, i]
        distance = -1
        label_objects, nb_labels = ndi.label(slice)  # count binary objects in the slice
        if nb_labels > 1:  # if there is more than one object in the slice, the slice is removed from the segmentation
            slices_to_remove[i] = True
        elif nb_labels == 1:  # check if the centerline is coherent with the one from isct_propseg
            x_centerline, y_centerline = ndi.measurements.center_of_mass(slice)
            slice_nearest_coord = min(key_centerline, key=lambda x:abs(x-i))
            coord_nearest_coord = centerline[str(slice_nearest_coord)]
            distance = np.sqrt(((x_centerline - coord_nearest_coord[0]) * px) ** 2 +
                               ((y_centerline - coord_nearest_coord[1]) * py) ** 2 +
                               ((i - slice_nearest_coord) * pz) ** 2)

            if distance >= threshold_distance:  # threshold must be adjusted, default is 5 mm
                slices_to_remove[i] = True

    # Scan the removal flags to keep a single continuous segment of segmentation
    # Method:
    # starting from the middle slice (in both directions), once a slice flagged for removal is encountered,
    # all following slices in that direction are flagged as well
    slice_to_change = False
    for i in range(nz // 2, nz):
        if slice_to_change:
            slices_to_remove[i] = True
        elif slices_to_remove[i]:
            slices_to_remove[i] = True
            slice_to_change = True
    slice_to_change = False
    for i in range(nz // 2, -1, -1):
        if slice_to_change:
            slices_to_remove[i] = True
        elif slices_to_remove[i]:
            slices_to_remove[i] = True
            slice_to_change = True

    for i in range(nz):
        # remove the slice
        if slices_to_remove[i]:
            im_seg.data[:, :, i] *= 0

    # saving the image
    im_seg.setFileName('tmp.segmentation_RPI_c.nii.gz')
    im_seg.save()

    # replacing old segmentation with the corrected one
    sct.run('sct_image -i tmp.segmentation_RPI_c.nii.gz -setorient ' + image_input_orientation + ' -o ../' + fname_segmentation, verbose)

    os.chdir('..')

    # display information about how much of the segmentation has been corrected

    # remove temporary files
    if remove_temp_files:
        sct.printv("\nRemove temporary files...", verbose)
        shutil.rmtree(path_tmp, ignore_errors=True)
Example no. 9
    if "-alpha" in arguments:
        cmd += " -alpha " + str(arguments["-alpha"])

    # check that the input image is 3D; otherwise the ITK image reader will split the 4D image into 3D volumes and only take the first one.
    from msct_image import Image
    image_input = Image(input_filename)
    nx, ny, nz, nt, px, py, pz, pt = image_input.dim
    if nt > 1:
        sct.printv('ERROR: your input image needs to be 3D in order to be segmented.', 1, 'error')

    path_fname, file_fname, ext_fname = sct.extract_fname(input_filename)

    # if the centerline or mask is requested via the viewer
    if use_viewer:
        # make sure image is in SAL orientation, as it is the orientation used by PropSeg
        image_input_orientation = orientation(image_input, get=True, verbose=False)
        reoriented_image_filename = 'tmp.' + sct.add_suffix(file_fname + ext_fname, "_SAL")
        path_tmp_viewer = sct.tmp_create(verbose=verbose)
        sct.run('sct_image -i ' + input_filename + ' -o ' + path_tmp_viewer + reoriented_image_filename + ' -setorient SAL -v 0', verbose=False)

        from sct_viewer import ClickViewer
        image_input_reoriented = Image(path_tmp_viewer + reoriented_image_filename)
        viewer = ClickViewer(image_input_reoriented)
        viewer.help_url = 'https://sourceforge.net/p/spinalcordtoolbox/wiki/correction_PropSeg/attachment/propseg_viewer.png'
        if use_viewer == "mask":
            viewer.number_of_slices = 3
            viewer.gap_inter_slice = int(10 / pz)
            if viewer.gap_inter_slice == 0:
                viewer.gap_inter_slice = 1
            viewer.calculate_list_slices()
        #else: