def main():
    """Main function."""
    parser = get_parser()
    args = parser.parse_args(args=None if sys.argv[1:] else ['--help'])

    fname_image = os.path.abspath(args.i)
    contrast_type = args.c

    ctr_algo = args.centerline

    if args.brain is None:
        if contrast_type in ['t2s', 'dwi']:
            brain_bool = False
        if contrast_type in ['t1', 't2']:
            brain_bool = True
    else:
        brain_bool = bool(args.brain)

    kernel_size = args.kernel
    if kernel_size == '3d' and contrast_type == 'dwi':
        kernel_size = '2d'
        sct.printv('3D kernel model for dwi contrast is not available. 2D kernel model is used instead.',
                   type="warning")


    if ctr_algo == 'file' and args.file_centerline is None:
        sct.printv('Please use the flag -file_centerline to indicate the centerline filename.', 1, 'warning')
        sys.exit(1)

    if args.file_centerline is not None:
        manual_centerline_fname = args.file_centerline
        ctr_algo = 'file'
    else:
        manual_centerline_fname = None

    remove_temp_files = args.r
    verbose = args.v
    sct.init_sct(log_level=verbose, update=True)  # Update log level

    path_qc = args.qc
    qc_dataset = args.qc_dataset
    qc_subject = args.qc_subject
    output_folder = args.ofolder

    algo_config_stg = '\nMethod:'
    algo_config_stg += '\n\tCenterline algorithm: ' + str(ctr_algo)
    algo_config_stg += '\n\tAssumes brain section included in the image: ' + str(brain_bool)
    algo_config_stg += '\n\tDimension of the segmentation kernel convolutions: ' + kernel_size + '\n'
    sct.printv(algo_config_stg)

    # Segment image
    from spinalcordtoolbox.image import Image
    from spinalcordtoolbox.deepseg_sc.core import deep_segmentation_spinalcord
    from spinalcordtoolbox.reports.qc import generate_qc

    im_image = Image(fname_image)
    # note: below we pass im_image.copy() otherwise the field absolutepath becomes None after execution of this function
    im_seg, im_image_RPI_upsamp, im_seg_RPI_upsamp, im_labels_viewer, im_ctr = \
        deep_segmentation_spinalcord(im_image.copy(), contrast_type, ctr_algo=ctr_algo,
                                     ctr_file=manual_centerline_fname, brain_bool=brain_bool, kernel_size=kernel_size,
                                     remove_temp_files=remove_temp_files, verbose=verbose)

    # Save segmentation
    fname_seg = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_seg' +
                                             sct.extract_fname(fname_image)[2]))

    # copy q/sform from input image to output segmentation
    im_seg.copy_qform_from_ref(im_image)
    im_seg.save(fname_seg)

    if ctr_algo == 'viewer':
        # Save labels
        fname_labels = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_labels-centerline' +
                                               sct.extract_fname(fname_image)[2]))
        im_labels_viewer.save(fname_labels)

    if verbose == 2:
        # Save ctr
        fname_ctr = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_centerline' +
                                               sct.extract_fname(fname_image)[2]))
        im_ctr.save(fname_ctr)

    if path_qc is not None:
        generate_qc(fname_image, fname_seg=fname_seg, args=sys.argv[1:], path_qc=os.path.abspath(path_qc),
                    dataset=qc_dataset, subject=qc_subject, process='sct_deepseg_sc')
    sct.display_viewer_syntax([fname_image, fname_seg], colormaps=['gray', 'red'], opacities=['', '0.7'])
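
# Hedged usage sketch for the main() above (hypothetical file names): the function is driven
# entirely by the argparse parser, so a typical invocation looks like
#
#   sct_deepseg_sc -i t2.nii.gz -c t2 -ofolder ./seg -qc ./qc
#
# which writes t2_seg.nii.gz into ./seg and, since -qc is provided, a QC report via generate_qc().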

# Example #2

def interpolate_im_to_ref(im_input, im_input_sc, new_res=0.3, sq_size_size_mm=22.5, interpolation_mode=3):
    nx, ny, nz, nt, px, py, pz, pt = im_input.dim

    im_input_sc = im_input_sc.copy()
    im_input = im_input.copy()

    # keep only spacing and origin in qform to avoid rotation issues
    input_qform = im_input.hdr.get_qform()
    for i in range(4):
        for j in range(4):
            if i != j and j != 3:
                input_qform[i, j] = 0

    im_input.hdr.set_qform(input_qform)
    im_input.hdr.set_sform(input_qform)
    im_input_sc.hdr = im_input.hdr

    sq_size = int(sq_size_size_mm / new_res)
    # create a reference image : square of ones
    im_ref = Image(np.ones((sq_size, sq_size, 1), dtype=int), dim=(sq_size, sq_size, 1, 0, new_res, new_res, pz, 0), orientation='RPI')

    # copy input qform matrix to reference image
    im_ref.hdr.set_qform(im_input.hdr.get_qform())
    im_ref.hdr.set_sform(im_input.hdr.get_sform())

    # set correct header to reference image
    im_ref.hdr.set_data_shape((sq_size, sq_size, 1))
    im_ref.hdr.set_zooms((new_res, new_res, pz))

    # save image to set orientation to RPI (not properly done at the creation of the image)
    fname_ref = 'im_ref.nii.gz'
    im_ref.save(fname_ref).change_orientation("RPI")

    # set header origin to zero to get physical coordinates of the center of the square
    im_ref.hdr.as_analyze_map()['qoffset_x'] = 0
    im_ref.hdr.as_analyze_map()['qoffset_y'] = 0
    im_ref.hdr.as_analyze_map()['qoffset_z'] = 0
    im_ref.hdr.set_sform(im_ref.hdr.get_qform())
    im_ref.hdr.set_qform(im_ref.hdr.get_qform())
    [[x_square_center_phys, y_square_center_phys, z_square_center_phys]] = im_ref.transfo_pix2phys(coordi=[[int(sq_size / 2), int(sq_size / 2), 0]])

    list_interpolate_images = []
    # iterate on z dimension of input image
    for iz in range(nz):
        # copy reference image: one reference image per slice
        im_ref_slice_iz = im_ref.copy()

        # get center of mass of SC for slice iz
        x_seg, y_seg = (im_input_sc.data[:, :, iz] > 0).nonzero()
        x_center, y_center = np.mean(x_seg), np.mean(y_seg)
        [[x_center_phys, y_center_phys, z_center_phys]] = im_input_sc.transfo_pix2phys(coordi=[[x_center, y_center, iz]])

        # center reference image on SC for slice iz
        im_ref_slice_iz.hdr.as_analyze_map()['qoffset_x'] = x_center_phys - x_square_center_phys
        im_ref_slice_iz.hdr.as_analyze_map()['qoffset_y'] = y_center_phys - y_square_center_phys
        im_ref_slice_iz.hdr.as_analyze_map()['qoffset_z'] = z_center_phys
        im_ref_slice_iz.hdr.set_sform(im_ref_slice_iz.hdr.get_qform())
        im_ref_slice_iz.hdr.set_qform(im_ref_slice_iz.hdr.get_qform())

        # interpolate input image to reference image
        im_input_interpolate_iz = im_input.interpolate_from_image(im_ref_slice_iz, interpolation_mode=interpolation_mode, border='nearest')
        # reshape data to 2D if needed
        if len(im_input_interpolate_iz.data.shape) == 3:
            im_input_interpolate_iz.data = im_input_interpolate_iz.data.reshape(im_input_interpolate_iz.data.shape[:-1])
        # add slice to list
        list_interpolate_images.append(im_input_interpolate_iz)

    return list_interpolate_images
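
# Hedged usage sketch for interpolate_im_to_ref() (hypothetical file names): given an anatomical
# image and its cord segmentation, it returns one small square Image per axial slice, resampled
# to new_res mm and centred on the cord for that slice:
#
#   im_anat = Image('t2.nii.gz')
#   im_sc = Image('t2_seg.nii.gz')
#   square_slices = interpolate_im_to_ref(im_anat, im_sc, new_res=0.3, sq_size_size_mm=22.5)
#   # len(square_slices) equals the number of slices along z in im_anat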
def main(args=None):

    # Initialization
    param = Param()
    start_time = time.time()

    parser = get_parser()
    arguments = parser.parse_args(args=None if sys.argv[1:] else ['--help'])

    fname_anat = arguments.i
    fname_centerline = arguments.s
    param.algo_fitting = arguments.algo_fitting
    if arguments.smooth is not None:
        sigma = arguments.smooth
    remove_temp_files = arguments.r
    verbose = int(arguments.v)
    init_sct(log_level=verbose, update=True)  # Update log level

    # Display arguments
    printv('\nCheck input arguments...')
    printv('  Volume to smooth .................. ' + fname_anat)
    printv('  Centerline ........................ ' + fname_centerline)
    printv('  Sigma (mm) ........................ ' + str(sigma))
    printv('  Verbose ........................... ' + str(verbose))

    # Check that input is 3D:
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_anat).dim
    dim = 4  # by default, will be adjusted later
    if nt == 1:
        dim = 3
    if nz == 1:
        dim = 2
    if dim == 4:
        printv('WARNING: the input image is 4D, please split it into 3D volumes before smoothing the spinal cord using:\n'
               'sct_image -i ' + fname_anat + ' -split t -o ' + fname_anat, verbose, 'warning')
        printv('4D images not supported, aborting ...', verbose, 'error')

    # Extract path/file/extension
    path_anat, file_anat, ext_anat = extract_fname(fname_anat)
    path_centerline, file_centerline, ext_centerline = extract_fname(fname_centerline)

    path_tmp = tmp_create(basename="smooth_spinalcord")

    # Copying input data to tmp folder
    printv('\nCopying input data to tmp folder and convert to nii...', verbose)
    copy(fname_anat, os.path.join(path_tmp, "anat" + ext_anat))
    copy(fname_centerline, os.path.join(path_tmp, "centerline" + ext_centerline))

    # go to tmp folder
    curdir = os.getcwd()
    os.chdir(path_tmp)

    # convert to nii format
    convert('anat' + ext_anat, 'anat.nii')
    convert('centerline' + ext_centerline, 'centerline.nii')

    # Change orientation of the input image into RPI
    printv('\nOrient input volume to RPI orientation...')
    fname_anat_rpi = Image("anat.nii") \
        .change_orientation("RPI", generate_path=True) \
        .save() \
        .absolutepath

    # Change orientation of the input image into RPI
    printv('\nOrient centerline to RPI orientation...')
    fname_centerline_rpi = Image("centerline.nii") \
        .change_orientation("RPI", generate_path=True) \
        .save() \
        .absolutepath

    # Straighten the spinal cord
    # straighten segmentation
    printv('\nStraighten the spinal cord using centerline/segmentation...', verbose)
    cache_sig = cache_signature(input_files=[fname_anat_rpi, fname_centerline_rpi],
                                input_params={"x": "spline"})
    cachefile = os.path.join(curdir, "straightening.cache")
    if cache_valid(cachefile, cache_sig) and os.path.isfile(os.path.join(curdir, 'warp_curve2straight.nii.gz')) and os.path.isfile(os.path.join(curdir, 'warp_straight2curve.nii.gz')) and os.path.isfile(os.path.join(curdir, 'straight_ref.nii.gz')):
        # if they exist, copy them into current folder
        printv('Reusing existing warping field which seems to be valid', verbose, 'warning')
        copy(os.path.join(curdir, 'warp_curve2straight.nii.gz'), 'warp_curve2straight.nii.gz')
        copy(os.path.join(curdir, 'warp_straight2curve.nii.gz'), 'warp_straight2curve.nii.gz')
        copy(os.path.join(curdir, 'straight_ref.nii.gz'), 'straight_ref.nii.gz')
        # apply straightening
        run_proc(['sct_apply_transfo', '-i', fname_anat_rpi, '-w', 'warp_curve2straight.nii.gz', '-d', 'straight_ref.nii.gz', '-o', 'anat_rpi_straight.nii', '-x', 'spline'], verbose)
    else:
        run_proc(['sct_straighten_spinalcord', '-i', fname_anat_rpi, '-o', 'anat_rpi_straight.nii', '-s', fname_centerline_rpi, '-x', 'spline', '-param', 'algo_fitting=' + param.algo_fitting], verbose)
        cache_save(cachefile, cache_sig)
        # move warping fields locally (to use caching next time)
        copy('warp_curve2straight.nii.gz', os.path.join(curdir, 'warp_curve2straight.nii.gz'))
        copy('warp_straight2curve.nii.gz', os.path.join(curdir, 'warp_straight2curve.nii.gz'))

    # Smooth the straightened image along z
    printv('\nSmooth the straightened image...')
    sigma_smooth = ",".join([str(i) for i in sigma])
    sct_maths.main(args=['-i', 'anat_rpi_straight.nii',
                         '-smooth', sigma_smooth,
                         '-o', 'anat_rpi_straight_smooth.nii',
                         '-v', '0'])
    # Apply the reversed warping field to get back the curved spinal cord
    printv('\nApply the reversed warping field to get back the curved spinal cord...')
    run_proc(['sct_apply_transfo', '-i', 'anat_rpi_straight_smooth.nii', '-o', 'anat_rpi_straight_smooth_curved.nii', '-d', 'anat.nii', '-w', 'warp_straight2curve.nii.gz', '-x', 'spline'], verbose)

    # replace zeroed voxels by original image (issue #937)
    printv('\nReplace zeroed voxels by original image...', verbose)
    nii_smooth = Image('anat_rpi_straight_smooth_curved.nii')
    data_smooth = nii_smooth.data
    data_input = Image('anat.nii').data
    indzero = np.where(data_smooth == 0)
    data_smooth[indzero] = data_input[indzero]
    nii_smooth.data = data_smooth
    nii_smooth.save('anat_rpi_straight_smooth_curved_nonzero.nii')

    # come back
    os.chdir(curdir)

    # Generate output file
    printv('\nGenerate output file...')
    generate_output_file(os.path.join(path_tmp, "anat_rpi_straight_smooth_curved_nonzero.nii"),
                         file_anat + '_smooth' + ext_anat)

    # Remove temporary files
    if remove_temp_files == 1:
        printv('\nRemove temporary files...')
        rmtree(path_tmp)

    # Display elapsed time
    elapsed_time = time.time() - start_time
    printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's\n')

    display_viewer_syntax([file_anat, file_anat + '_smooth'], verbose=verbose)
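
# Hedged usage sketch for the main() above (hypothetical file names; the entry point is assumed
# to be sct_smooth_spinalcord): the script straightens the cord, smooths along z, then warps back:
#
#   sct_smooth_spinalcord -i t2.nii.gz -s t2_centerline.nii.gz -smooth 0,0,3
#
# The result is written next to the input with the '_smooth' suffix.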
    def _orient(self, fname, orientation):
        return Image(fname).change_orientation(orientation).save(fname, mutable=True)

# Example #5

def main(args=None):
    """
    Main function
    :param args:
    :return:
    """
    # initializations
    output_type = None
    dim_list = ['x', 'y', 'z', 't']

    # Get parser args
    if args is None:
        args = None if sys.argv[1:] else ['--help']
    parser = get_parser()
    arguments = parser.parse_args(args=args)
    fname_in = arguments.i
    n_in = len(fname_in)
    verbose = arguments.v
    sct.init_sct(log_level=verbose, update=True)  # Update log level

    if arguments.o is not None:
        fname_out = arguments.o
    else:
        fname_out = None

    # Run command
    # Arguments are sorted alphabetically (not according to the usage order)
    if arguments.concat is not None:
        dim = arguments.concat
        assert dim in dim_list
        dim = dim_list.index(dim)
        im_out = [concat_data(fname_in, dim)]  # TODO: adapt to fname_in

    elif arguments.copy_header is not None:
        im_in = Image(fname_in[0])
        im_dest = Image(arguments.copy_header)
        im_dest_new = im_in.copy()
        im_dest_new.data = im_dest.data.copy()
        # im_dest.header = im_in.header
        im_dest_new.absolutepath = im_dest.absolutepath
        im_out = [im_dest_new]
        fname_out = arguments.copy_header

    elif arguments.display_warp:
        im_in = fname_in[0]
        visualize_warp(im_in, fname_grid=None, step=3, rm_tmp=True)
        im_out = None

    elif arguments.getorient:
        im_in = Image(fname_in[0])
        orient = im_in.orientation
        im_out = None

    elif arguments.keep_vol is not None:
        index_vol = [int(v) for v in arguments.keep_vol.split(',')]
        im_in = Image(fname_in[0])
        im_out = [remove_vol(im_in, index_vol, todo='keep')]

    elif arguments.mcs:
        im_in = Image(fname_in[0])
        if n_in != 1:
            sct.printv(parser.error('ERROR: -mcs need only one input'))
        if len(im_in.data.shape) != 5:
            sct.printv(
                parser.error(
                    'ERROR: -mcs input need to be a multi-component image'))
        im_out = multicomponent_split(im_in)

    elif arguments.omc:
        im_ref = Image(fname_in[0])
        for fname in fname_in:
            im = Image(fname)
            if im.data.shape != im_ref.data.shape:
                sct.printv(
                    parser.error(
                        'ERROR: -omc inputs need to have all the same shapes'))
            del im
        im_out = [multicomponent_merge(fname_in)]  # TODO: adapt to fname_in

    elif arguments.pad is not None:
        im_in = Image(fname_in[0])
        ndims = len(im_in.data.shape)
        if ndims != 3:
            sct.printv('ERROR: you need to specify a 3D input file.', 1,
                       'error')
            return

        pad_arguments = arguments.pad.split(',')
        if len(pad_arguments) != 3:
            sct.printv('ERROR: you need to specify 3 padding values.', 1,
                       'error')

        padx, pady, padz = pad_arguments
        padx, pady, padz = int(padx), int(pady), int(padz)
        im_out = [
            pad_image(im_in,
                      pad_x_i=padx,
                      pad_x_f=padx,
                      pad_y_i=pady,
                      pad_y_f=pady,
                      pad_z_i=padz,
                      pad_z_f=padz)
        ]

    elif arguments.pad_asym is not None:
        im_in = Image(fname_in[0])
        ndims = len(im_in.data.shape)
        if ndims != 3:
            sct.printv('ERROR: you need to specify a 3D input file.', 1,
                       'error')
            return

        pad_arguments = arguments.pad_asym.split(',')
        if len(pad_arguments) != 6:
            sct.printv('ERROR: you need to specify 6 padding values.', 1,
                       'error')

        padxi, padxf, padyi, padyf, padzi, padzf = [int(v) for v in pad_arguments]
        im_out = [
            pad_image(im_in,
                      pad_x_i=padxi,
                      pad_x_f=padxf,
                      pad_y_i=padyi,
                      pad_y_f=padyf,
                      pad_z_i=padzi,
                      pad_z_f=padzf)
        ]

    elif arguments.remove_vol is not None:
        index_vol = [int(v) for v in arguments.remove_vol.split(',')]
        im_in = Image(fname_in[0])
        im_out = [remove_vol(im_in, index_vol, todo='remove')]

    elif arguments.setorient is not None:
        sct.printv(fname_in[0])
        im_in = Image(fname_in[0])
        im_out = [msct_image.change_orientation(im_in, arguments.setorient)]

    elif arguments.setorient_data is not None:
        im_in = Image(fname_in[0])
        im_out = [
            msct_image.change_orientation(im_in,
                                          arguments.setorient_data,
                                          data_only=True)
        ]

    elif arguments.split is not None:
        dim = arguments.split
        assert dim in dim_list
        im_in = Image(fname_in[0])
        dim = dim_list.index(dim)
        im_out = split_data(im_in, dim)

    elif arguments.type is not None:
        output_type = arguments.type
        im_in = Image(fname_in[0])
        im_out = [im_in]  # TODO: adapt to fname_in

    elif arguments.to_fsl is not None:
        space_files = arguments.to_fsl
        if len(space_files) > 2 or len(space_files) < 1:
            sct.printv(parser.error('ERROR: -to-fsl expects 1 or 2 arguments'))
            return
        spaces = [Image(s) for s in space_files]
        if len(spaces) < 2:
            spaces.append(None)
        im_out = [
            displacement_to_abs_fsl(Image(fname_in[0]), spaces[0], spaces[1])
        ]

    else:
        im_out = None
        sct.printv(
            parser.error(
                'ERROR: you need to specify an operation to do on the input image'
            ))

    # in case fname_out is not defined, use first element of input file name list
    if fname_out is None:
        fname_out = fname_in[0]

    # Write output
    if im_out is not None:
        sct.printv('Generate output files...', verbose)
        # if only one output
        if len(im_out) == 1 and arguments.split is None:
            im_out[0].save(fname_out, dtype=output_type, verbose=verbose)
            sct.display_viewer_syntax([fname_out], verbose=verbose)
        if arguments.mcs:
            # use input file name and add _X, _Y _Z. Keep the same extension
            l_fname_out = []
            for i_dim in range(3):
                l_fname_out.append(
                    sct.add_suffix(fname_out or fname_in[0],
                                   '_' + dim_list[i_dim].upper()))
                im_out[i_dim].save(l_fname_out[i_dim], verbose=verbose)
            sct.display_viewer_syntax(l_fname_out)
        if arguments.split is not None:
            # use input file name and add _"DIM+NUMBER". Keep the same extension
            l_fname_out = []
            for i, im in enumerate(im_out):
                l_fname_out.append(
                    sct.add_suffix(
                        fname_out or fname_in[0],
                        '_' + dim_list[dim].upper() + str(i).zfill(4)))
                im.save(l_fname_out[i])
            sct.display_viewer_syntax(l_fname_out)

    elif arguments.getorient:
        sct.printv(orient)

    elif arguments.display_warp:
        sct.printv('Warping grid generated.', verbose, 'info')
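
# Hedged usage sketch for the main() above (hypothetical file names): each elif branch maps to
# one sct_image CLI flag, for example:
#
#   sct_image -i data_4d.nii.gz -split t        # writes data_4d_T0000.nii.gz, data_4d_T0001.nii.gz, ...
#   sct_image -i data.nii.gz -setorient RPI -o data_rpi.nii.gz
#   sct_image -i data.nii.gz -getorient         # prints the orientation string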
    def apply(self):
        # Initialization
        fname_src = self.input_filename  # source image (moving)
        list_warp = self.list_warp  # list of warping fields
        fname_out = self.output_filename  # output
        fname_dest = self.fname_dest  # destination image (fix)
        verbose = self.verbose
        remove_temp_files = self.remove_temp_files
        crop_reference = self.crop  # if = 1, put 0 everywhere around warping field, if = 2, real crop

        islabel = False
        if self.interp == 'label':
            islabel = True
            self.interp = 'nn'

        interp = get_interpolation('isct_antsApplyTransforms', self.interp)

        # Parse list of warping fields
        printv('\nParse list of warping fields...', verbose)
        use_inverse = []
        fname_warp_list_invert = []
        # list_warp = list_warp.replace(' ', '')  # remove spaces
        # list_warp = list_warp.split(",")  # parse with comma
        for idx_warp, path_warp in enumerate(self.list_warp):
            # Check if this transformation should be inverted
            if path_warp in self.list_warpinv:
                use_inverse.append('-i')
                # list_warp[idx_warp] = path_warp[1:]  # remove '-'
                fname_warp_list_invert += [[
                    use_inverse[idx_warp], list_warp[idx_warp]
                ]]
            else:
                use_inverse.append('')
                fname_warp_list_invert += [[path_warp]]
            path_warp = list_warp[idx_warp]
            if path_warp.endswith((".nii", ".nii.gz")) \
                    and Image(list_warp[idx_warp]).header.get_intent()[0] != 'vector':
                raise ValueError(
                    "Displacement field in {} is invalid: should be encoded"
                    " in a 5D file with vector intent code"
                    " (see https://nifti.nimh.nih.gov/pub/dist/src/niftilib/nifti1.h"
                    .format(path_warp))
        # need to check if last warping field is an affine transfo
        isLastAffine = False
        path_fname, file_fname, ext_fname = extract_fname(
            fname_warp_list_invert[-1][-1])
        if ext_fname in ['.txt', '.mat']:
            isLastAffine = True

        # check if destination file is 3d
        # check_dim(fname_dest, dim_lst=[3]) # PR 2598: we decided to skip this line.

        # N.B. Here we take the inverse of the warp list, because sct_WarpImageMultiTransform concatenates in the reverse order
        fname_warp_list_invert.reverse()
        fname_warp_list_invert = functools.reduce(lambda x, y: x + y,
                                                  fname_warp_list_invert)

        # Extract path, file and extension
        path_src, file_src, ext_src = extract_fname(fname_src)
        path_dest, file_dest, ext_dest = extract_fname(fname_dest)

        # Get output folder and file name
        if fname_out == '':
            path_out = ''  # output in user's current directory
            file_out = file_src + '_reg'
            ext_out = ext_src
            fname_out = os.path.join(path_out, file_out + ext_out)

        # Get dimensions of data
        printv('\nGet dimensions of data...', verbose)
        img_src = Image(fname_src)
        nx, ny, nz, nt, px, py, pz, pt = img_src.dim
        # nx, ny, nz, nt, px, py, pz, pt = get_dimension(fname_src)
        printv(
            '  ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' +
            str(nt), verbose)

        # if 3d
        if nt == 1:
            # Apply transformation
            printv('\nApply transformation...', verbose)
            if nz in [0, 1]:
                dim = '2'
            else:
                dim = '3'
            # if labels, dilate before resampling
            if islabel:
                printv("\nDilate labels before warping...")
                path_tmp = tmp_create(basename="apply_transfo")
                fname_dilated_labels = os.path.join(path_tmp,
                                                    "dilated_data.nii")
                # dilate points
                dilate(Image(fname_src), 4, 'ball').save(fname_dilated_labels)
                fname_src = fname_dilated_labels

            printv(
                "\nApply transformation and resample to destination space...",
                verbose)
            run_proc([
                'isct_antsApplyTransforms', '-d', dim, '-i', fname_src, '-o',
                fname_out, '-t'
            ] + fname_warp_list_invert + ['-r', fname_dest] + interp,
                     is_sct_binary=True)

        # if 4d, loop across the T dimension
        else:
            if islabel:
                raise NotImplementedError

            dim = '4'
            path_tmp = tmp_create(basename="apply_transfo")

            # convert to nifti into temp folder
            printv('\nCopying input data to tmp folder and convert to nii...',
                   verbose)
            img_src.save(os.path.join(path_tmp, "data.nii"))
            copy(fname_dest, os.path.join(path_tmp, file_dest + ext_dest))
            fname_warp_list_tmp = []
            for fname_warp in list_warp:
                path_warp, file_warp, ext_warp = extract_fname(fname_warp)
                copy(fname_warp, os.path.join(path_tmp, file_warp + ext_warp))
                fname_warp_list_tmp.append(file_warp + ext_warp)
            fname_warp_list_invert_tmp = fname_warp_list_tmp[::-1]

            curdir = os.getcwd()
            os.chdir(path_tmp)

            # split along T dimension
            printv('\nSplit along T dimension...', verbose)

            im_dat = Image('data.nii')
            im_header = im_dat.hdr
            data_split_list = sct_image.split_data(im_dat, 3)
            for im in data_split_list:
                im.save()

            # apply transfo
            printv('\nApply transformation to each 3D volume...', verbose)
            for it in range(nt):
                file_data_split = 'data_T' + str(it).zfill(4) + '.nii'
                file_data_split_reg = 'data_reg_T' + str(it).zfill(4) + '.nii'

                status, output = run_proc([
                    'isct_antsApplyTransforms',
                    '-d',
                    '3',
                    '-i',
                    file_data_split,
                    '-o',
                    file_data_split_reg,
                    '-t',
                ] + fname_warp_list_invert_tmp + [
                    '-r',
                    file_dest + ext_dest,
                ] + interp,
                                          verbose,
                                          is_sct_binary=True)

            # Merge files back
            printv('\nMerge file back...', verbose)
            import glob
            path_out, name_out, ext_out = extract_fname(fname_out)
            # im_list = [Image(file_name) for file_name in glob.glob('data_reg_T*.nii')]
            # concat_data used to take a list of images as input; it now takes a list of file names and opens the files one by one (see issue #715)
            fname_list = glob.glob('data_reg_T*.nii')
            fname_list.sort()
            im_list = [Image(fname) for fname in fname_list]
            im_out = sct_image.concat_data(im_list, 3, im_header['pixdim'])
            im_out.save(name_out + ext_out)

            os.chdir(curdir)
            generate_output_file(os.path.join(path_tmp, name_out + ext_out),
                                 fname_out)
            # Delete temporary folder if specified
            if remove_temp_files:
                printv('\nRemove temporary files...', verbose)
                rmtree(path_tmp, verbose=verbose)

        # Copy affine matrix from destination space to make sure qform/sform are the same
        printv(
            "Copy affine matrix from destination space to make sure qform/sform are the same.",
            verbose)
        im_src_reg = Image(fname_out)
        im_src_reg.copy_qform_from_ref(Image(fname_dest))
        im_src_reg.save(
            verbose=0
        )  # set verbose=0 to avoid warning message about rewriting file

        if islabel:
            printv(
                "\nTake the center of mass of each registered dilated labels..."
            )
            labeled_img = cubic_to_point(im_src_reg)
            labeled_img.save(path=fname_out)
            if remove_temp_files:
                printv('\nRemove temporary files...', verbose)
                rmtree(path_tmp, verbose=verbose)

        # Crop the resulting image using dimensions from the warping field
        warping_field = fname_warp_list_invert[-1]
        # If the last transformation is not an affine transfo, we need to compute the matrix space of the concatenated
        # warping field
        if not isLastAffine and crop_reference in [1, 2]:
            printv('Last transformation is not affine.')
            if crop_reference in [1, 2]:
                # Extract only the first ndim of the warping field
                img_warp = Image(warping_field)
                if dim == '2':
                    img_warp_ndim = Image(img_src.data[:, :], hdr=img_warp.hdr)
                elif dim in ['3', '4']:
                    img_warp_ndim = Image(img_src.data[:, :, :],
                                          hdr=img_warp.hdr)
                # Set zero to everything outside the warping field
                cropper = ImageCropper(Image(fname_out))
                cropper.get_bbox_from_ref(img_warp_ndim)
                if crop_reference == 1:
                    printv(
                        'Cropping strategy is: keep same matrix size, put 0 everywhere around warping field'
                    )
                    img_out = cropper.crop(background=0)
                elif crop_reference == 2:
                    printv(
                        'Cropping strategy is: crop around warping field (the size of warping field will '
                        'change)')
                    img_out = cropper.crop()
                img_out.save(fname_out)

        display_viewer_syntax([fname_dest, fname_out], verbose=verbose)
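
    # Note on apply(): it only consumes attributes that the enclosing class (not shown in this
    # excerpt) is expected to set: input_filename, fname_dest, list_warp, list_warpinv,
    # output_filename, interp, crop, remove_temp_files and verbose. A hedged sketch of how it
    # might be driven (the constructor signature below is an assumption):
    #
    #   transfo = Transform(input_filename='t2.nii.gz', fname_dest='template.nii.gz',
    #                       list_warp=['warp_template2anat.nii.gz'], output_filename='t2_reg.nii.gz')
    #   transfo.apply()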
    def __init__(self, fname_mask, fname_sc, fname_ref, path_template,
                 path_ofolder, verbose):
        self.fname_mask = fname_mask

        self.fname_sc = fname_sc
        self.fname_ref = fname_ref
        self.path_template = path_template
        self.path_ofolder = path_ofolder
        self.verbose = verbose
        self.wrk_dir = os.getcwd()

        if not set(np.unique(Image(fname_mask).data)) == set([0.0, 1.0]):
            if set(np.unique(Image(fname_mask).data)) == set([0.0]):
                printv('WARNING: Empty masked image', self.verbose, 'warning')
            else:
                printv(
                    "ERROR input file %s is not binary file with 0 and 1 values"
                    % fname_mask, 1, 'error')

        # create tmp directory
        self.tmp_dir = tmp_create()  # path to tmp directory

        # lesion file where each lesion has a different value
        self.fname_label = extract_fname(
            self.fname_mask)[1] + '_label' + extract_fname(self.fname_mask)[2]

        # initialization of measure sheet
        measure_lst = [
            'label', 'volume [mm3]', 'length [mm]',
            'max_equivalent_diameter [mm]'
        ]
        if self.fname_ref is not None:
            for measure in ['mean', 'std']:
                measure_lst.append(measure + '_' +
                                   extract_fname(self.fname_ref)[1])
        measure_dct = {}
        for column in measure_lst:
            measure_dct[column] = None
        self.measure_pd = pd.DataFrame(data=measure_dct,
                                       index=range(0),
                                       columns=measure_lst)

        # orientation of the input image
        self.orientation = None

        # volume object
        self.volumes = None

        # initialization of proportion measures, related to the registered atlas
        if self.path_template is not None:
            self.path_atlas = os.path.join(self.path_template, "atlas")
            self.path_levels = os.path.join(self.path_template, "template",
                                            "PAM50_levels.nii.gz")
        else:
            self.path_atlas, self.path_levels = None, None
        self.vert_lst = None
        self.atlas_roi_lst = None
        self.distrib_matrix_dct = {}

        # output names
        self.pickle_name = extract_fname(self.fname_mask)[1] + '_analyzis.pkl'
        self.excel_name = extract_fname(self.fname_mask)[1] + '_analyzis.xls'

# Example #8

    # path_tmp = sct.tmp_create()
    # tmp_copy_nifti(input_file, path_tmp, 'raw.nii')
    # sct.run('cp '+warping_fields_filename[0]+' '+path_tmp)
    # curdir = os.getcwd()
    # os.chdir(path_tmp)
    sct.mkdir("images")
    sct.mkdir("niftis")
    while True:
        try:
            warping_fields[0].num_of_frames = number_of_frames
            image_output_iter, iteration = next(warping_fields[0])
            image_output_iter.save()
            filename_warp = image_output_iter.path + image_output_iter.file_name + image_output_iter.ext
            filename_output = "niftis/tmp.warped_image_" + str(
                iteration - 1) + image_output_iter.ext
            sct.run([
                "sct_apply_transfo", "-i", input_file, "-d", reference_image,
                "-w", filename_warp, "-o", filename_output
            ])
            result = Image(filename_output).change_orientation("RPI")

            toimage(result.data[int(result.data.shape[0] / 2)].squeeze(),
                    cmin=0.0).save('images/' +
                                   extract_fname(filename_output)[1] + '.jpg')
            filenames_output.append(filename_output)
        except ValueError:
            printv('\nError during warping field generation...', 1, 'error')
        except StopIteration:
            printv('\nFinished iterations.')
            break
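
# Hedged sketch of the iterator protocol the loop above relies on: warping_fields[0] is expected
# to behave like the hypothetical class below, i.e. next() returns a (warped Image, 1-based frame
# index) pair and raises StopIteration after num_of_frames frames, which is what ends the loop.
class _WarpingFieldFramesSketch:
    """Hypothetical stand-in illustrating the protocol; not the actual SCT class."""

    def __init__(self, frames):
        self.num_of_frames = len(frames)  # the calling loop overwrites this attribute
        self._frames = list(frames)
        self._i = 0

    def __next__(self):
        if self._i >= self.num_of_frames:
            raise StopIteration
        self._i += 1
        return self._frames[self._i - 1], self._i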

# Example #9

def main(args=None):
    """
    Main function
    :param args:
    :return:
    """
    # get parser args
    if args is None:
        args = None if sys.argv[1:] else ['--help']
    else:
        # flatten the list of input arguments because -w and -winv carry a nested list
        lst = []
        for line in args:
            lst.append(line) if isinstance(line, str) else lst.extend(line)
        args = lst
    parser = get_parser()
    arguments = parser.parse_args(args=args)

    # Initialization
    fname_warp_final = ''  # concatenated transformations
    fname_dest = arguments.d
    fname_warp_list = arguments.w
    warpinv_filename = arguments.winv

    if arguments.o is not None:
        fname_warp_final = arguments.o
    verbose = arguments.v
    sct.init_sct(log_level=verbose, update=True)  # Update log level

    # Parse list of warping fields
    sct.printv('\nParse list of warping fields...', verbose)
    use_inverse = []
    fname_warp_list_invert = []
    # list_warp = list_warp.replace(' ', '')  # remove spaces
    # list_warp = list_warp.split(",")  # parse with comma
    for idx_warp, path_warp in enumerate(fname_warp_list):
        # Check if this transformation should be inverted
        if path_warp in warpinv_filename:
            use_inverse.append('-i')
            # list_warp[idx_warp] = path_warp[1:]  # remove '-'
            fname_warp_list_invert += [[
                use_inverse[idx_warp], fname_warp_list[idx_warp]
            ]]
        else:
            use_inverse.append('')
            fname_warp_list_invert += [[path_warp]]
        path_warp = fname_warp_list[idx_warp]
        if path_warp.endswith((".nii", ".nii.gz")) \
                and Image(fname_warp_list[idx_warp]).header.get_intent()[0] != 'vector':
            raise ValueError("Displacement field in {} is invalid: should be encoded" \
                             " in a 5D file with vector intent code" \
                             " (see https://nifti.nimh.nih.gov/pub/dist/src/niftilib/nifti1.h" \
                             .format(path_warp))
    # need to check if last warping field is an affine transfo
    isLastAffine = False
    path_fname, file_fname, ext_fname = sct.extract_fname(
        fname_warp_list_invert[-1][-1])
    if ext_fname in ['.txt', '.mat']:
        isLastAffine = True

    # check if destination file is 3d
    if not sct.check_dim(fname_dest, dim_lst=[3]):
        sct.printv('ERROR: Destination data must be 3d')

    # Here we take the inverse of the warp list, because sct_WarpImageMultiTransform concatenates in the reverse order
    fname_warp_list_invert.reverse()
    fname_warp_list_invert = functools.reduce(lambda x, y: x + y,
                                              fname_warp_list_invert)

    # Check file existence
    sct.printv('\nCheck file existence...', verbose)
    sct.check_file_exist(fname_dest, verbose)
    for i in range(len(fname_warp_list)):
        sct.check_file_exist(fname_warp_list[i], verbose)

    # Get output folder and file name
    if fname_warp_final == '':
        path_out, file_out, ext_out = sct.extract_fname(param.fname_warp_final)
    else:
        path_out, file_out, ext_out = sct.extract_fname(fname_warp_final)

    # Check dimension of destination data (cf. issue #1419, #1429)
    im_dest = Image(fname_dest)
    if im_dest.dim[2] == 1:
        dimensionality = '2'
    else:
        dimensionality = '3'

    cmd = [
        'isct_ComposeMultiTransform', dimensionality, 'warp_final' + ext_out,
        '-R', fname_dest
    ] + fname_warp_list_invert
    status, output = sct.run(cmd, verbose=verbose, is_sct_binary=True)

    # check if output was generated
    if not os.path.isfile('warp_final' + ext_out):
        sct.printv('ERROR: Warping field was not generated.\n' + output, 1,
                   'error')

    # Generate output files
    sct.printv('\nGenerate output files...', verbose)
    sct.generate_output_file('warp_final' + ext_out,
                             os.path.join(path_out, file_out + ext_out))
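
# Hedged usage sketch for the main() above (hypothetical file names; the entry point is assumed
# to be sct_concat_transfo): concatenate two warping fields into a single field resampled into
# the space of the destination image:
#
#   sct_concat_transfo -w warp_anat2straight.nii.gz warp_straight2template.nii.gz \
#                      -d template.nii.gz -o warp_anat2template.nii.gz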

# Example #10

def check_and_correct_segmentation(fname_segmentation,
                                   fname_centerline,
                                   folder_output='',
                                   threshold_distance=5.0,
                                   remove_temp_files=1,
                                   verbose=0):
    """
    This function takes the outputs of isct_propseg (centerline and segmentation) and check if the centerline of the
    segmentation is coherent with the centerline provided by the isct_propseg, especially on the edges (related
    to issue #1074).
    Args:
        fname_segmentation: filename of binary segmentation
        fname_centerline: filename of binary centerline
        threshold_distance: threshold, in mm, beyond which centerlines are not coherent
        verbose:

    Returns: None
    """
    sct.printv('\nCheck consistency of segmentation...', verbose)
    # creating a temporary folder in which all temporary files will be placed and deleted afterwards
    path_tmp = sct.tmp_create(basename="propseg", verbose=verbose)
    from sct_convert import convert
    convert(fname_segmentation,
            os.path.join(path_tmp, "tmp.segmentation.nii.gz"),
            verbose=0)
    convert(fname_centerline,
            os.path.join(path_tmp, "tmp.centerline.nii.gz"),
            verbose=0)
    fname_seg_absolute = os.path.abspath(fname_segmentation)

    # go to tmp folder
    curdir = os.getcwd()
    os.chdir(path_tmp)

    # convert segmentation image to RPI
    im_input = Image('tmp.segmentation.nii.gz')
    image_input_orientation = im_input.orientation

    sct_image.main(
        "-i tmp.segmentation.nii.gz -setorient RPI -o tmp.segmentation_RPI.nii.gz -v 0"
        .split())
    sct_image.main(
        "-i tmp.centerline.nii.gz -setorient RPI -o tmp.centerline_RPI.nii.gz -v 0"
        .split())

    # go through segmentation image, and compare with centerline from propseg
    im_seg = Image('tmp.segmentation_RPI.nii.gz')
    im_centerline = Image('tmp.centerline_RPI.nii.gz')

    # Get size of data
    sct.printv('\nGet data dimensions...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = im_seg.dim

    # extraction of centerline provided by isct_propseg and computation of center of mass for each slice
    # the centerline is defined as the center of the tubular mesh output by propseg.
    centerline, key_centerline = {}, []
    for i in range(nz):
        slice = im_centerline.data[:, :, i]
        if np.any(slice):
            x_centerline, y_centerline = ndi.measurements.center_of_mass(slice)
            centerline[str(i)] = [x_centerline, y_centerline]
            key_centerline.append(i)

    minz_centerline = np.min(key_centerline)
    maxz_centerline = np.max(key_centerline)
    mid_slice = int((maxz_centerline - minz_centerline) / 2)

    # for each slice of the segmentation, check if only one object is present. If not, remove the slice from segmentation.
    # If only one object (the spinal cord) is present in the slice, check if its center of mass is close to the centerline of isct_propseg.
    slices_to_remove = [False] * nz  # flag that decides if the slice must be removed
    for i in range(minz_centerline, maxz_centerline + 1):
        # extraction of slice
        slice = im_seg.data[:, :, i]
        distance = -1
        label_objects, nb_labels = ndi.label(slice)  # count binary objects in the slice
        if nb_labels > 1:  # if there is more than one object in the slice, the slice is removed from the segmentation
            slices_to_remove[i] = True
        elif nb_labels == 1:  # check if the centerline is coherent with the one from isct_propseg
            x_centerline, y_centerline = ndi.measurements.center_of_mass(slice)
            slice_nearest_coord = min(key_centerline, key=lambda x: abs(x - i))
            coord_nearest_coord = centerline[str(slice_nearest_coord)]
            distance = np.sqrt((
                (x_centerline - coord_nearest_coord[0]) * px)**2 + (
                    (y_centerline - coord_nearest_coord[1]) * py)**2 +
                               ((i - slice_nearest_coord) * pz)**2)

            if distance >= threshold_distance:  # threshold must be adjusted, default is 5 mm
                slices_to_remove[i] = True

    # Keep only one continuous portion of the segmentation:
    # walking from the mid-centerline slice towards each end, once a slice flagged for removal is
    # encountered, all subsequent slices in that direction are flagged for removal as well
    slice_to_change = False
    for i in range(mid_slice, nz):
        if slice_to_change:
            slices_to_remove[i] = True
        elif slices_to_remove[i]:
            slice_to_change = True

    slice_to_change = False
    for i in range(mid_slice, 0, -1):
        if slice_to_change:
            slices_to_remove[i] = True
        elif slices_to_remove[i]:
            slice_to_change = True

    for i in range(0, nz):
        # remove the slice
        if slices_to_remove[i]:
            im_seg.data[:, :, i] *= 0

    # saving the image
    im_seg.save('tmp.segmentation_RPI_c.nii.gz')

    # replacing old segmentation with the corrected one
    sct_image.main(
        '-i tmp.segmentation_RPI_c.nii.gz -setorient {} -o {} -v 0'.format(
            image_input_orientation, fname_seg_absolute).split())

    os.chdir(curdir)

    # display information about how much of the segmentation has been corrected

    # remove temporary files
    if remove_temp_files:
        # sct.printv("\nRemove temporary files...", verbose)
        sct.rmtree(path_tmp)
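
# Hedged usage sketch for check_and_correct_segmentation() (hypothetical file names), mirroring
# the call made by propseg-based pipelines once both outputs exist on disk:
#
#   check_and_correct_segmentation('t2_seg.nii.gz', 't2_centerline.nii.gz',
#                                  folder_output='./', threshold_distance=3.0,
#                                  remove_temp_files=1, verbose=1)
#
# Slices whose center of mass lies more than threshold_distance mm from the propseg centerline,
# or that contain more than one binary object, are zeroed out in the segmentation file.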

# Example #11

def propseg(img_input, options_dict):
    """
    :param img_input: source image, to be segmented
    :param options_dict: arguments as dictionary
    :return: segmented Image
    """
    arguments = options_dict
    fname_input_data = img_input.absolutepath
    fname_data = os.path.abspath(fname_input_data)
    contrast_type = arguments["-c"]
    contrast_type_conversion = {
        't1': 't1',
        't2': 't2',
        't2s': 't2',
        'dwi': 't1'
    }
    contrast_type_propseg = contrast_type_conversion[contrast_type]

    # Starting building the command
    cmd = ['isct_propseg', '-t', contrast_type_propseg]

    if "-ofolder" in arguments:
        folder_output = arguments["-ofolder"]
    else:
        folder_output = './'
    cmd += ['-o', folder_output]
    if not os.path.isdir(folder_output) and os.path.exists(folder_output):
        sct.log.error("output directory %s is not a valid directory" %
                      folder_output)
    if not os.path.exists(folder_output):
        os.makedirs(folder_output)

    if "-down" in arguments:
        cmd += ["-down", str(arguments["-down"])]
    if "-up" in arguments:
        cmd += ["-up", str(arguments["-up"])]

    remove_temp_files = 1
    if "-r" in arguments:
        remove_temp_files = int(arguments["-r"])

    verbose = 0
    if "-v" in arguments:
        if arguments["-v"] is "1":
            verbose = 2
            cmd += ["-verbose"]

    # Output options
    if "-mesh" in arguments:
        cmd += ["-mesh"]
    if "-centerline-binary" in arguments:
        cmd += ["-centerline-binary"]
    if "-CSF" in arguments:
        cmd += ["-CSF"]
    if "-centerline-coord" in arguments:
        cmd += ["-centerline-coord"]
    if "-cross" in arguments:
        cmd += ["-cross"]
    if "-init-tube" in arguments:
        cmd += ["-init-tube"]
    if "-low-resolution-mesh" in arguments:
        cmd += ["-low-resolution-mesh"]
    if "-detect-nii" in arguments:
        cmd += ["-detect-nii"]
    if "-detect-png" in arguments:
        cmd += ["-detect-png"]

    # Helping options
    use_viewer = None
    use_optic = True  # enabled by default
    init_option = None
    rescale_header = arguments["-rescale"]
    if "-init" in arguments:
        init_option = float(arguments["-init"])
        if init_option < 0:
            sct.log.error('Command-line usage error: ' + str(init_option) +
                          " is not a valid value for '-init'")
            sys.exit(1)
    if "-init-centerline" in arguments:
        if str(arguments["-init-centerline"]) == "viewer":
            use_viewer = "centerline"
        elif str(arguments["-init-centerline"]) == "hough":
            use_optic = False
        else:
            if rescale_header != 1:
                fname_labels_viewer = func_rescale_header(str(
                    arguments["-init-centerline"]),
                                                          rescale_header,
                                                          verbose=verbose)
            else:
                fname_labels_viewer = str(arguments["-init-centerline"])
            cmd += ["-init-centerline", fname_labels_viewer]
            use_optic = False
    if "-init-mask" in arguments:
        if str(arguments["-init-mask"]) == "viewer":
            use_viewer = "mask"
        else:
            if rescale_header != 1:
                fname_labels_viewer = func_rescale_header(
                    str(arguments["-init-mask"]), rescale_header)
            else:
                fname_labels_viewer = str(arguments["-init-mask"])
            cmd += ["-init-mask", fname_labels_viewer]
            use_optic = False
    if "-mask-correction" in arguments:
        cmd += ["-mask-correction", str(arguments["-mask-correction"])]
    if "-radius" in arguments:
        cmd += ["-radius", str(arguments["-radius"])]
    if "-detect-n" in arguments:
        cmd += ["-detect-n", str(arguments["-detect-n"])]
    if "-detect-gap" in arguments:
        cmd += ["-detect-gap", str(arguments["-detect-gap"])]
    if "-init-validation" in arguments:
        cmd += ["-init-validation"]
    if "-nbiter" in arguments:
        cmd += ["-nbiter", str(arguments["-nbiter"])]
    if "-max-area" in arguments:
        cmd += ["-max-area", str(arguments["-max-area"])]
    if "-max-deformation" in arguments:
        cmd += ["-max-deformation", str(arguments["-max-deformation"])]
    if "-min-contrast" in arguments:
        cmd += ["-min-contrast", str(arguments["-min-contrast"])]
    if "-d" in arguments:
        cmd += ["-d", str(arguments["-d"])]
    if "-distance-search" in arguments:
        cmd += ["-dsearch", str(arguments["-distance-search"])]
    if "-alpha" in arguments:
        cmd += ["-alpha", str(arguments["-alpha"])]

    # check if input image is in 3D. Otherwise itk image reader will cut the 4D image in 3D volumes and only take the first one.
    image_input = Image(fname_data)
    nx, ny, nz, nt, px, py, pz, pt = image_input.dim
    if nt > 1:
        sct.log.error(
            'ERROR: your input image needs to be 3D in order to be segmented.')

    path_data, file_data, ext_data = sct.extract_fname(fname_data)

    # rescale header (see issue #1406)
    if rescale_header != 1:
        fname_data_propseg = func_rescale_header(fname_data, rescale_header)
    else:
        fname_data_propseg = fname_data

    # add to command
    cmd += ['-i', fname_data_propseg]

    # if centerline or mask is asked using viewer
    if use_viewer:
        from spinalcordtoolbox.gui.base import AnatomicalParams
        from spinalcordtoolbox.gui.centerline import launch_centerline_dialog

        params = AnatomicalParams()
        if use_viewer == 'mask':
            params.num_points = 3
            params.interval_in_mm = 15  # superior-inferior interval between two consecutive labels
            params.starting_slice = 'midfovminusinterval'
        if use_viewer == 'centerline':
            # setting maximum number of points to a reasonable value
            params.num_points = 20
            params.interval_in_mm = 30
            params.starting_slice = 'top'
        im_data = Image(fname_data_propseg)

        im_mask_viewer = msct_image.zeros_like(im_data)
        # im_mask_viewer.absolutepath = sct.add_suffix(fname_data_propseg, '_labels_viewer')
        controller = launch_centerline_dialog(im_data, im_mask_viewer, params)
        fname_labels_viewer = sct.add_suffix(fname_data_propseg,
                                             '_labels_viewer')

        if not controller.saved:
            sct.log.error(
                'The viewer has been closed before entering all manual points. Please try again.'
            )
            sys.exit(1)
        # save labels
        controller.as_niftii(fname_labels_viewer)

        # add mask filename to parameters string
        if use_viewer == "centerline":
            cmd += ["-init-centerline", fname_labels_viewer]
        elif use_viewer == "mask":
            cmd += ["-init-mask", fname_labels_viewer]

    # If using OptiC
    elif use_optic:
        path_script = os.path.dirname(__file__)
        path_sct = os.path.dirname(path_script)
        path_classifier = os.path.join(path_sct, 'data/optic_models',
                                       '{}_model'.format(contrast_type))

        init_option_optic, fname_centerline = optic.detect_centerline(
            fname_data_propseg,
            contrast_type,
            path_classifier,
            folder_output,
            remove_temp_files,
            init_option,
            verbose=verbose)
        if init_option is not None:
            # TODO: what's this???
            cmd += ["-init", str(init_option_optic)]

        cmd += ["-init-centerline", fname_centerline]

    # enabling centerline extraction by default (needed by check_and_correct_segmentation() )
    cmd += ['-centerline-binary']

    # run propseg
    status, output = sct.run(cmd, verbose, raise_exception=False)

    # check status is not 0
    if not status == 0:
        sct.log.error(
            'Automatic cord detection failed. Please initialize using -init-centerline or '
            '-init-mask (see help).')
        sys.exit(1)

    # build output filename
    fname_seg = os.path.join(
        folder_output, os.path.basename(sct.add_suffix(fname_data, "_seg")))
    fname_centerline = os.path.join(
        folder_output,
        os.path.basename(sct.add_suffix(fname_data, "_centerline")))
    # in case header was rescaled, we need to update the output file names by removing the "_rescaled"
    if rescale_header != 1:
        sct.mv(
            os.path.join(
                folder_output,
                sct.add_suffix(os.path.basename(fname_data_propseg), "_seg")),
            fname_seg)
        sct.mv(
            os.path.join(
                folder_output,
                sct.add_suffix(os.path.basename(fname_data_propseg),
                               "_centerline")), fname_centerline)
        # if the viewer was used, copy the labelled points to the output folder (they will then be scaled back)
        if use_viewer:
            fname_labels_viewer_new = os.path.join(
                folder_output,
                os.path.basename(sct.add_suffix(fname_data, "_labels_viewer")))
            sct.copy(fname_labels_viewer, fname_labels_viewer_new)
            # update variable (used later)
            fname_labels_viewer = fname_labels_viewer_new

    # check consistency of segmentation
    if arguments["-correct-seg"] == "1":
        check_and_correct_segmentation(fname_seg,
                                       fname_centerline,
                                       folder_output=folder_output,
                                       threshold_distance=3.0,
                                       remove_temp_files=remove_temp_files,
                                       verbose=verbose)

    # copy header from input to segmentation to make sure qform is the same
    sct.printv(
        "Copy header input --> output(s) to make sure qform is the same.",
        verbose)
    list_fname = [fname_seg, fname_centerline]
    if use_viewer:
        list_fname.append(fname_labels_viewer)
    for fname in list_fname:
        im = Image(fname)
        im.header = image_input.header
        im.save(dtype='int8')  # they are all binary masks, hence fine to save as int8

    return Image(fname_seg)
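
# Hedged usage sketch for propseg() (hypothetical file names): the options dictionary uses
# CLI-style keys; "-c", "-rescale" and "-correct-seg" are read unconditionally above, the
# rest are optional:
#
#   im_seg = propseg(Image('t2.nii.gz'),
#                    {"-c": "t2", "-rescale": 1, "-correct-seg": "1", "-ofolder": "./seg"})
#   # im_seg is the binary cord segmentation (an Image); it is also saved as t2_seg.nii.gz in ./seg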
def main():
    """Main function."""
    sct.init_sct()
    parser = get_parser()
    args = sys.argv[1:]
    arguments = parser.parse(args)

    fname_image = os.path.abspath(arguments['-i'])
    contrast_type = arguments['-c']

    ctr_algo = arguments["-centerline"]

    if "-brain" not in args:
        if contrast_type in ['t2s', 'dwi']:
            brain_bool = False
        if contrast_type in ['t1', 't2']:
            brain_bool = True
    else:
        brain_bool = bool(int(arguments["-brain"]))

    kernel_size = arguments["-kernel"]
    if kernel_size == '3d' and contrast_type == 'dwi':
        kernel_size = '2d'
        sct.printv('3D kernel model for dwi contrast is not available. 2D kernel model is used instead.', type="warning")

    if '-ofolder' not in args:
        output_folder = os.getcwd()
    else:
        output_folder = arguments["-ofolder"]

    if ctr_algo == 'file' and "-file_centerline" not in args:
        sct.log.warning('Please use the flag -file_centerline to indicate the centerline filename.')
        sys.exit(1)
    
    if "-file_centerline" in args:
        manual_centerline_fname = arguments["-file_centerline"]
        ctr_algo = 'file'
    else:
        manual_centerline_fname = None

    remove_temp_files = int(arguments['-r'])

    verbose = int(arguments['-v'])

    path_qc = arguments.get("-qc", None)

    algo_config_stg = '\nMethod:'
    algo_config_stg += '\n\tCenterline algorithm: ' + str(ctr_algo)
    algo_config_stg += '\n\tAssumes brain section included in the image: ' + str(brain_bool)
    algo_config_stg += '\n\tDimension of the segmentation kernel convolutions: ' + kernel_size + '\n'
    sct.printv(algo_config_stg)

    im_image = Image(fname_image)
    # note: below we pass im_image.copy() otherwise the field absolutepath becomes None after execution of this function
    im_seg, im_image_RPI_upsamp, im_seg_RPI_upsamp, im_labels_viewer, im_ctr = deep_segmentation_spinalcord(
        im_image.copy(), contrast_type, ctr_algo=ctr_algo, ctr_file=manual_centerline_fname,
        brain_bool=brain_bool, kernel_size=kernel_size, remove_temp_files=remove_temp_files, verbose=verbose)

    # Save segmentation
    fname_seg = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_seg' +
                                             sct.extract_fname(fname_image)[2]))
    im_seg.save(fname_seg)

    if ctr_algo == 'viewer':
        # Save labels
        fname_labels = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_labels-centerline' +
                                               sct.extract_fname(fname_image)[2]))
        im_labels_viewer.save(fname_labels)

    if verbose == 2:
        # Save ctr
        fname_ctr = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_centerline' +
                                               sct.extract_fname(fname_image)[2]))
        im_ctr.save(fname_ctr)

    if path_qc is not None:
        generate_qc(fname_image, fname_seg=fname_seg, args=args, path_qc=os.path.abspath(path_qc),
                    process='sct_deepseg_sc')
    sct.display_viewer_syntax([fname_image, fname_seg], colormaps=['gray', 'red'], opacities=['', '0.7'])
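

# A minimal sketch (not part of the original script) showing how the segmentation step of the
# main() above can be called directly from Python, bypassing the CLI parsing. The input file name
# and the choice of ctr_algo='svm' are assumptions for illustration only. As in main(), a copy of
# the image is passed so that its absolutepath attribute is preserved.
def example_deepseg_api_usage():
    from spinalcordtoolbox.image import Image
    from spinalcordtoolbox.deepseg_sc.core import deep_segmentation_spinalcord
    im = Image('t2.nii.gz')  # hypothetical input file
    im_seg = deep_segmentation_spinalcord(im.copy(), 't2', ctr_algo='svm', brain_bool=True,
                                          kernel_size='2d', remove_temp_files=1, verbose=1)[0]
    im_seg.save('t2_seg.nii.gz')  # hypothetical output file
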
def test_integrity(param_test):
    """
    Test integrity of function
    """

    if param_test.args.startswith(default_args[1]):
        return param_test  # no integrity test

    # apply transformation to binary mask: template --> anat
    sct_apply_transfo.main(args=[
        '-i', param_test.fname_gt, '-d', param_test.dict_args_with_path['-s'],
        '-w',
        os.path.join(param_test.path_output,
                     'warp_template2anat.nii.gz'), '-o',
        os.path.join(param_test.path_output,
                     'test_template2anat.nii.gz'), '-x', 'nn', '-v', '0'
    ])

    # apply transformation to binary mask: anat --> template
    sct_apply_transfo.main(args=[
        '-i', param_test.dict_args_with_path['-s'], '-d', param_test.fname_gt,
        '-w',
        os.path.join(param_test.path_output,
                     'warp_anat2template.nii.gz'), '-o',
        os.path.join(param_test.path_output,
                     'test_anat2template.nii.gz'), '-x', 'nn', '-v', '0'
    ])

    # compute dice coefficient between template segmentation warped to anat and segmentation from anat
    im_seg = Image(param_test.dict_args_with_path['-s'])
    im_template_seg_reg = Image(
        os.path.join(param_test.path_output, 'test_template2anat.nii.gz'))
    dice_template2anat = msct_image.compute_dice(im_seg,
                                                 im_template_seg_reg,
                                                 mode='3d',
                                                 zboundaries=True)
    # check
    param_test.output += 'Dice[seg,template_seg_reg]: ' + str(
        dice_template2anat)
    if dice_template2anat > param_test.dice_threshold:
        param_test.output += '\n--> PASSED'
    else:
        param_test.status = 99
        param_test.output += '\n--> FAILED'

    # compute dice coefficient between anat segmentation warped to template and segmentation from template
    im_seg_reg = Image(
        os.path.join(param_test.path_output, 'test_anat2template.nii.gz'))
    im_template_seg = Image(param_test.fname_gt)
    dice_anat2template = msct_image.compute_dice(im_seg_reg,
                                                 im_template_seg,
                                                 mode='3d',
                                                 zboundaries=True)
    # check
    param_test.output += '\n\nDice[seg_reg,template_seg]: ' + str(
        dice_anat2template)
    if dice_anat2template > param_test.dice_threshold:
        param_test.output += '\n--> PASSED'
    else:
        param_test.status = 99
        param_test.output += '\n--> FAILED'

    # update Panda structure
    param_test.results['dice_template2anat'] = dice_template2anat
    param_test.results['dice_anat2template'] = dice_anat2template

    return param_test
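

# For reference, a minimal stand-alone sketch (not part of the test above) of the Dice overlap
# that msct_image.compute_dice evaluates between two binary masks: Dice = 2*|A inter B| / (|A| + |B|).
def example_dice(mask_a, mask_b):
    import numpy as np
    a, b = np.asarray(mask_a, dtype=bool), np.asarray(mask_b, dtype=bool)
    denom = a.sum() + b.sum()
    return 2.0 * np.logical_and(a, b).sum() / denom if denom else 0.0
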
Example #14
def main(fname_data, path_label, method, slices, levels, fname_output, labels_user, append_csv,
         fname_vertebral_labeling="", perslice=1, perlevel=1, verbose=1, combine_labels=True):
    """
    Extract metrics from MRI data based on mask (could be single file of folder to atlas)
    :param fname_data: data to extract metric from
    :param path_label: mask: could be single file or folder to atlas (which contains info_label.txt)
    :param method: one of {'wa', 'bin', 'ml', 'map'}
    :param slices: Slices of interest. Accepted format:
           "0,1,2,3": slices 0,1,2,3
           "0:3": slices 0,1,2,3
    :param levels: Vertebral levels to extract metrics from. Should be associated with a template
           (e.g. PAM50/template/) or a specified file: fname_vertebral_labeling. Same format as slices_of_interest.
    :param fname_output:
    :param labels_user:
    :param append_csv: Append to csv file
    :param fname_vertebral_labeling: vertebral labeling to be used with vertebral_levels
    :param perslice: if user selected several slices, then the function outputs a metric within each slice
           instead of a single average output.
    :param perlevel: if user selected several levels, then the function outputs a metric within each vertebral level
           instead of a single average output.
    :param verbose:
    :param combine_labels: bool: Combine labels into a single value
    :return:
    """

    # check if path_label is a file (e.g., single binary mask) instead of a folder (e.g., SCT atlas structure which
    # contains info_label.txt file)
    if os.path.isfile(path_label):
        # Label is a single file
        indiv_labels_ids = [0]
        indiv_labels_files = [path_label]
        combined_labels_ids = []
        label_struc = {0: LabelStruc(id=0,
                                     name=sct.extract_fname(path_label)[1],
                                     filename=path_label)}
        # set path_label to empty string, because indiv_labels_files will replace it from now on
        path_label = ''
    elif os.path.isdir(path_label):
        # Labels is an SCT atlas folder structure
        # Parse labels according to the file info_label.txt
        # Note: the "combined_labels_*" is a list of single labels that are defined in the section defined by the keyword
        # "# Keyword=CombinedLabels" in info_label.txt.
        # TODO: redirect to appropriate Sphinx documentation
        # TODO: output Class instead of multiple variables.
        #   Example 1:
        #     label_struc[2].id = (2)
        #     label_struc[2].name = "left fasciculus cuneatus"
        #     label_struc[2].filename = "PAM50_atlas_02.nii.gz"
        #   Example 2:
        #     label_struc[51].id = (1, 2, 3, ..., 29)
        #     label_struc[51].name = "White Matter"
        #     label_struc[51].filename = ""  # no name because it is combined
        indiv_labels_ids, indiv_labels_names, indiv_labels_files, \
        combined_labels_ids, combined_labels_names, combined_labels_id_groups, map_clusters \
            = read_label_file(path_label, param_default.file_info_label)

        label_struc = {}
        # fill IDs for indiv labels
        for i_label in range(len(indiv_labels_ids)):
            label_struc[indiv_labels_ids[i_label]] = LabelStruc(id=indiv_labels_ids[i_label],
                                                                name=indiv_labels_names[i_label],
                                                                filename=indiv_labels_files[i_label],
                                                                map_cluster=[indiv_labels_ids[i_label] in map_cluster for
                                                                             map_cluster in map_clusters].index(True))
        # fill IDs for combined labels
        # TODO: problem for defining map_cluster: if labels overlap two regions, e.g. WM and GM (e.g. id=50),
        #  map_cluster will take value 0, which is wrong.
        for i_label in range(len(combined_labels_ids)):
            label_struc[combined_labels_ids[i_label]] = LabelStruc(id=combined_labels_id_groups[i_label],
                                                                   name=combined_labels_names[i_label],
                                                                   map_cluster=[indiv_labels_ids[i_label] in map_cluster for
                                                                                map_cluster in map_clusters].index(True))
    else:
        raise RuntimeError(path_label + ' does not exist')

    # check syntax of labels asked by user
    labels_id_user = check_labels(indiv_labels_ids + combined_labels_ids, parse_num_list(labels_user))
    nb_labels = len(indiv_labels_files)

    # Load data and systematically reorient to RPI because we need the 3rd dimension to be z
    sct.printv('\nLoad metric image...', verbose)
    input_im = Image(fname_data).change_orientation("RPI")

    data = Metric(data=input_im.data, label='')
    # Load labels
    labels_tmp = np.empty([nb_labels], dtype=object)
    for i_label in range(nb_labels):
        im_label = Image(os.path.join(path_label, indiv_labels_files[i_label])).change_orientation("RPI")
        labels_tmp[i_label] = np.expand_dims(im_label.data, 3)  # TODO: generalize to 2D input label
    labels = np.concatenate(labels_tmp[:], 3)  # labels: (x,y,z,label)
    # Load vertebral levels
    if levels:
        im_vertebral_labeling = Image(fname_vertebral_labeling).change_orientation("RPI")
    else:
        im_vertebral_labeling = None

    # Get dimensions of data and labels
    nx, ny, nz = data.data.shape
    nx_atlas, ny_atlas, nz_atlas, nt_atlas = labels.shape

    # Check dimensions consistency between atlas and data
    if (nx, ny, nz) != (nx_atlas, ny_atlas, nz_atlas):
        sct.printv('\nERROR: Metric data and labels DO NOT HAVE SAME DIMENSIONS.', 1, type='error')

    # Combine individual labels for estimation
    if combine_labels:
        # Add entry with internal ID value (99) which corresponds to combined labels
        label_struc[99] = LabelStruc(id=labels_id_user, name=','.join([str(i) for i in labels_id_user]),
                                     map_cluster=None)
        labels_id_user = [99]

    for id_label in labels_id_user:
        sct.printv('Estimation for label: '+label_struc[id_label].name, verbose)
        agg_metric = extract_metric(data, labels=labels, slices=slices, levels=levels, perslice=perslice,
                                    perlevel=perlevel, vert_level=im_vertebral_labeling, method=method,
                                    label_struc=label_struc, id_label=id_label, indiv_labels_ids=indiv_labels_ids)

        save_as_csv(agg_metric, fname_output, fname_in=fname_data, append=append_csv)
        append_csv = True  # when looping across labels, need to append results in the same file
    sct.display_open(fname_output)
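

# A minimal, hypothetical usage sketch of the metric-extraction main() above. The data file, the
# atlas folder and the label ID (51, typically the combined white-matter label of the SCT atlas)
# are placeholder assumptions.
def example_extract_metric_usage():
    main(fname_data='mtr.nii.gz', path_label='label/atlas', method='wa', slices='2:12', levels='',
         fname_output='mtr_in_wm.csv', labels_user='51', append_csv=False,
         fname_vertebral_labeling='', perslice=0, perlevel=0, verbose=1, combine_labels=True)
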
def main():

    # Default params
    param = Param()

    # Get parser info
    parser = get_parser()
    arguments = parser.parse(sys.argv[1:])
    fname_data = arguments['-i']
    if '-m' in arguments:
        fname_mask = arguments['-m']
    else:
        fname_mask = ''
    method = arguments["-method"]
    if '-vol' in arguments:
        index_vol_user = arguments['-vol']
    else:
        index_vol_user = ''

    # Check parameters
    if method == 'diff':
        if not fname_mask:
            sct.printv('You need to provide a mask with -method diff. Exit.',
                       1,
                       type='error')

    # Load data and orient to RPI
    im_data = Image(fname_data).change_orientation('RPI')
    data = im_data.data
    if fname_mask:
        mask = Image(fname_mask).change_orientation('RPI').data

    # Retrieve selected volumes
    if index_vol_user:
        index_vol = parse_num_list(index_vol_user)
    else:
        index_vol = range(data.shape[3])

    # Make sure user selected 2 volumes with diff method
    if method == 'diff':
        if not len(index_vol) == 2:
            sct.printv(
                'Method "diff" should be used with exactly two volumes (specify with flag "-vol").',
                1, 'error')

    # Compute SNR
    # NB: "time" is assumed to be the 4th dimension of the variable "data"
    if method == 'mult':
        # Compute mean and STD across time
        data_mean = np.mean(data[:, :, :, index_vol], axis=3)
        data_std = np.std(data[:, :, :, index_vol], axis=3)
        # Generate mask where std is different from 0
        mask_std_nonzero = np.where(data_std > param.almost_zero)
        snr_map = np.zeros_like(data_mean)
        snr_map[mask_std_nonzero] = data_mean[mask_std_nonzero] / data_std[
            mask_std_nonzero]
        # Output SNR map
        fname_snr = sct.add_suffix(fname_data, '_SNR-' + method)
        im_snr = empty_like(im_data)
        im_snr.data = snr_map
        im_snr.save(fname_snr, dtype=np.float32)
        # Output non-zero mask
        fname_stdnonzero = sct.add_suffix(fname_data,
                                          '_mask-STD-nonzero' + method)
        im_stdnonzero = empty_like(im_data)
        data_stdnonzero = np.zeros_like(data_mean)
        data_stdnonzero[mask_std_nonzero] = 1
        im_stdnonzero.data = data_stdnonzero
        im_stdnonzero.save(fname_stdnonzero, dtype=np.float32)
        # Compute SNR in ROI
        if fname_mask:
            mean_in_roi = np.average(data_mean[mask_std_nonzero],
                                     weights=mask[mask_std_nonzero])
            std_in_roi = np.average(data_std[mask_std_nonzero],
                                    weights=mask[mask_std_nonzero])
            snr_roi = mean_in_roi / std_in_roi
            # snr_roi = np.average(snr_map[mask_std_nonzero], weights=mask[mask_std_nonzero])

    elif method == 'diff':
        data_2vol = np.take(data, index_vol, axis=3)
        # Compute mean in ROI
        data_mean = np.mean(data_2vol, axis=3)
        mean_in_roi = np.average(data_mean, weights=mask)
        data_sub = np.subtract(data_2vol[:, :, :, 1], data_2vol[:, :, :, 0])
        _, std_in_roi = weighted_avg_and_std(data_sub, mask)
        # Compute SNR, correcting for Rayleigh noise (see eq. 7 in Dietrich et al.)
        snr_roi = (2 / np.sqrt(2)) * mean_in_roi / std_in_roi

    # Display result
    if fname_mask:
        sct.printv('\nSNR_' + method + ' = ' + str(snr_roi) + '\n',
                   type='info')
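

# Added note: the 2/np.sqrt(2) factor above equals sqrt(2). For the two-volume "diff" method, SNR
# is estimated as sqrt(2) * mean(signal in ROI) / std(difference image in ROI); the sqrt(2)
# compensates for the variance doubling when two volumes are subtracted (Dietrich et al., eq. 7).
# A minimal unweighted sketch, assuming two 3D numpy arrays and a binary ROI mask:
def example_snr_diff(vol1, vol2, roi):
    import numpy as np
    roi = roi > 0
    mean_signal = np.mean((vol1[roi] + vol2[roi]) / 2.0)
    std_diff = np.std(vol1[roi] - vol2[roi])
    return np.sqrt(2) * mean_signal / std_diff
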
Example #16
def resample_nib(image, new_size=None, new_size_type=None, image_dest=None, interpolation='linear', mode='nearest'):
    """
    Resample a nibabel or Image object based on a specified resampling factor.
    Can deal with 2d, 3d or 4d image objects.

    :param image: nibabel or Image image.
    :param new_size: list of float: Resampling factor, final dimension or resolution, depending on new_size_type.
    :param new_size_type: {'vox', 'factor', 'mm'}: Feature used for resampling. Examples:
        new_size=[128, 128, 90], new_size_type='vox' --> Resampling to a dimension of 128x128x90 voxels
        new_size=[2, 2, 2], new_size_type='factor' --> 2x isotropic upsampling
        new_size=[1, 1, 5], new_size_type='mm' --> Resampling to a resolution of 1x1x5 mm
    :param image_dest: Destination image to resample the input image to. In this case, new_size and new_size_type
        are ignored
    :param interpolation: {'nn', 'linear', 'spline'}. The interpolation type
    :param mode: Outside values are filled with 0 ('constant') or nearest value ('nearest').
    :return: The resampled nibabel or Image image (depending on the input object type).
    """

    # set interpolation method
    dict_interp = {'nn': 0, 'linear': 1, 'spline': 2}

    # If input is an Image object, create nibabel object from it
    if type(image) == nib.nifti1.Nifti1Image:
        img = image
    elif type(image) == Image:
        img = nib.nifti1.Nifti1Image(image.data, image.hdr.get_best_affine())
    else:
        raise TypeError("Expected a nibabel.Nifti1Image or a spinalcordtoolbox Image object")

    if image_dest is None:
        # Get dimensions of data
        p = img.header.get_zooms()
        shape = img.header.get_data_shape()

        if img.ndim == 4:
            new_size += ['1']  # needed because the code below is general, i.e., does not assume 3d input and uses img.shape

        # compute new shape based on specific resampling method
        if new_size_type == 'vox':
            shape_r = tuple([int(new_size[i]) for i in range(img.ndim)])
        elif new_size_type == 'factor':
            if len(new_size) == 1:
                # isotropic resampling
                new_size = tuple([new_size[0] for i in range(img.ndim)])
            # compute new shape as: shape_r = shape * f
            shape_r = tuple([int(np.round(shape[i] * float(new_size[i]))) for i in range(img.ndim)])
        elif new_size_type == 'mm':
            if len(new_size) == 1:
                # isotropic resampling
                new_size = tuple([new_size[0] for i in range(img.ndim)])
            # compute new shape as: shape_r = shape * (p_r / p)
            shape_r = tuple([int(np.round(shape[i] * float(p[i]) / float(new_size[i]))) for i in range(img.ndim)])
        else:
            logger.error('new_size_type is not recognized.')

        # Generate 3d affine transformation: R
        affine = img.affine[:4, :4]
        affine[3, :] = np.array([0, 0, 0, 1])  # satisfy the nifti convention; otherwise it grabs the temporal dimension
        logger.debug('Affine matrix: \n' + str(affine))
        R = np.eye(4)
        for i in range(3):
            R[i, i] = img.shape[i] / float(shape_r[i])
        affine_r = np.dot(affine, R)
        reference = (shape_r, affine_r)

    # If reference is provided
    else:
        if type(image_dest) == nib.nifti1.Nifti1Image:
            reference = image_dest
        elif type(image_dest) == Image:
            reference = nib.nifti1.Nifti1Image(image_dest.data, image_dest.hdr.get_best_affine())
        else:
            raise TypeError("image_dest must be a nibabel.Nifti1Image or a spinalcordtoolbox Image object")

    if img.ndim == 3:
        # we use mode 'nearest' to overcome issue #2453
        img_r = resample_from_to(
            img, to_vox_map=reference, order=dict_interp[interpolation], mode=mode, cval=0.0, out_class=None)

    elif img.ndim == 4:
        # TODO: Cover img_dest with 4D volumes
        # Import here instead of top of the file because this is an isolated case and nibabel takes time to import
        data4d = np.zeros(shape_r)
        # Loop across 4th dimension and resample each 3d volume
        for it in range(img.shape[3]):
            # Create dummy 3d nibabel image
            nii_tmp = nib.nifti1.Nifti1Image(img.get_data()[..., it], affine)
            img3d_r = resample_from_to(
                nii_tmp, to_vox_map=(shape_r[:-1], affine_r), order=dict_interp[interpolation], mode=mode,
                cval=0.0, out_class=None)
            data4d[..., it] = img3d_r.get_data()
        # Create 4d nibabel Image
        img_r = nib.nifti1.Nifti1Image(data4d, affine_r)

    # Convert back to proper type
    if type(image) == nib.nifti1.Nifti1Image:
        return img_r
    elif type(image) == Image:
        return Image(img_r.get_data(), hdr=img_r.header, orientation=image.orientation, dim=img_r.header.get_data_shape())
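

# A minimal usage sketch (assumption, not part of the original module) of resample_nib() above:
# 2x isotropic upsampling of an anatomical image, then resampling its segmentation onto the same
# grid with nearest-neighbour interpolation. File names are placeholders.
def example_resample_nib_usage():
    import nibabel as nib
    nii = nib.load('t2.nii.gz')  # hypothetical input
    nii_up = resample_nib(nii, new_size=[2, 2, 2], new_size_type='factor', interpolation='linear')
    nii_seg = nib.load('t2_seg.nii.gz')  # hypothetical segmentation of the same image
    nii_seg_up = resample_nib(nii_seg, image_dest=nii_up, interpolation='nn')
    return nii_up, nii_seg_up
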
def main(argv=None):
    """Main function."""
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_loglevel(verbose=verbose)

    fname_image = arguments.i
    contrast_type = arguments.c

    ctr_algo = arguments.centerline

    if arguments.brain is None:
        # default: only assume a brain section for contrasts that typically include it
        brain_bool = contrast_type not in ['t2s', 't2_ax']
    else:
        brain_bool = bool(arguments.brain)

    output_folder = arguments.ofolder

    if ctr_algo == 'file' and arguments.file_centerline is None:
        printv(
            'Please use the flag -file_centerline to indicate the centerline filename.',
            1, 'error')
        sys.exit(1)

    if arguments.file_centerline is not None:
        manual_centerline_fname = arguments.file_centerline
        ctr_algo = 'file'
    else:
        manual_centerline_fname = None

    remove_temp_files = arguments.r

    algo_config_stg = '\nMethod:'
    algo_config_stg += '\n\tCenterline algorithm: ' + str(ctr_algo)
    algo_config_stg += '\n\tAssumes brain section included in the image: ' + str(
        brain_bool) + '\n'
    printv(algo_config_stg)

    # Segment image
    from spinalcordtoolbox.image import Image
    from spinalcordtoolbox.deepseg_lesion.core import deep_segmentation_MSlesion
    im_image = Image(fname_image)
    im_seg, im_labels_viewer, im_ctr = deep_segmentation_MSlesion(
        im_image,
        contrast_type,
        ctr_algo=ctr_algo,
        ctr_file=manual_centerline_fname,
        brain_bool=brain_bool,
        remove_temp_files=remove_temp_files,
        verbose=verbose)

    # Save segmentation
    fname_seg = os.path.abspath(
        os.path.join(
            output_folder,
            extract_fname(fname_image)[1] + '_lesionseg' +
            extract_fname(fname_image)[2]))
    im_seg.save(fname_seg)

    if ctr_algo == 'viewer':
        # Save labels
        fname_labels = os.path.abspath(
            os.path.join(
                output_folder,
                extract_fname(fname_image)[1] + '_labels-centerline' +
                extract_fname(fname_image)[2]))
        im_labels_viewer.save(fname_labels)

    if verbose == 2:
        # Save ctr
        fname_ctr = os.path.abspath(
            os.path.join(
                output_folder,
                extract_fname(fname_image)[1] + '_centerline' +
                extract_fname(fname_image)[2]))
        im_ctr.save(fname_ctr)

    display_viewer_syntax([fname_image, fname_seg],
                          colormaps=['gray', 'red'],
                          opacities=['', '0.7'])
Example #18
        tmp_dir = sct.tmp_create()
        im1_name = "im1.nii.gz"
        sct.copy(input_fname, os.path.join(tmp_dir, im1_name))
        if input_second_fname != '':
            im2_name = 'im2.nii.gz'
            sct.copy(input_second_fname, os.path.join(tmp_dir, im2_name))
        else:
            im2_name = None

        curdir = os.getcwd()
        os.chdir(tmp_dir)

        # now = time.time()
        input_im1 = Image(
            resample_image(im1_name,
                           binary=True,
                           thr=0.5,
                           npx=resample_to,
                           npy=resample_to))
        input_im1.absolutepath = os.path.basename(input_fname)
        if im2_name is not None:
            input_im2 = Image(
                resample_image(im2_name,
                               binary=True,
                               thr=0.5,
                               npx=resample_to,
                               npy=resample_to))
            input_im2.absolutepath = os.path.basename(input_second_fname)
        else:
            input_im2 = None

        computation = ComputeDistances(input_im1, im2=input_im2, param=param)
Example #19
def vertebral_detection(fname,
                        fname_seg,
                        contrast,
                        param,
                        init_disc,
                        verbose=1,
                        path_template='',
                        path_output='../',
                        scale_dist=1.):
    """
    Find intervertebral discs in straightened image using template matching
    :param fname: file name of straightened spinal cord
    :param fname_seg: file name of straightened spinal cord segmentation
    :param contrast: t1 or t2
    :param param:  advanced parameters
    :param init_disc:
    :param verbose:
    :param path_template:
    :param path_output: output path for verbose=2 pictures
    :param scale_dist: float: Scaling factor to adjust average distance between two adjacent intervertebral discs
    :return:
    """
    sct.printv('\nLook for template...', verbose)
    sct.printv('Path template: ' + path_template, verbose)

    # adjust file names if MNI-Poly-AMU template is used (by default: PAM50)
    fname_level = get_file_label(os.path.join(path_template, 'template'),
                                 'vertebral labeling',
                                 output='filewithpath')
    fname_template = get_file_label(os.path.join(path_template, 'template'),
                                    contrast.upper() + '-weighted template',
                                    output='filewithpath')

    # Open template and vertebral levels
    sct.printv('\nOpen template and vertebral levels...', verbose)
    data_template = Image(fname_template).data
    data_disc_template = Image(fname_level).data

    # open anatomical volume
    im_input = Image(fname)
    data = im_input.data

    # smooth data
    data = gaussian_filter(data,
                           param.smooth_factor,
                           output=None,
                           mode="reflect")

    # get dimension of src
    nx, ny, nz = data.shape
    # define xc and yc (centered in the field of view)
    xc = int(np.round(nx / 2))  # direction RL
    yc = int(np.round(ny / 2))  # direction AP
    # get dimension of template
    nxt, nyt, nzt = data_template.shape
    # define xc and yc (centered in the field of view)
    xct = int(np.round(nxt / 2))  # direction RL
    yct = int(np.round(nyt / 2))  # direction AP

    # define mean distance (in voxel) between adjacent discs: [C1/C2 -> C2/C3], [C2/C3 -> C4/C5], ..., [L1/L2 -> L2/L3]
    centerline_level = data_disc_template[xct, yct, :]
    # attribute value to each disc. Starts from max level, then decrease.
    # NB: value 2 means disc C2/C3 (and so on and so forth).
    min_level = centerline_level[centerline_level.nonzero()].min()
    max_level = centerline_level[centerline_level.nonzero()].max()
    list_disc_value_template = list(range(min_level, max_level))
    # add disc above top one
    list_disc_value_template.insert(int(0), min_level - 1)
    sct.printv('\nDisc values from template: ' + str(list_disc_value_template),
               verbose)
    # get diff to find transitions (i.e., discs)
    diff_centerline_level = np.diff(centerline_level)
    # get disc z-values
    list_disc_z_template = diff_centerline_level.nonzero()[0].tolist()
    list_disc_z_template.reverse()
    sct.printv('Z-values for each disc: ' + str(list_disc_z_template), verbose)
    list_distance_template = (
        np.diff(list_disc_z_template) *
        (-1)).tolist()  # multiplies by -1 to get positive distances
    # Update distance with scaling factor
    list_distance_template = [i * scale_dist for i in list_distance_template]
    sct.printv(
        'Distances between discs (in voxel): ' + str(list_distance_template),
        verbose)

    # display init disc
    if verbose == 2:
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        # get percentile for automatic contrast adjustment
        data_display = np.mean(data[xc - param.size_RL:xc +
                                    param.size_RL, :, :],
                               axis=0).transpose()
        percmin = np.percentile(data_display, 10)
        percmax = np.percentile(data_display, 90)
        # display image
        plt.matshow(data_display,
                    fignum=50,
                    cmap=plt.cm.gray,
                    clim=[percmin, percmax],
                    origin='lower')
        plt.title('Anatomical image')
        plt.autoscale(
            enable=False)  # to prevent autoscale of axis when displaying plot
        plt.figure(50), plt.scatter(yc + param.shift_AP_visu,
                                    init_disc[0],
                                    c='yellow',
                                    s=50)
        plt.text(yc + param.shift_AP_visu + 4,
                 init_disc[0],
                 str(init_disc[1]) + '/' + str(init_disc[1] + 1),
                 verticalalignment='center',
                 horizontalalignment='left',
                 color='pink',
                 fontsize=15), plt.draw()
        # plt.ion()  # enables interactive mode

    # FIND DISCS
    # ===========================================================================
    sct.printv('\nDetect intervertebral discs...', verbose)
    # assign initial z and disc
    current_z = init_disc[0]
    current_disc = init_disc[1]
    # create list for z and disc
    list_disc_z = []
    list_disc_value = []
    zrange = list(range(-10, 10))
    direction = 'superior'
    search_next_disc = True
    while search_next_disc:
        sct.printv(
            'Current disc: ' + str(current_disc) + ' (z=' + str(current_z) +
            '). Direction: ' + direction, verbose)
        try:
            # get z corresponding to current disc on template
            current_z_template = list_disc_z_template[current_disc]
        except:
            # in case reached the bottom (see issue #849)
            sct.printv(
                'WARNING: Reached the bottom of the template. Stop searching.',
                verbose, 'warning')
            break
        # find next disc
        # N.B. Do not search for C1/C2 disc (because poorly visible), use template distance instead
        if current_disc != 1:
            current_z = compute_corr_3d(data,
                                        data_template,
                                        x=xc,
                                        xshift=0,
                                        xsize=param.size_RL,
                                        y=yc,
                                        yshift=param.shift_AP,
                                        ysize=param.size_AP,
                                        z=current_z,
                                        zshift=0,
                                        zsize=param.size_IS,
                                        xtarget=xct,
                                        ytarget=yct,
                                        ztarget=current_z_template,
                                        zrange=zrange,
                                        verbose=verbose,
                                        save_suffix='_disc' +
                                        str(current_disc),
                                        gaussian_std=999,
                                        path_output=path_output)

        # display new disc
        if verbose == 2:
            plt.figure(50), plt.scatter(yc + param.shift_AP_visu,
                                        current_z,
                                        c='yellow',
                                        s=50)
            plt.text(yc + param.shift_AP_visu + 4,
                     current_z,
                     str(current_disc) + '/' + str(current_disc + 1),
                     verticalalignment='center',
                     horizontalalignment='left',
                     color='yellow',
                     fontsize=15), plt.draw()

        # append to main list
        if direction == 'superior':
            # append at the beginning
            list_disc_z.insert(0, current_z)
            list_disc_value.insert(0, current_disc)
        elif direction == 'inferior':
            # append at the end
            list_disc_z.append(current_z)
            list_disc_value.append(current_disc)

        # adjust correcting factor based on already-identified discs
        if len(list_disc_z) > 1:
            # compute distance between already-identified discs
            list_distance_current = (np.diff(list_disc_z) * (-1)).tolist()
            # retrieve the template distance corresponding to the already-identified discs
            index_disc_identified = [
                i for i, j in enumerate(list_disc_value_template)
                if j in list_disc_value[:-1]
            ]
            list_distance_template_identified = [
                list_distance_template[i] for i in index_disc_identified
            ]
            # divide subject and template distances for the identified discs
            list_subject_to_template_distance = [
                float(list_distance_current[i]) /
                list_distance_template_identified[i]
                for i in range(len(list_distance_current))
            ]
            # average across identified discs to obtain an average correcting factor
            correcting_factor = np.mean(list_subject_to_template_distance)
            sct.printv('.. correcting factor: ' + str(correcting_factor),
                       verbose)
        else:
            correcting_factor = 1
        # update list_distance specific for the subject
        list_distance = [
            int(np.round(list_distance_template[i] * correcting_factor))
            for i in range(len(list_distance_template))
        ]

        # assign new current_z and disc value
        if direction == 'superior':
            try:
                approx_distance_to_next_disc = list_distance[
                    list_disc_value_template.index(current_disc - 1)]
            except ValueError:
                sct.printv(
                    'WARNING: Disc value not included in template. Using previously-calculated distance: '
                    + str(approx_distance_to_next_disc))
            # assign new current_z and disc value
            current_z = current_z + approx_distance_to_next_disc
            current_disc = current_disc - 1
        elif direction == 'inferior':
            try:
                approx_distance_to_next_disc = list_distance[
                    list_disc_value_template.index(current_disc)]
            except ValueError:
                sct.printv(
                    'WARNING: Disc value not included in template. Using previously-calculated distance: '
                    + str(approx_distance_to_next_disc))
            # assign new current_z and disc value
            current_z = current_z - approx_distance_to_next_disc
            current_disc = current_disc + 1

        # if current_z is larger than searching zone, switch direction (and start from initial z minus approximate
        # distance from updated template distance)
        if current_z >= nz or current_disc == 0:
            sct.printv('.. Switching to inferior direction.', verbose)
            direction = 'inferior'
            current_disc = init_disc[1] + 1
            current_z = init_disc[0] - list_distance[
                list_disc_value_template.index(current_disc)]
        # if current_z is lower than searching zone, stop searching
        if current_z <= 0:
            search_next_disc = False

    # if upper disc is not 1, add disc above top disc based on mean_distance_adjusted
    upper_disc = min(list_disc_value)
    # if not upper_disc == 1:
    sct.printv(
        'Adding top disc based on adjusted template distance: #' +
        str(upper_disc - 1), verbose)
    approx_distance_to_next_disc = list_distance[
        list_disc_value_template.index(upper_disc - 1)]
    next_z = max(list_disc_z) + approx_distance_to_next_disc
    sct.printv('.. approximate distance: ' + str(approx_distance_to_next_disc),
               verbose)
    # make sure next disc does not go beyond FOV in superior direction
    if next_z > nz:
        list_disc_z.insert(0, nz)
    else:
        list_disc_z.insert(0, next_z)
    # assign disc value
    list_disc_value.insert(0, upper_disc - 1)

    # Label segmentation
    label_segmentation(fname_seg,
                       list_disc_z,
                       list_disc_value,
                       verbose=verbose)

    # save figure
    if verbose == 2:
        plt.figure(50), plt.savefig(
            os.path.join(path_output, "fig_anat_straight_with_labels.png"))
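

# A minimal stand-alone sketch (illustrative only) of the distance "correcting factor" used in the
# search loop above: the ratio of subject to template inter-disc distances, averaged over the
# discs already identified, which then rescales the remaining template distances.
def example_correcting_factor(list_disc_z_subject, list_distance_template_identified):
    import numpy as np
    list_distance_subject = (np.diff(list_disc_z_subject) * (-1)).tolist()
    ratios = [float(d_subj) / d_templ for d_subj, d_templ
              in zip(list_distance_subject, list_distance_template_identified)]
    return np.mean(ratios)
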
Example #20
def resample_image(fname,
                   suffix='_resampled.nii.gz',
                   binary=False,
                   npx=0.3,
                   npy=0.3,
                   thr=0.0,
                   interpolation='spline'):
    """
    Resampling function: resample the in-plane resolution (x, y) of the image
    :param fname: name of the image file to be resampled
    :param suffix: suffix added to the original fname after resampling
    :param binary: boolean, image is binary or not
    :param npx: new pixel size in the x direction
    :param npy: new pixel size in the y direction
    :param thr: if the image is binary, it will be thresholded at thr (default=0) after the resampling
    :param interpolation: type of interpolation used for the resampling
    :return: file name after resampling (or original fname if it was already in the correct resolution)
    """
    im_in = Image(fname)
    orientation = im_in.orientation
    if orientation != 'RPI':
        fname = im_in.change_orientation('RPI', generate_path=True).save().absolutepath

    nx, ny, nz, nt, px, py, pz, pt = im_in.dim

    if np.round(px, 2) != np.round(npx, 2) or np.round(py, 2) != np.round(
            npy, 2):
        name_resample = sct.extract_fname(fname)[1] + suffix
        if binary:
            interpolation = 'nn'

        if nz == 1:  # when data is 2d: we convert it to a 3d image in order to avoid nipy problem of conversion nifti-->nipy with 2d data
            sct.run([
                'sct_image', '-i', ','.join([fname, fname]), '-concat', 'z',
                '-o', fname
            ])

        sct.run([
            'sct_resample', '-i', fname, '-mm',
            str(npx) + 'x' + str(npy) + 'x' + str(pz), '-o', name_resample,
            '-x', interpolation
        ])

        if nz == 1:  # when input data was 2d: re-convert data 3d-->2d
            sct.run(['sct_image', '-i', name_resample, '-split', 'z'])
            im_split = Image(
                name_resample.split('.nii.gz')[0] + '_Z0000.nii.gz')
            im_split.save(name_resample)

        if binary:
            sct.run([
                'sct_maths', '-i', name_resample, '-bin',
                str(thr), '-o', name_resample
            ])

        if orientation != 'RPI':
            name_resample = Image(name_resample) \
             .change_orientation(orientation, generate_path=True) \
             .save() \
             .absolutepath

        return name_resample
    else:
        if orientation != 'RPI':
            fname = sct.add_suffix(fname, "_RPI")
            im_in = msct_image.change_orientation(im_in,
                                                  orientation).save(fname)

        sct.printv('Image resolution already ' + str(npx) + 'x' + str(npy) +
                   'x' + str(pz))
        return fname
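

# A minimal, hypothetical usage sketch of resample_image() above (the file name is a placeholder):
# resample a binary mask to 0.3x0.3 mm in-plane and re-binarize it at 0.5 after interpolation.
def example_resample_image_usage():
    return resample_image('mask.nii.gz', binary=True, thr=0.5, npx=0.3, npy=0.3)
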
    def measure(self):
        im_lesion = Image(self.fname_label)
        im_lesion_data = im_lesion.data
        p_lst = im_lesion.dim[4:7]  # voxel size

        label_lst = [l for l in np.unique(im_lesion_data)
                     if l]  # lesion label IDs list

        if self.path_template is not None:
            if os.path.isfile(self.path_levels):
                img_vert = Image(self.path_levels)
                im_vert_data = img_vert.data
                self.vert_lst = [
                    v for v in np.unique(im_vert_data) if v
                ]  # list of vertebral levels available in the input image

            else:
                im_vert_data = None
                printv(
                    'ERROR: the file ' + self.path_levels +
                    ' does not exist. Please make sure the template was correctly registered and warped (sct_register_to_template or sct_register_multimodal and sct_warp_template)',
                    type='error')

            # In order to open atlas images only one time
            atlas_data_dct = {}  # dict containing the np.array of the registered atlas
            for fname_atlas_roi in self.atlas_roi_lst:
                tract_id = int(
                    fname_atlas_roi.split('_')[-1].split('.nii.gz')[0])
                img_cur = Image(fname_atlas_roi)
                img_cur_copy = img_cur.copy()
                atlas_data_dct[tract_id] = img_cur_copy.data
                del img_cur

        self.volumes = np.zeros((im_lesion.dim[2], len(label_lst)))

        # iteration across each lesion to measure statistics
        for lesion_label in label_lst:
            im_lesion_data_cur = np.copy(im_lesion_data == lesion_label)
            printv('\nMeasures on lesion #' + str(lesion_label) + '...',
                   self.verbose, 'normal')

            label_idx = self.measure_pd[self.measure_pd.label ==
                                        lesion_label].index
            self._measure_volume(im_lesion_data_cur, p_lst, label_idx)
            self._measure_length(im_lesion_data_cur, p_lst, label_idx)
            self._measure_diameter(im_lesion_data_cur, p_lst, label_idx)

            # compute lesion distribution for each lesion
            if self.path_template is not None:
                self._measure_eachLesion_distribution(
                    lesion_id=lesion_label,
                    atlas_data=atlas_data_dct,
                    im_vert=im_vert_data,
                    im_lesion=im_lesion_data_cur,
                    p_lst=p_lst)

        if self.path_template is not None:
            # compute total lesion distribution
            self._measure_totLesion_distribution(
                im_lesion=np.copy(im_lesion_data > 0),
                atlas_data=atlas_data_dct,
                im_vert=im_vert_data,
                p_lst=p_lst)

        if self.fname_ref is not None:
            # Compute mean and std value in each labeled lesion
            self._measure_within_im(im_lesion=im_lesion_data,
                                    im_ref=Image(self.fname_ref).data,
                                    label_lst=label_lst)
Example #22
def register_data(im_src,
                  im_dest,
                  param_reg,
                  path_copy_warp=None,
                  rm_tmp=True):
    '''
    Register a source image onto a destination image.

    Parameters
    ----------
    im_src: class Image: source image
    im_dest: class Image: destination image
    param_reg: str: registration parameter (passed to sct_register_multimodal)
    path_copy_warp: path: folder where to copy the warping fields (default: do not copy them)
    rm_tmp: bool: remove the temporary directory when done

    Returns
    -------
    im_src_reg: class Image: source image registered on the destination image
    fname_src2dest, fname_dest2src: str: file names of the forward and inverse warping fields
    '''
    # im_src and im_dest are already preprocessed (in theory: im_dest = mean_image)
    # binarize images to get seg
    im_src_seg = binarize(im_src, thr_min=1, thr_max=1)
    im_dest_seg = binarize(im_dest)
    # create tmp dir and go in it
    tmp_dir = tmp_create()
    curdir = os.getcwd()
    os.chdir(tmp_dir)
    # save image and seg
    fname_src = 'src.nii.gz'
    im_src.save(fname_src)
    fname_src_seg = 'src_seg.nii.gz'
    im_src_seg.save(fname_src_seg)
    fname_dest = 'dest.nii.gz'
    im_dest.save(fname_dest)
    fname_dest_seg = 'dest_seg.nii.gz'
    im_dest_seg.save(fname_dest_seg)
    # do registration using param_reg
    sct_register_multimodal.main(args=[
        '-i', fname_src, '-d', fname_dest, '-iseg', fname_src_seg, '-dseg',
        fname_dest_seg, '-param', param_reg
    ])

    # get registration result
    fname_src_reg = add_suffix(fname_src, '_reg')
    im_src_reg = Image(fname_src_reg)
    # get out of tmp dir
    os.chdir(curdir)

    # file names of the warping fields generated by sct_register_multimodal
    file_src = extract_fname(fname_src)[1]
    file_dest = extract_fname(fname_dest)[1]
    fname_src2dest = 'warp_' + file_src + '2' + file_dest + '.nii.gz'
    fname_dest2src = 'warp_' + file_dest + '2' + file_src + '.nii.gz'

    # copy warping fields (names are defined above so that the return below works even when
    # path_copy_warp is None)
    if path_copy_warp is not None and os.path.isdir(os.path.abspath(path_copy_warp)):
        path_copy_warp = os.path.abspath(path_copy_warp)
        copy(os.path.join(tmp_dir, fname_src2dest), path_copy_warp)
        copy(os.path.join(tmp_dir, fname_dest2src), path_copy_warp)

    if rm_tmp:
        # remove tmp dir
        rmtree(tmp_dir)
    # return res image
    return im_src_reg, fname_src2dest, fname_dest2src
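

# A minimal usage sketch of register_data() above. The two inputs are already-loaded Image objects;
# the registration parameter string is an assumption (any value accepted by the -param flag of
# sct_register_multimodal could be used instead).
def example_register_data_usage(im_moving, im_fixed):
    param_reg = 'step=1,type=seg,algo=centermass,metric=MeanSquares,slicewise=1'
    im_reg, warp_fwd, warp_inv = register_data(im_moving, im_fixed, param_reg,
                                               path_copy_warp='.', rm_tmp=True)
    return im_reg, warp_fwd, warp_inv
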
def main(args=None):
    """
    Main function
    :param args:
    :return:
    """
    dim_list = ['x', 'y', 'z', 't']

    # Get parser args
    if args is None:
        args = None if sys.argv[1:] else ['--help']
    parser = get_parser()
    arguments = parser.parse_args(args=args)
    fname_in = arguments.i
    fname_out = arguments.o
    verbose = arguments.v
    sct.init_sct(log_level=verbose, update=True)  # Update log level
    # the optional '-type' flag is stored by argparse as 'arguments.type' (None if not provided)
    output_type = getattr(arguments, 'type', None)

    # Open file(s)
    im = Image(fname_in)
    data = im.data  # 3d or 4d numpy array
    dim = im.dim

    # run command
    if arguments.otsu is not None:
        param = arguments.otsu
        data_out = otsu(data, param)

    elif arguments.adap is not None:
        param = convert_list_str(arguments.adap, "int")
        data_out = adap(data, param[0], param[1])

    elif arguments.otsu_median is not None:
        param = convert_list_str(arguments.otsu_median, "int")
        data_out = otsu_median(data, param[0], param[1])

    elif arguments.thr is not None:
        param = arguments.thr
        data_out = threshold(data, param)

    elif arguments.percent is not None:
        param = arguments.percent
        data_out = perc(data, param)

    elif arguments.bin is not None:
        bin_thr = arguments.bin
        data_out = binarise(data, bin_thr=bin_thr)

    elif arguments.add is not None:
        from numpy import sum
        data2 = get_data_or_scalar(arguments.add, data)
        data_concat = concatenate_along_4th_dimension(data, data2)
        data_out = sum(data_concat, axis=3)

    elif arguments.sub is not None:
        data2 = get_data_or_scalar(arguments.sub, data)
        data_out = data - data2

    elif arguments.laplacian is not None:
        sigmas = convert_list_str(arguments.laplacian, "float")
        if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
        elif len(sigmas) != len(data.shape):
            parser.error('-laplacian needs the same number of inputs as the number of image dimensions, or only one input')
        # adjust sigma based on voxel size
        sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
        # smooth data
        data_out = laplacian(data, sigmas)

    elif arguments.mul is not None:
        from numpy import prod
        data2 = get_data_or_scalar(arguments.mul, data)
        data_concat = concatenate_along_4th_dimension(data, data2)
        data_out = prod(data_concat, axis=3)

    elif arguments.div is not None:
        from numpy import divide
        data2 = get_data_or_scalar(arguments.div, data)
        data_out = divide(data, data2)

    elif arguments.mean is not None:
        from numpy import mean
        dim = dim_list.index(arguments.mean)
        if dim + 1 > len(np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
        data_out = mean(data, dim)

    elif arguments.rms is not None:
        from numpy import mean, sqrt, square
        dim = dim_list.index(arguments.rms)
        if dim + 1 > len(np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
        data_out = sqrt(mean(square(data.astype(float)), dim))

    elif arguments.std is not None:
        from numpy import std
        dim = dim_list.index(arguments.std)
        if dim + 1 > len(np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
        data_out = std(data, dim, ddof=1)

    elif arguments.smooth is not None:
        sigmas = convert_list_str(arguments.smooth, "float")
        if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
        elif len(sigmas) != len(data.shape):
            parser.error('-smooth needs the same number of inputs as the number of image dimensions, or only one input')
        # adjust sigma based on voxel size
        sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
        # smooth data
        data_out = smooth(data, sigmas)

    elif arguments.dilate is not None:
        data_out = dilate(data, convert_list_str(arguments.dilate, "int"))

    elif arguments.erode is not None:
        data_out = erode(data, convert_list_str(arguments.erode, "int"))

    elif arguments.denoise is not None:
        # parse denoising arguments
        p, b = 1, 5  # default arguments
        list_denoise = (arguments.denoise).split(",")
        for i in list_denoise:
            if 'p' in i:
                p = int(i.split('=')[1])
            if 'b' in i:
                b = int(i.split('=')[1])
        data_out = denoise_nlmeans(data, patch_radius=p, block_radius=b)

    elif arguments.symmetrize is not None:
        data_out = (data + data[list(range(data.shape[0] - 1, -1, -1)), :, :]) / float(2)

    elif arguments.mi is not None:
        # input 1 = from flag -i --> im
        # input 2 = from flag -mi
        im_2 = Image(arguments.mi)
        compute_similarity(im.data, im_2.data, fname_out, metric='mi', verbose=verbose)
        data_out = None

    elif arguments.minorm is not None:
        im_2 = Image(arguments.minorm)
        compute_similarity(im.data, im_2.data, fname_out, metric='minorm', verbose=verbose)
        data_out = None

    elif arguments.corr is not None:
        # input 1 = from flag -i --> im
        # input 2 = from flag -mi
        im_2 = Image(arguments.corr)
        compute_similarity(im.data, im_2.data, fname_out, metric='corr', verbose=verbose)
        data_out = None

    # if no flag is set
    else:
        data_out = None
        parser.error('you need to specify an operation to perform on the input image')

    if data_out is not None:
        # Write output
        nii_out = Image(fname_in)  # use header of input file
        nii_out.data = data_out
        nii_out.save(fname_out, dtype=output_type)
    # TODO: case of multiple outputs
    # assert len(data_out) == n_out
    # if n_in == n_out:
    #     for im_in, d_out, fn_out in zip(nii, data_out, fname_out):
    #         im_in.data = d_out
    #         im_in.absolutepath = fn_out
    #         if "-w" in arguments:
    #             im_in.hdr.set_intent('vector', (), '')
    #         im_in.save()
    # elif n_out == 1:
    #     nii[0].data = data_out[0]
    #     nii[0].absolutepath = fname_out[0]
    #     if "-w" in arguments:
    #             nii[0].hdr.set_intent('vector', (), '')
    #     nii[0].save()
    # elif n_out > n_in:
    #     for dat_out, name_out in zip(data_out, fname_out):
    #         im_out = nii[0].copy()
    #         im_out.data = dat_out
    #         im_out.absolutepath = name_out
    #         if "-w" in arguments:
    #             im_out.hdr.set_intent('vector', (), '')
    #         im_out.save()
    # else:
    #     printv(parser.usage.generate(error='ERROR: not the correct numbers of inputs and outputs'))

    # display message
    if data_out is not None:
        sct.display_viewer_syntax([fname_out], verbose=verbose)
    else:
        printv('\nDone! File created: ' + fname_out, verbose, 'info')
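

# A hypothetical invocation sketch of the main() defined above (file names are placeholders):
# smooth an image with an isotropic 2 mm Gaussian kernel. As in the code above, the sigma is
# divided by the voxel size before filtering.
def example_maths_usage():
    main(args=['-i', 't2.nii.gz', '-smooth', '2', '-o', 't2_smooth.nii.gz'])
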
def main(args=None):
    parser = get_parser()
    if args:
        arguments = parser.parse_args(args)
    else:
        arguments = parser.parse_args(args=None if sys.argv[1:] else ['--help'])

    verbosity = arguments.v
    init_sct(log_level=verbosity, update=True)  # Update log level

    input_filename = arguments.i
    output_fname = arguments.o

    img = Image(input_filename)
    dtype = None

    if arguments.add is not None:
        value = arguments.add
        out = sct_labels.add(img, value)
    elif arguments.create is not None:
        labels = arguments.create
        out = sct_labels.create_labels_empty(img, labels)
    elif arguments.create_add is not None:
        labels = arguments.create_add
        out = sct_labels.create_labels(img, labels)
    elif arguments.create_seg is not None:
        labels = arguments.create_seg
        out = sct_labels.create_labels_along_segmentation(img, labels)
    elif arguments.cubic_to_point:
        out = sct_labels.cubic_to_point(img)
    elif arguments.display:
        display_voxel(img, verbosity)
        return
    elif arguments.increment:
        out = sct_labels.increment_z_inverse(img)
    elif arguments.disc is not None:
        ref = Image(arguments.disc)
        out = sct_labels.labelize_from_discs(img, ref)
    elif arguments.vert_body is not None:
        levels = arguments.vert_body
        if len(levels) == 1 and levels[0] == 0:
            levels = None  # all levels
        out = sct_labels.label_vertebrae(img, levels)
    elif arguments.vert_continuous:
        out = sct_labels.continuous_vertebral_levels(img)
        dtype = 'float32'
    elif arguments.MSE is not None:
        ref = Image(arguments.MSE)
        mse = sct_labels.compute_mean_squared_error(img, ref)
        printv(f"Computed MSE: {mse}")
        return
    elif arguments.remove_reference is not None:
        ref = Image(arguments.remove_reference)
        out = sct_labels.remove_missing_labels(img, ref)
    elif arguments.remove_sym is not None:
        # first pass: use img as source and the reference provided with -remove-sym
        ref = Image(arguments.remove_sym)
        out = sct_labels.remove_missing_labels(img, ref)

        # second pass use previous pass result as reference
        ref_out = sct_labels.remove_missing_labels(ref, out)
        ref_out.save(path=ref.absolutepath)
    elif arguments.remove is not None:
        labels = arguments.remove
        out = sct_labels.remove_labels_from_image(img, labels)
    elif arguments.keep is not None:
        labels = arguments.keep
        out = sct_labels.remove_other_labels_from_image(img, labels)
    elif arguments.create_viewer is not None:
        msg = "" if arguments.msg is None else f"{arguments.msg}\n"
        if arguments.ilabel is not None:
            input_labels_img = Image(arguments.ilabel)
            out = launch_manual_label_gui(img, input_labels_img, parse_num_list(arguments.create_viewer), msg)
        else:
            out = launch_sagittal_viewer(img, parse_num_list(arguments.create_viewer), msg)

    printv("Generating output files...")
    out.save(path=output_fname, dtype=dtype)
    display_viewer_syntax([input_filename, output_fname])

    if arguments.qc is not None:
        generate_qc(fname_in1=input_filename, fname_seg=output_fname, args=args,
                    path_qc=os.path.abspath(arguments.qc), dataset=arguments.qc_dataset,
                    subject=arguments.qc_subject, process='sct_label_utils')
Example #25
def pre_processing(fname_target, fname_sc_seg, fname_level=None, fname_manual_gmseg=None, new_res=0.3, square_size_size_mm=22.5, denoising=True, verbose=1, rm_tmp=True, for_model=False):
    printv('\nPre-process data...', verbose, 'normal')

    tmp_dir = tmp_create()

    copy(fname_target, tmp_dir)
    fname_target = ''.join(extract_fname(fname_target)[1:])
    copy(fname_sc_seg, tmp_dir)
    fname_sc_seg = ''.join(extract_fname(fname_sc_seg)[1:])

    curdir = os.getcwd()
    os.chdir(tmp_dir)

    original_info = {'orientation': None, 'im_sc_seg_rpi': None, 'interpolated_images': []}

    im_target = Image(fname_target).copy()
    im_sc_seg = Image(fname_sc_seg).copy()

    # get original orientation
    printv('  Reorient...', verbose, 'normal')
    original_info['orientation'] = im_target.orientation

    # assert images are in the same orientation
    assert im_target.orientation == im_sc_seg.orientation, "ERROR: the image to segment and its SC segmentation are not in the same orientation"

    im_target_rpi = im_target.copy().change_orientation('RPI', generate_path=True).save()
    im_sc_seg_rpi = im_sc_seg.copy().change_orientation('RPI', generate_path=True).save()
    original_info['im_sc_seg_rpi'] = im_sc_seg_rpi.copy()  # SC segmentation in RPI, used later to post-process the segmentations

    # denoise using P. Coupe non local means algorithm (see [Manjon et al. JMRI 2010]) implemented in dipy
    if denoising:
        printv('  Denoise...', verbose, 'normal')
        # crop image before denoising to speed it up
        nx, ny, nz, nt, px, py, pz, pt = im_target_rpi.dim
        size_x, size_y = (square_size_size_mm + 1) / px, (square_size_size_mm + 1) / py
        size = int(np.ceil(max(size_x, size_y)))
        # create mask
        fname_mask = 'mask_pre_crop.nii.gz'
        sct_create_mask.main(['-i', im_target_rpi.absolutepath, '-p', 'centerline,' + im_sc_seg_rpi.absolutepath, '-f', 'box', '-size', str(size), '-o', fname_mask])
        # crop image
        cropper = ImageCropper(im_target_rpi)
        cropper.get_bbox_from_mask(Image(fname_mask))
        im_target_rpi_crop = cropper.crop()
        # crop segmentation
        cropper = ImageCropper(im_sc_seg_rpi)
        cropper.get_bbox_from_mask(Image(fname_mask))
        im_sc_seg_rpi_crop = cropper.crop()
        # denoising
        from spinalcordtoolbox.math import denoise_nlmeans
        block_radius = 3
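        # shrink the block radius if the cropped volume has fewer than 2*block_radius slices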
        block_radius = int(im_target_rpi_crop.data.shape[2] / 2) if im_target_rpi_crop.data.shape[2] < (block_radius * 2) else block_radius
        patch_radius = block_radius - 1
        data_denoised = denoise_nlmeans(im_target_rpi_crop.data, block_radius=block_radius, patch_radius=patch_radius)
        im_target_rpi_crop.data = data_denoised

        im_target_rpi = im_target_rpi_crop
        im_sc_seg_rpi = im_sc_seg_rpi_crop
    else:
        fname_mask = None

    # interpolate image to reference square image (resample and square crop centered on SC)
    printv('  Interpolate data to the model space...', verbose, 'normal')
    list_im_slices = interpolate_im_to_ref(im_target_rpi, im_sc_seg_rpi, new_res=new_res, sq_size_size_mm=square_size_size_mm)
    original_info['interpolated_images'] = list_im_slices  # list of images (not Slice() objects)

    printv('  Mask data using the spinal cord segmentation...', verbose, 'normal')
    list_sc_seg_slices = interpolate_im_to_ref(im_sc_seg_rpi, im_sc_seg_rpi, new_res=new_res, sq_size_size_mm=square_size_size_mm, interpolation_mode=1)
    for i in range(len(list_im_slices)):
        # list_im_slices[i].data[list_sc_seg_slices[i].data == 0] = 0
        list_sc_seg_slices[i] = binarize(list_sc_seg_slices[i], thr_min=0.5, thr_max=1)
        list_im_slices[i].data = list_im_slices[i].data * list_sc_seg_slices[i].data

    printv('  Split along rostro-caudal direction...', verbose, 'normal')
    list_slices_target = [Slice(slice_id=i, im=im_slice.data, gm_seg=[], wm_seg=[]) for i, im_slice in enumerate(list_im_slices)]

    # load vertebral levels
    if fname_level is not None:
        printv('  Load vertebral levels...', verbose, 'normal')
        # copy level file to tmp dir
        os.chdir(curdir)
        copy(fname_level, tmp_dir)
        os.chdir(tmp_dir)
        # change fname level to only file name (path = tmp dir now)
        fname_level = ''.join(extract_fname(fname_level)[1:])
        # load levels
        list_slices_target = load_level(list_slices_target, fname_level)

    os.chdir(curdir)

    # load manual gmseg if there is one (model data)
    if fname_manual_gmseg is not None:
        printv('\n\tLoad manual GM segmentation(s) ...', verbose, 'normal')
        list_slices_target = load_manual_gmseg(list_slices_target, fname_manual_gmseg, tmp_dir, im_sc_seg_rpi, new_res, square_size_size_mm, for_model=for_model, fname_mask=fname_mask)

    if rm_tmp:
        # remove tmp folder
        rmtree(tmp_dir)
    return list_slices_target, original_info
Example #26
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_loglevel(verbose=verbose)

    fname_input1 = arguments.i
    fname_input2 = arguments.d

    tmp_dir = tmp_create()  # create tmp directory
    tmp_dir = os.path.abspath(tmp_dir)

    # copy input files to tmp directory
    fname_input1_tmp = 'tmp1_' + ''.join(extract_fname(fname_input1)[1:])
    fname_input2_tmp = 'tmp2_' + ''.join(extract_fname(fname_input2)[1:])
    copy(fname_input1, os.path.join(tmp_dir, fname_input1_tmp))
    copy(fname_input2, os.path.join(tmp_dir, fname_input2_tmp))
    fname_input1 = fname_input1_tmp
    fname_input2 = fname_input2_tmp

    curdir = os.getcwd()
    os.chdir(tmp_dir)  # go to tmp directory

    im_1 = Image(fname_input1)
    im_2 = Image(fname_input2)

    if arguments.bin is not None:
        im_1.data = binarize(im_1.data, 0)
        fname_input1_bin = add_suffix(fname_input1, '_bin')
        im_1.save(fname_input1_bin, mutable=True)

        im_2.data = binarize(im_2.data, 0)
        fname_input2_bin = add_suffix(fname_input2, '_bin')
        im_2.save(fname_input2_bin, mutable=True)

        # Use binarized images in subsequent steps
        fname_input1 = fname_input1_bin
        fname_input2 = fname_input2_bin

    # copy header of im_1 to im_2
    im_2.header = im_1.header
    im_2.save()

    cmd = ['isct_dice_coefficient', fname_input1, fname_input2]

    if vars(arguments)["2d_slices"] is not None:
        cmd += ['-2d-slices', str(vars(arguments)["2d_slices"])]
    if arguments.b is not None:
        bounding_box = arguments.b
        cmd += ['-b'] + bounding_box
    if arguments.bmax is not None and arguments.bmax == 1:
        cmd += ['-bmax']
    if arguments.bzmax is not None and arguments.bzmax == 1:
        cmd += ['-bzmax']
    if arguments.o is not None:
        path_output, fname_output, ext = extract_fname(arguments.o)
        cmd += ['-o', fname_output + ext]

    rm_tmp = bool(arguments.r)

    # # Computation of Dice coefficient using Python implementation.
    # # commented for now as it does not cover all the features of isct_dice_coefficient
    # #from spinalcordtoolbox.image import Image, compute_dice
    # #dice = compute_dice(Image(fname_input1), Image(fname_input2), mode='3d', zboundaries=False)
    # #printv('Dice (python-based) = ' + str(dice), verbose)
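    # A minimal NumPy sketch of the same metric on two binarized images sharing a grid
    # (illustrative only, assumes `import numpy as np`; it does not replicate the options of isct_dice_coefficient):
    # a = Image(fname_input1).data > 0
    # b = Image(fname_input2).data > 0
    # dice_py = 2 * np.count_nonzero(a & b) / (np.count_nonzero(a) + np.count_nonzero(b))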

    status, output = run_proc(cmd, verbose, is_sct_binary=True)

    os.chdir(curdir)  # go back to original directory

    # copy output file into original directory
    if arguments.o is not None:
        copy(os.path.join(tmp_dir, fname_output + ext),
             os.path.join(path_output, fname_output + ext))

    # remove tmp_dir
    if rm_tmp:
        rmtree(tmp_dir)

    printv(output, verbose)
Example #27
def load_level(list_slices_target, fname_level):
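    """
    Assign a vertebral level to each slice of list_slices_target.

    fname_level can either be a labelled NIfTI image (.nii/.nii.gz), in which case the level of each
    axial slice is derived from its non-zero voxel values, or a text file with one "slice,level" pair
    per line. Integer levels are shifted by +0.5 so that they point to the middle of the vertebra.
    """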
    verbose = 1
    path_level, file_level, ext_level = extract_fname(fname_level)

    #  ####### Check if the level file is an image or a text file
    # Level file is an image
    if ext_level in ['.nii', '.nii.gz']:
        im_level = Image(fname_level).change_orientation("IRP")

        list_level = []
        list_med_level = []
        for slice_level in im_level.data:
            try:
                # vertebral level of the slice
                l = np.mean(slice_level[slice_level > 0])
                # median of the vertebral level of the slice: if all voxels are int, med will be an int.
                med = np.median(slice_level[slice_level > 0])
                # change med in int if it is an int
                med = int(med) if int(med) == med else med
            except Exception as e:
                printv('WARNING: ' + str(e) + '\nNo level label found. Level will be set to 0 for this slice', verbose, 'warning')
                l = 0
                med = 0
            list_level.append(l)
            list_med_level.append(med)

        # if the median level is an integer for all slices, treat the levels as integers
        if all([isinstance(med, int) for med in list_med_level]):
            # integer levels are placed in the middle of each vertebra (hence the "+0.5")
            list_level = [int(np.round(l)) + 0.5 for l in list_level]

    # Level file is a text file
    elif ext_level == '.txt':
        file_level = open(fname_level, 'r')
        lines_level = file_level.readlines()
        file_level.close()

        list_level_by_slice = []
        list_type_level = []  # True for int, False for float
        for line in lines_level[1:]:
            i_slice, level = line.split(',')

            # remove whitespace characters from the level value
            for c in [' ', '\n', '\r', '\t']:
                level = level.replace(c, '')

            try:
                level = float(level)
            except Exception as e:
                # handle a level value given as a range (e.g. "2-3" or "2/3")
                if len(level) > 2:
                    l1 = l2 = 0
                    if '-' in level:
                        l1, l2 = level.split('-')
                    elif '/' in level:
                        l1, l2 = level.split('/')
                    # convention: the vertebral disc between two levels belongs to the lower level (C2-C3 = C3)
                    level = max(float(l1), float(l2))
                else:
                    # level unrecognized
                    level = 0

            i_slice = int(i_slice)

            list_type_level.append(int(level) == level)
            list_level_by_slice.append((i_slice, level))

        # sort list by number of slice
        list_level_by_slice.sort()

        if all(list_type_level):  # levels are int
            # add 0.5 to the int level to place in the middle of the vertebra
            to_add = 0.5
        else:
            # levels are float: keep them untouched
            to_add = 0

        list_level = [l[1] + to_add for l in list_level_by_slice]

    # Level file is not recognized
    else:
        list_level = None
        printv('ERROR: the level file is neither an image nor a text file ...', verbose, 'error')

    #  ####### Set level number for each slice of list_slices_target:
    for target_slice, level in zip(list_slices_target, list_level):
        target_slice.set(level=level)

    return list_slices_target
Example #28
def main(argv=None):
    """
    Main function
    :param argv:
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    dim_list = ['x', 'y', 'z', 't']

    fname_in = arguments.i
    fname_out = arguments.o
    output_type = arguments.type

    # Open file(s)
    im = Image(fname_in)
    data = im.data  # 3d or 4d numpy array
    dim = im.dim

    # run command
    if arguments.otsu is not None:
        param = arguments.otsu
        data_out = sct_math.otsu(data, param)

    elif arguments.adap is not None:
        param = arguments.adap
        data_out = sct_math.adap(data, param[0], param[1])

    elif arguments.otsu_median is not None:
        param = arguments.otsu_median
        data_out = sct_math.otsu_median(data, param[0], param[1])

    elif arguments.thr is not None:
        param = arguments.thr
        data_out = sct_math.threshold(data, param)

    elif arguments.percent is not None:
        param = arguments.percent
        data_out = sct_math.perc(data, param)

    elif arguments.bin is not None:
        bin_thr = arguments.bin
        data_out = sct_math.binarize(data, bin_thr=bin_thr)

    elif arguments.add is not None:
        data2 = get_data_or_scalar(arguments.add, data)
        data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
        data_out = np.sum(data_concat, axis=3)

    elif arguments.sub is not None:
        data2 = get_data_or_scalar(arguments.sub, data)
        data_out = data - data2

    elif arguments.laplacian is not None:
        sigmas = arguments.laplacian
        if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
        elif len(sigmas) != len(data.shape):
            printv(
                parser.error(
                    'ERROR: -laplacian needs the same number of inputs as the number of image dimensions, or only one input'
                ))
        # adjust sigma based on voxel size
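        # dim = (nx, ny, nz, nt, px, py, pz, pt): dim[4:7] are the voxel sizes, so sigmas expressed in mm are converted to voxel units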
        sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
        # smooth data
        data_out = sct_math.laplacian(data, sigmas)

    elif arguments.mul is not None:
        data2 = get_data_or_scalar(arguments.mul, data)
        data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
        data_out = np.prod(data_concat, axis=3)

    elif arguments.div is not None:
        data2 = get_data_or_scalar(arguments.div, data)
        data_out = np.divide(data, data2)

    elif arguments.mean is not None:
        dim = dim_list.index(arguments.mean)
        if dim + 1 > len(
                np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
        data_out = np.mean(data, dim)

    elif arguments.rms is not None:
        dim = dim_list.index(arguments.rms)
        if dim + 1 > len(
                np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
        data_out = np.sqrt(np.mean(np.square(data.astype(float)), dim))

    elif arguments.std is not None:
        dim = dim_list.index(arguments.std)
        if dim + 1 > len(
                np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
        data_out = np.std(data, dim, ddof=1)

    elif arguments.smooth is not None:
        sigmas = arguments.smooth
        if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
        elif len(sigmas) != len(data.shape):
            printv(
                parser.error(
                    'ERROR: -smooth needs the same number of inputs as the number of image dimensions, or only one input'
                ))
        # adjust sigma based on voxel size
        sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
        # smooth data
        data_out = sct_math.smooth(data, sigmas)

    elif arguments.dilate is not None:
        if arguments.shape in ['disk', 'square'] and arguments.dim is None:
            printv(
                parser.error(
                    'ERROR: -dim is required for -dilate with 2D morphological kernel'
                ))
        data_out = sct_math.dilate(data,
                                   size=arguments.dilate,
                                   shape=arguments.shape,
                                   dim=arguments.dim)

    elif arguments.erode is not None:
        if arguments.shape in ['disk', 'square'] and arguments.dim is None:
            printv(
                parser.error(
                    'ERROR: -dim is required for -erode with 2D morphological kernel'
                ))
        data_out = sct_math.erode(data,
                                  size=arguments.erode,
                                  shape=arguments.shape,
                                  dim=arguments.dim)

    elif arguments.denoise is not None:
        # parse denoising arguments
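        # expected format: comma-separated string such as "p=1,b=5" (p: patch radius, b: block radius)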
        p, b = 1, 5  # default arguments
        list_denoise = (arguments.denoise).split(",")
        for i in list_denoise:
            if 'p' in i:
                p = int(i.split('=')[1])
            if 'b' in i:
                b = int(i.split('=')[1])
        data_out = sct_math.denoise_nlmeans(data,
                                            patch_radius=p,
                                            block_radius=b)

    elif arguments.symmetrize is not None:
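        # average the volume with its mirror image along the first (x) axis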
        data_out = (data + data[list(range(data.shape[0] -
                                           1, -1, -1)), :, :]) / float(2)

    elif arguments.mi is not None:
        # input 1 = from flag -i --> im
        # input 2 = from flag -mi
        im_2 = Image(arguments.mi)
        compute_similarity(im,
                           im_2,
                           fname_out,
                           metric='mi',
                           metric_full='Mutual information',
                           verbose=verbose)
        data_out = None

    elif arguments.minorm is not None:
        im_2 = Image(arguments.minorm)
        compute_similarity(im,
                           im_2,
                           fname_out,
                           metric='minorm',
                           metric_full='Normalized Mutual information',
                           verbose=verbose)
        data_out = None

    elif arguments.corr is not None:
        # input 1 = from flag -i --> im
        # input 2 = from flag -corr
        im_2 = Image(arguments.corr)
        compute_similarity(im,
                           im_2,
                           fname_out,
                           metric='corr',
                           metric_full='Pearson correlation coefficient',
                           verbose=verbose)
        data_out = None

    # if no flag is set
    else:
        data_out = None
        printv(
            parser.error(
                'ERROR: you need to specify an operation to do on the input image'
            ))

    if data_out is not None:
        # Write output
        nii_out = Image(fname_in)  # use header of input file
        nii_out.data = data_out
        nii_out.save(fname_out, dtype=output_type)
    # TODO: case of multiple outputs
    # assert len(data_out) == n_out
    # if n_in == n_out:
    #     for im_in, d_out, fn_out in zip(nii, data_out, fname_out):
    #         im_in.data = d_out
    #         im_in.absolutepath = fn_out
    #         if arguments.w is not None:
    #             im_in.hdr.set_intent('vector', (), '')
    #         im_in.save()
    # elif n_out == 1:
    #     nii[0].data = data_out[0]
    #     nii[0].absolutepath = fname_out[0]
    #     if arguments.w is not None:
    #             nii[0].hdr.set_intent('vector', (), '')
    #     nii[0].save()
    # elif n_out > n_in:
    #     for dat_out, name_out in zip(data_out, fname_out):
    #         im_out = nii[0].copy()
    #         im_out.data = dat_out
    #         im_out.absolutepath = name_out
    #         if arguments.w is not None:
    #             im_out.hdr.set_intent('vector', (), '')
    #         im_out.save()
    # else:
    #     printv(parser.usage.generate(error='ERROR: not the correct numbers of inputs and outputs'))

    # display message
    if data_out is not None:
        display_viewer_syntax([fname_out], verbose=verbose)
    else:
        printv('\nDone! File created: ' + fname_out, verbose, 'info')
Example #29
def compute_dti(fname_in, fname_bvals, fname_bvecs, prefix, method, evecs,
                file_mask, verbose):
    """
    Compute DTI.
    :param fname_in: input 4d file.
    :param bvals: bvals txt file
    :param bvecs: bvecs txt file
    :param prefix: output prefix. Example: "dti_"
    :param method: algo for computing dti
    :param evecs: bool: output diffusion tensor eigenvectors and eigenvalues
    :return: True/False
    """
    # Open file.
    from spinalcordtoolbox.image import Image
    nii = Image(fname_in)
    data = nii.data
    printv('data.shape (%d, %d, %d, %d)' % data.shape)

    # open bvecs/bvals
    from dipy.io import read_bvals_bvecs
    bvals, bvecs = read_bvals_bvecs(fname_bvals, fname_bvecs)
    from dipy.core.gradients import gradient_table
    gtab = gradient_table(bvals, bvecs)

    # mask and crop the data. This is a quick way to avoid calculating Tensors on the background of the image.
    if not file_mask == '':
        printv('Open mask file...', verbose)
        # open mask file
        nii_mask = Image(file_mask)
        mask = nii_mask.data

    # fit tensor model
    printv('Computing tensor using "' + method + '" method...', verbose)
    import dipy.reconst.dti as dti
    if method == 'standard':
        tenmodel = dti.TensorModel(gtab)
        if file_mask == '':
            tenfit = tenmodel.fit(data)
        else:
            tenfit = tenmodel.fit(data, mask)
    elif method == 'restore':
        import dipy.denoise.noise_estimate as ne
        sigma = ne.estimate_sigma(data)
        dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
        if file_mask == '':
            tenfit = dti_restore.fit(data)
        else:
            tenfit = dti_restore.fit(data, mask)

    # Compute metrics
    printv('Computing metrics...', verbose)
    # FA
    nii.data = tenfit.fa
    nii.save(prefix + 'FA.nii.gz', dtype='float32')
    # MD
    nii.data = tenfit.md
    nii.save(prefix + 'MD.nii.gz', dtype='float32')
    # RD
    nii.data = tenfit.rd
    nii.save(prefix + 'RD.nii.gz', dtype='float32')
    # AD
    nii.data = tenfit.ad
    nii.save(prefix + 'AD.nii.gz', dtype='float32')
    if evecs:
        data_evecs = tenfit.evecs
        data_evals = tenfit.evals
        # output 1st (V1), 2nd (V2) and 3rd (V3) eigenvectors as 4d data
        for idim in range(3):
            nii.data = data_evecs[:, :, :, :, idim]
            nii.save(prefix + 'V' + str(idim + 1) + '.nii.gz', dtype="float32")
            nii.data = data_evals[:, :, :, idim]
            nii.save(prefix + 'E' + str(idim + 1) + '.nii.gz', dtype="float32")

    return True
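# A hypothetical invocation of compute_dti (all file names below are placeholders):
# compute_dti(fname_in='dwi.nii.gz', fname_bvals='bvals.txt', fname_bvecs='bvecs.txt',
#             prefix='dti_', method='standard', evecs=False, file_mask='', verbose=1)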
Example #30
def propseg(img_input, options_dict):
    """
    :param img_input: source image, to be segmented
    :param options_dict: arguments as dictionary
    :return: segmented Image
    """
    arguments = options_dict
    fname_input_data = img_input.absolutepath
    fname_data = os.path.abspath(fname_input_data)
    contrast_type = arguments.c
    contrast_type_conversion = {
        't1': 't1',
        't2': 't2',
        't2s': 't2',
        'dwi': 't1'
    }
    contrast_type_propseg = contrast_type_conversion[contrast_type]

    # Starting building the command
    cmd = ['isct_propseg', '-t', contrast_type_propseg]

    if arguments.o is not None:
        fname_out = arguments.o
    else:
        fname_out = os.path.basename(add_suffix(fname_data, "_seg"))

    folder_output = str(pathlib.Path(fname_out).parent)
    cmd += ['-o', folder_output]
    if not os.path.isdir(folder_output) and os.path.exists(folder_output):
        logger.error("output directory %s is not a valid directory" %
                     folder_output)
    if not os.path.exists(folder_output):
        os.makedirs(folder_output)

    if arguments.down is not None:
        cmd += ["-down", str(arguments.down)]
    if arguments.up is not None:
        cmd += ["-up", str(arguments.up)]

    remove_temp_files = arguments.r

    verbose = int(arguments.v)
    # Update for propseg binary
    if verbose > 0:
        cmd += ["-verbose"]

    # Output options
    if arguments.mesh is not None:
        cmd += ["-mesh"]
    if arguments.centerline_binary is not None:
        cmd += ["-centerline-binary"]
    if arguments.CSF is not None:
        cmd += ["-CSF"]
    if arguments.centerline_coord is not None:
        cmd += ["-centerline-coord"]
    if arguments.cross is not None:
        cmd += ["-cross"]
    if arguments.init_tube is not None:
        cmd += ["-init-tube"]
    if arguments.low_resolution_mesh is not None:
        cmd += ["-low-resolution-mesh"]
    # TODO: Not present. Why is this here? Was this renamed?
    # if arguments.detect_nii is not None:
    #     cmd += ["-detect-nii"]
    # TODO: Not present. Why is this here? Was this renamed?
    # if arguments.detect_png is not None:
    #     cmd += ["-detect-png"]

    # Helping options
    use_viewer = None
    use_optic = True  # enabled by default
    init_option = None
    rescale_header = arguments.rescale
    if arguments.init is not None:
        init_option = float(arguments.init)
        if init_option < 0:
            printv(
                'Command-line usage error: ' + str(init_option) +
                " is not a valid value for '-init'", 1, 'error')
            sys.exit(1)
    if arguments.init_centerline is not None:
        if str(arguments.init_centerline) == "viewer":
            use_viewer = "centerline"
        elif str(arguments.init_centerline) == "hough":
            use_optic = False
        else:
            if rescale_header != 1:
                fname_labels_viewer = func_rescale_header(str(
                    arguments.init_centerline),
                                                          rescale_header,
                                                          verbose=verbose)
            else:
                fname_labels_viewer = str(arguments.init_centerline)
            cmd += ["-init-centerline", fname_labels_viewer]
            use_optic = False
    if arguments.init_mask is not None:
        if str(arguments.init_mask) == "viewer":
            use_viewer = "mask"
        else:
            if rescale_header != 1:
                fname_labels_viewer = func_rescale_header(
                    str(arguments.init_mask), rescale_header)
            else:
                fname_labels_viewer = str(arguments.init_mask)
            cmd += ["-init-mask", fname_labels_viewer]
            use_optic = False
    if arguments.mask_correction is not None:
        cmd += ["-mask-correction", str(arguments.mask_correction)]
    if arguments.radius is not None:
        cmd += ["-radius", str(arguments.radius)]
    # TODO: Not present. Why is this here? Was this renamed?
    # if arguments.detect_n is not None:
    #     cmd += ["-detect-n", str(arguments.detect_n)]
    # TODO: Not present. Why is this here? Was this renamed?
    # if arguments.detect_gap is not None:
    #     cmd += ["-detect-gap", str(arguments.detect_gap)]
    # TODO: Not present. Why is this here? Was this renamed?
    # if arguments.init_validation is not None:
    #     cmd += ["-init-validation"]
    if arguments.nbiter is not None:
        cmd += ["-nbiter", str(arguments.nbiter)]
    if arguments.max_area is not None:
        cmd += ["-max-area", str(arguments.max_area)]
    if arguments.max_deformation is not None:
        cmd += ["-max-deformation", str(arguments.max_deformation)]
    if arguments.min_contrast is not None:
        cmd += ["-min-contrast", str(arguments.min_contrast)]
    if arguments.d is not None:
        cmd += ["-d", str(arguments["-d"])]
    if arguments.distance_search is not None:
        cmd += ["-dsearch", str(arguments.distance_search)]
    if arguments.alpha is not None:
        cmd += ["-alpha", str(arguments.alpha)]

    # check that the input image is 3D. Otherwise the ITK image reader would split the 4D image into 3D volumes and only take the first one.
    image_input = Image(fname_data)
    image_input_rpi = image_input.copy().change_orientation('RPI')
    nx, ny, nz, nt, px, py, pz, pt = image_input_rpi.dim
    if nt > 1:
        printv(
            'ERROR: your input image needs to be 3D in order to be segmented.',
            1, 'error')

    path_data, file_data, ext_data = extract_fname(fname_data)
    path_tmp = tmp_create(basename="label_vertebrae")

    # rescale header (see issue #1406)
    if rescale_header != 1:
        fname_data_propseg = func_rescale_header(fname_data, rescale_header)
    else:
        fname_data_propseg = fname_data

    # add to command
    cmd += ['-i', fname_data_propseg]

    # if centerline or mask is asked using viewer
    if use_viewer:
        from spinalcordtoolbox.gui.base import AnatomicalParams
        from spinalcordtoolbox.gui.centerline import launch_centerline_dialog

        params = AnatomicalParams()
        if use_viewer == 'mask':
            params.num_points = 3
            params.interval_in_mm = 15  # superior-inferior interval between two consecutive labels
            params.starting_slice = 'midfovminusinterval'
        if use_viewer == 'centerline':
            # setting maximum number of points to a reasonable value
            params.num_points = 20
            params.interval_in_mm = 30
            params.starting_slice = 'top'
        im_data = Image(fname_data_propseg)

        im_mask_viewer = zeros_like(im_data)
        # im_mask_viewer.absolutepath = add_suffix(fname_data_propseg, '_labels_viewer')
        controller = launch_centerline_dialog(im_data, im_mask_viewer, params)
        fname_labels_viewer = add_suffix(fname_data_propseg, '_labels_viewer')

        if not controller.saved:
            printv(
                'The viewer has been closed before entering all manual points. Please try again.',
                1, 'error')
            sys.exit(1)
        # save labels
        controller.as_niftii(fname_labels_viewer)

        # add mask filename to parameters string
        if use_viewer == "centerline":
            cmd += ["-init-centerline", fname_labels_viewer]
        elif use_viewer == "mask":
            cmd += ["-init-mask", fname_labels_viewer]

    # If using OptiC
    elif use_optic:
        image_centerline = optic.detect_centerline(image_input, contrast_type,
                                                   verbose)
        fname_centerline_optic = os.path.join(path_tmp,
                                              'centerline_optic.nii.gz')
        image_centerline.save(fname_centerline_optic)
        cmd += ["-init-centerline", fname_centerline_optic]

    if init_option is not None:
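        # a value greater than 1 is assumed to be a slice index and is converted to a fraction of the slice range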
        if init_option > 1:
            init_option /= (nz - 1)
        cmd += ['-init', str(init_option)]

    # enabling centerline extraction by default (needed by check_and_correct_segmentation() )
    cmd += ['-centerline-binary']

    # run propseg
    status, output = run_proc(cmd,
                              verbose,
                              raise_exception=False,
                              is_sct_binary=True)

    # check status is not 0
    if not status == 0:
        printv(
            'Automatic cord detection failed. Please initialize using -init-centerline or -init-mask (see help)',
            1, 'error')
        sys.exit(1)

    # build output filename
    fname_seg = os.path.join(folder_output, fname_out)
    fname_centerline = os.path.join(
        folder_output, os.path.basename(add_suffix(fname_data, "_centerline")))
    # in case the header was rescaled, rename the output files to remove the "_rescaled" suffix
    if rescale_header != 1:
        mv(
            os.path.join(
                folder_output,
                add_suffix(os.path.basename(fname_data_propseg), "_seg")),
            fname_seg)
        mv(
            os.path.join(
                folder_output,
                add_suffix(os.path.basename(fname_data_propseg),
                           "_centerline")), fname_centerline)
        # if the viewer was used, copy the labelled points to the output folder (they will then be scaled back)
        if use_viewer:
            fname_labels_viewer_new = os.path.join(
                folder_output,
                os.path.basename(add_suffix(fname_data, "_labels_viewer")))
            copy(fname_labels_viewer, fname_labels_viewer_new)
            # update variable (used later)
            fname_labels_viewer = fname_labels_viewer_new

    # check consistency of segmentation
    if arguments.correct_seg:
        check_and_correct_segmentation(fname_seg,
                                       fname_centerline,
                                       folder_output=folder_output,
                                       threshold_distance=3.0,
                                       remove_temp_files=remove_temp_files,
                                       verbose=verbose)

    # copy header from input to segmentation to make sure qform is the same
    printv("Copy header input --> output(s) to make sure qform is the same.",
           verbose)
    list_fname = [fname_seg, fname_centerline]
    if use_viewer:
        list_fname.append(fname_labels_viewer)
    for fname in list_fname:
        im = Image(fname)
        im.header = image_input.header
        im.save(dtype='int8')  # they are all binary masks hence fine to save as int8

    return Image(fname_seg)