def resample_image(fname, suffix='_resampled.nii.gz', binary=False, npx=0.3, npy=0.3, thr=0.0, interpolation='spline'):
    """
    Resampling function: add a padding, resample, crop the padding
    :param fname: name of the image file to be resampled
    :param suffix: suffix added to the original fname after resampling
    :param binary: boolean, image is binary or not
    :param npx: new pixel size in the x direction
    :param npy: new pixel size in the y direction
    :param thr: if the image is binary, it will be thresholded at thr (default=0) after the resampling
    :param interpolation: type of interpolation used for the resampling
    :return: file name after resampling (or original fname if it was already in the correct resolution)
    """
    im_in = Image(fname)
    orientation = im_in.orientation
    if orientation != 'RPI':
        im_in.change_orientation('RPI')
        fname = add_suffix(im_in.absolutepath, "_rpi")
        im_in.save(path=fname, mutable=True)

    nx, ny, nz, nt, px, py, pz, pt = im_in.dim

    if np.round(px, 2) != np.round(npx, 2) or np.round(py, 2) != np.round(npy, 2):
        name_resample = extract_fname(fname)[1] + suffix
        if binary:
            interpolation = 'nn'

        if nz == 1:
            # when data is 2d: we convert it to a 3d image in order to avoid conversion problem with 2d data
            # TODO: check if this above problem is still present (now that we are using nibabel instead of nipy)
            run_proc(['sct_image', '-i', ','.join([fname, fname]), '-concat', 'z', '-o', fname])

        run_proc(['sct_resample', '-i', fname, '-mm', str(npx) + 'x' + str(npy) + 'x' + str(pz), '-o', name_resample, '-x', interpolation])

        if nz == 1:  # when input data was 2d: re-convert data 3d-->2d
            run_proc(['sct_image', '-i', name_resample, '-split', 'z'])
            im_split = Image(name_resample.split('.nii.gz')[0] + '_Z0000.nii.gz')
            im_split.save(name_resample)

        if binary:
            img = Image(name_resample)
            img.data = binarize(img.data, thr)
            img.save()

        if orientation != 'RPI':
            img = Image(name_resample)
            img.change_orientation(orientation)
            name_resample = add_suffix(img.absolutepath, "_{}".format(orientation.lower()))
            img.save(path=name_resample, mutable=True)

        return name_resample
    else:
        if orientation != 'RPI':
            fname = add_suffix(fname, "_RPI")
            im_in = change_orientation(im_in, orientation).save(fname)

        printv('Image resolution already ' + str(npx) + 'x' + str(npy) + 'x' + str(pz))
        return fname
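
A minimal usage sketch for the function above (the file names 't2.nii.gz' and 't2_seg.nii.gz' are hypothetical; it assumes resample_image and its Image helpers are importable from the surrounding module):

# Hypothetical usage of resample_image; file names are made up for illustration.
# Resample an anatomical image to 0.3x0.3 mm in-plane, keeping the slice thickness.
fname_res = resample_image('t2.nii.gz', suffix='_resampled.nii.gz', npx=0.3, npy=0.3)

# Resample the matching binary segmentation: binary=True forces nearest-neighbour
# interpolation and re-thresholds the result at thr after resampling.
fname_seg_res = resample_image('t2_seg.nii.gz', binary=True, thr=0.5, npx=0.3, npy=0.3)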
Example #2
def merge_images(list_fname_src, fname_dest, list_fname_warp, param):
    """
    Merge multiple source images onto destination space. All images are warped to the destination space and then added.
    To deal with overlap during merging (e.g. one voxel in destination image is shared with two input images), the
    resulting voxel is divided by the sum of the partial volume of each image. For example, if src(x,y,z)=1 is mapped to
    dest(i,j,k) with a partial volume of 0.5 (because destination voxel is bigger), then its value after linear interpolation
    will be 0.5. To account for partial volume, the resulting voxel will be: dest(i,j,k) = 0.5*0.5/0.5 = 0.5.
    Now, if two voxels overlap in the destination space, let's say: src(x,y,z)=1 and src2(x',y',z')=1, then the
    resulting value will be: dest(i,j,k) = (0.5*0.5 + 0.5*0.5) / (0.5+0.5) = 0.5. So this function acts like a weighted
    average operator, only in destination voxels that share multiple source voxels.

    Parameters
    ----------
    list_fname_src
    fname_dest
    list_fname_warp
    param

    Returns
    -------

    """
    # create temporary folder
    path_tmp = tmp_create()

    # get dimensions of destination file
    nii_dest = Image(fname_dest)

    # initialize variables
    data = np.zeros([
        nii_dest.dim[0], nii_dest.dim[1], nii_dest.dim[2],
        len(list_fname_src)
    ])
    partial_volume = np.zeros([
        nii_dest.dim[0], nii_dest.dim[1], nii_dest.dim[2],
        len(list_fname_src)
    ])
    data_merge = np.zeros([nii_dest.dim[0], nii_dest.dim[1], nii_dest.dim[2]])

    # loop across files
    i_file = 0
    for fname_src in list_fname_src:

        # apply transformation src --> dest
        sct_apply_transfo.main(argv=[
            '-i', fname_src,
            '-d', fname_dest,
            '-w', list_fname_warp[i_file],
            '-x', param.interp,
            '-o', 'src_' + str(i_file) + '_template.nii.gz',
            '-v', str(param.verbose)])

        # create binary mask from input file by assigning one to all non-null voxels
        img = Image(fname_src)
        out = img.copy()
        out.data = binarize(out.data, param.almost_zero)
        out.save(path=f"src_{i_file}native_bin.nii.gz")

        # apply transformation to binary mask to compute partial volume
        sct_apply_transfo.main(argv=[
            '-i', 'src_' + str(i_file) + 'native_bin.nii.gz',
            '-d', fname_dest,
            '-w', list_fname_warp[i_file],
            '-x', param.interp,
            '-o', 'src_' + str(i_file) + '_template_partialVolume.nii.gz'])

        # open data
        data[:, :, :, i_file] = Image('src_' + str(i_file) + '_template.nii.gz').data
        partial_volume[:, :, :, i_file] = Image('src_' + str(i_file) + '_template_partialVolume.nii.gz').data
        i_file += 1

    # merge files using partial volume information (and convert nan resulting from division by zero to zeros)
    data_merge = np.divide(np.sum(data * partial_volume, axis=3),
                           np.sum(partial_volume, axis=3))
    data_merge = np.nan_to_num(data_merge)

    # write result in file
    nii_dest.data = data_merge
    nii_dest.save(param.fname_out)

    # remove temporary folder
    if param.rm_tmp:
        rmtree(path_tmp)
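
The partial-volume weighting described in the docstring can be reproduced on synthetic arrays; the following is a standalone sketch of the merge formula only (not part of the SCT code), matching the 0.5-overlap example above:

import numpy as np

# Two warped sources and their warped binary masks (partial volumes), stacked along axis 3.
data = np.stack([np.full((2, 2, 2), 0.5), np.full((2, 2, 2), 0.5)], axis=3)
partial_volume = np.stack([np.full((2, 2, 2), 0.5), np.full((2, 2, 2), 0.5)], axis=3)

# dest = sum_i(data_i * pv_i) / sum_i(pv_i) = (0.5*0.5 + 0.5*0.5) / (0.5 + 0.5) = 0.5
merged = np.divide(np.sum(data * partial_volume, axis=3), np.sum(partial_volume, axis=3))
merged = np.nan_to_num(merged)  # voxels covered by no source become 0 instead of nan
print(merged[0, 0, 0])  # 0.5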
Example #3
def main(argv=None):
    """
    Main function
    :param argv:
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    dim_list = ['x', 'y', 'z', 't']

    fname_in = arguments.i
    fname_out = arguments.o
    output_type = arguments.type

    # Open file(s)
    im = Image(fname_in)
    data = im.data  # 3d or 4d numpy array
    dim = im.dim

    # run command
    if arguments.otsu is not None:
        param = arguments.otsu
        data_out = sct_math.otsu(data, param)

    elif arguments.adap is not None:
        param = arguments.adap
        data_out = sct_math.adap(data, param[0], param[1])

    elif arguments.otsu_median is not None:
        param = arguments.otsu_median
        data_out = sct_math.otsu_median(data, param[0], param[1])

    elif arguments.thr is not None:
        param = arguments.thr
        data_out = sct_math.threshold(data, param)

    elif arguments.percent is not None:
        param = arguments.percent
        data_out = sct_math.perc(data, param)

    elif arguments.bin is not None:
        bin_thr = arguments.bin
        data_out = sct_math.binarize(data, bin_thr=bin_thr)

    elif arguments.add is not None:
        data2 = get_data_or_scalar(arguments.add, data)
        data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
        data_out = np.sum(data_concat, axis=3)

    elif arguments.sub is not None:
        data2 = get_data_or_scalar(arguments.sub, data)
        data_out = data - data2

    elif arguments.laplacian is not None:
        sigmas = arguments.laplacian
        if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
        elif len(sigmas) != len(data.shape):
            parser.error('ERROR: -laplacian needs the same number of inputs as the number of image dimensions, or only one input')
        # adjust sigma based on voxel size
        sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
        # smooth data
        data_out = sct_math.laplacian(data, sigmas)

    elif arguments.mul is not None:
        data2 = get_data_or_scalar(arguments.mul, data)
        data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
        data_out = np.prod(data_concat, axis=3)

    elif arguments.div is not None:
        data2 = get_data_or_scalar(arguments.div, data)
        data_out = np.divide(data, data2)

    elif arguments.mean is not None:
        dim = dim_list.index(arguments.mean)
        if dim + 1 > len(np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
        data_out = np.mean(data, dim)

    elif arguments.rms is not None:
        dim = dim_list.index(arguments.rms)
        if dim + 1 > len(np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
        data_out = np.sqrt(np.mean(np.square(data.astype(float)), dim))

    elif arguments.std is not None:
        dim = dim_list.index(arguments.std)
        if dim + 1 > len(np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
        data_out = np.std(data, dim, ddof=1)

    elif arguments.smooth is not None:
        sigmas = arguments.smooth
        if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
        elif len(sigmas) != len(data.shape):
            parser.error('ERROR: -smooth needs the same number of inputs as the number of image dimensions, or only one input')
        # adjust sigma based on voxel size
        sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
        # smooth data
        data_out = sct_math.smooth(data, sigmas)

    elif arguments.dilate is not None:
        if arguments.shape in ['disk', 'square'] and arguments.dim is None:
            parser.error('ERROR: -dim is required for -dilate with 2D morphological kernel')
        data_out = sct_math.dilate(data,
                                   size=arguments.dilate,
                                   shape=arguments.shape,
                                   dim=arguments.dim)

    elif arguments.erode is not None:
        if arguments.shape in ['disk', 'square'] and arguments.dim is None:
            parser.error('ERROR: -dim is required for -erode with 2D morphological kernel')
        data_out = sct_math.erode(data,
                                  size=arguments.erode,
                                  shape=arguments.shape,
                                  dim=arguments.dim)

    elif arguments.denoise is not None:
        # parse denoising arguments
        p, b = 1, 5  # default arguments
        list_denoise = (arguments.denoise).split(",")
        for i in list_denoise:
            if 'p' in i:
                p = int(i.split('=')[1])
            if 'b' in i:
                b = int(i.split('=')[1])
        data_out = sct_math.denoise_nlmeans(data,
                                            patch_radius=p,
                                            block_radius=b)

    elif arguments.symmetrize is not None:
        data_out = (data + data[::-1, :, :]) / 2.0  # average the image with its flip along the first axis

    elif arguments.mi is not None:
        # input 1 = from flag -i --> im
        # input 2 = from flag -mi
        im_2 = Image(arguments.mi)
        compute_similarity(im,
                           im_2,
                           fname_out,
                           metric='mi',
                           metric_full='Mutual information',
                           verbose=verbose)
        data_out = None

    elif arguments.minorm is not None:
        im_2 = Image(arguments.minorm)
        compute_similarity(im,
                           im_2,
                           fname_out,
                           metric='minorm',
                           metric_full='Normalized Mutual information',
                           verbose=verbose)
        data_out = None

    elif arguments.corr is not None:
        # input 1 = from flag -i --> im
        # input 2 = from flag -corr
        im_2 = Image(arguments.corr)
        compute_similarity(im,
                           im_2,
                           fname_out,
                           metric='corr',
                           metric_full='Pearson correlation coefficient',
                           verbose=verbose)
        data_out = None

    # if no flag is set
    else:
        data_out = None
        parser.error('ERROR: you need to specify an operation to perform on the input image')

    if data_out is not None:
        # Write output
        nii_out = Image(fname_in)  # use header of input file
        nii_out.data = data_out
        nii_out.save(fname_out, dtype=output_type)
    # TODO: case of multiple outputs
    # assert len(data_out) == n_out
    # if n_in == n_out:
    #     for im_in, d_out, fn_out in zip(nii, data_out, fname_out):
    #         im_in.data = d_out
    #         im_in.absolutepath = fn_out
    #         if arguments.w is not None:
    #             im_in.hdr.set_intent('vector', (), '')
    #         im_in.save()
    # elif n_out == 1:
    #     nii[0].data = data_out[0]
    #     nii[0].absolutepath = fname_out[0]
    #     if arguments.w is not None:
    #             nii[0].hdr.set_intent('vector', (), '')
    #     nii[0].save()
    # elif n_out > n_in:
    #     for dat_out, name_out in zip(data_out, fname_out):
    #         im_out = nii[0].copy()
    #         im_out.data = dat_out
    #         im_out.absolutepath = name_out
    #         if arguments.w is not None:
    #             im_out.hdr.set_intent('vector', (), '')
    #         im_out.save()
    # else:
    #     printv(parser.usage.generate(error='ERROR: not the correct numbers of inputs and outputs'))

    # display message
    if data_out is not None:
        display_viewer_syntax([fname_out], verbose=verbose)
    else:
        printv('\nDone! File created: ' + fname_out, verbose, 'info')
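
The -smooth and -laplacian branches take sigmas in millimetres and convert them to voxel units by dividing by the voxel size stored in im.dim (nx, ny, nz, nt, px, py, pz, pt). A small standalone sketch of that conversion, with made-up dimensions:

# dim follows the (nx, ny, nz, nt, px, py, pz, pt) convention used above; values are made up.
dim = (256, 256, 64, 1, 0.5, 0.5, 2.0, 1.0)
sigmas_mm = [2.0, 2.0, 2.0]  # requested smoothing in millimetres
sigmas_vox = [sigmas_mm[i] / dim[i + 4] for i in range(3)]
print(sigmas_vox)  # [4.0, 4.0, 1.0] voxels along x, y, z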
Example #4
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_loglevel(verbose=verbose)

    fname_input1 = arguments.i
    fname_input2 = arguments.d

    tmp_dir = tmp_create()  # create tmp directory
    tmp_dir = os.path.abspath(tmp_dir)

    # copy input files to tmp directory
    fname_input1_tmp = 'tmp1_' + ''.join(extract_fname(fname_input1)[1:])
    fname_input2_tmp = 'tmp2_' + ''.join(extract_fname(fname_input2)[1:])
    copy(fname_input1, os.path.join(tmp_dir, fname_input1_tmp))
    copy(fname_input2, os.path.join(tmp_dir, fname_input2_tmp))
    fname_input1 = fname_input1_tmp
    fname_input2 = fname_input2_tmp

    curdir = os.getcwd()
    os.chdir(tmp_dir)  # go to tmp directory

    im_1 = Image(fname_input1)
    im_2 = Image(fname_input2)

    if arguments.bin is not None:
        im_1.data = binarize(im_1.data, 0)
        fname_input1_bin = add_suffix(fname_input1, '_bin')
        im_1.save(fname_input1_bin, mutable=True)

        im_2.data = binarize(im_2.data, 0)
        fname_input2_bin = add_suffix(fname_input2, '_bin')
        im_2.save(fname_input2_bin, mutable=True)

        # Use binarized images in subsequent steps
        fname_input1 = fname_input1_bin
        fname_input2 = fname_input2_bin

    # copy header of im_1 to im_2
    im_2.header = im_1.header
    im_2.save()

    cmd = ['isct_dice_coefficient', fname_input1, fname_input2]

    if vars(arguments)["2d_slices"] is not None:
        cmd += ['-2d-slices', str(vars(arguments)["2d_slices"])]
    if arguments.b is not None:
        bounding_box = arguments.b
        cmd += ['-b'] + bounding_box
    if arguments.bmax is not None and arguments.bmax == 1:
        cmd += ['-bmax']
    if arguments.bzmax is not None and arguments.bzmax == 1:
        cmd += ['-bzmax']
    if arguments.o is not None:
        path_output, fname_output, ext = extract_fname(arguments.o)
        cmd += ['-o', fname_output + ext]

    rm_tmp = bool(arguments.r)

    # # Computation of Dice coefficient using Python implementation.
    # # commented for now as it does not cover all the feature of isct_dice_coefficient
    # #from spinalcordtoolbox.image import Image, compute_dice
    # #dice = compute_dice(Image(fname_input1), Image(fname_input2), mode='3d', zboundaries=False)
    # #printv('Dice (python-based) = ' + str(dice), verbose)

    status, output = run_proc(cmd, verbose, is_sct_binary=True)

    os.chdir(curdir)  # go back to original directory

    # copy output file into original directory
    if arguments.o is not None:
        copy(os.path.join(tmp_dir, fname_output + ext),
             os.path.join(path_output, fname_output + ext))

    # remove tmp_dir
    if rm_tmp:
        rmtree(tmp_dir)

    printv(output, verbose)
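
For reference, the metric reported by isct_dice_coefficient is the Dice coefficient 2*|A ∩ B| / (|A| + |B|). A minimal numpy sketch on synthetic binary masks (illustration only; the binary also supports the slice-wise and bounding-box options used above):

import numpy as np

# Two overlapping synthetic binary masks.
a = np.zeros((10, 10, 10), dtype=np.uint8)
b = np.zeros((10, 10, 10), dtype=np.uint8)
a[2:6, 2:6, 2:6] = 1
b[3:7, 3:7, 3:7] = 1

intersection = np.logical_and(a, b).sum()
dice = 2.0 * intersection / (a.sum() + b.sum())
print(dice)  # 2*27 / (64 + 64) = 0.421875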