def create_line(param, fname, coord, nz):
    """
    Create vertical line in 3D volume
    :param param:
    :param fname:
    :param coord:
    :param nz:
    :return:
    """

    # duplicate volume (assumes input file is nifti)
    copy(fname, 'line.nii', verbose=param.verbose)

    # set all voxels to zero
    img = Image('line.nii')
    data = get_data_or_scalar('0', img.data)
    data_concat = concatenate_along_4th_dimension(img.data, data)
    img.data = np.prod(data_concat, axis=3)
    img.save()

    labels = []

    if isinstance(coord[0], Coordinate):
        for x, y, _, _ in coord:
            labels.extend([Coordinate([x, y, iz, 1]) for iz in range(nz)])
    else:
        # backwards compatibility: coord given as a flat [x, y, ...] list
        labels.extend(
            [Coordinate([coord[0], coord[1], iz, 1]) for iz in range(nz)])

    create_labels(img, labels).save()

    return 'line.nii'
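
A minimal usage sketch for create_line follows. This is a sketch under assumptions: Coordinate and create_line are importable from the surrounding Spinal Cord Toolbox module, and the file name 't2.nii.gz', the seed coordinate and nz=52 are placeholder values, not part of the example above.

# Hypothetical usage -- names and values below are placeholders, not from the original example.
class _Param:
    verbose = 1  # minimal stand-in for the `param` object create_line expects

coords = [Coordinate([20, 25, 0, 1])]                           # one (x, y) seed; z is filled in by create_line
fname_line = create_line(_Param(), 't2.nii.gz', coords, nz=52)
print(fname_line)                                               # -> 'line.nii', written in the current directory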
Example #2
def get_data(list_fname):
    """
    Get data from list of file names
    :param list_fname:
    :return: 3D or 4D numpy array.
    """
    try:
        nii = [Image(f_in) for f_in in list_fname]
    except Exception as e:
        printv(str(e), 1, 'error')  # file does not exist, exit program
    data0 = nii[0].data
    data = nii[0].data
    # check that all images have the same shape
    for i in range(1, len(nii)):
        if not np.shape(nii[i].data) == np.shape(data0):
            printv('\nWARNING: shape(' + list_fname[i] + ')=' + str(np.shape(nii[i].data))
                   + ' incompatible with shape(' + list_fname[0] + ')=' + str(np.shape(data0)), 1, 'warning')
            printv('\nERROR: All input images must have the same dimensions.', 1, 'error')
        else:
            data = sct_math.concatenate_along_4th_dimension(data, nii[i].data)
    return data
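
A short usage sketch for get_data. The file names are placeholders, and the sketch assumes both volumes exist and have the same shape.

# Hypothetical usage -- file names are placeholders, not from the original example.
data_4d = get_data(['dwi_mean.nii.gz', 'dwi_mean_reg.nii.gz'])
print(data_4d.shape)   # e.g. (nx, ny, nz, 2): one volume per input file, stacked along the 4th dimension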
Example #3
def main(argv=None):
    """
    Main function
    :param argv:
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    dim_list = ['x', 'y', 'z', 't']

    fname_in = arguments.i
    fname_out = arguments.o
    output_type = arguments.type

    # Open file(s)
    im = Image(fname_in)
    data = im.data  # 3d or 4d numpy array
    dim = im.dim

    # run command
    if arguments.otsu is not None:
        param = arguments.otsu
        data_out = sct_math.otsu(data, param)

    elif arguments.adap is not None:
        param = arguments.adap
        data_out = sct_math.adap(data, param[0], param[1])

    elif arguments.otsu_median is not None:
        param = arguments.otsu_median
        data_out = sct_math.otsu_median(data, param[0], param[1])

    elif arguments.thr is not None:
        param = arguments.thr
        data_out = sct_math.threshold(data, param)

    elif arguments.percent is not None:
        param = arguments.percent
        data_out = sct_math.perc(data, param)

    elif arguments.bin is not None:
        bin_thr = arguments.bin
        data_out = sct_math.binarize(data, bin_thr=bin_thr)

    elif arguments.add is not None:
        data2 = get_data_or_scalar(arguments.add, data)
        data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
        data_out = np.sum(data_concat, axis=3)

    elif arguments.sub is not None:
        data2 = get_data_or_scalar(arguments.sub, data)
        data_out = data - data2

    elif arguments.laplacian is not None:
        sigmas = arguments.laplacian
        if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
        elif len(sigmas) != len(data.shape):
            printv(
                parser.error(
                    'ERROR: -laplacian needs either one input or as many inputs as the image has dimensions'
                ))
        # adjust sigma based on voxel size (dim[4:7] holds the voxel sizes px, py, pz)
        sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
        # smooth data
        data_out = sct_math.laplacian(data, sigmas)

    elif arguments.mul is not None:
        data2 = get_data_or_scalar(arguments.mul, data)
        data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
        data_out = np.prod(data_concat, axis=3)

    elif arguments.div is not None:
        data2 = get_data_or_scalar(arguments.div, data)
        data_out = np.divide(data, data2)

    elif arguments.mean is not None:
        dim = dim_list.index(arguments.mean)
        if dim + 1 > len(np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
        data_out = np.mean(data, dim)

    elif arguments.rms is not None:
        dim = dim_list.index(arguments.rms)
        if dim + 1 > len(np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
        data_out = np.sqrt(np.mean(np.square(data.astype(float)), dim))

    elif arguments.std is not None:
        dim = dim_list.index(arguments.std)
        if dim + 1 > len(np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
        data_out = np.std(data, dim, ddof=1)

    elif arguments.smooth is not None:
        sigmas = arguments.smooth
        if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
        elif len(sigmas) != len(data.shape):
            printv(
                parser.error(
                    'ERROR: -smooth needs either one input or as many inputs as the image has dimensions'
                ))
        # adjust sigma based on voxel size (dim[4:7] holds the voxel sizes px, py, pz)
        sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
        # smooth data
        data_out = sct_math.smooth(data, sigmas)

    elif arguments.dilate is not None:
        if arguments.shape in ['disk', 'square'] and arguments.dim is None:
            printv(
                parser.error(
                    'ERROR: -dim is required for -dilate with 2D morphological kernel'
                ))
        data_out = sct_math.dilate(data,
                                   size=arguments.dilate,
                                   shape=arguments.shape,
                                   dim=arguments.dim)

    elif arguments.erode is not None:
        if arguments.shape in ['disk', 'square'] and arguments.dim is None:
            printv(
                parser.error(
                    'ERROR: -dim is required for -erode with 2D morphological kernel'
                ))
        data_out = sct_math.erode(data,
                                  size=arguments.erode,
                                  shape=arguments.shape,
                                  dim=arguments.dim)

    elif arguments.denoise is not None:
        # parse denoising arguments
        p, b = 1, 5  # default arguments
        list_denoise = (arguments.denoise).split(",")
        for i in list_denoise:
            if 'p' in i:
                p = int(i.split('=')[1])
            if 'b' in i:
                b = int(i.split('=')[1])
        data_out = sct_math.denoise_nlmeans(data,
                                            patch_radius=p,
                                            block_radius=b)

    elif arguments.symmetrize is not None:
        # average the image with its mirror along the first axis to symmetrize it
        data_out = (data + data[::-1, :, :]) / 2.0

    elif arguments.mi is not None:
        # input 1 = from flag -i --> im
        # input 2 = from flag -mi
        im_2 = Image(arguments.mi)
        compute_similarity(im,
                           im_2,
                           fname_out,
                           metric='mi',
                           metric_full='Mutual information',
                           verbose=verbose)
        data_out = None

    elif arguments.minorm is not None:
        im_2 = Image(arguments.minorm)
        compute_similarity(im,
                           im_2,
                           fname_out,
                           metric='minorm',
                           metric_full='Normalized Mutual information',
                           verbose=verbose)
        data_out = None

    elif arguments.corr is not None:
        # input 1 = from flag -i --> im
        # input 2 = from flag -corr
        im_2 = Image(arguments.corr)
        compute_similarity(im,
                           im_2,
                           fname_out,
                           metric='corr',
                           metric_full='Pearson correlation coefficient',
                           verbose=verbose)
        data_out = None

    # if no flag is set
    else:
        data_out = None
        printv(
            parser.error(
                'ERROR: you need to specify an operation to perform on the input image'
            ))

    if data_out is not None:
        # Write output
        nii_out = Image(fname_in)  # use header of input file
        nii_out.data = data_out
        nii_out.save(fname_out, dtype=output_type)
    # TODO: case of multiple outputs
    # assert len(data_out) == n_out
    # if n_in == n_out:
    #     for im_in, d_out, fn_out in zip(nii, data_out, fname_out):
    #         im_in.data = d_out
    #         im_in.absolutepath = fn_out
    #         if arguments.w is not None:
    #             im_in.hdr.set_intent('vector', (), '')
    #         im_in.save()
    # elif n_out == 1:
    #     nii[0].data = data_out[0]
    #     nii[0].absolutepath = fname_out[0]
    #     if arguments.w is not None:
    #             nii[0].hdr.set_intent('vector', (), '')
    #     nii[0].save()
    # elif n_out > n_in:
    #     for dat_out, name_out in zip(data_out, fname_out):
    #         im_out = nii[0].copy()
    #         im_out.data = dat_out
    #         im_out.absolutepath = name_out
    #         if arguments.w is not None:
    #             im_out.hdr.set_intent('vector', (), '')
    #         im_out.save()
    # else:
    #     printv(parser.usage.generate(error='ERROR: not the correct numbers of inputs and outputs'))

    # display message
    if data_out is not None:
        display_viewer_syntax([fname_out], verbose=verbose)
    else:
        printv('\nDone! File created: ' + fname_out, verbose, 'info')
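
A hedged invocation sketch for main. The file names are placeholders, and the flags assume the argparse parser returned by get_parser exposes the attributes read above (arguments.i, arguments.add, arguments.o).

# Hypothetical invocation -- file names are placeholders; '-add 5' adds a scalar to every voxel
# via the get_data_or_scalar / concatenate / sum path shown in main() above.
main(['-i', 't2.nii.gz', '-add', '5', '-o', 't2_plus5.nii.gz'])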