def test_add_suffix():
    assert msct_image.add_suffix('t2.nii', '_mean') == 't2_mean.nii'
    assert msct_image.add_suffix('t2.nii.gz', 'a') == 't2a.nii.gz'
    assert msct_image.add_suffix(os.path.join('var', 'lib', 'usr', 't2.nii.gz'), 'sfx') \
           == os.path.join('var', 'lib', 'usr', 't2sfx.nii.gz')
    assert msct_image.add_suffix(os.path.join('var', 'lib.version.3', 'usr', 't2.nii.gz'), 'sfx') \
           == os.path.join('var', 'lib.version.3', 'usr', 't2sfx.nii.gz')
Example #2
def test_add_suffix():
    assert msct_image.add_suffix('t2.nii', '_mean') == 't2_mean.nii'
    assert msct_image.add_suffix('t2.nii.gz', 'a') == 't2a.nii.gz'
    assert msct_image.add_suffix('var/lib/usr/t2.nii.gz',
                                 'sfx') == 'var/lib/usr/t2sfx.nii.gz'
    assert msct_image.add_suffix('var/lib.version.3/usr/t2.nii.gz',
                                 'sfx') == 'var/lib.version.3/usr/t2sfx.nii.gz'
def resample_image(fname, suffix='_resampled.nii.gz', binary=False, npx=0.3, npy=0.3, thr=0.0, interpolation='spline'):
    """
    Resampling function: add a padding, resample, crop the padding
    :param fname: name of the image file to be resampled
    :param suffix: suffix added to the original fname after resampling
    :param binary: boolean, image is binary or not
    :param npx: new pixel size in the x direction
    :param npy: new pixel size in the y direction
    :param thr: if the image is binary, it will be thresholded at thr (default=0) after the resampling
    :param interpolation: type of interpolation used for the resampling
    :return: file name after resampling (or original fname if it was already in the correct resolution)
    """
    im_in = Image(fname)
    orientation = im_in.orientation
    if orientation != 'RPI':
        im_in.change_orientation('RPI')
        fname = add_suffix(im_in.absolutepath, "_rpi")
        im_in.save(path=fname, mutable=True)

    nx, ny, nz, nt, px, py, pz, pt = im_in.dim

    if np.round(px, 2) != np.round(npx, 2) or np.round(py, 2) != np.round(npy, 2):
        name_resample = extract_fname(fname)[1] + suffix
        if binary:
            interpolation = 'nn'

        if nz == 1:
            # when data is 2d: we convert it to a 3d image in order to avoid conversion problem with 2d data
            # TODO: check if this above problem is still present (now that we are using nibabel instead of nipy)
            run_proc(['sct_image', '-i', ','.join([fname, fname]), '-concat', 'z', '-o', fname])

        run_proc(['sct_resample', '-i', fname, '-mm', str(npx) + 'x' + str(npy) + 'x' + str(pz), '-o', name_resample, '-x', interpolation])

        if nz == 1:  # when input data was 2d: re-convert data 3d-->2d
            run_proc(['sct_image', '-i', name_resample, '-split', 'z'])
            im_split = Image(name_resample.split('.nii.gz')[0] + '_Z0000.nii.gz')
            im_split.save(name_resample)

        if binary:
            img = Image(name_resample)
            img.data = binarize(img.data, thr)
            img.save()

        if orientation != 'RPI':
            img = Image(name_resample)
            img.change_orientation(orientation)
            name_resample = add_suffix(img.absolutepath, "_{}".format(orientation.lower()))
            img.save(path=name_resample, mutable=True)

        return name_resample
    else:
        if orientation != 'RPI':
            fname = add_suffix(fname, "_RPI")
            im_in = change_orientation(im_in, orientation).save(fname)

        printv('Image resolution already ' + str(npx) + 'x' + str(npy) + 'x' + str(pz))
        return fname
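For reference, a hypothetical call to resample_image might look like this (file names are made up; assumes the SCT environment and the helpers above are importable):

# Resample an anatomical image to 0.3x0.3 mm in-plane resolution:
fname_t2_r = resample_image('t2.nii.gz', npx=0.3, npy=0.3, interpolation='spline')
# For a binary mask, interpolation is forced to nearest-neighbour and the result is re-thresholded:
fname_seg_r = resample_image('t2_seg.nii.gz', binary=True, thr=0.5)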
Example #4
    def reorient_data(self):
        for f in self.fname_metric_lst:
            os.rename(
                self.fname_metric_lst[f],
                add_suffix("".join(extract_fname(self.param.fname_im)[1:]),
                           '_2reorient'))
            im = Image(add_suffix("".join(extract_fname(self.param.fname_im)[1:]), '_2reorient')) \
                .change_orientation(self.orientation_im) \
                .save(self.fname_metric_lst[f])
def visualize_warp(im_warp: Image, im_grid: Image = None, step=3, rm_tmp=True):
    fname_warp = im_warp.absolutepath
    tmp_dir = None
    if im_grid:
        fname_grid = im_grid.absolutepath
    else:
        tmp_dir = tmp_create()
        nx, ny, nz = im_warp.data.shape[0:3]
        curdir = os.getcwd()
        os.chdir(tmp_dir)
        sq = np.zeros((step, step))
        sq[step - 1] = 1
        sq[:, step - 1] = 1
        dat = np.zeros((nx, ny, nz))
        for i in range(0, dat.shape[0], step):
            for j in range(0, dat.shape[1], step):
                for k in range(dat.shape[2]):
                    if dat[i:i + step, j:j + step, k].shape == (step, step):
                        dat[i:i + step, j:j + step, k] = sq
        im_grid = Image(param=dat)
        grid_hdr = im_warp.hdr
        im_grid.hdr = grid_hdr
        fname_grid = 'grid_' + str(step) + '.nii.gz'
        im_grid.save(fname_grid)
        fname_grid_resample = add_suffix(fname_grid, '_resample')
        sct_resample.main(argv=['-i', fname_grid, '-f', '3x3x1', '-x', 'nn', '-o', fname_grid_resample])
        fname_grid = os.path.join(tmp_dir, fname_grid_resample)
        os.chdir(curdir)
    path_warp, file_warp, ext_warp = extract_fname(fname_warp)
    grid_warped = os.path.join(path_warp, extract_fname(fname_grid)[1] + '_' + file_warp + ext_warp)
    sct_apply_transfo.main(argv=['-i', fname_grid, '-d', fname_grid, '-w', fname_warp, '-o', grid_warped])
    if rm_tmp and tmp_dir is not None:
        rmtree(tmp_dir)
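The nested loops above draw a regular grid by tiling a small square whose last row and column are set to 1. A standalone 2-D sketch of that pattern (numpy only):

import numpy as np

step, nx, ny = 3, 9, 9
sq = np.zeros((step, step))
sq[step - 1] = 1       # last row of the tile
sq[:, step - 1] = 1    # last column of the tile
dat = np.zeros((nx, ny))
for i in range(0, nx, step):
    for j in range(0, ny, step):
        if dat[i:i + step, j:j + step].shape == (step, step):
            dat[i:i + step, j:j + step] = sq
print(dat.astype(int))  # grid lines every `step` voxels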
Example #6
def multicomponent_merge(fname_list):
    from numpy import zeros
    # WARNING: output multicomponent is not optimal yet, some issues may be related to the use of this function

    im_0 = Image(fname_list[0])
    new_shape = list(im_0.data.shape)
    if len(new_shape) == 3:
        new_shape.append(1)
    new_shape.append(len(fname_list))
    new_shape = tuple(new_shape)

    data_out = zeros(new_shape)
    for i, fname in enumerate(fname_list):
        im = Image(fname)
        dat = im.data
        if len(dat.shape) == 2:
            data_out[:, :, 0, 0, i] = dat.astype('float32')
        elif len(dat.shape) == 3:
            data_out[:, :, :, 0, i] = dat.astype('float32')
        elif len(dat.shape) == 4:
            data_out[:, :, :, :, i] = dat.astype('float32')
        del im
        del dat
    im_out = im_0.copy()
    im_out.data = data_out.astype('float32')
    im_out.hdr.set_intent('vector', (), '')
    im_out.absolutepath = add_suffix(im_out.absolutepath, '_multicomponent')
    return im_out
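The shape bookkeeping in multicomponent_merge can be checked in isolation: each 3-D input volume is written into one component of a 5-D (x, y, z, 1, n_components) array. A numpy-only sketch (no NIfTI I/O):

import numpy as np

vols = [np.random.rand(4, 4, 2).astype('float32') for _ in range(3)]   # three toy 3-D volumes
data_out = np.zeros(vols[0].shape + (1, len(vols)), dtype='float32')
for i, dat in enumerate(vols):
    data_out[:, :, :, 0, i] = dat
assert data_out.shape == (4, 4, 2, 1, 3)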
Example #7
def label_discs(fname_seg, list_disc_z, list_disc_value, verbose=1):
    """
    Create file with single voxel label in the middle of the spinal cord for each disc.

    :param fname_seg: fname of the segmentation, no orientation expected
    :param list_disc_z: list of z that correspond to a disc
    :param list_disc_value: list of associated disc values
    :param verbose:
    :return:
    """
    seg = Image(fname_seg)
    init_orientation = seg.orientation
    seg.change_orientation("RPI")
    disc_data = np.zeros_like(seg.data)
    nx, ny, nz = seg.data.shape

    for i in range(len(list_disc_z)):
        if list_disc_z[i] < nz:
            try:
                slices = seg.data[:, :, list_disc_z[i]]
                cx, cy = [int(x) for x in np.round(center_of_mass(slices)).tolist()]
            except EmptyArrayError:
                logger.warning("During disc labeling, center of mass calculation failed due to discontinuities in "
                               "segmented spinal cord; please check the quality of your segmentation. Using "
                               "interpolated centerline as a fallback.")
                interpolated_centerline, _, _, _ = get_centerline(seg)
                slices = interpolated_centerline.data[:, :, list_disc_z[i]]
                cx, cy = [int(x) for x in np.round(center_of_mass(slices)).tolist()]

            # Disc values are offset by one due to legacy code
            disc_data[cx, cy, list_disc_z[i]] = list_disc_value[i] + 1

    seg.data = disc_data
    seg.change_orientation(init_orientation).save(add_suffix(fname_seg, '_labeled_disc'))
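The disc label is dropped at the rounded centre of mass of the cord mask on the corresponding axial slice. A toy example of that step, assuming scipy's center_of_mass (which is what these snippets appear to rely on):

import numpy as np
from scipy.ndimage import center_of_mass

slice_mask = np.zeros((7, 7))
slice_mask[2:5, 3:6] = 1   # toy cord cross-section
cx, cy = [int(x) for x in np.round(center_of_mass(slice_mask)).tolist()]
print(cx, cy)  # -> 3 4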
Example #8
def main(argv=None):
    """
    Main function
    :param argv:
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    fname_anat = arguments.i
    fname_centerline = arguments.s

    # load input images
    im_anat = Image(fname_anat)
    im_centerline = Image(fname_centerline)

    # flatten sagittal
    im_anat_flattened = flatten_sagittal(im_anat, im_centerline, verbose)

    # save output
    fname_out = add_suffix(fname_anat, '_flatten')
    im_anat_flattened.save(fname_out)

    display_viewer_syntax([fname_anat, fname_out])
def apply_transfo(im_src, im_dest, warp, interp='spline', rm_tmp=True):
    # create tmp dir and go in it
    tmp_dir = tmp_create()
    # copy warping field to tmp dir
    copy(warp, tmp_dir)
    warp = ''.join(extract_fname(warp)[1:])
    # go to tmp dir
    curdir = os.getcwd()
    os.chdir(tmp_dir)
    # save image and seg
    fname_src = 'src.nii.gz'
    im_src.save(fname_src)
    fname_dest = 'dest.nii.gz'
    im_dest.save(fname_dest)
    # apply warping field
    fname_src_reg = add_suffix(fname_src, '_reg')
    sct_apply_transfo.main(
        argv=['-i', fname_src, '-d', fname_dest, '-w', warp, '-x', interp])

    im_src_reg = Image(fname_src_reg)
    # get out of tmp dir
    os.chdir(curdir)
    if rm_tmp:
        # remove tmp dir
        rmtree(tmp_dir)
    # return res image
    return im_src_reg
Example #10
def func_rescale_header(fname_data, rescale_factor, verbose=0):
    """
    Rescale the voxel dimension by modifying the NIFTI header qform. Write the output file in a temp folder.
    :param fname_data:
    :param rescale_factor:
    :return: fname_data_rescaled
    """
    import nibabel as nib
    import numpy as np
    img = nib.load(fname_data)
    # get qform
    qform = img.header.get_qform()
    # multiply by scaling factor
    qform[0:3, 0:3] *= rescale_factor
    # generate a new nifti file
    header_rescaled = img.header.copy()
    header_rescaled.set_qform(qform)
    # the data are the same-- only the header changes
    # nibabel has deprecated/removed get_data(); read the array through the dataobj proxy instead
    img_rescaled = nib.nifti1.Nifti1Image(np.asanyarray(img.dataobj),
                                          None,
                                          header=header_rescaled)
    path_tmp = tmp_create(basename="propseg")
    fname_data_rescaled = os.path.join(
        path_tmp, os.path.basename(add_suffix(fname_data, "_rescaled")))
    nib.save(img_rescaled, fname_data_rescaled)
    return fname_data_rescaled
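The key idea in func_rescale_header is that scaling the 3x3 block of the qform changes the voxel size without touching the voxel data. A minimal in-memory check of that idea (assumes only nibabel and numpy):

import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.zeros((4, 4, 4), dtype=np.float32), np.eye(4))
qform = img.header.get_qform()
qform[0:3, 0:3] *= 2.0                 # rescale voxel dimensions by a factor of 2
hdr = img.header.copy()
hdr.set_qform(qform)
print(hdr.get_zooms()[:3])             # -> (2.0, 2.0, 2.0)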
Example #11
def label_discs(fname_seg, list_disc_z, list_disc_value, verbose=1):
    """
    Create file with single voxel label in the middle of the spinal cord for each disc.

    :param fname_seg: fname of the segmentation, no orientation expected
    :param list_disc_z: list of z that correspond to a disc
    :param list_disc_value: list of associated disc values
    :param verbose:
    :return:
    """
    seg = Image(fname_seg)
    init_orientation = seg.orientation
    seg.change_orientation("RPI")
    disc_data = np.zeros_like(seg.data)
    nx, ny, nz = seg.data.shape

    for i in range(len(list_disc_z)):
        if list_disc_z[i] < nz:
            slices = seg.data[:, :, list_disc_z[i]]
            cx, cy = [
                int(x) for x in np.round(center_of_mass(slices)).tolist()
            ]
            # Disc values are offset by one due to legacy code
            disc_data[cx, cy, list_disc_z[i]] = list_disc_value[i] + 1

    seg.data = disc_data
    seg.change_orientation(init_orientation).save(
        add_suffix(fname_seg, '_labeled_disc'))
Example #12
def register_data(im_src, im_dest, param_reg, path_copy_warp=None, rm_tmp=True):
    """
    Register a source image onto a destination image.

    :param im_src: Image: source image
    :param im_dest: Image: destination image
    :param param_reg: str: registration parameters (passed to sct_register_multimodal '-param')
    :param path_copy_warp: path to which the warping fields are copied (optional)
    :param rm_tmp: bool: remove the temporary directory when done
    :return: (im_src_reg, fname_src2dest, fname_dest2src): registered source image and the names of the
        forward and inverse warping fields
    """
    # im_src and im_dest are already preprocessed (in theory: im_dest = mean_image)
    # binarize images to get seg
    im_src_seg = binarize(im_src, thr_min=1, thr_max=1)
    im_dest_seg = binarize(im_dest)
    # create tmp dir and go in it
    tmp_dir = tmp_create()
    curdir = os.getcwd()
    os.chdir(tmp_dir)
    # save image and seg
    fname_src = 'src.nii.gz'
    im_src.save(fname_src)
    fname_src_seg = 'src_seg.nii.gz'
    im_src_seg.save(fname_src_seg)
    fname_dest = 'dest.nii.gz'
    im_dest.save(fname_dest)
    fname_dest_seg = 'dest_seg.nii.gz'
    im_dest_seg.save(fname_dest_seg)
    # do registration using param_reg
    sct_register_multimodal.main(args=['-i', fname_src,
                                       '-d', fname_dest,
                                       '-iseg', fname_src_seg,
                                       '-dseg', fname_dest_seg,
                                       '-param', param_reg])

    # get registration result
    fname_src_reg = add_suffix(fname_src, '_reg')
    im_src_reg = Image(fname_src_reg)
    # get out of tmp dir
    os.chdir(curdir)

    # build names of the warping fields (also returned, so define them regardless of path_copy_warp)
    file_src = extract_fname(fname_src)[1]
    file_dest = extract_fname(fname_dest)[1]
    fname_src2dest = 'warp_' + file_src + '2' + file_dest + '.nii.gz'
    fname_dest2src = 'warp_' + file_dest + '2' + file_src + '.nii.gz'
    # copy warping fields
    if path_copy_warp is not None and os.path.isdir(os.path.abspath(path_copy_warp)):
        path_copy_warp = os.path.abspath(path_copy_warp)
        copy(os.path.join(tmp_dir, fname_src2dest), path_copy_warp)
        copy(os.path.join(tmp_dir, fname_dest2src), path_copy_warp)

    if rm_tmp:
        # remove tmp dir
        rmtree(tmp_dir)
    # return res image
    return im_src_reg, fname_src2dest, fname_dest2src
def pad_image(im,
              pad_x_i=0,
              pad_x_f=0,
              pad_y_i=0,
              pad_y_f=0,
              pad_z_i=0,
              pad_z_f=0):

    nx, ny, nz, nt, px, py, pz, pt = im.dim
    pad_x_i, pad_x_f, pad_y_i, pad_y_f, pad_z_i, pad_z_f = int(pad_x_i), int(
        pad_x_f), int(pad_y_i), int(pad_y_f), int(pad_z_i), int(pad_z_f)

    if len(im.data.shape) == 2:
        new_shape = list(im.data.shape)
        new_shape.append(1)
        im.data = im.data.reshape(new_shape)

    # initialize padded_data, with same type as im.data
    padded_data = np.zeros((nx + pad_x_i + pad_x_f, ny + pad_y_i + pad_y_f,
                            nz + pad_z_i + pad_z_f),
                           dtype=im.data.dtype)

    if pad_x_f == 0:
        pad_x_f = None
    elif pad_x_f > 0:
        pad_x_f *= -1
    if pad_y_f == 0:
        pad_y_f = None
    elif pad_y_f > 0:
        pad_y_f *= -1
    if pad_z_f == 0:
        pad_z_f = None
    elif pad_z_f > 0:
        pad_z_f *= -1

    padded_data[pad_x_i:pad_x_f, pad_y_i:pad_y_f, pad_z_i:pad_z_f] = im.data
    im_out = im.copy()
    # TODO: Do not copy the Image(), because the dim field and hdr.get_data_shape() will not be updated properly.
    #   better to just create a new Image() from scratch.
    im_out.data = padded_data  # done after the call of the function
    im_out.absolutepath = add_suffix(im_out.absolutepath, "_pad")

    # adapt the origin in the sform and qform matrix
    new_origin = np.dot(im_out.hdr.get_qform(),
                        [-pad_x_i, -pad_y_i, -pad_z_i, 1])

    im_out.hdr.structarr['qoffset_x'] = new_origin[0]
    im_out.hdr.structarr['qoffset_y'] = new_origin[1]
    im_out.hdr.structarr['qoffset_z'] = new_origin[2]
    im_out.hdr.structarr['srow_x'][-1] = new_origin[0]
    im_out.hdr.structarr['srow_y'][-1] = new_origin[1]
    im_out.hdr.structarr['srow_z'][-1] = new_origin[2]

    return im_out
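The slicing trick in pad_image is easy to miss: when the "end" padding is zero it is replaced by None (so the slice reaches the edge of the array), otherwise it is negated. A standalone numpy check of that bookkeeping:

import numpy as np

data = np.arange(8).reshape(2, 2, 2)
pad_x_i, pad_x_f = 1, 0
padded = np.zeros((2 + pad_x_i + pad_x_f, 2, 2), dtype=data.dtype)
end_x = -pad_x_f if pad_x_f else None   # 0 -> None, n -> -n
padded[pad_x_i:end_x, :, :] = data
assert padded.shape == (3, 2, 2)
assert (padded[1:, :, :] == data).all()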
    def __init__(self, im, v=1):
        printv('Thinning ... ', v, 'normal')
        self.image = im
        self.image.data = bin_data(self.image.data)
        self.dim_im = len(self.image.data.shape)

        if self.dim_im == 2:
            self.thinned_image = empty_like(self.image)
            self.thinned_image.data = self.zhang_suen(self.image.data)
            self.thinned_image.absolutepath = add_suffix(self.image.absolutepath, "_thinned")

        elif self.dim_im == 3:
            if not self.image.orientation == 'IRP':
                printv('-- changing orientation ...')
                self.image.change_orientation('IRP')

            thinned_data = np.asarray([self.zhang_suen(im_slice) for im_slice in self.image.data])

            self.thinned_image = empty_like(self.image)
            self.thinned_image.data = thinned_data
            self.thinned_image.absolutepath = add_suffix(self.image.absolutepath, "_thinned")
Example #15
    def compute_texture(self):

        offset = int(self.param_glcm.distance)
        printv('\nCompute texture metrics...', self.param.verbose, 'normal')

        # open image and re-orient it to RPI if needed
        im_tmp = Image(self.param.fname_im)
        if self.orientation_im != self.orientation_extraction:
            im_tmp.change_orientation(self.orientation_extraction)

        dct_metric = {}
        for m in self.metric_lst:
            im_2save = zeros_like(im_tmp, dtype='float64')
            dct_metric[m] = im_2save
            # dct_metric[m] = Image(self.fname_metric_lst[m])

        with sct_progress_bar() as pbar:
            for im_z, seg_z, zz in zip(self.dct_im_seg['im'], self.dct_im_seg['seg'], range(len(self.dct_im_seg['im']))):
                for xx in range(im_z.shape[0]):
                    for yy in range(im_z.shape[1]):
                        if not seg_z[xx, yy]:
                            continue
                        if xx < offset or yy < offset:
                            continue
                        if xx > (im_z.shape[0] - offset - 1) or yy > (im_z.shape[1] - offset - 1):
                            continue  # to check if the whole glcm_window is in the axial_slice
                        if False in np.unique(seg_z[xx - offset: xx + offset + 1, yy - offset: yy + offset + 1]):
                            continue  # to check if the whole glcm_window is in the mask of the axial_slice

                        glcm_window = im_z[xx - offset: xx + offset + 1, yy - offset: yy + offset + 1]
                        glcm_window = glcm_window.astype(np.uint8)

                        dct_glcm = {}
                        for a in self.param_glcm.angle.split(','):  # compute the GLCM for self.param_glcm.distance and for each self.param_glcm.angle
                            dct_glcm[a] = greycomatrix(glcm_window,
                                                       [self.param_glcm.distance], [np.radians(int(a))],
                                                       symmetric=self.param_glcm.symmetric,
                                                       normed=self.param_glcm.normed)

                        for m in self.metric_lst:  # compute the GLCM property (m.split('_')[0]) of the voxel xx,yy,zz
                            dct_metric[m].data[xx, yy, zz] = greycoprops(dct_glcm[m.split('_')[2]], m.split('_')[0])[0][0]

                        pbar.set_postfix(pos="{}/{}".format(zz, len(self.dct_im_seg["im"])))
                        pbar.update(1)

        for m in self.metric_lst:
            fname_out = add_suffix("".join(extract_fname(self.param.fname_im)[1:]), '_' + m)
            dct_metric[m].save(fname_out)
            self.fname_metric_lst[m] = fname_out
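For each voxel, compute_texture builds a grey-level co-occurrence matrix on a small window and reads one Haralick property out of it. A standalone toy version of that per-window computation (recent scikit-image spells these functions graycomatrix/graycoprops; the snippet above uses the older greyco* aliases):

import numpy as np
from skimage.feature import graycomatrix, graycoprops

window = np.array([[0, 0, 1, 1],
                   [0, 0, 1, 1],
                   [0, 2, 2, 2],
                   [2, 2, 3, 3]], dtype=np.uint8)
glcm = graycomatrix(window, distances=[1], angles=[np.radians(0)], symmetric=False, normed=True)
print(graycoprops(glcm, 'contrast')[0][0])   # contrast at distance 1, angle 0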
Example #16
def visualize_warp(fname_warp, fname_grid=None, step=3, rm_tmp=True):
    tmp_dir = None
    if fname_grid is None:
        from numpy import zeros
        tmp_dir = tmp_create()
        im_warp = Image(fname_warp)
        status, out = run_proc(['fslhd', fname_warp])
        curdir = os.getcwd()
        os.chdir(tmp_dir)
        dim1 = 'dim1           '
        dim2 = 'dim2           '
        dim3 = 'dim3           '
        nx = int(
            out[out.find(dim1):][len(dim1):out[out.find(dim1):].find('\n')])
        ny = int(
            out[out.find(dim2):][len(dim2):out[out.find(dim2):].find('\n')])
        nz = int(
            out[out.find(dim3):][len(dim3):out[out.find(dim3):].find('\n')])
        sq = zeros((step, step))
        sq[step - 1] = 1
        sq[:, step - 1] = 1
        dat = zeros((nx, ny, nz))
        for i in range(0, dat.shape[0], step):
            for j in range(0, dat.shape[1], step):
                for k in range(dat.shape[2]):
                    if dat[i:i + step, j:j + step, k].shape == (step, step):
                        dat[i:i + step, j:j + step, k] = sq
        fname_grid = 'grid_' + str(step) + '.nii.gz'
        im_grid = Image(param=dat)
        grid_hdr = im_warp.hdr
        im_grid.hdr = grid_hdr
        im_grid.absolutepath = fname_grid
        im_grid.save()
        fname_grid_resample = add_suffix(fname_grid, '_resample')
        run_proc([
            'sct_resample', '-i', fname_grid, '-f', '3x3x1', '-x', 'nn', '-o',
            fname_grid_resample
        ])
        fname_grid = os.path.join(tmp_dir, fname_grid_resample)
        os.chdir(curdir)
    path_warp, file_warp, ext_warp = extract_fname(fname_warp)
    grid_warped = os.path.join(
        path_warp,
        extract_fname(fname_grid)[1] + '_' + file_warp + ext_warp)
    run_proc([
        'sct_apply_transfo', '-i', fname_grid, '-d', fname_grid, '-w',
        fname_warp, '-o', grid_warped
    ])
    if rm_tmp and tmp_dir is not None:
        rmtree(tmp_dir)
Example #17
def label_discs(fname_seg_labeled, verbose=1):
    """
    Label discs from labeled_segmentation. The convention is C2/C3-->3, C3/C4-->4, etc.

    :param fname_seg_labeled: fname of the labeled segmentation
    :param verbose:
    :return:
    """
    # open labeled segmentation
    im_seg_labeled = Image(fname_seg_labeled)
    orientation_native = im_seg_labeled.orientation
    im_seg_labeled.change_orientation("RPI")
    nx, ny, nz = im_seg_labeled.dim[0], im_seg_labeled.dim[
        1], im_seg_labeled.dim[2]
    data_disc = np.zeros([nx, ny, nz])
    vertebral_level_previous = np.max(
        im_seg_labeled.data)  # initialize with the max label value
    # loop across z in the superior direction (i.e. starts with the bottom slice), and each time the i/i+1 interface
    # between two levels is found, create a label at the center of the cord with the value corresponding to the
    # vertebral level below the point. E.g., at interface C3/C4, the value would be 4.
    for iz in range(nz):
        # get 2d slice
        slice = im_seg_labeled.data[:, :, iz]
        # check if at least one voxel is non-zero
        if np.any(slice):
            slice_one = np.copy(slice)
            # set all non-zero values to 1 (to compute center of mass)
            # Note: we do this because if one slice includes part of vertebral levels i and i+1, the center of
            # mass will be shifted towards the i+1 level. We don't want that here (i.e. the goal is to be at the
            # center of the cord)
            slice_one[slice.nonzero()] = 1
            # compute center of mass
            cx, cy = [
                int(x) for x in np.round(center_of_mass(slice_one)).tolist()
            ]
            # retrieve vertebral level
            vertebral_level = slice[cx, cy]
            # if smaller than previous level, then labeled as a disc
            if vertebral_level < vertebral_level_previous:
                # label disc
                data_disc[cx, cy, iz] = vertebral_level + 1
            # update variable
            vertebral_level_previous = vertebral_level
    # save disc labeled file
    im_seg_labeled.data = data_disc
    im_seg_labeled.change_orientation(orientation_native).save(
        add_suffix(fname_seg_labeled, '_disc'))
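The loop above walks up the labeled segmentation and drops a disc label wherever the vertebral level decreases; the disc value is the level below the interface (current level + 1, matching the C2/C3-->3 convention). A 1-D toy version of that rule:

import numpy as np

levels_along_z = np.array([5, 5, 5, 4, 4, 3, 3, 3, 2, 2])   # labeled levels, bottom slice -> top slice
vertebral_level_previous = levels_along_z.max()
discs = {}
for iz, vertebral_level in enumerate(levels_along_z):
    if vertebral_level < vertebral_level_previous:
        discs[iz] = vertebral_level + 1   # disc value = level below the interface
    vertebral_level_previous = vertebral_level
print(discs)   # -> {3: 5, 5: 4, 8: 3}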
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv if argv else ['--help'])
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    input_filename = arguments.i
    if arguments.o is not None:
        output_filename = arguments.o
    else:
        output_filename = add_suffix(input_filename, '_gmseg')

    use_tta = arguments.t
    model_name = arguments.m
    threshold = arguments.thr

    if threshold > 1.0 or threshold < 0.0:
        raise RuntimeError("Threshold should be between 0.0 and 1.0.")

    # Threshold zero means no thresholding
    if threshold == 0.0:
        threshold = None

    from spinalcordtoolbox.deepseg_gm import deepseg_gm
    deepseg_gm.check_backend()

    out_fname = deepseg_gm.segment_file(input_filename,
                                        output_filename, model_name, threshold,
                                        int(verbose), use_tta)

    path_qc = arguments.qc
    qc_dataset = arguments.qc_dataset
    qc_subject = arguments.qc_subject
    if path_qc is not None:
        generate_qc(fname_in1=input_filename,
                    fname_seg=out_fname,
                    args=sys.argv[1:],
                    path_qc=os.path.abspath(path_qc),
                    dataset=qc_dataset,
                    subject=qc_subject,
                    process='sct_deepseg_gm')

    display_viewer_syntax([input_filename, format(out_fname)],
                          colormaps=['gray', 'red'],
                          opacities=['1', '0.7'],
                          verbose=verbose)
def resample_file(fname_data,
                  fname_out,
                  new_size,
                  new_size_type,
                  interpolation,
                  verbose,
                  fname_ref=None):
    """This function will resample the specified input
    image file to the target size.
    Can deal with 2d, 3d or 4d image objects.

    :param fname_data: The input image filename.
    :param fname_out: The output image filename.
    :param new_size: The target size, e.g. 0.25x0.25
    :param new_size_type: Unit of resample (mm, vox, factor)
    :param interpolation: The interpolation type
    :param verbose: verbosity level
    :param fname_ref: Reference image to resample input image to
    """
    # Load data
    logger.info('load data...')
    nii = nib.load(fname_data)
    if fname_ref is not None:
        nii_ref = nib.load(fname_ref)
    else:
        nii_ref = None

    nii_r = resample_nib(nii,
                         new_size.split('x'),
                         new_size_type,
                         image_dest=nii_ref,
                         interpolation=interpolation)

    # build output file name
    if fname_out == '':
        fname_out = add_suffix(fname_data, '_r')

    # save data
    nib.save(nii_r, fname_out)

    # to view results
    display_viewer_syntax([fname_out], verbose=verbose)

    return nii_r
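A hypothetical call to resample_file (file names are made up; assumes the SCT environment):

# Resample to 1x1x1 mm isotropic with linear interpolation:
nii_resampled = resample_file('t2.nii.gz', 't2_1mm.nii.gz', '1x1x1', 'mm', 'linear', verbose=1)
# Passing fname_out='' writes to add_suffix('t2.nii.gz', '_r'), i.e. 't2_r.nii.gz'.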
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_loglevel(verbose=verbose)

    param = Param()

    fname_src = arguments.i
    if arguments.o is not None:
        fname_dst = arguments.o
    else:
        fname_dst = add_suffix(fname_src, "_tsnr")

    # call main function
    tsnr = Tsnr(param=param, fmri=fname_src, out=fname_dst)
    tsnr.compute()
def test_sct_dmri_denoise_patch2self(dmri_concat, tmp_path, model, radius):
    fname_dmri, fname_bvals, _ = dmri_concat  # dMRI file with >=10 volumes
    fname_out = str(tmp_path / "dmri_denoised.nii.gz")
    sct_dmri_denoise_patch2self.main(argv=[
        '-i', fname_dmri, '-b', fname_bvals, '-model', model, '-radius',
        radius, '-o', fname_out, '-v', '2'
    ])
    snr = []
    for fname in fname_dmri, fname_out:
        sct_compute_snr.main(argv=['-i', fname, '-method', 'mult'])
        img_snr = Image(add_suffix(fname, "_SNR-mult"))
        snr.append(img_snr.data.mean())

    # Mean SNR should increase. Not a thorough test; I just wanted to verify
    # that it even works at all, as the actual underlying method is tested
    # extensively by dipy upstream.
    assert snr[1] > snr[0]
Example #22
def main(args=None):

    parser = get_parser()
    param = Param()

    if args is None:
        args = None if sys.argv[1:] else ['--help']
    arguments = parser.parse_args(args=args)
    fname_src = arguments.i
    if arguments.o is not None:
        fname_dst = arguments.o
    else:
        fname_dst = add_suffix(fname_src, "_tsnr")

    verbose = int(arguments.v)
    init_sct(log_level=verbose, update=True)  # Update log level

    # call main function
    tsnr = Tsnr(param=param, fmri=fname_src, out=fname_dst)
    tsnr.compute()
Example #23
def main(fname_anat, fname_centerline, verbose):
    """
    Main function
    :param fname_anat:
    :param fname_centerline:
    :param verbose:
    :return:
    """
    # load input images
    im_anat = Image(fname_anat)
    im_centerline = Image(fname_centerline)

    # flatten sagittal
    im_anat_flattened = flatten_sagittal(im_anat, im_centerline, verbose)

    # save output
    fname_out = add_suffix(fname_anat, '_flatten')
    im_anat_flattened.save(fname_out)

    display_viewer_syntax([fname_anat, fname_out])
def main(args=None):
    """
    Main function
    :param args:
    :return:
    """
    # get parser args
    if args is None:
        args = None if sys.argv[1:] else ['--help']
    parser = get_parser()
    arguments = parser.parse_args(args=args)

    # initialize ImageCropper
    cropper = ImageCropper(Image(arguments.i))
    cropper.verbose = arguments.v
    init_sct(log_level=cropper.verbose, update=True)  # Update log level

    # Switch across cropping methods
    if arguments.g:
        cropper.get_bbox_from_gui()
    elif arguments.m:
        cropper.get_bbox_from_mask(Image(arguments.m))
    elif arguments.ref:
        cropper.get_bbox_from_ref(Image(arguments.ref))
    else:
        cropper.get_bbox_from_minmax(
            BoundingBox(arguments.xmin, arguments.xmax, arguments.ymin,
                        arguments.ymax, arguments.zmin, arguments.zmax))

    # Crop image
    img_crop = cropper.crop(background=arguments.b)

    # Write cropped image to file
    if arguments.o is None:
        fname_out = add_suffix(arguments.i, '_crop')
    else:
        fname_out = arguments.o
    img_crop.save(fname_out)

    display_viewer_syntax([arguments.i, fname_out])
def main(argv=None):
    """
    Main function
    :param argv:
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv if argv else ['--help'])
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # initialize ImageCropper
    cropper = ImageCropper(Image(arguments.i))
    cropper.verbose = verbose

    # Switch across cropping methods
    if arguments.g:
        cropper.get_bbox_from_gui()
    elif arguments.m:
        cropper.get_bbox_from_mask(Image(arguments.m))
    elif arguments.ref:
        cropper.get_bbox_from_ref(Image(arguments.ref))
    else:
        cropper.get_bbox_from_minmax(
            BoundingBox(arguments.xmin, arguments.xmax,
                        arguments.ymin, arguments.ymax,
                        arguments.zmin, arguments.zmax)
        )

    # Crop image
    img_crop = cropper.crop(background=arguments.b)

    # Write cropped image to file
    if arguments.o is None:
        fname_out = add_suffix(arguments.i, '_crop')
    else:
        fname_out = arguments.o
    img_crop.save(fname_out)

    display_viewer_syntax([arguments.i, fname_out])
Example #26
def label_segmentation(fname_seg, list_disc_z, list_disc_value, verbose=1):
    """
    Label segmentation image

    :param fname_seg: fname of the segmentation, no orientation expected
    :param list_disc_z: list of z that correspond to a disc
    :param list_disc_value: list of associated disc values
    :param verbose:
    :return:
    """

    # open segmentation
    seg = Image(fname_seg)
    init_orientation = seg.orientation
    seg.change_orientation("RPI")

    dim = seg.dim
    ny = dim[1]
    nz = dim[2]
    # loop across z
    for iz in range(nz):
        # get index of the disc right above iz
        try:
            ind_above_iz = max(
                [i for i in range(len(list_disc_z)) if list_disc_z[i] > iz])
        except ValueError:
            # if ind_above_iz is empty, attribute value 0
            vertebral_level = 0
        else:
            # assign vertebral level (add one because iz is BELOW the disk)
            vertebral_level = list_disc_value[ind_above_iz] + 1
        # get voxels in mask
        ind_nonzero = np.nonzero(seg.data[:, :, iz])
        seg.data[ind_nonzero[0], ind_nonzero[1], iz] = vertebral_level

    # write file
    seg.change_orientation(init_orientation).save(
        add_suffix(fname_seg, '_labeled'))
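The vertebral level of a slice is derived from the index of the disc right above it. A toy check of that rule, assuming (as in SCT's vertebral labeling) that the disc list is ordered from the most superior disc downwards, i.e. decreasing z:

list_disc_z = [30, 20, 10]        # z of each disc, top to bottom
list_disc_value = [2, 3, 4]       # value of the disc at the same index
iz = 15
try:
    ind_above_iz = max([i for i in range(len(list_disc_z)) if list_disc_z[i] > iz])
    vertebral_level = list_disc_value[ind_above_iz] + 1   # +1 because iz is below that disc
except ValueError:
    vertebral_level = 0   # no disc above iz: leave the slice labeled 0
print(vertebral_level)    # disc right above z=15 is at z=20 (value 3) -> level 4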
Example #27
def multicomponent_split(im):
    """
    Convert composite image (e.g., ITK warping field, 5dim) into several 3d volumes.
    Replaces "c3d -mcs warp_comp.nii -oo warp_vecx.nii warp_vecy.nii warp_vecz.nii"
    :param im:
    :return:
    """
    data = im.data
    assert len(data.shape) == 5
    data_out = []
    for i in range(data.shape[-1]):
        dat_out = data[:, :, :, :, i]
        '''
        while dat_out.shape[-1] == 1:
            dat_out = reshape(dat_out, dat_out.shape[:-1])
        '''
        data_out.append(dat_out)  # .astype('float32'))
    im_out = [im.copy() for j in range(len(data_out))]
    for i, im in enumerate(im_out):
        im.data = data_out[i]
        im.hdr.set_intent('vector', (), '')
        im.absolutepath = add_suffix(im.absolutepath, '_{}'.format(i))
    return im_out
Example #28
def moco(param):
    """
    Main function that performs motion correction.

    :param param:
    :return:
    """
    # retrieve parameters
    file_data = param.file_data
    file_target = param.file_target
    folder_mat = param.mat_moco  # output folder of mat file
    todo = param.todo
    suffix = param.suffix
    verbose = param.verbose

    # other parameters
    file_mask = 'mask.nii'

    printv('\nInput parameters:', param.verbose)
    printv('  Input file ............ ' + file_data, param.verbose)
    printv('  Reference file ........ ' + file_target, param.verbose)
    printv('  Polynomial degree ..... ' + param.poly, param.verbose)
    printv('  Smoothing kernel ...... ' + param.smooth, param.verbose)
    printv('  Gradient step ......... ' + param.gradStep, param.verbose)
    printv('  Metric ................ ' + param.metric, param.verbose)
    printv('  Sampling .............. ' + param.sampling, param.verbose)
    printv('  Todo .................. ' + todo, param.verbose)
    printv('  Mask  ................. ' + param.fname_mask, param.verbose)
    printv('  Output mat folder ..... ' + folder_mat, param.verbose)

    try:
        os.makedirs(folder_mat)
    except FileExistsError:
        pass

    # Get size of data
    printv('\nData dimensions:', verbose)
    im_data = Image(param.file_data)
    nx, ny, nz, nt, px, py, pz, pt = im_data.dim
    printv(
        ('  ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt)),
        verbose)

    # copy file_target to a temporary file
    printv('\nCopy file_target to a temporary file...', verbose)
    file_target = "target.nii.gz"
    convert(param.file_target, file_target, verbose=0)

    # Check if user specified a mask
    if not param.fname_mask == '':
        # Check if this mask is soft (i.e., non-binary, such as a Gaussian mask)
        im_mask = Image(param.fname_mask)
        if not np.array_equal(im_mask.data, im_mask.data.astype(bool)):
            # If it is a soft mask, multiply the target by the soft mask.
            im = Image(file_target)
            im_masked = im.copy()
            im_masked.data = im.data * im_mask.data
            im_masked.save(verbose=0)  # silence warning about file overwriting

    # If scan is sagittal, split src and target along Z (slice)
    if param.is_sagittal:
        dim_sag = 2  # TODO: find it
        # z-split data (time series)
        im_z_list = split_data(im_data, dim=dim_sag, squeeze_data=False)
        file_data_splitZ = []
        for im_z in im_z_list:
            im_z.save(verbose=0)
            file_data_splitZ.append(im_z.absolutepath)
        # z-split target
        im_targetz_list = split_data(Image(file_target),
                                     dim=dim_sag,
                                     squeeze_data=False)
        file_target_splitZ = []
        for im_targetz in im_targetz_list:
            im_targetz.save(verbose=0)
            file_target_splitZ.append(im_targetz.absolutepath)
        # z-split mask (if exists)
        if not param.fname_mask == '':
            im_maskz_list = split_data(Image(file_mask),
                                       dim=dim_sag,
                                       squeeze_data=False)
            file_mask_splitZ = []
            for im_maskz in im_maskz_list:
                im_maskz.save(verbose=0)
                file_mask_splitZ.append(im_maskz.absolutepath)
        # initialize file list for output matrices
        file_mat = np.empty((nz, nt), dtype=object)

    # axial orientation
    else:
        file_data_splitZ = [file_data]  # TODO: make it absolute like above
        file_target_splitZ = [file_target]  # TODO: make it absolute like above
        # initialize file list for output matrices
        file_mat = np.empty((1, nt), dtype=object)

        # deal with mask
        if not param.fname_mask == '':
            convert(param.fname_mask, file_mask, squeeze_data=False, verbose=0)
            im_maskz_list = [Image(file_mask)]  # use a list with single element

    # Loop across file list, where each file is either a 2D volume (if sagittal) or a 3D volume (otherwise)
    # file_mat = tuple([[[] for i in range(nt)] for i in range(nz)])

    file_data_splitZ_moco = []
    printv(
        '\nRegister. Loop across Z (note: there is only one Z if orientation is axial)'
    )
    for file in file_data_splitZ:
        iz = file_data_splitZ.index(file)
        # Split data along T dimension
        # printv('\nSplit data along T dimension.', verbose)
        im_z = Image(file)
        list_im_zt = split_data(im_z, dim=3)
        file_data_splitZ_splitT = []
        for im_zt in list_im_zt:
            im_zt.save(verbose=0)
            file_data_splitZ_splitT.append(im_zt.absolutepath)
        # file_data_splitT = file_data + '_T'

        # Motion correction: initialization
        index = np.arange(nt)
        file_data_splitT_num = []
        file_data_splitZ_splitT_moco = []
        failed_transfo = [0 for i in range(nt)]

        # Motion correction: Loop across T
        for indice_index in sct_progress_bar(range(nt),
                                             unit='iter',
                                             unit_scale=False,
                                             desc="Z=" + str(iz) + "/" +
                                             str(len(file_data_splitZ) - 1),
                                             ascii=False,
                                             ncols=80):

            # create indices and display stuff
            it = index[indice_index]
            file_mat[iz][it] = os.path.join(
                folder_mat,
                "mat.Z") + str(iz).zfill(4) + 'T' + str(it).zfill(4)
            file_data_splitZ_splitT_moco.append(
                add_suffix(file_data_splitZ_splitT[it], '_moco'))
            # deal with masking (except in the 'apply' case, where masking is irrelevant)
            input_mask = None
            if not param.fname_mask == '' and not param.todo == 'apply':
                # Check if mask is binary
                if np.array_equal(im_maskz_list[iz].data,
                                  im_maskz_list[iz].data.astype(bool)):
                    # If it is, pass this mask into register() to be used
                    input_mask = im_maskz_list[iz]
                else:
                    # If not, do not pass this mask into register() because ANTs cannot handle non-binary masks.
                    #  Instead, multiply the input data by the Gaussian mask.
                    im = Image(file_data_splitZ_splitT[it])
                    im_masked = im.copy()
                    im_masked.data = im.data * im_maskz_list[iz].data
                    im_masked.save(verbose=0)  # silence warning about file overwriting

            # run 3D registration
            failed_transfo[it] = register(param,
                                          file_data_splitZ_splitT[it],
                                          file_target_splitZ[iz],
                                          file_mat[iz][it],
                                          file_data_splitZ_splitT_moco[it],
                                          im_mask=input_mask)

            # average registered volume with target image
            # N.B. use weighted averaging: (target * nb_it + moco) / (nb_it + 1)
            if param.iterAvg and indice_index < 10 and failed_transfo[
                    it] == 0 and not param.todo == 'apply':
                im_targetz = Image(file_target_splitZ[iz])
                data_targetz = im_targetz.data
                data_mocoz = Image(file_data_splitZ_splitT_moco[it]).data
                data_targetz = (data_targetz * (indice_index + 1) +
                                data_mocoz) / (indice_index + 2)
                im_targetz.data = data_targetz
                im_targetz.save(verbose=0)

        # Replace failed transformation with the closest good one
        fT = [i for i, j in enumerate(failed_transfo) if j == 1]
        gT = [i for i, j in enumerate(failed_transfo) if j == 0]
        for it in range(len(fT)):
            abs_dist = [np.abs(gT[i] - fT[it]) for i in range(len(gT))]
            if not abs_dist == []:
                index_good = abs_dist.index(min(abs_dist))
                printv(
                    '  transfo #' + str(fT[it]) + ' --> use transfo #' +
                    str(gT[index_good]), verbose)
                # copy transformation
                copy(file_mat[iz][gT[index_good]] + 'Warp.nii.gz',
                     file_mat[iz][fT[it]] + 'Warp.nii.gz')
                # apply transformation
                sct_apply_transfo.main(argv=[
                    '-i', file_data_splitZ_splitT[fT[it]], '-d', file_target,
                    '-w', file_mat[iz][fT[it]] + 'Warp.nii.gz', '-o',
                    file_data_splitZ_splitT_moco[fT[it]], '-x', param.interp
                ])
            else:
                # exit program if no transformation exists.
                printv(
                    '\nERROR in ' + os.path.basename(__file__) +
                    ': No good transformation exist. Exit program.\n', verbose,
                    'error')
                sys.exit(2)

        # Merge data along T
        file_data_splitZ_moco.append(add_suffix(file, suffix))
        if todo != 'estimate':
            im_data_splitZ_splitT_moco = [
                Image(fname) for fname in file_data_splitZ_splitT_moco
            ]
            im_out = concat_data(im_data_splitZ_splitT_moco, 3)
            im_out.absolutepath = file_data_splitZ_moco[iz]
            im_out.save(verbose=0)

    # If sagittal, merge along Z
    if param.is_sagittal:
        # TODO: im_out.dim is incorrect: Z value is one
        im_data_splitZ_moco = [Image(fname) for fname in file_data_splitZ_moco]
        im_out = concat_data(im_data_splitZ_moco, 2)
        dirname, basename, ext = extract_fname(file_data)
        path_out = os.path.join(dirname, basename + suffix + ext)
        im_out.absolutepath = path_out
        im_out.save(verbose=0)

    return file_mat, im_out
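One subtle step in moco above is the fallback for failed registrations: each failed volume reuses the transformation of the closest successfully registered volume. A pure-Python toy check of that selection:

failed_transfo = [0, 1, 0, 0, 1]
fT = [i for i, j in enumerate(failed_transfo) if j == 1]   # failed volume indexes
gT = [i for i, j in enumerate(failed_transfo) if j == 0]   # good volume indexes
for it in range(len(fT)):
    abs_dist = [abs(gT[i] - fT[it]) for i in range(len(gT))]
    index_good = abs_dist.index(min(abs_dist))
    print('transfo #%d --> use transfo #%d' % (fT[it], gT[index_good]))
# transfo #1 --> use transfo #0   (ties resolve to the earlier good volume)
# transfo #4 --> use transfo #3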
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv if argv else ['--help'])
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    fname_input1 = arguments.i
    fname_input2 = arguments.d

    tmp_dir = tmp_create()  # create tmp directory
    tmp_dir = os.path.abspath(tmp_dir)

    # copy input files to tmp directory
    # for fname in [fname_input1, fname_input2]:
    copy(fname_input1, tmp_dir)
    copy(fname_input2, tmp_dir)
    fname_input1 = ''.join(extract_fname(fname_input1)[1:])
    fname_input2 = ''.join(extract_fname(fname_input2)[1:])

    curdir = os.getcwd()
    os.chdir(tmp_dir)  # go to tmp directory

    if arguments.bin is not None:
        fname_input1_bin = add_suffix(fname_input1, '_bin')
        run_proc(['sct_maths', '-i', fname_input1, '-bin', '0', '-o', fname_input1_bin])
        fname_input1 = fname_input1_bin
        fname_input2_bin = add_suffix(fname_input2, '_bin')
        run_proc(['sct_maths', '-i', fname_input2, '-bin', '0', '-o', fname_input2_bin])
        fname_input2 = fname_input2_bin

    # copy header of im_1 to im_2
    from spinalcordtoolbox.image import Image
    im_1 = Image(fname_input1)
    im_2 = Image(fname_input2)
    im_2.header = im_1.header
    im_2.save()

    cmd = ['isct_dice_coefficient', fname_input1, fname_input2]

    if vars(arguments)["2d_slices"] is not None:
        cmd += ['-2d-slices', str(vars(arguments)["2d_slices"])]
    if arguments.b is not None:
        bounding_box = arguments.b
        cmd += ['-b'] + bounding_box
    if arguments.bmax is not None and arguments.bmax == 1:
        cmd += ['-bmax']
    if arguments.bzmax is not None and arguments.bzmax == 1:
        cmd += ['-bzmax']
    if arguments.o is not None:
        path_output, fname_output, ext = extract_fname(arguments.o)
        cmd += ['-o', fname_output + ext]

    rm_tmp = bool(arguments.r)

    # # Computation of Dice coefficient using Python implementation.
    # # commented for now as it does not cover all the feature of isct_dice_coefficient
    # #from spinalcordtoolbox.image import Image, compute_dice
    # #dice = compute_dice(Image(fname_input1), Image(fname_input2), mode='3d', zboundaries=False)
    # #printv('Dice (python-based) = ' + str(dice), verbose)

    status, output = run_proc(cmd, verbose, is_sct_binary=True)

    os.chdir(curdir)  # go back to original directory

    # copy output file into original directory
    if arguments.o is not None:
        copy(os.path.join(tmp_dir, fname_output + ext), os.path.join(path_output, fname_output + ext))

    # remove tmp_dir
    if rm_tmp:
        rmtree(tmp_dir)

    printv(output, verbose)
Example #30
def moco_wrapper(param):
    """
    Wrapper that performs motion correction.

    :param param: ParamMoco class
    :return: None
    """
    file_data = 'data.nii'  # corresponds to the full input data (e.g. dmri or fmri)
    file_data_dirname, file_data_basename, file_data_ext = extract_fname(
        file_data)
    file_b0 = 'b0.nii'
    file_datasub = 'datasub.nii'  # corresponds to the full input data minus the b=0 scans (if param.is_diffusion=True)
    file_datasubgroup = 'datasub-groups.nii'  # concatenation of the average of each file_datasub
    file_mask = 'mask.nii'
    file_moco_params_csv = 'moco_params.tsv'
    file_moco_params_x = 'moco_params_x.nii.gz'
    file_moco_params_y = 'moco_params_y.nii.gz'
    ext_data = '.nii.gz'  # workaround "too many open files" by slurping the data
    # TODO: check if .nii can be used
    mat_final = 'mat_final/'
    # ext_mat = 'Warp.nii.gz'  # warping field

    # Start timer
    start_time = time.time()

    printv('\nInput parameters:', param.verbose)
    printv('  Input file ............ ' + param.fname_data, param.verbose)
    printv('  Group size ............ {}'.format(param.group_size),
           param.verbose)

    # Get full path
    # param.fname_data = os.path.abspath(param.fname_data)
    # param.fname_bvecs = os.path.abspath(param.fname_bvecs)
    # if param.fname_bvals != '':
    #     param.fname_bvals = os.path.abspath(param.fname_bvals)

    # Extract path, file and extension
    # path_data, file_data, ext_data = extract_fname(param.fname_data)
    # path_mask, file_mask, ext_mask = extract_fname(param.fname_mask)

    path_tmp = tmp_create(basename="moco")

    # Copying input data to tmp folder
    printv('\nCopying input data to tmp folder and convert to nii...',
           param.verbose)
    convert(param.fname_data, os.path.join(path_tmp, file_data))
    if param.fname_mask != '':
        convert(param.fname_mask,
                os.path.join(path_tmp, file_mask),
                verbose=param.verbose)
        # Update field in param (because used later in another function, and param class will be passed)
        param.fname_mask = file_mask

    # Build absolute output path and go to tmp folder
    curdir = os.getcwd()
    path_out_abs = os.path.abspath(param.path_out)
    os.chdir(path_tmp)

    # Get dimensions of data
    printv('\nGet dimensions of data...', param.verbose)
    im_data = Image(file_data)
    nx, ny, nz, nt, px, py, pz, pt = im_data.dim
    printv('  ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), param.verbose)

    # Get orientation
    printv('\nData orientation: ' + im_data.orientation, param.verbose)
    if im_data.orientation[2] in 'LR':
        param.is_sagittal = True
        printv('  Treated as sagittal')
    elif im_data.orientation[2] in 'IS':
        param.is_sagittal = False
        printv('  Treated as axial')
    else:
        param.is_sagittal = False
        printv(
            'WARNING: Orientation seems to be neither axial nor sagittal. Treated as axial.'
        )

    printv(
        "\nSet suffix of transformation file name, which depends on the orientation:"
    )
    if param.is_sagittal:
        param.suffix_mat = '0GenericAffine.mat'
        printv(
            "Orientation is sagittal, suffix is '{}'. The image is split across the R-L direction, and the "
            "estimated transformation is a 2D affine transfo.".format(
                param.suffix_mat))
    else:
        param.suffix_mat = 'Warp.nii.gz'
        printv(
            "Orientation is axial, suffix is '{}'. The estimated transformation is a 3D warping field, which is "
            "composed of a stack of 2D Tx-Ty transformations".format(
                param.suffix_mat))

    # Adjust group size in case of sagittal scan
    if param.is_sagittal and param.group_size != 1:
        printv(
            'For sagittal data group_size should be one for more robustness. Forcing group_size=1.',
            1, 'warning')
        param.group_size = 1

    if param.is_diffusion:
        # Identify b=0 and DWI images
        index_b0, index_dwi, nb_b0, nb_dwi = \
            sct_dmri_separate_b0_and_dwi.identify_b0(param.fname_bvecs, param.fname_bvals, param.bval_min,
                                                     param.verbose)

        # check if dmri and bvecs are the same size
        if not nb_b0 + nb_dwi == nt:
            printv(
                '\nERROR in ' + os.path.basename(__file__) +
                ': Size of data (' + str(nt) + ') and size of bvecs (' +
                str(nb_b0 + nb_dwi) +
                ') are not the same. Check your bvecs file.\n', 1, 'error')
            sys.exit(2)

    # ==================================================================================================================
    # Prepare data (mean/groups...)
    # ==================================================================================================================

    # Split into T dimension
    printv('\nSplit along T dimension...', param.verbose)
    im_data_split_list = split_data(im_data, 3)
    for im in im_data_split_list:
        x_dirname, x_basename, x_ext = extract_fname(im.absolutepath)
        im.absolutepath = os.path.join(x_dirname, x_basename + ".nii.gz")
        im.save()

    if param.is_diffusion:
        # Merge and average b=0 images
        printv('\nMerge and average b=0 data...', param.verbose)
        im_b0_list = []
        for it in range(nb_b0):
            im_b0_list.append(im_data_split_list[index_b0[it]])
        im_b0 = concat_data(im_b0_list, 3).save(file_b0, verbose=0)
        # Average across time
        im_b0.mean(dim=3).save(add_suffix(file_b0, '_mean'))

        n_moco = nb_dwi  # set number of data to perform moco on (using grouping)
        index_moco = index_dwi

    # If not a diffusion scan, we will motion-correct all volumes
    else:
        n_moco = nt
        index_moco = list(range(0, nt))

    nb_groups = int(math.floor(n_moco / param.group_size))

    # Generate groups indexes
    group_indexes = []
    for iGroup in range(nb_groups):
        group_indexes.append(index_moco[(iGroup *
                                         param.group_size):((iGroup + 1) *
                                                            param.group_size)])

    # add the remaining images to a new last group (in case the total number of images is not divisible by group_size)
    nb_remaining = n_moco % param.group_size  # number of remaining images
    if nb_remaining > 0:
        nb_groups += 1
        group_indexes.append(index_moco[len(index_moco) -
                                        nb_remaining:len(index_moco)])

    _, file_dwi_basename, file_dwi_ext = extract_fname(file_datasub)
    # Group data
    list_file_group = []
    for iGroup in sct_progress_bar(range(nb_groups),
                                   unit='iter',
                                   unit_scale=False,
                                   desc="Merge within groups",
                                   ascii=False,
                                   ncols=80):
        # get index
        index_moco_i = group_indexes[iGroup]
        n_moco_i = len(index_moco_i)
        # concatenate images across time, within this group
        file_dwi_merge_i = os.path.join(file_dwi_basename + '_' + str(iGroup) +
                                        ext_data)
        im_dwi_list = []
        for it in range(n_moco_i):
            im_dwi_list.append(im_data_split_list[index_moco_i[it]])
        im_dwi_out = concat_data(im_dwi_list, 3).save(file_dwi_merge_i,
                                                      verbose=0)
        # Average across time
        list_file_group.append(
            os.path.join(file_dwi_basename + '_' + str(iGroup) + '_mean' +
                         ext_data))
        im_dwi_out.mean(dim=3).save(list_file_group[-1])

    # Merge across groups
    printv('\nMerge across groups...', param.verbose)
    # file_dwi_groups_means_merge = 'dwi_averaged_groups'
    fname_dw_list = []
    for iGroup in range(nb_groups):
        fname_dw_list.append(list_file_group[iGroup])
    im_dw_list = [Image(fname) for fname in fname_dw_list]
    concat_data(im_dw_list, 3).save(file_datasubgroup, verbose=0)

    # Cleanup
    del im, im_data_split_list

    # ==================================================================================================================
    # Estimate moco
    # ==================================================================================================================

    # Initialize another class instance that will be passed on to the moco() function
    param_moco = deepcopy(param)

    if param.is_diffusion:
        # Estimate moco on b0 groups
        printv(
            '\n-------------------------------------------------------------------------------',
            param.verbose)
        printv('  Estimating motion on b=0 images...', param.verbose)
        printv(
            '-------------------------------------------------------------------------------',
            param.verbose)
        param_moco.file_data = 'b0.nii'
        # Identify target image
        if index_moco[0] != 0:
            # If first DWI is not the first volume (most common), then there is a least one b=0 image before. In that
            # case select it as the target image for registration of all b=0
            param_moco.file_target = os.path.join(
                file_data_dirname, file_data_basename + '_T' +
                str(index_b0[index_moco[0] - 1]).zfill(4) + ext_data)
        else:
            # If first DWI is the first volume, then the target b=0 is the first b=0 from the index_b0.
            param_moco.file_target = os.path.join(
                file_data_dirname, file_data_basename + '_T' +
                str(index_b0[0]).zfill(4) + ext_data)
        # Run moco
        param_moco.path_out = ''
        param_moco.todo = 'estimate_and_apply'
        param_moco.mat_moco = 'mat_b0groups'
        file_mat_b0, _ = moco(param_moco)

    # Estimate moco across groups
    printv(
        '\n-------------------------------------------------------------------------------',
        param.verbose)
    printv('  Estimating motion across groups...', param.verbose)
    printv(
        '-------------------------------------------------------------------------------',
        param.verbose)
    param_moco.file_data = file_datasubgroup
    # target is the mean of the first group (closest to the first b=0 for a DWI scan)
    param_moco.file_target = list_file_group[0]
    param_moco.path_out = ''
    param_moco.todo = 'estimate_and_apply'
    param_moco.mat_moco = 'mat_groups'
    file_mat_datasub_group, _ = moco(param_moco)
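    # file_mat_datasub_group[iz] lists, for slice index iz, one estimated transformation file per group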

    # Spline Regularization along T
    if param.spline_fitting:
        # TODO: fix this scenario (this code has not been touched for a while and is probably buggy)
        raise NotImplementedError()
        # spline(mat_final, nt, nz, param.verbose, np.array(index_b0), param.plot_graph)

    # ==================================================================================================================
    # Apply moco
    # ==================================================================================================================

    # If group_size > 1, assign the group-level transformation to each individual (ungrouped) 3D volume
    if param.group_size > 1:
        file_mat_datasub = []
        for iz in range(len(file_mat_datasub_group)):
            # duplicate each group-level transformation file group_size times, so that every volume in the
            # group is assigned the same transformation
            #  example: [mat.Z0000T0001Warp.nii] --> [mat.Z0000T0001Warp.nii, mat.Z0000T0001Warp.nii] for group_size=2
            file_mat_datasub.append(
                functools.reduce(operator.iconcat,
                                 [[i] * param.group_size
                                  for i in file_mat_datasub_group[iz]], []))
    else:
        file_mat_datasub = file_mat_datasub_group
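    # At this point, file_mat_datasub holds, for each slice index, one transformation file per individual volume.
    # For instance, with group_size=2 and file_mat_datasub_group[iz] == ['matA', 'matB'] (hypothetical names),
    # file_mat_datasub[iz] == ['matA', 'matA', 'matB', 'matB'].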

    # Copy transformations to mat_final folder and rename them appropriately
    copy_mat_files(nt, file_mat_datasub, index_moco, mat_final, param)
    if param.is_diffusion:
        copy_mat_files(nt, file_mat_b0, index_b0, mat_final, param)

    # Apply moco to all volumes of the input data
    printv(
        '\n-------------------------------------------------------------------------------',
        param.verbose)
    printv('  Apply moco', param.verbose)
    printv(
        '-------------------------------------------------------------------------------',
        param.verbose)
    param_moco.file_data = file_data
    param_moco.file_target = list_file_group[0]  # reference for reslicing into the proper coordinate system
    param_moco.path_out = ''  # TODO not used in moco()
    param_moco.mat_moco = mat_final
    param_moco.todo = 'apply'
    file_mat_data, im_moco = moco(param_moco)
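    # moco() returns the transformation files that were applied and the motion-corrected 4D image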

    # copy geometric information from header
    # NB: this is required because WarpImageMultiTransform in 2D mode wrongly sets pixdim(3) to "1".
    im_moco.header = im_data.header
    im_moco.save(verbose=0)

    # Average across time
    if param.is_diffusion:
        # generate b0_moco_mean and dwi_moco_mean
        args = [
            '-i', im_moco.absolutepath, '-bvec', param.fname_bvecs, '-a', '1',
            '-v', '0'
        ]
        if param.fname_bvals != '':
            # a bvals file was provided
            args += ['-bval', param.fname_bvals]
        fname_b0, fname_b0_mean, fname_dwi, fname_dwi_mean = sct_dmri_separate_b0_and_dwi.main(
            argv=args)
    else:
        fname_moco_mean = add_suffix(im_moco.absolutepath, '_mean')
        im_moco.mean(dim=3).save(fname_moco_mean)

    # Extract and output the motion parameters (not supported for sagittal orientations)
    printv('Extract motion parameters...')
    if param.output_motion_param:
        if param.is_sagittal:
            printv(
                'Motion parameters cannot be generated for sagittal images.',
                1, 'warning')
        else:
            files_warp_X, files_warp_Y = [], []
            moco_param = []
            for fname_warp in file_mat_data[0]:
                # Crop the warping field to keep only the voxel at (0, 0) in the XY plane
                im_warp = Image(fname_warp + param.suffix_mat)
                im_warp.data = np.expand_dims(
                    np.expand_dims(im_warp.data[0, 0, :, :, :], axis=0), axis=0)
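                # after cropping, one displacement vector remains per slice (the field sampled at in-plane voxel (0, 0))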

                # The commented-out lines below would generate a single file containing the X, Y and Z moco
                # parameters, instead of two separate files:
                # fname_warp_crop = fname_warp + '_crop_' + ext_mat
                # files_warp.append(fname_warp_crop)
                # im_warp.save(fname_warp_crop)

                # Separating the three components and saving X and Y only (Z is equal to 0 by default).
                im_warp_XYZ = multicomponent_split(im_warp)

                fname_warp_crop_X = fname_warp + '_crop_X_' + param.suffix_mat
                im_warp_XYZ[0].save(fname_warp_crop_X)
                files_warp_X.append(fname_warp_crop_X)

                fname_warp_crop_Y = fname_warp + '_crop_Y_' + param.suffix_mat
                im_warp_XYZ[1].save(fname_warp_crop_Y)
                files_warp_Y.append(fname_warp_crop_Y)

                # Compute the average moco estimate across slices for this volume (used for the QC file)
                moco_param.append([
                    np.mean(np.ravel(im_warp_XYZ[0].data)),
                    np.mean(np.ravel(im_warp_XYZ[1].data))
                ])
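                # moco_param accumulates one (mean X, mean Y) displacement pair per transformation file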

            # The commented-out lines below would generate a single file containing the X, Y and Z moco
            # parameters, instead of two separate files:
            # im_warp = [Image(fname) for fname in files_warp]
            # im_warp_concat = concat_data(im_warp, dim=3)
            # im_warp_concat.save('fmri_moco_params.nii')

            # Concatenating the moco parameters into a time series for X and Y components.
            im_warp_X = [Image(fname) for fname in files_warp_X]
            im_warp_concat = concat_data(im_warp_X, dim=3)
            im_warp_concat.save(file_moco_params_x)

            im_warp_Y = [Image(fname) for fname in files_warp_Y]
            im_warp_concat = concat_data(im_warp_Y, dim=3)
            im_warp_concat.save(file_moco_params_y)
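            # file_moco_params_x and file_moco_params_y now contain the X and Y moco parameters,
            # concatenated along the 4th (time) dimension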

            # Write a TSV file with the slice-wise average estimates of the moco parameters (useful for QC)
            with open(file_moco_params_csv, 'wt') as out_file:
                tsv_writer = csv.writer(out_file, delimiter='\t')
                tsv_writer.writerow(['X', 'Y'])
                for mocop in moco_param:
                    tsv_writer.writerow([mocop[0], mocop[1]])

    # Generate output files
    printv('\nGenerate output files...', param.verbose)
    fname_moco = os.path.join(
        path_out_abs,
        add_suffix(os.path.basename(param.fname_data), param.suffix))
    generate_output_file(im_moco.absolutepath, fname_moco)
    if param.is_diffusion:
        generate_output_file(fname_b0_mean, add_suffix(fname_moco, '_b0_mean'))
        generate_output_file(fname_dwi_mean,
                             add_suffix(fname_moco, '_dwi_mean'))
    else:
        generate_output_file(fname_moco_mean, add_suffix(fname_moco, '_mean'))
    if os.path.exists(file_moco_params_csv):
        generate_output_file(file_moco_params_x,
                             os.path.join(path_out_abs, file_moco_params_x),
                             squeeze_data=False)
        generate_output_file(file_moco_params_y,
                             os.path.join(path_out_abs, file_moco_params_y),
                             squeeze_data=False)
        generate_output_file(file_moco_params_csv,
                             os.path.join(path_out_abs, file_moco_params_csv))

    # Delete temporary files
    if param.remove_temp_files == 1:
        printv('\nDelete temporary files...', param.verbose)
        rmtree(path_tmp, verbose=param.verbose)

    # come back to working directory
    os.chdir(curdir)

    # display elapsed time
    elapsed_time = time.time() - start_time
    printv(
        '\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's',
        param.verbose)

    display_viewer_syntax(
        [
            os.path.join(param.path_out, add_suffix(os.path.basename(param.fname_data), param.suffix)),
            param.fname_data,
        ],
        mode='ortho,ortho')