Esempio n. 1
0
def test_realign4d():
    """This tests whether realign4d yields the same results depending on
    whether the slice order is input explicitly or as
    slice_times='ascending'.

    Due to the very small size of the image used for testing (only 3
    slices), optimization is numerically unstable. It seems to make
    the default optimizer, namely scipy.fmin.fmin_ncg, adopt a random
    behavior. To work around the resulting inconsistency in results,
    we use a custom steepest gradient descent as the optimizer,
    although it's generally not recommended in practice.
    """
    runs = [im, im]
    orient = io_orientation(im.get_affine())
    slice_axis = int(np.where(orient[:, 0] == 2)[0])
    R1 = SpaceTimeRealign(runs, tr=2., slice_times='ascending',
                          slice_info=slice_axis)
    R1.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest')
    nslices = im.shape[slice_axis]
    slice_times = (2. / float(nslices)) * np.arange(nslices)
    R2 = SpaceTimeRealign(runs, tr=2., slice_times=slice_times,
                          slice_info=slice_axis)
    R2.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest')
    # Per-scan transforms must agree between the two parameterizations.
    for r in range(2):
        for i in range(im.shape[3]):
            assert_array_almost_equal(R1._transforms[r][i].translation,
                                      R2._transforms[r][i].translation)
            assert_array_almost_equal(R1._transforms[r][i].rotation,
                                      R2._transforms[r][i].rotation)
    # BUG FIX: the original looped `for i in range(im.shape[3])` while
    # indexing `_mean_transforms[r]` with the stale loop variable `r`,
    # re-checking the same transform repeatedly. Iterate over runs instead.
    for r in range(2):
        assert_array_almost_equal(R1._mean_transforms[r].translation,
                                  R2._mean_transforms[r].translation)
        assert_array_almost_equal(R1._mean_transforms[r].rotation,
                                  R2._mean_transforms[r].rotation)
Esempio n. 2
0
def flexi_tvis_affine(sl_vox_order, grid_affine, dim, voxel_size):
    """Build the affine mapping voxel indices to trackvis streamline points.

    Reconciles streamlines and grids that may use different voxel orders.

    Parameters
    ----------
    sl_vox_order : string of length 3
        voxel order of the streamlines (ex: LPS)
    grid_affine : array (4, 4)
        affine of the grid's current space relative to RAS+ scanner space
    dim : tuple of length 3
        dimension of the grid
    voxel_size : array (3,)
        voxel size of the grid

    Returns
    -------
    flexi_tvis_aff : affine mapping between the grid and trackvis space
    """
    streamline_ornt = orientation_from_string(str(sl_vox_order))
    grid_ornt = nib.io_orientation(grid_affine)
    # Reorder grid voxels into the streamlines' voxel order first.
    to_sl_order = reorder_voxels_affine(
        grid_ornt, streamline_ornt, np.array(dim) - 1, np.array([1, 1, 1]))
    # Then map from voxel indices into trackvis (mm) coordinates.
    return np.dot(affine_for_trackvis(voxel_size), to_sl_order)
Esempio n. 3
0
def rasLimits(hdr):
    """Return the RAS-space bounding box of the volume described by `hdr`.

    Parameters
    ----------
    hdr : Nifti-like header exposing get_best_affine() and get_data_shape().

    Returns
    -------
    list of three [min, max] pairs, one per RAS axis.
    """
    import nibabel
    import numpy
    q = hdr.get_best_affine()
    dims = hdr.get_data_shape()
    # Voxel centers sit on integer coordinates, so the grid extends half a
    # voxel past the first/last center on each axis.
    corners = numpy.array([
        [-0.5, -0.5, -0.5],
        [dims[0] - 0.5, dims[1] - 0.5, dims[2] - 0.5]
    ])
    rasLimits = nibabel.affines.apply_affine(q, corners).T
    # The affine may flip axes; sort each row so it reads (min, max).
    rasLimits.sort(axis=1)
    return rasLimits.tolist()
def guess_slice_axis_and_direction(slice_info, affine):
    """Determine the slice acquisition axis and direction.

    Parameters
    ----------
    slice_info : None or sequence of length 2
        (slice_axis, slice_direction); if None, guess both from `affine`.
    affine : array (4, 4)
        Voxel-to-world affine; only consulted when `slice_info` is None.

    Returns
    -------
    (slice_axis, slice_direction) : tuple of int
    """
    # BUG FIX: use identity comparison -- `== None` is unidiomatic and can
    # misbehave when slice_info is array-like (elementwise comparison).
    if slice_info is None:
        # Pick the data axis mapped closest to world z (output code 2) and
        # its direction along that axis.
        orient = io_orientation(affine)
        slice_axis = int(np.where(orient[:, 0] == 2)[0])
        slice_direction = int(orient[slice_axis, 1])
    else:
        slice_axis = int(slice_info[0])
        slice_direction = int(slice_info[1])
    return slice_axis, slice_direction
Esempio n. 5
0
 def save_streamlines(self, streamlines, save_streamlines_to):
     """Write `streamlines` to a trackvis file and pickle this object."""
     header = empty_header()
     # Header geometry comes from this tracker's own affine/size/shape.
     header['voxel_order'] = orientation_to_string(nib.io_orientation(self.affine))
     header['voxel_size'] = self.voxel_size
     header['vox_to_ras'] = self.affine
     header['dim'] = self.shape
     # Trackvis tracks are (points, scalars, properties) triples.
     write(save_streamlines_to,
           ((line, None, None) for line in streamlines),
           header)
     pickle.dump(self, open(save_streamlines_to + '.p', 'wb'))
Esempio n. 6
0
 def save_streamlines(self, streamlines, save_streamlines_to):
     # Persist tracking results: streamlines go to a trackvis (.trk) file
     # and the tracker object itself is pickled alongside it.
     trk_hdr = empty_header()
     # Voxel order string (e.g. 'RAS') derived from the data affine.
     voxel_order = orientation_to_string(nib.io_orientation(self.affine))
     trk_hdr['voxel_order'] = voxel_order
     trk_hdr['voxel_size'] = self.voxel_size
     trk_hdr['vox_to_ras'] = self.affine
     trk_hdr['dim'] = self.shape
     # Trackvis tracks are (points, scalars, properties) triples.
     trk_tracks = ((ii, None, None) for ii in streamlines)
     write(save_streamlines_to, trk_tracks, trk_hdr)
     # Pickle the tracker next to the track file for later reloading.
     pickle.dump(self, open(save_streamlines_to + '.p', 'wb'))
Esempio n. 7
0
def orient_correctly(img_nii):
    """Reorient a Nifti image to closest-canonical and flip/transpose it.

    Parameters
    ----------
    img_nii : nibabel image

    Returns
    -------
    (img_flip, orientation) : reoriented float array and the original
        nibabel orientation matrix of `img_nii.affine`.
    """
    orientation = nibabel.io_orientation(img_nii.affine)
    try:
        # Second arg True enforces a diagonal canonical affine; this can
        # raise for oblique images.
        img_new = nibabel.as_closest_canonical(img_nii,
                                               True).get_data().astype(float)
    # BUG FIX: bare `except:` also swallows KeyboardInterrupt/SystemExit;
    # narrow to Exception for the fallback path.
    except Exception:
        img_new = nibabel.as_closest_canonical(img_nii,
                                               False).get_data().astype(float)
    # Swap the last two axes, then mirror the first two.
    img_trans = np.transpose(img_new, (0, 2, 1))
    img_flip = np.flip(img_trans, 0)
    img_flip = np.flip(img_flip, 1)
    return img_flip, orientation
Esempio n. 8
0
 def read_data(self):
     """Load DWI data, FA map and gradient table from the configured files.

     Returns (data, voxel_size, affine, fa, bvec, bval).
     """
     data_img = nib.load(self.dwi_images)
     affine = data_img.get_affine()
     voxel_size = data_img.get_header().get_zooms()
     # Keep only the 3 spatial zooms (4D images carry a 4th zoom).
     voxel_size = voxel_size[:3]
     fa_img = nib.load(self.fa_file)
     # FA must live on the same spatial grid as the 4D DWI data.
     assert data_img.shape[:-1] == fa_img.shape
     bvec, bval = read_bvec_file(self.bvec_file)
     data_ornt = nib.io_orientation(affine)
     # Rotate b-vectors into the image frame unless already given there.
     if self.bvec_orientation != 'IMG':
         bvec = reorient_vectors(bvec, self.bvec_orientation, data_ornt)
     fa = fa_img.get_data()
     data = data_img.get_data()
     return data, voxel_size, affine, fa, bvec, bval
Esempio n. 9
0
 def read_data(self):
     """Load DWI data, FA map and gradient table from the configured files.

     Returns (data, voxel_size, affine, fa, bvec, bval).
     """
     data_img = nib.load(self.dwi_images)
     affine = data_img.get_affine()
     voxel_size = data_img.get_header().get_zooms()
     # Keep only the 3 spatial zooms (4D images carry a 4th zoom).
     voxel_size = voxel_size[:3]
     fa_img = nib.load(self.fa_file)
     # FA must live on the same spatial grid as the 4D DWI data.
     assert data_img.shape[:-1] == fa_img.shape
     bvec, bval = read_bvec_file(self.bvec_file)
     data_ornt = nib.io_orientation(affine)
     # Rotate b-vectors into the image frame unless already given there.
     if self.bvec_orientation != 'IMG':
         bvec = reorient_vectors(bvec, self.bvec_orientation, data_ornt)
     fa = fa_img.get_data()
     data = data_img.get_data()
     return data, voxel_size, affine, fa, bvec, bval
Esempio n. 10
0
def test_reorientation_backport():
    """Check `_as_reoriented_backport` matches nibabel's `as_reoriented`
    on randomly rotated affines and random target orientations."""
    pixdims = ((1, 1, 1), (2, 2, 3))
    data = np.random.normal(size=(17, 18, 19, 2))

    for pixdim in pixdims:
        # Generate a randomly rotated affine
        angles = np.random.uniform(-np.pi, np.pi, 3) * [1, 0.5, 1]
        rot = nb.eulerangles.euler2mat(*angles)
        scale = np.diag(pixdim)
        translation = np.array((17, 18, 19)) / 2
        affine = nb.affines.from_matvec(rot.dot(scale), translation)

        # Create image
        img = nb.Nifti1Image(data, affine)
        dim_info = {"freq": 0, "phase": 1, "slice": 2}
        img.header.set_dim_info(**dim_info)

        # Find a random, non-identity transform
        targ_ornt = orig_ornt = nb.io_orientation(affine)
        while np.array_equal(targ_ornt, orig_ornt):
            new_code = np.random.choice(_orientations)
            targ_ornt = axcodes2ornt(new_code)

        identity = ornt_transform(orig_ornt, orig_ornt)
        transform = ornt_transform(orig_ornt, targ_ornt)

        # Identity transform returns exact image
        assert img.as_reoriented(identity) is img
        assert _as_reoriented_backport(img, identity) is img

        reoriented_a = img.as_reoriented(transform)
        reoriented_b = _as_reoriented_backport(img, transform)

        # Unchanged shape means the transform only flipped axes.
        flips_only = img.shape == reoriented_a.shape

        # Reorientation changes affine and data array
        assert not np.allclose(img.affine, reoriented_a.affine)
        assert not (flips_only
                    and np.allclose(img.get_fdata(), reoriented_a.get_fdata()))
        # Dimension info changes iff axes are reordered
        assert flips_only == np.array_equal(img.header.get_dim_info(),
                                            reoriented_a.header.get_dim_info())

        # Both approaches produce equivalent images
        assert np.allclose(reoriented_a.affine, reoriented_b.affine)
        assert np.array_equal(reoriented_a.get_fdata(),
                              reoriented_b.get_fdata())
        assert np.array_equal(reoriented_a.header.get_dim_info(),
                              reoriented_b.header.get_dim_info())
Esempio n. 11
0
def test_reorientation_backport():
    """Check `_as_reoriented_backport` matches nibabel's `as_reoriented`
    on randomly rotated affines and random target orientations."""
    pixdims = ((1, 1, 1), (2, 2, 3))
    data = np.random.normal(size=(17, 18, 19, 2))

    for pixdim in pixdims:
        # Generate a randomly rotated affine
        angles = np.random.uniform(-np.pi, np.pi, 3) * [1, 0.5, 1]
        rot = nb.eulerangles.euler2mat(*angles)
        scale = np.diag(pixdim)
        translation = np.array((17, 18, 19)) / 2
        affine = nb.affines.from_matvec(rot.dot(scale), translation)

        # Create image
        img = nb.Nifti1Image(data, affine)
        dim_info = {'freq': 0, 'phase': 1, 'slice': 2}
        img.header.set_dim_info(**dim_info)

        # Find a random, non-identity transform
        targ_ornt = orig_ornt = nb.io_orientation(affine)
        while np.array_equal(targ_ornt, orig_ornt):
            new_code = np.random.choice(_orientations)
            targ_ornt = axcodes2ornt(new_code)

        identity = ornt_transform(orig_ornt, orig_ornt)
        transform = ornt_transform(orig_ornt, targ_ornt)

        # Identity transform returns exact image
        assert img.as_reoriented(identity) is img
        assert _as_reoriented_backport(img, identity) is img

        reoriented_a = img.as_reoriented(transform)
        reoriented_b = _as_reoriented_backport(img, transform)

        # Unchanged shape means the transform only flipped axes.
        flips_only = img.shape == reoriented_a.shape

        # Reorientation changes affine and data array
        assert not np.allclose(img.affine, reoriented_a.affine)
        assert not (flips_only and
                    np.allclose(img.get_data(), reoriented_a.get_data()))
        # Dimension info changes iff axes are reordered
        assert flips_only == np.array_equal(img.header.get_dim_info(),
                                            reoriented_a.header.get_dim_info())

        # Both approaches produce equivalent images
        assert np.allclose(reoriented_a.affine, reoriented_b.affine)
        assert np.array_equal(reoriented_a.get_data(), reoriented_b.get_data())
        assert np.array_equal(reoriented_a.header.get_dim_info(),
                              reoriented_b.header.get_dim_info())
Esempio n. 12
0
    def __init__(self, data, affine, tr, tr_slices=None, start=0.0,
                 slice_order=SLICE_ORDER, interleaved=INTERLEAVED,
                 slice_info=None):
        """
        Configure fMRI acquisition time parameters.

        tr  : inter-scan repetition time, i.e. the time elapsed
              between two consecutive scans
        tr_slices : inter-slice repetition time, same as tr for slices
        start   : starting acquisition time respective to the implicit
                  time origin
        slice_order : str or array-like, optional
            If str, one of {'ascending', 'descending'}.  If array-like, then the
            order in which the slices were collected in time.
        interleaved : bool, optional
            Whether slice acquisition order is interleaved.  Ignored if
            `slice_order` is array-like.
        slice_info : None or tuple, optional
            None, or a tuple with slice axis as the first element and direction
            as the second, for instance (2, 1).  If None, then guess the slice
            axis, and direction, as the closest to the z axis, as estimated from
            the affine.
        """
        self.affine = np.asarray(affine)
        self.tr = float(tr)
        self.start = float(start)
        self.interleaved = bool(interleaved)

        # guess the slice axis and direction (z-axis)
        # BUG FIX: identity comparison -- `slice_info == None` is unidiomatic
        # and can misbehave for array-like inputs (elementwise comparison).
        if slice_info is None:
            orient = io_orientation(self.affine)
            self.slice_axis = int(np.where(orient[:, 0] == 2)[0])
            self.slice_direction = int(orient[self.slice_axis, 1])
        else:
            self.slice_axis = int(slice_info[0])
            self.slice_direction = int(slice_info[1])

        # unformatted parameters
        self._tr_slices = tr_slices
        self._slice_order = slice_order

        # `data` may be an ndarray, or a callable that produces one (lazy
        # loading); timing parameters are only derived when data is present.
        if isinstance(data, np.ndarray):
            self._data = data
            self._get_data = None
            self._init_timing_parameters()
        else:
            self._data = None
            self._get_data = data
Esempio n. 13
0
def revert_reorientation(image: str) -> None:
    """Undo a previous `reorient_to_ras`, restoring the original affine.

    Loads the pickled original affine saved by `reorient_to_ras`, reorients
    the image back, verifies the affine round-trips, overwrites `image`
    in place and removes the pickle.
    """
    assert image.endswith('.nii.gz')
    expected_pkl = image[:-7] + '_originalAffine.pkl'
    assert isfile(expected_pkl), 'Must have a file with the original affine, as created by ' \
                                 'reorient_to_ras. Expected filename: %s' % \
                                 expected_pkl
    # Reuse expected_pkl instead of rebuilding the identical path string.
    original_affine, original_axcode = load_pickle(expected_pkl)
    img = nib.load(image)
    before_revert = nib.aff2axcodes(img.affine)
    img = img.as_reoriented(io_orientation(original_affine))
    after_revert = nib.aff2axcodes(img.affine)
    print('before revert', before_revert, 'after revert', after_revert)
    restored_affine = img.affine
    assert np.all(np.isclose(original_affine, restored_affine)), 'restored affine does not match original affine, ' \
                                                                 'aborting!'
    nib.save(img, image)
    os.remove(expected_pkl)
Esempio n. 14
0
def reorient_to_ras(image: str) -> None:
    """Reorient `image` to RAS in place, remembering the original affine.

    Overwrites `image`!  The original affine and axis codes are pickled to
    `<image>_originalAffine.pkl` so the change can be reverted; if that
    pickle already exists, the image is assumed reoriented and left alone.
    """
    assert image.endswith('.nii.gz')
    origaffine_pkl = image[:-7] + '_originalAffine.pkl'
    # Guard clause: an existing pickle means this image was already done.
    if isfile(origaffine_pkl):
        return
    img = nib.load(image)
    original_affine = img.affine
    original_axcode = nib.aff2axcodes(img.affine)
    img = img.as_reoriented(io_orientation(img.affine))
    new_axcode = nib.aff2axcodes(img.affine)
    print(
        image.split('/')[-1], 'original axcode', original_axcode,
        'now (should be ras)', new_axcode)
    nib.save(img, image)
    save_pickle((original_affine, original_axcode), origaffine_pkl)
Esempio n. 15
0
    def _run_interface(self, runtime):
        """Reorient the input image to `self.inputs.orientation`, writing the
        (possibly new) image and the voxel-space transform matrix to disk."""
        import numpy as np
        import nibabel as nb
        from nibabel.orientations import (axcodes2ornt, ornt_transform,
                                          inv_ornt_aff)

        fname = self.inputs.in_file
        orig_img = nb.load(fname)

        # Find transform from current (approximate) orientation to
        # target, in nibabel orientation matrix and affine forms
        orig_ornt = nb.io_orientation(orig_img.affine)
        targ_ornt = axcodes2ornt(self.inputs.orientation)
        transform = ornt_transform(orig_ornt, targ_ornt)
        affine_xfm = inv_ornt_aff(transform, orig_img.shape)

        # Check can be eliminated when minimum nibabel version >= 2.2
        if hasattr(orig_img, 'as_reoriented'):
            reoriented = orig_img.as_reoriented(transform)
        else:
            reoriented = _as_reoriented_backport(orig_img, transform)

        # Image may be reoriented; identity transforms return the same
        # object, in which case the original file is reused unchanged.
        if reoriented is not orig_img:
            suffix = '_' + self.inputs.orientation.lower()
            out_name = fname_presuffix(fname,
                                       suffix=suffix,
                                       newpath=runtime.cwd)
            reoriented.to_filename(out_name)
        else:
            out_name = fname

        # Always save the voxel-space affine transform as a .mat text file.
        mat_name = fname_presuffix(fname,
                                   suffix='.mat',
                                   newpath=runtime.cwd,
                                   use_ext=False)
        np.savetxt(mat_name, affine_xfm, fmt='%.08f')

        self._results['out_file'] = out_name
        self._results['transform'] = mat_name

        return runtime
Esempio n. 16
0
def run(args):
    """Compute (and optionally save as JSON) the RAS-space bounding box of
    the Nifti volume `args.nifti_src`."""
    if args.verbose:

        def verbose(msg):
            print(msg)
    else:

        def verbose(msg):
            pass

    verbose('Input Nifti: ' + args.nifti_src)
    try:
        import nibabel
        nii = nibabel.load(args.nifti_src)
        hdr = nii.get_header()
        q = hdr.get_best_affine()
        ornt = nibabel.io_orientation(q)
        verbose('q {}'.format(q))
        verbose('ornt {}'.format(ornt))
        dims = hdr.get_data_shape()
        # Voxel centers sit on integers, so the grid edges are at +/- 0.5.
        rasLimitsT = numpy.array(
            [[-0.5, -0.5, -0.5], [dims[0] - 0.5, dims[1] - 0.5,
                                  dims[2] - 0.5]])
        rasLimits = nibabel.affines.apply_affine(q, rasLimitsT).T
        verbose('rasLimits {}'.format(rasLimits))
        # The affine may flip axes; make each row read (min, max).
        for lim in rasLimits:
            if lim[1] < lim[0]:
                lim[0], lim[1] = lim[1], lim[0]

        verbose('rasLimits {}'.format(rasLimits))
        if args.out:
            with open(args.out, 'w') as fp:
                json.dump(rasLimits.tolist(), fp)

        return rasLimits
    except Exception:
        # BUG FIX: the original used Python-2 `print "...", x` syntax, which
        # is a SyntaxError on Python 3; log then re-raise.
        print("Unexpected error:", sys.exc_info()[0])
        raise
def run(args):
    """Compute (and optionally save as JSON) the RAS-space bounding box of
    the Nifti volume `args.nifti_src`."""
    if args.verbose:
        def verbose(msg):
            print(msg)
    else:
        def verbose(msg):
            pass

    verbose('Input Nifti: '+args.nifti_src)
    try:
        import nibabel
        nii = nibabel.load(args.nifti_src)
        hdr = nii.get_header()
        q = hdr.get_best_affine()
        ornt = nibabel.io_orientation(q)
        verbose('q {}'.format(q))
        verbose('ornt {}'.format(ornt))
        dims = hdr.get_data_shape()
        # Voxel centers sit on integers, so the grid edges are at +/- 0.5.
        rasLimitsT = numpy.array([
            [-0.5, -0.5, -0.5],
            [dims[0] - 0.5, dims[1] - 0.5, dims[2] - 0.5]
        ])
        rasLimits = nibabel.affines.apply_affine(q, rasLimitsT).T
        verbose('rasLimits {}'.format(rasLimits))
        # The affine may flip axes; make each row read (min, max).
        for lim in rasLimits:
            if lim[1] < lim[0]:
                lim[0], lim[1] = lim[1], lim[0]

        verbose('rasLimits {}'.format(rasLimits))
        if args.out:
            with open(args.out, 'w') as fp:
                json.dump(rasLimits.tolist(), fp)

        return rasLimits
    except Exception:
        # BUG FIX: the original used Python-2 `print "...", x` syntax, which
        # is a SyntaxError on Python 3; log then re-raise.
        print("Unexpected error:", sys.exc_info()[0])
        raise
Esempio n. 18
0
    def __init__(self, data, affine, tr, tr_slices=None, start=0.0,
                 slice_order=SLICE_ORDER, interleaved=INTERLEAVED,
                 slice_info=None):
        """
        Configure fMRI acquisition time parameters.

        tr  : inter-scan repetition time, i.e. the time elapsed
              between two consecutive scans
        tr_slices : inter-slice repetition time, same as tr for slices
        start   : starting acquisition time respective to the implicit
                  time origin
        slice_order : string or array
        slice_info : a tuple with slice axis as the first element and
          direction as the second, for instance (2, 1); if None, both are
          guessed from the affine
        """
        self.affine = np.asarray(affine)
        self.tr = float(tr)
        self.start = float(start)
        self.interleaved = bool(interleaved)

        # guess the slice axis and direction (z-axis)
        # BUG FIX: identity comparison -- `slice_info == None` is unidiomatic
        # and can misbehave for array-like inputs (elementwise comparison).
        if slice_info is None:
            orient = io_orientation(self.affine)
            self.slice_axis = int(np.where(orient[:, 0] == 2)[0])
            self.slice_direction = int(orient[self.slice_axis, 1])
        else:
            self.slice_axis = int(slice_info[0])
            self.slice_direction = int(slice_info[1])

        # unformatted parameters
        self._tr_slices = tr_slices
        self._slice_order = slice_order

        # `data` may be an ndarray, or a callable that produces one (lazy
        # loading); timing parameters are only derived when data is present.
        if isinstance(data, np.ndarray):
            self._data = data
            self._get_data = None
            self._init_timing_parameters()
        else:
            self._data = None
            self._get_data = data
Esempio n. 19
0
    def __call__(self, data_array, affine=None):
        """
        original orientation of `data_array` is defined by `affine`.

        Args:
            data_array (ndarray): in shape (num_channels, H[, W, ...]).
            affine (matrix): (N+1)x(N+1) original affine matrix for spatially ND `data_array`. Defaults to identity.

        Returns:
            data_array (reoriented in `self.axcodes`), original axcodes, current axcodes.

        Raises:
            ValueError: if `data_array` has no spatial dimensions, or
                `self.axcodes` is shorter than the spatial rank.
        """
        # Spatial rank: every dim after the leading channel dim.
        sr = data_array.ndim - 1
        if sr <= 0:
            raise ValueError(
                'the array should have at least one spatial dimension.')
        if affine is None:
            # No affine given: treat the data as axis-aligned (identity).
            affine = np.eye(sr + 1, dtype=np.float64)
            affine_ = np.eye(sr + 1, dtype=np.float64)
        else:
            # Promote/demote the affine to match the spatial rank.
            affine_ = to_affine_nd(sr, affine)
        src = nib.io_orientation(affine_)
        if self.as_closest_canonical:
            spatial_ornt = src
        else:
            dst = nib.orientations.axcodes2ornt(self.axcodes[:sr],
                                                labels=self.labels)
            if len(dst) < sr:
                raise ValueError(
                    '`self.axcodes` should have at least {0} elements'
                    ' given the data array is in spatial {0}D, got "{1}"'.
                    format(sr, self.axcodes))
            spatial_ornt = nib.orientations.ornt_transform(src, dst)
        ornt = spatial_ornt.copy()
        ornt[:, 0] += 1  # skip channel dim
        # Prepend an identity row so the channel axis is left untouched.
        ornt = np.concatenate([np.array([[0, 1]]), ornt])
        shape = data_array.shape[1:]
        data_array = nib.orientations.apply_orientation(data_array, ornt)
        # Update the affine to account for the reorientation just applied.
        new_affine = affine_ @ nib.orientations.inv_ornt_aff(
            spatial_ornt, shape)
        new_affine = to_affine_nd(affine, new_affine)
        return data_array, affine, new_affine
Esempio n. 20
0
File: image.py Project: nipy/nipype
    def _run_interface(self, runtime):
        """Reorient the input image to `self.inputs.orientation`, writing the
        (possibly new) image and the voxel-space transform matrix to disk."""
        import numpy as np
        import nibabel as nb
        from nibabel.orientations import (
            axcodes2ornt, ornt_transform, inv_ornt_aff)

        fname = self.inputs.in_file
        orig_img = nb.load(fname)

        # Find transform from current (approximate) orientation to
        # target, in nibabel orientation matrix and affine forms
        orig_ornt = nb.io_orientation(orig_img.affine)
        targ_ornt = axcodes2ornt(self.inputs.orientation)
        transform = ornt_transform(orig_ornt, targ_ornt)
        affine_xfm = inv_ornt_aff(transform, orig_img.shape)

        # Check can be eliminated when minimum nibabel version >= 2.4
        if LooseVersion(nb.__version__) >= LooseVersion('2.4.0'):
            reoriented = orig_img.as_reoriented(transform)
        else:
            reoriented = _as_reoriented_backport(orig_img, transform)

        # Image may be reoriented; identity transforms return the same
        # object, in which case the original file is reused unchanged.
        if reoriented is not orig_img:
            suffix = '_' + self.inputs.orientation.lower()
            out_name = fname_presuffix(fname, suffix=suffix,
                                       newpath=runtime.cwd)
            reoriented.to_filename(out_name)
        else:
            out_name = fname

        # Always save the voxel-space affine transform as a .mat text file.
        mat_name = fname_presuffix(fname, suffix='.mat',
                                   newpath=runtime.cwd, use_ext=False)
        np.savetxt(mat_name, affine_xfm, fmt='%.08f')

        self._results['out_file'] = out_name
        self._results['transform'] = mat_name

        return runtime
Esempio n. 21
0
def test_realign4d():
    """This tests whether realign4d yields the same results depending on
    whether the slice order is input explicitly or as
    slice_times='ascending'.

    Due to the very small size of the image used for testing (only 3
    slices), optimization is numerically unstable. It seems to make
    the default optimizer, namely scipy.fmin.fmin_ncg, adopt a random
    behavior. To work around the resulting inconsistency in results,
    we use a custom steepest gradient descent as the optimizer,
    although it's generally not recommended in practice.
    """
    runs = [im, im]
    orient = io_orientation(im.get_affine())
    slice_axis = int(np.where(orient[:, 0] == 2)[0])
    R1 = SpaceTimeRealign(runs,
                          tr=2.,
                          slice_times='ascending',
                          slice_info=slice_axis)
    R1.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest')
    nslices = im.shape[slice_axis]
    slice_times = (2. / float(nslices)) * np.arange(nslices)
    R2 = SpaceTimeRealign(runs,
                          tr=2.,
                          slice_times=slice_times,
                          slice_info=slice_axis)
    R2.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest')
    # Per-scan transforms must agree between the two parameterizations.
    for r in range(2):
        for i in range(im.shape[3]):
            assert_array_almost_equal(R1._transforms[r][i].translation,
                                      R2._transforms[r][i].translation)
            assert_array_almost_equal(R1._transforms[r][i].rotation,
                                      R2._transforms[r][i].rotation)
    # BUG FIX: the original looped `for i in range(im.shape[3])` while
    # indexing `_mean_transforms[r]` with the stale loop variable `r`,
    # re-checking the same transform repeatedly. Iterate over runs instead.
    for r in range(2):
        assert_array_almost_equal(R1._mean_transforms[r].translation,
                                  R2._mean_transforms[r].translation)
        assert_array_almost_equal(R1._mean_transforms[r].rotation,
                                  R2._mean_transforms[r].rotation)
Esempio n. 22
0
def run(args):
    # NOTE(review): this is Python 2 code (print statements, StringIO module)
    # and will not parse under Python 3.
    try:
        # Load the input volume and drop singleton dimensions.
        nii = nibabel.load(args.input)
        img = numpy.squeeze(nii.get_data())
        img_min = numpy.amin(img)
        img_max = numpy.amax(img)
        print 'Image type: {} {}-{}'.format(img.dtype,img_min,img_max)
        hdr = nii.get_header()
        q = hdr.get_best_affine();
        ornt = nibabel.io_orientation(q)
        print 'Orientation: {}'.format(ornt)

        # Convert to MINC format, writing into an in-memory buffer.
        from StringIO import StringIO
        file_map = nibabel.MincImage.make_file_map()
        file_map['image'].fileobj = StringIO()
        
        print sorted(file_map)
        mnc = nibabel.MincImage(img,q)
        mnc.file_map = file_map
        mnc.to_file_map()
    except:
        # Log the exception type, then propagate it.
        print "Unexpected error:", sys.exc_info()[0]
        raise
Esempio n. 23
0
def _reorient_image(image, orientation='RAS'):
    """
    Re-orient `image` to the axis codes given by `orientation`.

    Parameters
    ----------
    image : niimg_like
        Image to be re-oriented
    orientation : str or tuple-of-str
        Orientation, drawing from options ('L', 'R')('I', 'S')('P', 'S').
        Default: 'RAS'

    Returns
    -------
    reoriented : niimg_like
        Re-oriented image
    """
    source_ornt = nib.io_orientation(image.affine)
    target_ornt = nib.orientations.axcodes2ornt(orientation)
    # Apply the source->target orientation transform and return the result.
    xfm = nib.orientations.ornt_transform(source_ornt, target_ornt)
    return image.as_reoriented(xfm)
Esempio n. 24
0
    def track_shm(self):
        """Run spherical-harmonic-model tractography and return streamlines."""

        data, voxel_size, affine, fa, bvec, bval = self.all_inputs.read_data()
        self.voxel_size = voxel_size
        self.affine = affine
        self.shape = fa.shape

        # Build the SH model and its sampling sphere.
        model_type = all_shmodels[self.model_type]
        model = model_type(self.sh_order, bval, bvec, self.Lambda)
        verts, edges, faces = create_half_unit_sphere(self.sphere_coverage)
        verts, pot = disperse_charges(verts, 40)
        model.set_sampling_points(verts, edges)

        # Optional spatial smoothing of the raw data (in place).
        if self.smoothing_kernel is not None:
            kernel = self.smoothing_kernel.get_kernel()
            data = np.asarray(data, 'float')
            convolve(data, kernel, data)

        data = normalize_data(data, bval, self.min_signal)
        # Optional residual-bootstrap resampling of the input data.
        if self.bootstrap_input:
            H = hat(model.B)
            R = lcr_matrix(H)
            dmin = data.min()
            data = bootstrap_data_array(data, H, R)
            data.clip(dmin, 1., data)

        # Tracking mask from the FA threshold, optionally excluding targets.
        mask = fa > self.fa_threshold
        _hack(mask)
        targets = [read_roi(tgt, shape=self.shape) for tgt in self.targets]
        if self.stop_on_target:
            for target_mask in targets:
                mask = mask & ~target_mask
        interpolator_type = all_interpolators[self.interpolator]
        interpolator = interpolator_type(data, voxel_size, mask)

        seed_mask = read_roi(self.seed_roi, shape=self.shape)
        seeds = seeds_from_mask(seed_mask, self.seed_density, voxel_size)

        # First pass: unconstrained angle, used only to pick a starting step
        # per seed close to self.start_direction (mapped into data axes).
        peak_finder = ClosestPeakSelector(model, interpolator,
                                          self.min_relative_peak,
                                          self.min_peak_spacing)
        peak_finder.angle_limit = 90
        start_steps = []
        data_ornt = nib.io_orientation(self.affine)
        ind = np.asarray(data_ornt[:, 0], 'int')
        best_start = self.start_direction[ind]
        best_start *= data_ornt[:, 1]
        best_start /= np.sqrt((best_start * best_start).sum())
        for ii in seeds:
            try:
                step = peak_finder.next_step(ii, best_start)
                start_steps.append(step)
            except StopIteration:
                # No peak found at this seed: fall back to the global start.
                start_steps.append(best_start)
        # Second pass: the actual tracker, optionally probabilistic.
        if self.probabilistic:
            interpolator = ResidualBootstrapWrapper(interpolator, model.B,
                                                    data.min())
        peak_finder = ClosestPeakSelector(model, interpolator,
                                          self.min_relative_peak,
                                          self.min_peak_spacing)
        peak_finder.angle_limit = self.max_turn_angle
        integrator = BoundryIntegrator(voxel_size, overstep=.1)
        streamlines = generate_streamlines(peak_finder, integrator, seeds,
                                           start_steps)
        # Optionally track the opposite direction and merge both halves.
        if self.track_two_directions:
            start_steps = [-ii for ii in start_steps]
            streamlinesB = generate_streamlines(peak_finder, integrator, seeds,
                                                start_steps)
            streamlines = merge_streamlines(streamlines, streamlinesB)

        # Keep only streamlines passing through every target ROI.
        for target_mask in targets:
            streamlines = target(streamlines, target_mask, voxel_size)

        return streamlines
Esempio n. 25
0
def run(args):
  """Convert a labelled NIfTI volume into per-slice RGB PNG files.

  Loads ``args.nifti_src``, reorients the data with nibabel, then for every
  slice along ``args.slice_dir`` maps each integer label to an RGB colour
  using the JSON colormap in ``args.json_colmap`` and writes the result as a
  PNG under ``<args.svg_dest>/png``.
  """
  print('Input Nifti: ' + args.nifti_src)
  print('Input JSON colormap file: ' + args.json_colmap)
  try:
    import nibabel
    nii = nibabel.load(args.nifti_src)
    img = numpy.squeeze(nii.get_data())
    hdr = nii.get_header()
    q = hdr.get_best_affine()
    ornt = nibabel.io_orientation(q)
    img = nibabel.apply_orientation(img, ornt)
    dims = img.shape
    print('Nifti image loaded, data type "{}"'.format(img.dtype))

    global SliceDirs
    # Lookup also validates the slice direction (KeyError on a bad value)
    slice_dir_orientation = SliceDirs[args.slice_dir]
    slice_dir_index = {'x': 0, 'y': 1, 'z': 2}[args.slice_dir]
    numSlices = img.shape[slice_dir_index]
    maxSlices = 1024
    if numSlices > maxSlices:
      raise RuntimeError('too many slices (more than ' + str(maxSlices) + ')')

    baseName = op.basename(args.nifti_src)
    # BUG FIX: dots escaped -- '.gz$' would also strip e.g. 'agz'.
    baseName = re.sub(r'\.gz$', '', baseName)
    baseName = re.sub(r'\.nii$', '', baseName)
    pngFolder = op.join(args.svg_dest, 'png')
    if not op.exists(pngFolder):
      os.makedirs(pngFolder)
    print('Created output folder "{}".'.format(pngFolder))

    with open(args.json_colmap, 'r') as fp:
      colmap = json.load(fp)

    lookUpTable = {}
    for a in colmap:
      lookUpTable[a] = hex2rgb(colmap[a])

    if len(dims) == 4:
      print('Error: NIFTI file with RGB color data not supported yet.')
      exit(0)

    for i in range(0, numSlices):
      # Transpose: images are written row-major (first dim = rows), which must
      # be flipped to match the usual x/y axis convention.  The ::-1 mirrors
      # the y-axis because image rows run top-to-bottom.
      # BUG FIX: the original compared strings with 'is' (identity), which is
      # not guaranteed to hold for equal strings; use '==' instead.
      if args.slice_dir == 'x':
        slc = img[i, :, ::-1].squeeze().transpose()
      elif args.slice_dir == 'y':
        slc = img[:, i, ::-1].squeeze().transpose()
      elif args.slice_dir == 'z':
        slc = img[:, ::-1, i].squeeze().transpose()

      # Convert the indexed (label) image to RGB via the colormap lookup table.
      shape = slc.shape
      slc = slc.reshape(-1)
      rgbImg = numpy.zeros(shape=(slc.shape[0], 3), dtype=numpy.uint8)

      for grayvalue in numpy.unique(slc):
        mask = (slc == grayvalue)
        val = lookUpTable[str(grayvalue)]
        rgbImg[mask] = val

      pngFile = baseName + '_{:04d}.png'.format(i)
      # NOTE(review): scipy.misc.toimage was removed in modern SciPy; consider
      # PIL.Image.fromarray when this script is next touched.
      scipy.misc.toimage(rgbImg.reshape(shape[0], shape[1], 3)).save(op.join(pngFolder, pngFile))
      print('image {} saved to png file "{}".'.format(i, pngFile))

  except Exception:
    print("Unexpected error:", sys.exc_info()[0])
    raise
Esempio n. 26
0
    def _run_interface(self, runtime):
        """Reorient the input image to LPS and conform it to the requested
        zooms/shape, writing the conformed image plus the 4x4 transform
        (original voxels -> conformed voxels) to disk.
        """
        # Load image, orient as LPS
        fname = self.inputs.in_file
        orig_img = nb.load(fname)
        reoriented = to_lps(orig_img)

        # Set target shape information
        target_zooms = np.array(self.inputs.target_zooms)
        target_shape = np.array(self.inputs.target_shape)
        target_span = target_shape * target_zooms  # physical extent per axis

        zooms = np.array(reoriented.header.get_zooms()[:3])
        shape = np.array(reoriented.shape[:3])

        # Reconstruct transform from orig to reoriented image
        # NOTE(review): the sibling RAS implementation passes orig_img.affine
        # here rather than reoriented.affine -- confirm which is intended.
        ornt_xfm = nb.orientations.inv_ornt_aff(
            nb.io_orientation(reoriented.affine), orig_img.shape)
        # Identity unless proven otherwise
        target_affine = reoriented.affine.copy()
        conform_xfm = np.eye(4)
        # conform_xfm = np.diag([-1, -1, 1, 1])

        xyz_unit = reoriented.header.get_xyzt_units()[0]
        if xyz_unit == 'unknown':
            # Common assumption; if we're wrong, unlikely to be the only thing that breaks
            xyz_unit = 'mm'

        # Set a 0.05mm threshold to performing rescaling
        atol = {'meter': 1e-5, 'mm': 0.01, 'micron': 10}[xyz_unit]

        # Rescale => change zooms
        # Resize => update image dimensions
        rescale = not np.allclose(zooms, target_zooms, atol=atol)
        resize = not np.all(shape == target_shape)
        if rescale or resize:
            if rescale:
                # Scale the direction cosines so voxel sizes match the target
                scale_factor = target_zooms / zooms
                target_affine[:3, :3] = reoriented.affine[:3, :3].dot(
                    np.diag(scale_factor))

            if resize:
                # The shift is applied after scaling.
                # Use a proportional shift to maintain relative position in dataset
                size_factor = target_span / (zooms * shape)
                # Use integer shifts to avoid unnecessary interpolation
                offset = (reoriented.affine[:3, 3] * size_factor -
                          reoriented.affine[:3, 3])
                target_affine[:3,
                              3] = reoriented.affine[:3,
                                                     3] + offset.astype(int)

            data = nli.resample_img(reoriented, target_affine,
                                    target_shape).get_data()
            conform_xfm = np.linalg.inv(reoriented.affine).dot(target_affine)
            reoriented = reoriented.__class__(data, target_affine,
                                              reoriented.header)

        if self.inputs.deoblique_header:
            # Replace an oblique affine with an axis-aligned one that keeps
            # the per-axis zooms (and their signs) and the translation column.
            is_oblique = np.any(
                np.abs(nb.affines.obliquity(reoriented.affine)) > 0)
            if is_oblique:
                LOGGER.warning("Removing obliquity from image affine")
                new_affine = reoriented.affine.copy()
                new_affine[:, :-1] = 0
                new_affine[(0, 1, 2), (0, 1, 2)] = reoriented.header.get_zooms()[:3] \
                    * np.sign(reoriented.affine[(0, 1, 2), (0, 1, 2)])
                reoriented = nb.Nifti1Image(reoriented.get_fdata(), new_affine,
                                            reoriented.header)

        # Image may be reoriented, rescaled, and/or resized
        if reoriented is not orig_img:
            out_name = fname_presuffix(fname,
                                       suffix='_lps',
                                       newpath=runtime.cwd)
            reoriented.to_filename(out_name)
            # Composite original->conformed transform; only warn on mismatch
            transform = ornt_xfm.dot(conform_xfm)
            if not np.allclose(orig_img.affine.dot(transform), target_affine):
                LOGGER.warning("Check alignment of anatomical image.")

        else:
            out_name = fname
            transform = np.eye(4)

        mat_name = fname_presuffix(fname,
                                   suffix='.mat',
                                   newpath=runtime.cwd,
                                   use_ext=False)
        np.savetxt(mat_name, transform, fmt='%.08f')
        self._results['transform'] = mat_name
        self._results['out_file'] = out_name

        return runtime
Esempio n. 27
0
    def _run_interface(self, runtime):
        """Reorient the input image to RAS and conform it to the requested
        zooms/shape, writing the conformed image plus the 4x4 transform
        (original voxels -> conformed voxels) to disk.
        """
        # Load image, orient as RAS
        fname = self.inputs.in_file
        orig_img = nb.load(fname)
        reoriented = nb.as_closest_canonical(orig_img)

        # Set target shape information
        target_zooms = np.array(self.inputs.target_zooms)
        target_shape = np.array(self.inputs.target_shape)
        target_span = target_shape * target_zooms  # physical extent per axis

        zooms = np.array(reoriented.header.get_zooms()[:3])
        shape = np.array(reoriented.shape[:3])

        # Reconstruct transform from orig to reoriented image
        ornt_xfm = nb.orientations.inv_ornt_aff(
            nb.io_orientation(orig_img.affine), orig_img.shape)
        # Identity unless proven otherwise
        target_affine = reoriented.affine.copy()
        conform_xfm = np.eye(4)

        xyz_unit = reoriented.header.get_xyzt_units()[0]
        if xyz_unit == 'unknown':
            # Common assumption; if we're wrong, unlikely to be the only thing that breaks
            xyz_unit = 'mm'

        # Set a 0.05mm threshold to performing rescaling
        atol = {'meter': 1e-5, 'mm': 0.01, 'micron': 10}[xyz_unit]

        # Rescale => change zooms
        # Resize => update image dimensions
        rescale = not np.allclose(zooms, target_zooms, atol=atol)
        resize = not np.all(shape == target_shape)
        if rescale or resize:
            if rescale:
                # Scale the direction cosines so voxel sizes match the target
                scale_factor = target_zooms / zooms
                target_affine[:3, :3] = reoriented.affine[:3, :3].dot(
                    np.diag(scale_factor))

            if resize:
                # The shift is applied after scaling.
                # Use a proportional shift to maintain relative position in dataset
                size_factor = target_span / (zooms * shape)
                # Use integer shifts to avoid unnecessary interpolation
                offset = (reoriented.affine[:3, 3] * size_factor -
                          reoriented.affine[:3, 3])
                target_affine[:3,
                              3] = reoriented.affine[:3,
                                                     3] + offset.astype(int)

            # NOTE(review): .dataobj defers loading; sibling versions call
            # .get_data() here -- confirm the lazy proxy is acceptable
            # downstream.
            data = nli.resample_img(reoriented, target_affine,
                                    target_shape).dataobj
            conform_xfm = np.linalg.inv(reoriented.affine).dot(target_affine)
            reoriented = reoriented.__class__(data, target_affine,
                                              reoriented.header)

        # Image may be reoriented, rescaled, and/or resized
        if reoriented is not orig_img:
            out_name = fname_presuffix(fname,
                                       suffix='_ras',
                                       newpath=runtime.cwd)
            reoriented.to_filename(out_name)
        else:
            out_name = fname

        # Composite original->conformed voxel transform; sanity-check it
        transform = ornt_xfm.dot(conform_xfm)
        if not np.allclose(orig_img.affine.dot(transform), target_affine):
            raise ValueError("Original and target affines are not similar")

        mat_name = fname_presuffix(fname,
                                   suffix='.mat',
                                   newpath=runtime.cwd,
                                   use_ext=False)
        np.savetxt(mat_name, transform, fmt='%.08f')

        self._results['out_file'] = out_name
        self._results['transform'] = mat_name

        return runtime
def run(args):
  """Export every slice of a NIfTI volume along ``args.slice_dir`` as an
  image file (png, or jpg for rescaled 3-channel output), colored via
  ``args.colormap``.
  """
  print('Input Nifti: ' + args.nifti_src)
  print('Colormap to use: ' + args.colormap)
  try:
    import nibabel
    nii = nibabel.load(args.nifti_src)
    img = numpy.squeeze(nii.get_data())
    hdr = nii.get_header()
    q = hdr.get_best_affine()
    ornt = nibabel.io_orientation(q)
    img = nibabel.apply_orientation(img, ornt)
    dims = img.shape
    print('Nifti image loaded, data type "{}"'.format(img.dtype))

    global SliceDirs
    # Lookup also validates the slice direction (KeyError on a bad value)
    slice_dir_orientation = SliceDirs[args.slice_dir]
    slice_dir_index = {'x': 0, 'y': 1, 'z': 2}[args.slice_dir]
    numSlices = img.shape[slice_dir_index]
    maxSlices = 2048
    if numSlices > maxSlices:
      raise Exception('too many slices (more than ' + str(maxSlices) + ')')

    baseName = op.basename(args.nifti_src)
    # BUG FIX: dots escaped -- '.gz$' would also strip e.g. 'agz'.
    baseName = re.sub(r'\.gz$', '', baseName)
    baseName = re.sub(r'\.nii$', '', baseName)
    outFolder = args.out
    if not op.exists(outFolder):
      os.makedirs(outFolder)
    print('Created output folder "{}".'.format(outFolder))

    if len(dims) == 4:
      raise Exception('NIFTI file with RGB color data not supported yet.')

    index2rgb = nit.parse_colormap(img, args.colormap)
    if isinstance(index2rgb, dict):
        # BUG FIX: dict.keys() is a non-indexable view on Python 3; use
        # next(iter(...)) to grab an arbitrary entry instead of keys()[0].
        rgbLen = len(index2rgb[next(iter(index2rgb))])
    else:
        rgbLen = len(index2rgb[0])

    rescale = bool(args.pctile)
    minmax = [None, None]
    if rescale:
        minmax = nit.get_limits(img, args.pctile)

    fmt = 'png'
    # BUG FIX: was `rgbLen is 3` -- identity comparison with an int literal
    # is implementation-dependent; compare values with ==.
    if rescale and rgbLen == 3:
        fmt = 'jpg'

    for i in range(0, numSlices):
        # NOTE(review): slicing uses args.dim while numSlices was derived from
        # args.slice_dir -- confirm these two options always agree.
        dim = args.dim
        slc = nit.get_slice(img, dim, i)
        if index2rgb:
            slc = nit.slice2rgb(slc, index2rgb, rescale, minmax[0], minmax[1])

        # Save image
        outFile = baseName + '_{:04d}.{}'.format(i, fmt)
        scipy.misc.toimage(slc).save(op.join(outFolder, outFile))

        if i == 0:
            print('image {}{} saved to {} file "{}".'.format(dim, i, fmt, outFile))

  except Exception:
    print("Unexpected error:", sys.exc_info()[0])
    raise
Esempio n. 29
0
    def track_shm(self, debug=False):
        """Run spherical-harmonic fiber tracking and return the streamlines.

        Pipeline: build a sampling sphere, fit the configured SH model to the
        DWI data, seed from ``self.seed_roi``, pick per-seed start directions,
        generate streamlines (optionally in both directions and/or with
        residual-bootstrap probabilistic tracking), then filter them through
        the target ROIs.
        """
        if self.sphere_coverage > 7 or self.sphere_coverage < 1:
            raise ValueError("sphere coverage must be between 1 and 7")
        # Sampling directions: half unit sphere, relaxed by charge dispersion
        verts, edges, faces = create_half_unit_sphere(self.sphere_coverage)
        verts, pot = disperse_charges(verts, 10, .3)

        data, voxel_size, affine, fa, bvec, bval = self.all_inputs.read_data()
        self.voxel_size = voxel_size
        self.affine = affine
        self.shape = fa.shape

        model_type = all_shmodels[self.model_type]
        model = model_type(self.sh_order, bval, bvec, self.Lambda)
        model.set_sampling_points(verts, edges)

        data = np.asarray(data, dtype='float', order='C')
        if self.smoothing_kernel is not None:
            kernel = self.smoothing_kernel.get_kernel()
            convolve(data, kernel, out=data)

        # Normalize the signal, then drop the b0 volumes themselves
        normalize_data(data, bval, self.min_signal, out=data)
        dmin = data.min()
        data = data[..., lazy_index(bval > 0)]
        if self.bootstrap_input:
            if self.bootstrap_vector.size == 0:
                n = data.shape[-1]
                self.bootstrap_vector = np.random.randint(n, size=n)
            H = hat(model.B)
            R = lcr_matrix(H)
            data = bootstrap_data_array(data, H, R, self.bootstrap_vector)
            data.clip(dmin, out=data)

        mask = fa > self.fa_threshold
        targets = [read_roi(tgt, shape=self.shape) for tgt in self.targets]
        if self.stop_on_target:
            # Tracking should stop at targets, so exclude them from the mask
            for target_mask in targets:
                mask = mask & ~target_mask

        seed_mask = read_roi(self.seed_roi, shape=self.shape)
        seeds = seeds_from_mask(seed_mask, self.seed_density, voxel_size)

        # Optimized nearest-neighbor path only for deterministic, non-debug runs
        if ((self.interpolator == 'NearestNeighbor' and not self.probabilistic
             and not debug)):
            using_optimze = True
            peak_finder = NND_ClosestPeakSelector(model, data, mask,
                                                  voxel_size)
        else:
            using_optimze = False
            interpolator_type = all_interpolators[self.interpolator]
            interpolator = interpolator_type(data, voxel_size, mask)
            peak_finder = ClosestPeakSelector(model, interpolator)

        # Set peak_finder parameters for start steps
        peak_finder.angle_limit = 90
        model.peak_spacing = self.min_peak_spacing
        if self.seed_largest_peak:
            model.min_relative_peak = 1
        else:
            model.min_relative_peak = self.min_relative_peak

        # Express the RAS start direction in the data's own axis ordering
        data_ornt = nib.io_orientation(self.affine)
        best_start = reorient_vectors(self.start_direction, 'ras', data_ornt)
        start_steps = closest_start(seeds, peak_finder, best_start)

        if self.probabilistic:
            interpolator = ResidualBootstrapWrapper(interpolator,
                                                    model.B,
                                                    min_signal=dmin)
            peak_finder = ClosestPeakSelector(model, interpolator)
        elif using_optimze and self.seed_largest_peak:
            peak_finder.reset_cache()

        # Reset peak_finder parameters for tracking
        peak_finder.angle_limit = self.max_turn_angle
        model.peak_spacing = self.min_peak_spacing
        model.min_relative_peak = self.min_relative_peak

        integrator = BoundryIntegrator(voxel_size, overstep=.1)
        streamlines = generate_streamlines(peak_finder, integrator, seeds,
                                           start_steps)
        if self.track_two_directions:
            # Track the opposite direction from every seed and merge the sets
            start_steps = -start_steps
            streamlinesB = generate_streamlines(peak_finder, integrator, seeds,
                                                start_steps)
            streamlines = merge_streamlines(streamlines, streamlinesB)

        for target_mask in targets:
            streamlines = target(streamlines, target_mask, voxel_size)

        return streamlines
Esempio n. 30
0
def run(args):
    """Render slice images for each NIfTI layer in ``args.layers`` and emit
    an HTML viewer (based on ``nii_inspect.html``) that references them.
    """
    print('Layer specification: {}'.format(args.layers))
    try:
        htmlFile = args.out
        if op.isdir(htmlFile):
            htmlFile = op.join(htmlFile, 'index.html')
        htmlFolder = op.dirname(htmlFile)
        if args.out_images:
            imgFolder = args.out_images
        else:
            htmlName, htmlExt = op.splitext(op.basename(htmlFile))
            imgFolder = op.join(htmlFolder, htmlName + '_files')

        if not op.exists(htmlFolder):
            os.makedirs(htmlFolder)
            print('Created html output folder "{}".'.format(htmlFolder))
        if not op.exists(imgFolder):
            os.makedirs(imgFolder)
            print('Created image output folder "{}".'.format(imgFolder))
        imgFolder = op.realpath(imgFolder)
        scriptDir = op.realpath(op.dirname(__file__))

        parsedLayers = []
        # BUG FIX: the layer index was `i`, which the slice loop below
        # shadowed; renamed to `li`.
        for li, lr in enumerate(args.layers):
            nifti_src = lr["file"]
            if not nifti_src:
                continue
            # BUG FIX: dots escaped so the pattern only matches real suffixes.
            baseName = re.sub(r'(\.nii|\.nii\.gz)$', '', op.basename(nifti_src))

            import nibabel
            nii = nibabel.load(nifti_src)
            img = numpy.squeeze(nii.get_data())
            hdr = nii.get_header()
            q = hdr.get_best_affine()
            ornt = nibabel.io_orientation(q)
            img = nibabel.apply_orientation(img, ornt)
            dims = img.shape
            print('Nifti image loaded, data type "{}"'.format(img.dtype))

            if len(dims) == 4:
                raise Exception(
                    'NIFTI file with RGB color data not supported yet.')

            # apply colormap
            index2rgb = None
            # BUG FIX: rgbLen was never assigned, raising NameError below;
            # derive it from the colormap entries (cf. the dict/list handling
            # in the sibling exporter).
            rgbLen = 0
            if "colormap" in lr:
                index2rgb = niitools.parse_colormap(lr["colormap"])
                if isinstance(index2rgb, dict):
                    rgbLen = len(index2rgb[next(iter(index2rgb))])
                elif index2rgb:
                    rgbLen = len(index2rgb[0])

            minmax = [None, None]
            rescale = "pctile" in args
            if rescale:
                minmax = niitools.get_limits(img, args.pctile)

            fmt = 'png'
            # BUG FIX: was `rgbLen is 3` (identity test on an int literal);
            # use == for value comparison.
            if rescale and rgbLen == 3:
                fmt = 'jpg'

            sliceRange = [[], [], []]
            for d in [0, 1, 2]:
                dim = ['x', 'y', 'z'][d]
                numSlices = dims[d]
                sliceStep = int(args.sliceRangePct[d][1] * numSlices / 100)
                # Guard against a zero step, which would make range() raise.
                if sliceStep < 1:
                    sliceStep = 1
                sliceStart = int(args.sliceRangePct[d][0] * (numSlices - 1) /
                                 100)
                sliceEnd = int(args.sliceRangePct[d][2] * (numSlices - 1) /
                               100)
                sliceRange[d] = [sliceStart, sliceStep, sliceEnd]
                for i in range(sliceStart, sliceEnd + 1, sliceStep):
                    slc = niitools.get_slice(img, d, i)

                    pngFile = baseName + '_{}{:d}.{}'.format(dim, i, fmt)
                    if index2rgb:
                        slc = niitools.slice2rgb(slc, index2rgb, rescale,
                                                 minmax[0], minmax[1])

                    # Save image to PNG
                    scipy.misc.toimage(slc).save(op.join(imgFolder, pngFile))

                    if i == sliceStart:
                        print('image {}{} saved to png file "{}".'.format(
                            dim, i, pngFile))

            pixdim = hdr['pixdim'][1:4]
            imgsize_mm = [
                round(pixdim[0] * dims[0], 1),
                round(pixdim[1] * dims[1], 1),
                round(pixdim[2] * dims[2], 1)
            ]
            print('Image size in mm {}'.format(imgsize_mm))

            # update parsedLayers
            pl = {
                "name": baseName,
                "ext": fmt,
                "src": nifti_src,
                "imgsize_px": dims,
                "imgsize_mm": imgsize_mm
            }
            if "title" in lr:
                pl["title"] = lr["title"]
            parsedLayers.append(pl)

        inspectFile = '{}/nii_inspect.html'.format(scriptDir)
        with open(inspectFile, 'r') as fp:
            html = fp.read()
            html = html.replace(
                r"var defaultLayers = [];",
                r"var defaultLayers = {};".format(json.dumps(parsedLayers)))
            # NOTE(review): sliceRange is the value from the *last* layer
            # processed -- confirm that is intended.
            html = html.replace(
                r"var defaultSliceRange = [];",
                "var defaultSliceRange = {};".format(json.dumps(sliceRange)))
            html = html.replace(
                r"var imgDir = '';",
                "var imgDir = '{}/';".format(op.relpath(imgFolder,
                                                        htmlFolder)))

        with open(htmlFile, 'w') as fp:
            fp.write(html)

        print('HTML viewer saved as "{}"'.format(htmlFile))

    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        raise
Esempio n. 31
0
    def _run_interface(self, runtime):
        """Reorient the input image to RAS and conform it to the requested
        zooms/shape.

        Writes the conformed image (suffix ``_ras``) and the 4x4 transform
        (original voxels -> conformed voxels) as a ``.mat`` text file, storing
        both paths in ``self._results``.
        """
        # Load image, orient as RAS
        fname = self.inputs.in_file
        orig_img = nb.load(fname)
        reoriented = nb.as_closest_canonical(orig_img)

        # Set target shape information
        target_zooms = np.array(self.inputs.target_zooms)
        target_shape = np.array(self.inputs.target_shape)
        target_span = target_shape * target_zooms  # physical extent per axis

        zooms = np.array(reoriented.header.get_zooms()[:3])
        shape = np.array(reoriented.shape[:3])

        # Reconstruct transform from orig to reoriented image
        ornt_xfm = nb.orientations.inv_ornt_aff(
            nb.io_orientation(orig_img.affine), orig_img.shape)
        # Identity unless proven otherwise
        target_affine = reoriented.affine.copy()
        conform_xfm = np.eye(4)

        xyz_unit = reoriented.header.get_xyzt_units()[0]
        if xyz_unit == 'unknown':
            # Common assumption; if we're wrong, unlikely to be the only thing that breaks
            xyz_unit = 'mm'

        # Set a 0.05mm threshold to performing rescaling
        atol = {'meter': 1e-5, 'mm': 0.01, 'micron': 10}[xyz_unit]

        # Rescale => change zooms
        # Resize => update image dimensions
        rescale = not np.allclose(zooms, target_zooms, atol=atol)
        resize = not np.all(shape == target_shape)
        if rescale or resize:
            if rescale:
                # Scale the direction cosines so voxel sizes match the target
                scale_factor = target_zooms / zooms
                target_affine[:3, :3] = reoriented.affine[:3, :3].dot(np.diag(scale_factor))

            if resize:
                # The shift is applied after scaling.
                # Use a proportional shift to maintain relative position in dataset
                size_factor = target_span / (zooms * shape)
                # Use integer shifts to avoid unnecessary interpolation
                offset = (reoriented.affine[:3, 3] * size_factor - reoriented.affine[:3, 3])
                target_affine[:3, 3] = reoriented.affine[:3, 3] + offset.astype(int)

            data = nli.resample_img(reoriented, target_affine, target_shape).get_data()
            conform_xfm = np.linalg.inv(reoriented.affine).dot(target_affine)
            reoriented = reoriented.__class__(data, target_affine, reoriented.header)

        # Image may be reoriented, rescaled, and/or resized
        if reoriented is not orig_img:
            out_name = fname_presuffix(fname, suffix='_ras', newpath=runtime.cwd)
            reoriented.to_filename(out_name)
        else:
            out_name = fname

        transform = ornt_xfm.dot(conform_xfm)
        # BUG FIX: this was a bare `assert`, which is stripped when Python
        # runs with -O; raise explicitly instead (consistent with the sibling
        # implementation of this interface).
        if not np.allclose(orig_img.affine.dot(transform), target_affine):
            raise ValueError("Original and target affines are not similar")

        mat_name = fname_presuffix(fname, suffix='.mat', newpath=runtime.cwd, use_ext=False)
        np.savetxt(mat_name, transform, fmt='%.08f')

        self._results['out_file'] = out_name
        self._results['transform'] = mat_name

        return runtime
Esempio n. 32
0
    def track_shm(self, debug=False):
        """Run spherical-harmonic fiber tracking and return the streamlines.

        Pipeline: build a sampling sphere, fit the configured SH model to the
        DWI data, seed from ``self.seed_roi``, pick per-seed start directions,
        generate streamlines (optionally in both directions and/or with
        residual-bootstrap probabilistic tracking), then filter them through
        the target ROIs.
        """
        if self.sphere_coverage > 7 or self.sphere_coverage < 1:
            raise ValueError("sphere coverage must be between 1 and 7")
        # Sampling directions: half unit sphere, relaxed by charge dispersion
        verts, edges, faces = create_half_unit_sphere(self.sphere_coverage)
        verts, pot = disperse_charges(verts, 10, .3)

        data, voxel_size, affine, fa, bvec, bval = self.all_inputs.read_data()
        self.voxel_size = voxel_size
        self.affine = affine
        self.shape = fa.shape

        model_type = all_shmodels[self.model_type]
        model = model_type(self.sh_order, bval, bvec, self.Lambda)
        model.set_sampling_points(verts, edges)

        data = np.asarray(data, dtype='float', order='C')
        if self.smoothing_kernel is not None:
            kernel = self.smoothing_kernel.get_kernel()
            convolve(data, kernel, out=data)

        # Normalize the signal, then drop the b0 volumes themselves
        normalize_data(data, bval, self.min_signal, out=data)
        dmin = data.min()
        data = data[..., lazy_index(bval > 0)]
        if self.bootstrap_input:
            if self.bootstrap_vector.size == 0:
                n = data.shape[-1]
                self.bootstrap_vector = np.random.randint(n, size=n)
            H = hat(model.B)
            R = lcr_matrix(H)
            data = bootstrap_data_array(data, H, R, self.bootstrap_vector)
            data.clip(dmin, out=data)

        mask = fa > self.fa_threshold
        targets = [read_roi(tgt, shape=self.shape) for tgt in self.targets]
        if self.stop_on_target:
            # Tracking should stop at targets, so exclude them from the mask
            for target_mask in targets:
                mask = mask & ~target_mask

        seed_mask = read_roi(self.seed_roi, shape=self.shape)
        seeds = seeds_from_mask(seed_mask, self.seed_density, voxel_size)

        # Optimized nearest-neighbor path only for deterministic, non-debug runs
        if ((self.interpolator == 'NearestNeighbor' and not
             self.probabilistic and not debug)):
                using_optimze = True
                peak_finder = NND_ClosestPeakSelector(model, data, mask,
                                                      voxel_size)
        else:
            using_optimze = False
            interpolator_type = all_interpolators[self.interpolator]
            interpolator = interpolator_type(data, voxel_size, mask)
            peak_finder = ClosestPeakSelector(model, interpolator)

        # Set peak_finder parameters for start steps
        peak_finder.angle_limit = 90
        model.peak_spacing = self.min_peak_spacing
        if self.seed_largest_peak:
            model.min_relative_peak = 1
        else:
            model.min_relative_peak = self.min_relative_peak

        # Express the RAS start direction in the data's own axis ordering
        data_ornt = nib.io_orientation(self.affine)
        best_start = reorient_vectors(self.start_direction, 'ras', data_ornt)
        start_steps = closest_start(seeds, peak_finder, best_start)

        if self.probabilistic:
            interpolator = ResidualBootstrapWrapper(interpolator, model.B,
                                                    min_signal=dmin)
            peak_finder = ClosestPeakSelector(model, interpolator)
        elif using_optimze and self.seed_largest_peak:
            peak_finder.reset_cache()

        # Reset peak_finder parameters for tracking
        peak_finder.angle_limit = self.max_turn_angle
        model.peak_spacing = self.min_peak_spacing
        model.min_relative_peak = self.min_relative_peak

        integrator = BoundryIntegrator(voxel_size, overstep=.1)
        streamlines = generate_streamlines(peak_finder, integrator, seeds,
                                           start_steps)
        if self.track_two_directions:
            # Track the opposite direction from every seed and merge the sets
            start_steps = -start_steps
            streamlinesB = generate_streamlines(peak_finder, integrator, seeds,
                                                start_steps)
            streamlines = merge_streamlines(streamlines, streamlinesB)

        for target_mask in targets:
            streamlines = target(streamlines, target_mask, voxel_size)

        return streamlines
def fit_dti_dipy(input_dwi,
                 input_bval,
                 input_bvec,
                 output_dir,
                 fit_type='',
                 mask='',
                 bmax='',
                 mask_tensor='F',
                 bids_fmt=False,
                 bids_id=''):
    """Fit a diffusion tensor model to a DWI dataset and write derived maps.

    The input image is reoriented to RAS (closest canonical) before fitting;
    all derived maps are computed in RAS and then reoriented back to the
    original image orientation on disk.  Outputs include the tensor in
    NIfTI, FSL and MRtrix component orderings, eigenvectors/eigenvalues,
    the usual DTI scalar maps (FA, MD, RD, AD, GA, trace, mode, planarity,
    sphericity, color FA) and the model residuals.

    Parameters
    ----------
    input_dwi : str
        Path to the 4D DWI NIfTI image.
    input_bval, input_bvec : str
        Paths to the b-value / b-vector text files (FSL convention).
    output_dir : str
        Directory where all outputs are written (created if needed).
    fit_type : str, optional
        dipy ``TensorModel`` fit method (e.g. 'WLS', 'OLS', 'RESTORE').
        Empty string selects dipy's default.
    mask : str, optional
        Path to a brain-mask image; empty string fits every voxel.
    bmax : str or number, optional
        If non-empty, volumes with b-value >= bmax are discarded before
        fitting (the tensor model is not valid at high b).
    mask_tensor : str, optional
        Unused; kept for backward interface compatibility.
    bids_fmt : bool, optional
        If True, name outputs with the BIDS-style prefix ``bids_id``.
    bids_id : str, optional
        Subject/session prefix used when ``bids_fmt`` is True.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    img = nib.load(input_dwi)
    axis_orient = nib.aff2axcodes(img.affine)

    # Work in RAS so tensor/vector component ordering is predictable.
    ras_img = nib.as_closest_canonical(img)
    data = ras_img.get_data()

    bvals, bvecs = read_bvals_bvecs(input_bval, input_bvec)
    # b-vectors are expressed in the original image frame; rotate to RAS
    # to match the reoriented data.
    bvecs = reorient_vectors(bvecs,
                             axis_orient[0] + axis_orient[1] + axis_orient[2],
                             'RAS',
                             axis=1)

    if mask != '':
        mask_img = nib.as_closest_canonical(nib.load(mask))
        mask_data = mask_img.get_data()

    if bmax != "":
        # Drop high-b shells the tensor model cannot represent well.
        jj = np.where(bvals >= bmax)
        bvals = np.delete(bvals, jj)
        bvecs = np.delete(bvecs, jj, 0)
        data = np.delete(data, jj, axis=3)

    # Average all minimum-b (b0) volumes for use as S0 in the prediction.
    ii = np.where(bvals == bvals.min())[0]
    b0_average = np.mean(data[:, :, :, ii], axis=3)

    gtab = gradient_table(bvals, bvecs)

    # Build the model; RESTORE additionally needs a noise estimate,
    # averaged over the b0 volumes.
    if fit_type == 'RESTORE':
        sigma = estimate_sigma(data)
        sigma = np.mean(sigma[ii])
        dti_model = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
    elif fit_type != '':
        dti_model = dti.TensorModel(gtab, fit_method=fit_type)
    else:
        dti_model = dti.TensorModel(gtab)

    # Fit once; the mask/no-mask choice was previously triplicated.
    if mask != '':
        dti_fit = dti_model.fit(data, mask_data)
    else:
        dti_fit = dti_model.fit(data)

    # Model residuals: |measured - predicted| signal per volume.
    estimate_data = dti_fit.predict(gtab, S0=b0_average)
    residuals = np.absolute(data - estimate_data)

    # Lower-triangular order from dipy: Dxx, Dxy, Dyy, Dxz, Dyz, Dzz.
    tensor = dti.lower_triangular(dti_fit.quadratic_form.astype(np.float32))
    evecs = dti_fit.evecs.astype(np.float32)
    evals = dti_fit.evals.astype(np.float32)

    output_imgs = []

    #Define output imgs
    if bids_fmt:
        output_tensor_nifti = output_dir + '/' + bids_id + '_model-DTI_parameter-TENSOR.nii.gz'
        output_tensor_fsl = output_dir + '/' + bids_id + '_model-DTI_parameter-FSL_TENSOR.nii.gz'
        output_tensor_mrtrix = output_dir + '/' + bids_id + '_model-DTI_parameter-MRTRIX_TENSOR.nii.gz'

        output_V1 = output_dir + '/' + bids_id + '_model-DTI_parameter-V1.nii.gz'
        output_V2 = output_dir + '/' + bids_id + '_model-DTI_parameter-V2.nii.gz'
        output_V3 = output_dir + '/' + bids_id + '_model-DTI_parameter-V3.nii.gz'
        output_FSL_V1 = output_dir + '/' + bids_id + '_model-DTI_parameter-FSL_V1.nii.gz'
        output_FSL_V2 = output_dir + '/' + bids_id + '_model-DTI_parameter-FSL_V2.nii.gz'
        output_FSL_V3 = output_dir + '/' + bids_id + '_model-DTI_parameter-FSL_V3.nii.gz'

        output_L1 = output_dir + '/' + bids_id + '_model-DTI_parameter-L1.nii.gz'
        output_L2 = output_dir + '/' + bids_id + '_model-DTI_parameter-L2.nii.gz'
        output_L3 = output_dir + '/' + bids_id + '_model-DTI_parameter-L3.nii.gz'

        output_fa = output_dir + '/' + bids_id + '_model-DTI_parameter-FA.nii.gz'
        output_md = output_dir + '/' + bids_id + '_model-DTI_parameter-MD.nii.gz'
        output_rd = output_dir + '/' + bids_id + '_model-DTI_parameter-RD.nii.gz'
        output_ad = output_dir + '/' + bids_id + '_model-DTI_parameter-AD.nii.gz'
        output_tr = output_dir + '/' + bids_id + '_model-DTI_parameter-TRACE.nii.gz'

        output_ga = output_dir + '/' + bids_id + '_model-DTI_parameter-GA.nii.gz'
        output_color_fa = output_dir + '/' + bids_id + '_model-DTI_parameter-COLOR_FA.nii.gz'

        output_PL = output_dir + '/' + bids_id + '_model-DTI_parameter-PLANARITY.nii.gz'
        output_SP = output_dir + '/' + bids_id + '_model-DTI_parameter-SPHERICITY.nii.gz'
        output_MO = output_dir + '/' + bids_id + '_model-DTI_parameter-MODE.nii.gz'

        output_res = output_dir + '/' + bids_id + '_model-DTI_parameter-RESIDUALS.nii.gz'

    else:
        output_tensor_fsl = output_dir + '/dti_FSL_TENSOR.nii.gz'
        output_tensor_nifti = output_dir + '/dti_TENSOR.nii.gz'
        output_tensor_mrtrix = output_dir + '/dti_MRTRIX_TENSOR.nii.gz'

        output_V1 = output_dir + '/dti_V1.nii.gz'
        output_V2 = output_dir + '/dti_V2.nii.gz'
        output_V3 = output_dir + '/dti_V3.nii.gz'
        output_FSL_V1 = output_dir + '/dti_FSL_V1.nii.gz'
        output_FSL_V2 = output_dir + '/dti_FSL_V2.nii.gz'
        output_FSL_V3 = output_dir + '/dti_FSL_V3.nii.gz'

        output_L1 = output_dir + '/dti_L1.nii.gz'
        output_L2 = output_dir + '/dti_L2.nii.gz'
        output_L3 = output_dir + '/dti_L3.nii.gz'

        output_fa = output_dir + '/dti_FA.nii.gz'
        output_md = output_dir + '/dti_MD.nii.gz'
        output_rd = output_dir + '/dti_RD.nii.gz'
        output_ad = output_dir + '/dti_AD.nii.gz'
        output_tr = output_dir + '/dti_TRACE.nii.gz'

        output_ga = output_dir + '/dti_GA.nii.gz'
        output_color_fa = output_dir + '/dti_COLOR_FA.nii.gz'

        output_PL = output_dir + '/dti_PLANARITY.nii.gz'
        output_SP = output_dir + '/dti_SPHERICITY.nii.gz'
        output_MO = output_dir + '/dti_MODE.nii.gz'

        output_res = output_dir + '/dti_RESIDUALS.nii.gz'

    tensor_img = nifti1_symmat(tensor, ras_img.affine, ras_img.header)
    # BUG FIX: the original assigned to `.set_intent` instead of calling it,
    # silently leaving the intent code unset.  1005 == NIFTI_INTENT_SYMMATRIX.
    tensor_img.header.set_intent(1005)
    tensor_img.to_filename(output_tensor_nifti)

    # FSL component order: Dxx, Dxy, Dxz, Dyy, Dyz, Dzz.
    tensor_fsl = np.empty(tensor.shape)
    tensor_fsl[:, :, :, 0] = tensor[:, :, :, 0]
    tensor_fsl[:, :, :, 1] = tensor[:, :, :, 1]
    tensor_fsl[:, :, :, 2] = tensor[:, :, :, 3]
    tensor_fsl[:, :, :, 3] = tensor[:, :, :, 2]
    tensor_fsl[:, :, :, 4] = tensor[:, :, :, 4]
    tensor_fsl[:, :, :, 5] = tensor[:, :, :, 5]
    save_nifti(output_tensor_fsl, tensor_fsl, ras_img.affine, ras_img.header)

    # MRtrix component order: Dxx, Dyy, Dzz, Dxy, Dxz, Dyz.
    tensor_mrtrix = np.empty(tensor.shape)
    tensor_mrtrix[:, :, :, 0] = tensor[:, :, :, 0]
    tensor_mrtrix[:, :, :, 1] = tensor[:, :, :, 2]
    tensor_mrtrix[:, :, :, 2] = tensor[:, :, :, 5]
    tensor_mrtrix[:, :, :, 3] = tensor[:, :, :, 1]
    tensor_mrtrix[:, :, :, 4] = tensor[:, :, :, 3]
    tensor_mrtrix[:, :, :, 5] = tensor[:, :, :, 4]
    save_nifti(output_tensor_mrtrix, tensor_mrtrix, ras_img.affine,
               ras_img.header)

    fa = dti_fit.fa
    color_fa = dti_fit.color_fa
    md = dti_fit.md
    rd = dti_fit.rd
    ad = dti_fit.ad
    ga = dti_fit.ga
    trace = dti_fit.trace
    dti_mode = dti_fit.mode
    dti_planarity = dti_fit.planarity
    dti_sphericity = dti_fit.sphericity

    # Replace NaNs (typically background voxels) with zero in every map.
    fa[np.isnan(fa)] = 0
    color_fa[np.isnan(color_fa)] = 0
    md[np.isnan(md)] = 0
    rd[np.isnan(rd)] = 0
    ad[np.isnan(ad)] = 0
    ga[np.isnan(ga)] = 0
    trace[np.isnan(trace)] = 0
    dti_mode[np.isnan(dti_mode)] = 0
    dti_planarity[np.isnan(dti_planarity)] = 0
    dti_sphericity[np.isnan(dti_sphericity)] = 0

    save_nifti(output_V1, evecs[:, :, :, :, 0], ras_img.affine, ras_img.header)
    save_nifti(output_V2, evecs[:, :, :, :, 1], ras_img.affine, ras_img.header)
    save_nifti(output_V3, evecs[:, :, :, :, 2], ras_img.affine, ras_img.header)

    save_nifti(output_L1, evals[:, :, :, 0], ras_img.affine, ras_img.header)
    save_nifti(output_L2, evals[:, :, :, 1], ras_img.affine, ras_img.header)
    save_nifti(output_L3, evals[:, :, :, 2], ras_img.affine, ras_img.header)

    save_nifti(output_fa, fa, ras_img.affine, ras_img.header)
    save_nifti(output_color_fa, color_fa, ras_img.affine, ras_img.header)
    save_nifti(output_md, md, ras_img.affine, ras_img.header)
    save_nifti(output_ad, ad, ras_img.affine, ras_img.header)
    save_nifti(output_rd, rd, ras_img.affine, ras_img.header)
    save_nifti(output_ga, ga, ras_img.affine, ras_img.header)
    save_nifti(output_tr, trace, ras_img.affine, ras_img.header)
    save_nifti(output_PL, dti_planarity, ras_img.affine, ras_img.header)
    save_nifti(output_SP, dti_sphericity, ras_img.affine, ras_img.header)
    save_nifti(output_MO, dti_mode, ras_img.affine, ras_img.header)
    save_nifti(output_res, residuals, ras_img.affine, ras_img.header)

    # Every image written so far needs to be rotated back to the original
    # orientation (FSL_V1..V3 are derived afterwards, from the reoriented V1..V3).
    output_imgs.extend([
        output_tensor_nifti, output_tensor_fsl, output_tensor_mrtrix,
        output_V1, output_V2, output_V3,
        output_L1, output_L2, output_L3,
        output_fa, output_md, output_rd, output_ad, output_ga,
        output_color_fa, output_PL, output_SP, output_MO, output_res,
    ])

    # Transform mapping the RAS working orientation back to the original.
    orig_ornt = nib.io_orientation(ras_img.affine)
    targ_ornt = nib.io_orientation(img.affine)
    transform = nib.orientations.ornt_transform(orig_ornt, targ_ornt)
    affine_xfm = nib.orientations.inv_ornt_aff(transform, ras_img.shape)
    trans_mat = affine_xfm[0:3, 0:3]

    for img_path in output_imgs:
        orig_img = nib.load(img_path)
        reoriented = orig_img.as_reoriented(transform)
        reoriented.to_filename(img_path)

    # Correct FSL tensor components for the voxel reorientation.  Each unit
    # "direction" below stands for one tensor component (xx, xy, xz, yy, yz, zz).
    dirs = []
    dirs.append(np.array([[1], [0], [0]]))
    dirs.append(np.array([[1], [1], [0]]))
    dirs.append(np.array([[1], [0], [1]]))
    dirs.append(np.array([[0], [1], [0]]))
    dirs.append(np.array([[0], [1], [1]]))
    dirs.append(np.array([[0], [0], [1]]))

    tensor_fsl = nib.load(output_tensor_fsl)
    corr_fsl_tensor = np.empty(tensor_fsl.get_data().shape)

    for i in range(0, len(dirs)):

        rot_dir = np.matmul(trans_mat, dirs[i])
        # An off-diagonal component changes sign when exactly one of its two
        # axes is flipped; in that case the rotated direction sums to zero.
        sign = 1.0
        if np.sum(rot_dir) == 0.0:
            sign = -1.0

        if (np.absolute(rot_dir) == np.array([[1], [0], [0]])).all():
            tensor_ind = 0
        elif (np.absolute(rot_dir) == np.array([[1], [1], [0]])).all():
            tensor_ind = 1
        elif (np.absolute(rot_dir) == np.array([[1], [0], [1]])).all():
            tensor_ind = 2
        elif (np.absolute(rot_dir) == np.array([[0], [1], [0]])).all():
            tensor_ind = 3
        elif (np.absolute(rot_dir) == np.array([[0], [1], [1]])).all():
            tensor_ind = 4
        elif (np.absolute(rot_dir) == np.array([[0], [0], [1]])).all():
            tensor_ind = 5
        else:
            # Previously `tensor_ind` from the prior iteration would be
            # silently reused; fail loudly instead.
            raise ValueError(
                'Unrecognized tensor component direction: {}'.format(
                    rot_dir.ravel()))

        corr_fsl_tensor[:, :, :,
                        i] = sign * tensor_fsl.get_data()[:, :, :, tensor_ind]

    save_nifti(output_tensor_fsl, corr_fsl_tensor, tensor_fsl.affine,
               tensor_fsl.header)

    # Now correct the eigenvectors: permute and sign-flip the xyz components
    # according to the target orientation.
    vec_order = np.transpose(targ_ornt[:, 0]).astype(int)
    sign_order = np.transpose(targ_ornt[:, 1]).astype(int)

    fsl_v1 = nib.load(output_V1)
    corr_fsl_v1 = fsl_v1.get_data()[:, :, :, vec_order]
    # BUG FIX: was `range(0, 2)`, which skipped the third (z) component.
    for i in range(3):
        corr_fsl_v1[:, :, :, i] = sign_order[i] * corr_fsl_v1[:, :, :, i]

    save_nifti(output_FSL_V1, corr_fsl_v1, fsl_v1.affine, fsl_v1.header)

    fsl_v2 = nib.load(output_V2)
    corr_fsl_v2 = fsl_v2.get_data()[:, :, :, vec_order]
    for i in range(3):
        corr_fsl_v2[:, :, :, i] = sign_order[i] * corr_fsl_v2[:, :, :, i]

    save_nifti(output_FSL_V2, corr_fsl_v2, fsl_v2.affine, fsl_v2.header)

    fsl_v3 = nib.load(output_V3)
    corr_fsl_v3 = fsl_v3.get_data()[:, :, :, vec_order]
    for i in range(3):
        corr_fsl_v3[:, :, :, i] = sign_order[i] * corr_fsl_v3[:, :, :, i]

    save_nifti(output_FSL_V3, corr_fsl_v3, fsl_v3.affine, fsl_v3.header)
Esempio n. 34
0
    def _run_interface(self, runtime):
        """Conform an image to target voxel zooms and shape in RAS orientation.

        Reorients the input to closest-canonical (RAS), then — when the
        zooms/shape differ from the requested targets — rescales and/or
        resamples it.  Writes the conformed image (suffix ``_ras``) and a
        4x4 text transform (suffix ``.mat``) mapping the original image
        to the conformed one.  Results are stored in
        ``self._results['out_file']`` / ``['transform']``.
        """
        # Load image, orient as RAS
        fname = self.inputs.in_file
        orig_img = nb.load(fname)
        reoriented = nb.as_closest_canonical(orig_img)

        # Set target shape information
        target_zooms = np.array(self.inputs.target_zooms)
        target_shape = np.array(self.inputs.target_shape)
        # Physical extent (zooms * voxels) the conformed image must span.
        target_span = target_shape * target_zooms

        zooms = np.array(reoriented.header.get_zooms()[:3])
        shape = np.array(reoriented.shape[:3])

        # Reconstruct transform from orig to reoriented image
        ornt_xfm = nb.orientations.inv_ornt_aff(
            nb.io_orientation(orig_img.affine), orig_img.shape)
        # Identity unless proven otherwise
        target_affine = reoriented.affine.copy()
        conform_xfm = np.eye(4)

        xyz_unit = reoriented.header.get_xyzt_units()[0]
        if xyz_unit == "unknown":
            # Common assumption; if we're wrong, unlikely to be the only thing that breaks
            xyz_unit = "mm"

        # Set a 0.05mm threshold to performing rescaling
        atol_gross = {"meter": 5e-5, "mm": 0.05, "micron": 50}[xyz_unit]
        # if 0.01 > difference > 0.001mm, freesurfer won't be able to merge the images
        atol_fine = {"meter": 1e-6, "mm": 0.001, "micron": 1}[xyz_unit]

        # Update zooms => Modify affine
        # Rescale => Resample to resized voxels
        # Resize => Resample to new image dimensions
        update_zooms = not np.allclose(
            zooms, target_zooms, atol=atol_fine, rtol=0)
        rescale = not np.allclose(zooms, target_zooms, atol=atol_gross, rtol=0)
        resize = not np.all(shape == target_shape)
        resample = rescale or resize
        if resample or update_zooms:
            # Use an affine with the corrected zooms, whether or not we resample
            if update_zooms:
                scale_factor = target_zooms / zooms
                target_affine[:3, :3] = reoriented.affine[:3, :3] @ np.diag(
                    scale_factor)

            if resize:
                # The shift is applied after scaling.
                # Use a proportional shift to maintain relative position in dataset
                size_factor = target_span / (zooms * shape)
                # Use integer shifts to avoid unnecessary interpolation
                offset = reoriented.affine[:3,
                                           3] * size_factor - reoriented.affine[:
                                                                                3,
                                                                                3]
                target_affine[:3,
                              3] = reoriented.affine[:3,
                                                     3] + offset.astype(int)

            # Voxel-to-voxel transform between reoriented and conformed grids.
            conform_xfm = np.linalg.inv(reoriented.affine) @ target_affine

            # Create new image
            data = reoriented.dataobj
            if resample:
                data = nli.resample_img(reoriented, target_affine,
                                        target_shape).dataobj
            reoriented = reoriented.__class__(data, target_affine,
                                              reoriented.header)

        # Image may be reoriented, rescaled, and/or resized
        if reoriented is not orig_img:
            out_name = fname_presuffix(fname,
                                       suffix="_ras",
                                       newpath=runtime.cwd)
            reoriented.to_filename(out_name)
        else:
            out_name = fname

        # Compose orig->RAS with RAS->conformed; sanity-check the result
        # actually maps the original affine onto the target.
        transform = ornt_xfm.dot(conform_xfm)
        if not np.allclose(orig_img.affine.dot(transform), target_affine):
            raise ValueError("Original and target affines are not similar")

        mat_name = fname_presuffix(fname,
                                   suffix=".mat",
                                   newpath=runtime.cwd,
                                   use_ext=False)
        np.savetxt(mat_name, transform, fmt="%.08f")

        self._results["out_file"] = out_name
        self._results["transform"] = mat_name

        return runtime
  def main(self,input_nii,slicedir,outFolder,pctile,origin,colormap,reorient,replace,boundingbox_bgcolor,count_pixels):
    """Slice a NIfTI volume along one axis and save each slice as png/jpg.

    Writes one image per slice to `outFolder` plus JSON side files:
    raslimits.json (RAS coordinate extents), slicepos.json (slice
    coordinates), and optionally boundingbox.json / pixcount.json.
    Returns a FancyDict with `filePattern` and `rasLimits`.

    Parameters: `slicedir` selects the slice axis ('x'/'y'/'z', index, or
    anatomical name); `pctile` enables intensity rescaling; `origin`
    overrides the NIfTI origin ('center' or a JSON [i,j,k] triple);
    `reorient` corrects non-RAS files; `replace` regenerates existing
    slices; `boundingbox_bgcolor` ('' disables, 'auto', or a hex color)
    enables bounding-box detection; `count_pixels` enables per-label
    pixel counting.
    """
    nii = nibabel.load(input_nii)
    # Accept axis letters, numeric indices, and anatomical plane names.
    dir2dim = {'x':0,'y':1,'z':2,'0':0,'1':1,'2':2,'coronal':0,'saggital':1,'horizontal':2,'axial':2}
    sliceDim = dir2dim[slicedir.lower()]

    # Nifti data is supposed to be in RAS orientation.
    # For Nifti files that violate the standard, the reorient string can be used to correct the orientation.
    if reorient:
      nii = nit.reorient(nii,reorient)

    hdr = nii.get_header()
    q = hdr.get_best_affine()
    ornt = nibabel.io_orientation(q)
    print('The orientation is: {}'.format(ornt))
    # Squeeze out singleton axes, then permute dims to RAS order.
    dims0 = [int(d) for d in nii.shape if d>1]
    dims = list(dims0)
    for i,d in enumerate(ornt):
      dims[i] = dims0[int(d[0])]
    print('The dimensions are: {}'.format(dims))
    numSlices = dims[sliceDim]
    baseName = op.basename(input_nii)
    baseName = re.sub('.gz$', '',baseName)
    baseName = re.sub('.nii$', '',baseName)
    if not op.exists(outFolder):
      os.makedirs(outFolder)
      print('Created output folder "{}".'.format(outFolder))

    # Detect RGB data: either a trailing dim of size 3 or a 3-field record dtype.
    rgbMode = False
    img_dtype = nii.get_data_dtype()
    if len(dims)==4 and dims[3]==3:
      rgbMode = True
    elif img_dtype.names:
      if len(img_dtype.names)==3:
        rgbMode = 'record'
    rescale = pctile is not None

    # Rescaled/RGB output is continuous-tone -> jpg; label maps keep png.
    fmt = 'png'
    if rescale or rgbMode:
      fmt = 'jpg'

    filePattern = baseName+'_%04d.{}'.format(fmt)
    filePattern_py = filePattern.replace('_%04d','_{:04d}')

    if origin:
      # Override the header origin: either an explicit [i,j,k] voxel
      # triple (JSON) or 'center' for the volume center.
      # NOTE(review): the 'center' branch rebinds `dims` to the raw header
      # shape (not the RAS-permuted dims used elsewhere) — verify intended.
      try:
        if origin.startswith('['):
          ijk0 = json.loads(origin)
        elif origin == 'center':
          dims = hdr.get_data_shape()
          ijk0 = [dims[0]/2,dims[1]/2,dims[2]/2]
        q = hdr.get_best_affine()
        q[0:3,3] = -q[0:3,0:3].dot(ijk0)
        hdr.set_sform(q)
      except:
        raise Exception('Invalid origin "{}".'.format(origin))

    # save coordinate system (Right Anterior Superior) information
    rasLimits = nit.rasLimits(hdr)

    with open(op.join(outFolder,'raslimits.json'), 'w') as fp:
      json.dump(rasLimits,fp)

    # RAS coordinate of each slice center along the slicing axis.
    slicePos = (rasLimits[sliceDim][0] + (numpy.arange(0.0,dims[sliceDim])+0.5)*(rasLimits[sliceDim][1]-rasLimits[sliceDim][0])/dims[sliceDim]).tolist()
    with open(op.join(outFolder,'slicepos.json'), 'w') as fp:
      json.dump(slicePos,fp)

    # quit if ALL slices already exist
    if not replace:
      done = True
      for i in range(0,numSlices):
        outFile = filePattern_py.format(i)
        fullFile = op.join(outFolder,outFile)
        if not op.exists(fullFile):
          done = False
          break
      if done:
        return FancyDict(
          filePattern = op.join(outFolder,filePattern),
          rasLimits = rasLimits
        )

    # load image, it is needed
    img = nii.get_data()
    img = nibabel.apply_orientation(img,ornt)
    img = numpy.squeeze(img)
    if rgbMode == 'record': img = nit.record2rgb(img)

    print('Nifti image loaded, shape "{}",data type "{}"'.format(dims,img.dtype))

    maxSlices = 2048
    if numSlices>maxSlices:
      raise Exception('Too many slices (more than '+str(maxSlices)+')')

    if not rgbMode:
      minmax = nit.get_limits(img)
      if rescale:
        minmax = nit.get_limits(img,pctile)
      print('minmax {}, rescale {}'.format(minmax,rescale))
      index2rgb = nit.parse_colormap(colormap,minmax)

      # `index2rgb` may be a dict (sparse label colormap) or a list.
      # FIX: `index2rgb.keys()[0]` is not subscriptable on Python 3;
      # next(iter(...)) behaves the same on both.
      if isinstance(index2rgb,dict):
        rgbLen = len(index2rgb[next(iter(index2rgb))])
      else:
        rgbLen = len(index2rgb[0])

      # save index2rgb
      if not rescale:
        if isinstance(index2rgb,dict):
          index2rgb_hex = {index:'{:02X}{:02X}{:02X}'.format(rgb[0],rgb[1],rgb[2]) for (index,rgb) in index2rgb.items()}
        else:
          index2rgb_hex = ['{:02X}{:02X}{:02X}'.format(rgb[0],rgb[1],rgb[2]) for rgb in index2rgb]
        with open(op.join(outFolder,'index2rgb.json'), 'w') as fp:
          json.dump(index2rgb_hex,fp)
    elif rgbMode:
      # RGB input: rescale based on luma (ITU-R BT.601 weights).
      grayscale = img.dot(numpy.array([0.2989,0.5870,0.1140]))
      minmax = nit.get_limits(grayscale)
      if rescale:
        minmax = nit.get_limits(grayscale,pctile)
      rescale = True
      rgbLen = 3
      index2rgb = False
    else:
      rescale = False
      rgbLen = 3
      index2rgb = False

    bbg = boundingbox_bgcolor
    # BUG FIX: was `if bbg is '':` — identity comparison on a string literal
    # is implementation-dependent; compare by value.
    if bbg == '': bbg = False
    if bbg:
      boundingBox = {}
      boundingBoxFile = op.join(outFolder,'boundingbox.json')
      if op.exists(boundingBoxFile):
        # NOTE(review): keys loaded from JSON are strings while new entries
        # below use int keys — verify downstream consumers handle both.
        with open(boundingBoxFile, 'r') as fp:
          boundingBox = json.load(fp)

    pxc = count_pixels
    if pxc:
      pixCount = {}
      pixCountFile = op.join(outFolder,'pixcount.json')
      if op.exists(pixCountFile):
        with open(pixCountFile, 'r') as fp:
          pixCount = json.load(fp)

    for i in range(0,numSlices):
      outFile = filePattern_py.format(i)
      fullFile = op.join(outFolder,outFile)
      if op.exists(fullFile):
        if i==0:
          print('image {}{} already exists as {}-file "{}".'.format(sliceDim,i,fmt,fullFile))
        if not replace:
          continue
      slc = nit.get_slice(img,sliceDim,i)

      if pxc:
        # Count pixels per label value in this slice.
        labels = numpy.unique(slc)
        cnt = {}
        for b in labels:
          cnt[str(b)] = numpy.count_nonzero(slc == b)
        pixCount[i] = cnt

      if index2rgb:
        slc = nit.slice2rgb(slc,index2rgb,rescale,minmax[0],minmax[1])
      elif rescale:
        slc = nit.rgbslice_rescale(slc,minmax[0],minmax[1])

      # Save image
      scipy.misc.toimage(slc).save(op.join(outFolder,outFile))

      if bbg:
        if(bbg == "auto"):
          # do this only once
          bgColor = nit.autoBackgroundColor(slc)
          print('boundingbox auto bgColor is {}'.format(bgColor))
        else:
          # BUG FIX: was `nit.hex2rgb(bgg)` — `bgg` is an undefined name.
          bgColor = nit.hex2rgb(bbg)
        mask = nit.imageMask(slc,[bgColor])
        print('mask shape {} {}'.format(bgColor,mask.shape))

        ## bounding box
        nonzero = numpy.argwhere(mask)
        if nonzero.size>0:
          lefttop = nonzero.min(0)[::-1] # because y=rows and x=cols
          rightbottom = nonzero.max(0)[::-1]
          bb = lefttop.tolist()
          bb.extend(rightbottom-lefttop+(1,1))
          boundingBox[i] = bb

      if i==0:
        print('image {}{} saved to {}-file "{}".'.format(sliceDim,i,fmt,fullFile))

    if bbg:
      if len(boundingBox)>0:
        # Union of all per-slice boxes, stored as [x, y, width, height].
        bb0 = next(iter(boundingBox.values()))
        xyMin = [bb0[0],bb0[1]]
        xyMax = [bb0[0]+bb0[2],bb0[1]+bb0[3]]
        for bb in boundingBox.values():
          if bb[0]<xyMin[0]: xyMin[0] = bb[0]
          if bb[1]<xyMin[1]: xyMin[1] = bb[1]
          if bb[0]+bb[2]>xyMax[0]: xyMax[0] = bb[0]+bb[2]
          if bb[1]+bb[3]>xyMax[1]: xyMax[1] = bb[1]+bb[3]
        boundingBox['combined'] = [xyMin[0],xyMin[1],xyMax[0]-xyMin[0],xyMax[1]-xyMin[1]]
      with open(boundingBoxFile, 'w') as fp:
        json.dump(boundingBox,fp)

    if pxc:
      with open(pixCountFile, 'w') as fp:
        json.dump(pixCount,fp)

    return FancyDict(
      filePattern=op.join(outFolder,filePattern),
      rasLimits=rasLimits
    )
Esempio n. 36
0
def run(args):
    """Render slice images for each NIfTI layer and emit an HTML viewer.

    For every entry in ``args.layers`` (dicts with "file" and optional
    "pctile" / "colormap" / "title"), the volume is reoriented, sliced
    along x/y/z according to ``args.sliceRangePct``, and saved as
    png/jpg in the image folder.  Finally the ``nii_inspect.html``
    template is specialized with the layer metadata and written to
    ``args.out``.

    Note: Python 2 code (print statements).
    """
    print('Layer specification: {}'.format(args.layers))
    try:
        htmlFile = args.out
        if op.isdir(htmlFile):
            htmlFile = op.join(htmlFile,'index.html')
        htmlFolder = op.dirname(htmlFile)
        if args.out_images:
            imgFolder = args.out_images
        else:
            # Default image folder: "<htmlname>_files" next to the HTML file.
            htmlName,htmlExt = op.splitext(op.basename(htmlFile))
            imgFolder = op.join(htmlFolder,htmlName+'_files')
        
        if not(op.exists(htmlFolder)):
            os.makedirs(htmlFolder)
            print 'Created html output folder "{}".'.format(htmlFolder)
        if not(op.exists(imgFolder)):
            os.makedirs(imgFolder)
            print 'Created image output folder "{}".'.format(imgFolder)
        imgFolder = op.realpath(imgFolder)
        scriptDir = op.realpath(op.dirname(__file__))

        parsedLayers = []
        for i,lr in enumerate(args.layers):
            nifti_src = lr["file"]
            if not nifti_src: continue
            baseName = re.sub('(\.nii|\.nii.gz)$','',op.basename(nifti_src))

            import nibabel
            print 'Loading "{}"'.format(nifti_src)
            nii = nibabel.load(nifti_src)
            img = numpy.squeeze(nii.get_data())
            img_min = numpy.amin(img)
            img_max = numpy.amax(img)
            print 'Image type: {} {}-{}'.format(img.dtype,img_min,img_max)
            if "pctile" in lr:
                # Parse "lo,hi" (or a single "hi") percentile spec and clamp
                # the display range accordingly.
                pctile = re.split('[,\- ]+',str(lr["pctile"]))
                pctile = [float(p) for p in pctile]
                if len(pctile)<1:
                    pctile = [0,100]
                elif len(pctile)<2:
                    pctile = [0,pctile[0]]
                if pctile[1]<=pctile[0]:
                    raise Exception('Max percentile must be larger than min percentile, not {},{}'.format(pctile[0],pctile[1]))
                elif pctile[0]<0:
                    raise Exception('Min percentile must be >=0, not {}'.format(pctile[0]))
                elif pctile[1]>100:
                    raise Exception('Max percentile must be <=100, not {}'.format(pctile[1]))
                if pctile[0]>0:
                    img_min = numpy.percentile(img,pctile[0])
                if pctile[1]>0:
                    img_max = numpy.percentile(img,pctile[1])
                print 'Percentile {}-{} range: {}-{}'.format(pctile[0],pctile[1],img_min,img_max)
            hdr = nii.get_header()
            q = hdr.get_best_affine();
            ornt = nibabel.io_orientation(q)
            print 'Orientation: {}'.format(ornt)
            # Bring voxel data into RAS axis order before slicing.
            img = nibabel.apply_orientation(img,ornt)
            dims = img.shape
            print 'Nifti image for layer {} loaded, data type "{}"'.format(i,img.dtype)

            if len(dims)==4:
                raise Exception('Error: NIFTI file with RGB color data not supported.')
                        
            # apply colormap
            fmt = 'jpg'
            index2rgb = None
            hasAlpha = 1
            rescale = False
            if "colormap" in lr:
                cmap = lr["colormap"]
                # Form 1: "#RRGGBB-#RRGGBB" -> 256-entry linear gradient.
                matches = re.search('^(#[0-9a-fA-F]+)-(#[0-9a-fA-F]+)$',cmap)
                if matches:
                    rescale = True
                    hasAlpha = False
                    rgb1 = hex2rgb(matches.group(1))
                    rgb2 = hex2rgb(matches.group(2))
                    index2rgb = [
                        numpy.array([
                            rgb1[0]+i/255.0*(rgb2[0]-rgb1[0]),
                            rgb1[1]+i/255.0*(rgb2[1]-rgb1[1]),
                            rgb1[2]+i/255.0*(rgb2[2]-rgb1[2])
                        ],numpy.uint8) for i in range(256)
                    ]
                elif cmap.startswith('alpha'):
                    # Form 2: "alpha" or "alpha-#RRGGBB" -> fixed color with
                    # intensity mapped to the alpha channel (needs png).
                    fmt = 'png'
                    rescale = True
                    matches = re.search('^alpha-(#[0-9a-fA-F]+)$',cmap)
                    if matches:
                        rgb = hex2rgb(matches.group(1))
                    else:
                        rgb = list([255,255,255])
                    index2rgb = [[rgb[0],rgb[1],rgb[2],i] for i in range(256)]
                elif cmap[0] == '#':
                    # Form 3: comma-separated "#RRGGBBAA,..." label colors.
                    fmt = 'png'
                    hasAlpha = True
                    cmap = cmap.split(',')
                    index2rgb = {};
                    for i,a in enumerate(cmap):
                        index2rgb[i] = hex2rgba(a)
                    print "Color map {}".format(index2rgb)
                elif op.exists(cmap):
                    # Form 4: path to a JSON file mapping label index -> color.
                    fmt = 'png'
                    hasAlpha = False
                    with open(cmap,'r') as fp:
                        cmap = json.load(fp)
                    print('{}'.format(cmap))
                    index2rgb = {};
                    if isinstance(cmap,dict):
                        for i in cmap:
                            print cmap[i]
                            index2rgb[i] = hex2rgba(cmap[i])
                    else:
                        raise Exception('Colormap file must contain a json-encoded map with color index as keys and #RRGGBB-colors as values.')
                else:
                    raise Exception('Do not know how to parse colormap "{}".'.format(cmap))
                  
            # Slice each of the three axes over the requested percentage range.
            # NOTE(review): `sliceRange` is overwritten per layer; only the
            # last layer's range reaches the HTML template below.
            sliceRange = [[],[],[]]
            for d in [0,1,2]:
                dim = ['x','y','z'][d]
                numSlices = dims[d];
                sliceStep = int(args.sliceRangePct[d][1]*numSlices/100)
                sliceStart = int(args.sliceRangePct[d][0]*(numSlices-1)/100)
                sliceEnd = int(args.sliceRangePct[d][2]*(numSlices-1)/100)
                sliceRange[d] = [sliceStart,sliceStep,sliceEnd]
                for i in range(sliceStart,sliceEnd+1,sliceStep):
                    slice = get_slice(img,dim,i)
                    
                    pngFile = baseName+'_{}{:d}.{}'.format(dim,i,fmt)
                    if index2rgb:
                        slice = slice2rgb(slice,index2rgb,rescale,img_min,img_max)

                    # Save image to PNG
                    scipy.misc.toimage(slice).save(op.join(imgFolder,pngFile))
                        
                    if i==sliceStart:
                        print 'image {}{} saved to png file "{}".'.format(dim,i,pngFile)

            # Physical image extent in mm, from the header pixdim.
            pixdim = hdr['pixdim'][1:4]
            imgsize_mm = [
                round(pixdim[0]*dims[0],1),
                round(pixdim[1]*dims[1],1),
                round(pixdim[2]*dims[2],1)
            ]
            print 'Image size in mm {}'.format(imgsize_mm)

            # update parsedLayers
            pl = {
              "name": baseName,
              "ext": fmt,
              "src": nifti_src,
              "imgsize_px": dims,
              "imgsize_mm": imgsize_mm
            }
            if "title" in lr:
                pl["title"] = lr["title"];
            parsedLayers.append(pl);

        # Specialize the HTML template with layer/slice metadata.
        inspectFile = '{}/nii_inspect.html'.format(scriptDir);
        with open(inspectFile, 'r') as fp:
            html = fp.read()
            html = html.replace(r"var defaultLayers = [];",
                r"var defaultLayers = {};".format(json.dumps(parsedLayers)))                            
            html = html.replace(r"var defaultSliceRange = [];",
                "var defaultSliceRange = {};".format(json.dumps(sliceRange)))
            html = html.replace(r"var imgDir = '';",
                "var imgDir = '{}/';".format(op.relpath(imgFolder,htmlFolder)))
  
        with open(htmlFile, 'w') as fp:
            fp.write(html)
                
        print 'HTML viewer saved as "{}"'.format(htmlFile)
        
    except:
        print "Unexpected error:", sys.exc_info()[0]
        raise
Esempio n. 37
0
def run(args):
  """Convert a Nifti volume to a stack of per-slice PNG/JPG images plus
  JSON side-files (RAS limits, slice positions, optional index->rgb
  colormap, per-slice bounding boxes, per-label pixel counts), then
  report success/failure via the project-level `report` module.

  args is an argparse-style namespace with at least: nifti_src, colormap,
  dim, out, origin, replace, boundingbox_bgcolor, count_pixels, and
  optionally pctile (percentile-based intensity rescaling).
  """
  try:
    print('Input Nifti: '+args.nifti_src)
    print('Colormap to use: '+args.colormap)

    import nibabel
    nii = nibabel.load(args.nifti_src)
    hdr = nii.get_header()
    ornt = nibabel.io_orientation(hdr.get_best_affine())
    print('The orientation is: {}'.format(ornt))

    # Permute the squeezed dimensions into RAS order.
    # BUGFIX: the original aliased dims to dims0 and permuted in place,
    # reading already-overwritten entries; use a copy instead.
    dims0 = [d for d in nii.shape if d > 1]
    dims = list(dims0)
    for i, d in enumerate(ornt):
      dims[i] = dims0[int(d[0])]
    print('The dimensions are: {}'.format(dims))

    sliceDim = args.dim
    numSlices = dims[sliceDim]
    baseName = op.basename(args.nifti_src)
    baseName = re.sub('.gz$', '', baseName)
    baseName = re.sub('.nii$', '', baseName)
    outFolder = args.out
    if not op.exists(outFolder):
      os.makedirs(outFolder)
    print('Created output folder "{}".'.format(outFolder))

    # RGB data is stored either as a length-3 4th dimension or as a
    # 3-field record dtype.
    rgbMode = False
    img_dtype = nii.get_data_dtype()
    if len(dims) == 4 and dims[3] == 3:
      rgbMode = True
    elif img_dtype.names:
      if len(img_dtype.names) == 3:
        rgbMode = 'record'
    rescale = "pctile" in args and args.pctile is not None

    # Lossless png for label-like data; jpg when rescaled or true-color.
    fmt = 'png'
    if rescale or rgbMode:
      fmt = 'jpg'

    filePattern = baseName+'_%04d.{}'.format(fmt)
    filePattern_py = filePattern.replace('_%04d', '_{:04d}')

    # Save coordinate-system (Right Anterior Superior) limits.
    rasLimits = nit.rasLimits(hdr)
    if args.origin == 'center':
      # Re-express each axis range as symmetric around zero.
      def ctr(x1, x2):
        w = x2-x1
        return -w/2, w/2
      rasLimits = [ctr(xx[0], xx[1]) for xx in rasLimits]

    with open(op.join(outFolder, 'raslimits.json'), 'w') as fp:
      json.dump(rasLimits, fp)

    # Physical position of each slice center along the slicing axis.
    slicePos = (rasLimits[sliceDim][0] + (numpy.arange(0.0, dims[sliceDim])+0.5)*(rasLimits[sliceDim][1]-rasLimits[sliceDim][0])/dims[sliceDim]).tolist()
    with open(op.join(outFolder, 'slicepos.json'), 'w') as fp:
      json.dump(slicePos, fp)

    # Quit early if ALL slices already exist and we are not replacing.
    if not args.replace:
      done = all(op.exists(op.join(outFolder, filePattern_py.format(i)))
                 for i in range(numSlices))
      if done:
        report.success({
          'filePattern': op.join(outFolder, filePattern),
          'rasLimits': rasLimits
        })
        return

    # Load the voxel data and bring it into RAS orientation.
    img = nii.get_data()
    img = nibabel.apply_orientation(img, ornt)
    img = numpy.squeeze(img)

    print('Nifti image loaded, shape "{}",data type "{}"'.format(dims, img.dtype))

    maxSlices = 2048
    if numSlices > maxSlices:
      raise Exception('Too many slices (more than '+str(maxSlices)+')')

    if not rgbMode:
      minmax = nit.get_limits(img)
      if rescale:
        minmax = nit.get_limits(img, args.pctile)
      print('minmax {}, rescale {}'.format(minmax, rescale))
      index2rgb = nit.parse_colormap(args.colormap, minmax)

      # Save index2rgb so viewers can map pixel values back to labels
      # (only meaningful for un-rescaled label data).
      if not rescale:
        if isinstance(index2rgb, dict):
          index2rgb_hex = {index: '{:02X}{:02X}{:02X}'.format(rgb[0], rgb[1], rgb[2]) for (index, rgb) in index2rgb.items()}
        else:
          index2rgb_hex = ['{:02X}{:02X}{:02X}'.format(rgb[0], rgb[1], rgb[2]) for rgb in index2rgb]
        with open(op.join(outFolder, 'index2rgb.json'), 'w') as fp:
          json.dump(index2rgb_hex, fp)
    else:
      rescale = False
      index2rgb = False

    bbg = args.boundingbox_bgcolor
    # BUGFIX: compare with ==, not 'is' (identity on a str literal is
    # implementation dependent).
    if bbg == '':
      bbg = False
    if bbg:
      boundingBox = {}
      boundingBoxFile = op.join(outFolder, 'boundingbox.json')
      if op.exists(boundingBoxFile):
        with open(boundingBoxFile, 'r') as fp:
          boundingBox = json.load(fp)

    pxc = args.count_pixels
    if pxc:
      pixCount = {}
      pixCountFile = op.join(outFolder, 'pixcount.json')
      if op.exists(pixCountFile):
        with open(pixCountFile, 'r') as fp:
          pixCount = json.load(fp)

    for i in range(numSlices):
      outFile = filePattern_py.format(i)
      fullFile = op.join(outFolder, outFile)
      if op.exists(fullFile):
        if i == 0:
          print('image {}{} already exists as {}-file "{}".'.format(sliceDim, i, fmt, fullFile))
        if not args.replace:
          continue
      slc = nit.get_slice(img, sliceDim, i)
      print('slice shape {}'.format(slc.shape))

      if pxc:
        # Count pixels per label value in this slice.
        cnt = {}
        for b in numpy.unique(slc):
          cnt[str(b)] = numpy.count_nonzero(slc == b)
        # BUGFIX: use string keys; json.load returns string keys, so
        # integer keys would duplicate entries loaded from a previous run.
        pixCount[str(i)] = cnt

      if index2rgb:
        slc = nit.slice2rgb(slc, index2rgb, rescale, minmax[0], minmax[1])

      if rgbMode == 'record':
        # Create the 3rd (color) dimension from the rgb record fields.
        slc = record2rgb(slc)

      # Save image. NOTE(review): scipy.misc.toimage was removed in
      # scipy >= 1.2; this requires an old scipy with PIL support.
      scipy.misc.toimage(slc).save(op.join(outFolder, outFile))

      if bbg:
        if bbg == "auto":
          # NOTE: recomputed for every slice (original behavior kept).
          bgColor = nit.autoBackgroundColor(slc)
          print('boundingbox auto bgColor is {}'.format(bgColor))
        else:
          # BUGFIX: was hex2rgb(bgg) — a NameError typo; this branch
          # always crashed before.
          bgColor = nit.hex2rgb(bbg)
        mask = nit.imageMask(slc, [bgColor])
        print('mask shape {} {}'.format(bgColor, mask.shape))

        # Bounding box of the non-background pixels, as [x, y, w, h].
        nonzero = numpy.argwhere(mask)
        if nonzero.size > 0:
          lefttop = nonzero.min(0)[::-1]       # because y=rows and x=cols
          rightbottom = nonzero.max(0)[::-1]
          bb = lefttop.tolist()
          # .tolist() yields plain ints, keeping the dict JSON-serializable.
          bb.extend((rightbottom-lefttop+1).tolist())
          boundingBox[str(i)] = bb

      if i == 0:
        print('image {}{} saved to {}-file "{}".'.format(sliceDim, i, fmt, fullFile))

    if bbg:
      if len(boundingBox) > 0:
        # Union of all per-slice boxes, stored under the 'combined' key.
        bb0 = next(iter(boundingBox.values()))
        xyMin = [bb0[0], bb0[1]]
        xyMax = [bb0[0]+bb0[2], bb0[1]+bb0[3]]
        for bb in boundingBox.values():
          if bb[0] < xyMin[0]: xyMin[0] = bb[0]
          if bb[1] < xyMin[1]: xyMin[1] = bb[1]
          if bb[0]+bb[2] > xyMax[0]: xyMax[0] = bb[0]+bb[2]
          if bb[1]+bb[3] > xyMax[1]: xyMax[1] = bb[1]+bb[3]
        boundingBox['combined'] = [xyMin[0], xyMin[1], xyMax[0]-xyMin[0], xyMax[1]-xyMin[1]]
      with open(boundingBoxFile, 'w') as fp:
        json.dump(boundingBox, fp)

    if pxc:
      with open(pixCountFile, 'w') as fp:
        json.dump(pixCount, fp)

    report.success({
      'filePattern': op.join(outFolder, filePattern),
      'rasLimits': rasLimits
    })
  except Exception:
    # Narrowed from a bare except so SystemExit/KeyboardInterrupt still
    # propagate; report.fail handles the error reporting.
    report.fail(__file__)