Example 1
    @classmethod
    def from_ras(cls, ras, moving=None, reference=None):
        """Create an ITK affine from a nitransform's RAS+ matrix."""
        ras = ras.copy()
        pre = LPS.copy()
        post = LPS.copy()
        if reference is not None and _is_oblique(reference.affine):
            print('Reference affine axes are oblique.')
            M = reference.affine
            A = shape_zoom_affine(reference.shape,
                                  voxel_sizes(M),
                                  x_flip=False,
                                  y_flip=False)
            pre = M.dot(np.linalg.inv(A)).dot(LPS)

        if moving is not None and _is_oblique(moving.affine):
            print('Moving affine axes are oblique.')
            M2 = moving.affine
            A2 = shape_zoom_affine(moving.shape,
                                   voxel_sizes(M2),
                                   x_flip=True,
                                   y_flip=True)
            post = A2.dot(np.linalg.inv(M2))

        # swapaxes is necessary, as axis 0 encodes series of transforms
        parameters = np.swapaxes(post.dot(ras.dot(pre)), 0, 1)

        tf = cls()
        tf.structarr['parameters'] = parameters.T
        return tf
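In the non-oblique case both pre and post reduce to the LPS flip, so the conversion is simply a conjugation of the RAS+ matrix by diag(-1, -1, 1, 1). A minimal standalone sketch of that step (assuming the snippet's LPS constant is that diagonal flip, as in nitransforms):

import numpy as np

LPS = np.diag([-1., -1., 1., 1.])    # RAS+ <-> LPS+ axis flip
ras = np.eye(4)
ras[:3, 3] = [10., -5., 2.]          # a pure translation in RAS+ mm

itk_like = LPS.dot(ras).dot(LPS)     # same rotation block, x/y offsets flipped
print(itk_like[:3, 3])               # -> [-10.   5.   2.]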
Example 2
def test_spatial_axes_check():
    for fname in MINC_3DS + OTHER_IMGS:
        img = nib.load(pjoin(DATA_DIR, fname))
        s_img = smooth_image(img, 0)
        assert_array_equal(img.dataobj, s_img.dataobj)
        out = resample_from_to(img, img, mode='nearest')
        assert_almost_equal(img.dataobj, out.dataobj)
        if len(img.shape) > 3:
            continue
        # Resample to output does not raise an error
        out = resample_to_output(img, voxel_sizes(img.affine))
    for fname in MINC_4DS:
        img = nib.load(pjoin(DATA_DIR, fname))
        assert_raises(ValueError, smooth_image, img, 0)
        assert_raises(ValueError, resample_from_to, img, img, mode='nearest')
        assert_raises(ValueError,
                      resample_to_output, img, voxel_sizes(img.affine))
Example 3
def test_spatial_axes_check(caplog):
    for fname in MINC_3DS + OTHER_IMGS:
        img = nib.load(pjoin(DATA_DIR, fname))
        with caplog.at_level(
                logging.CRITICAL):  # Suppress logs when changing classes
            s_img = smooth_image(img, 0)
        assert_array_equal(img.dataobj, s_img.dataobj)
        with caplog.at_level(logging.CRITICAL):
            out = resample_from_to(img, img, mode='nearest')
        assert_almost_equal(img.dataobj, out.dataobj)
        if len(img.shape) > 3:
            continue
        # Resample to output does not raise an error
        out = resample_to_output(img, voxel_sizes(img.affine))
    for fname in MINC_4DS:
        img = nib.load(pjoin(DATA_DIR, fname))
        with pytest.raises(ValueError):
            smooth_image(img, 0)
        with pytest.raises(ValueError):
            resample_from_to(img, img, mode='nearest')
        with pytest.raises(ValueError):
            resample_to_output(img, voxel_sizes(img.affine))
Example 4
def _fsl_aff_adapt(space):
    """
    Adapt FSL affines.
    Calculates a matrix to convert from the original RAS image
    coordinates to FSL's internal coordinate system of transforms
    """
    aff = space.affine
    zooms = list(voxel_sizes(aff)) + [1]
    swp = np.eye(4)
    if np.linalg.det(aff) > 0:
        swp[0, 0] = -1.0
        swp[0, 3] = (space.shape[0] - 1) * zooms[0]
    return swp, np.diag(zooms)
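FSL's internal coordinates are voxel indices scaled by the voxel sizes, with the first axis flipped whenever the affine has a positive determinant. A minimal sketch of how the two returned matrices compose, using a synthetic image (the composition order is inferred from how _fsl_aff_adapt is used in the to_filename example further down):

import numpy as np
import nibabel as nib

# Synthetic 3 mm isotropic image with a RAS+ (positive-determinant) affine.
img = nib.Nifti1Image(np.zeros((10, 12, 14), dtype=np.float32),
                      np.diag([3., 3., 3., 1.]))
swp, spc = _fsl_aff_adapt(img)

# Voxel indices -> FSL scaled-voxel (mm) coordinates: scale by the zooms,
# then flip the first axis across the field of view.
vox2fsl = swp.dot(spc)
print(vox2fsl.dot([0, 0, 0, 1]))     # -> [27.  0.  0.  1.]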
Example 5
def save_slices(subject, fname, x, y, z, modality='mri'):
    """ Function to display row of image slices """
    header = nib.load(fname)
    affine = np.array(header.affine, float)
    data = header.get_data()
    images_fol = op.join(MMVT_DIR, subject, 'figures', 'slices')
    utils.make_dir(images_fol)

    clim = np.percentile(data, (1., 99.))
    codes = axcodes2ornt(aff2axcodes(affine))
    order = np.argsort([c[0] for c in codes])
    flips = np.array([c[1] < 0 for c in codes])[order]
    sizes = [data.shape[o] for o in order]
    scalers = voxel_sizes(affine)
    coordinates = np.array([x, y, z])[order].astype(int)

    r = [
        scalers[order[2]] / scalers[order[1]],
        scalers[order[2]] / scalers[order[0]],
        scalers[order[1]] / scalers[order[0]]
    ]
    for ii, xax, yax, ratio, perspective in zip(
        [0, 1, 2], [1, 0, 0], [2, 2, 1], r, ['Sagittal', 'Coronal', 'Axial']):
        fig = plt.figure()
        fig.set_size_inches(1. * sizes[xax] / sizes[yax], 1, forward=False)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)

        d = get_image_data(data, order, flips, ii, coordinates)
        ax.imshow(d,
                  vmin=clim[0],
                  vmax=clim[1],
                  aspect=1,
                  cmap='gray',
                  interpolation='nearest',
                  origin='lower')
        lims = [0, sizes[xax], 0, sizes[yax]]
        ax.axis(lims)
        ax.set_aspect(ratio)
        ax.patch.set_visible(False)
        ax.set_frame_on(False)
        ax.axes.get_yaxis().set_visible(False)
        ax.axes.get_xaxis().set_visible(False)

        x, y, z = coordinates
        image_fname = op.join(
            images_fol, '{}_{}_{}_{}_{}.png'.format(modality, perspective, x,
                                                    y, z))
        print('Saving {}'.format(image_fname))
        plt.savefig(image_fname, dpi=sizes[xax])
Example 6
    def to_sft(self, resize=False):
        """ Convert a TrxFile to a valid StatefulTractogram (in RAM) """
        affine = np.array(self.header['VOXEL_TO_RASMM'], dtype=np.float32)
        dimensions = np.array(self.header['DIMENSIONS'], dtype=np.uint16)
        vox_sizes = np.array(voxel_sizes(affine), dtype=np.float32)
        vox_order = ''.join(aff2axcodes(affine))
        space_attributes = (affine, dimensions, vox_sizes, vox_order)

        if resize:
            self.resize()
        sft = StatefulTractogram(self.streamlines, space_attributes, Space.RASMM,
                                 data_per_point=self.data_per_vertex,
                                 data_per_streamline=self.data_per_streamline)

        return sft
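A hypothetical follow-up showing what the returned StatefulTractogram is typically used for; trx stands for an already-loaded TrxFile instance (an assumption), and dipy's save_tractogram writes it to disk:

from dipy.io.streamline import save_tractogram

# trx is assumed to be an existing TrxFile instance loaded elsewhere.
sft = trx.to_sft()
save_tractogram(sft, 'bundle.trk')   # 'bundle.trk' is a placeholder path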
Example 7
    def to_sft(self):
        """ Convert a TrxFile to a valid StatefulTractogram """
        affine = self.voxel_to_rasmm
        dimensions = self.dimensions
        vox_sizes = np.array(voxel_sizes(affine), dtype=np.float32)
        vox_order = ''.join(aff2axcodes(affine))
        space_attributes = (affine, dimensions, vox_sizes, vox_order)

        sft = StatefulTractogram(
            self.streamlines,
            space_attributes,
            Space.RASMM,
            data_per_point=self.consolidate_data_per_point(),
            data_per_streamline=self.consolidate_data_per_streamline())

        return sft
Example 8
    @classmethod
    def from_image(klass, img):
        """Create struct from an image."""
        volgeom = klass()
        sa = volgeom.structarr
        sa["valid"] = 1
        sa["volume"][:, 0] = img.shape[:3]  # Assumes xyzt-ordered image
        sa["voxelsize"][:, 0] = voxel_sizes(img.affine)[:3]
        A = img.affine[:3, :3]
        b = img.affine[:3, [3]]
        cols = A / sa["voxelsize"]
        sa["xras"] = cols[:, [0]]
        sa["yras"] = cols[:, [1]]
        sa["zras"] = cols[:, [2]]
        sa["cras"] = b + A.dot(sa["volume"]) / 2
        try:
            sa["filename"] = img.file_map["image"].filename
        except Exception:
            pass

        return volgeom
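The direction-cosine columns above rely on voxel_sizes returning the Euclidean norm of each column of the affine's 3x3 block, so dividing the columns by those norms yields unit-length direction vectors. A quick standalone check:

import numpy as np
from nibabel.affines import voxel_sizes

aff = np.diag([2., 3., 4., 1.])         # anisotropic, axis-aligned affine
cols = aff[:3, :3] / voxel_sizes(aff)   # one direction cosine per column
print(np.linalg.norm(cols, axis=0))     # -> [1. 1. 1.]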
Example 9
    def __str__(self):
        """ Generate the string for printing """
        affine = np.array(self.header['VOXEL_TO_RASMM'], dtype=np.float32)
        dimensions = np.array(self.header['DIMENSIONS'], dtype=np.uint16)
        vox_sizes = np.array(voxel_sizes(affine), dtype=np.float32)
        vox_order = ''.join(aff2axcodes(affine))

        text = 'VOXEL_TO_RASMM: \n{}'.format(
            np.array2string(affine,
                            formatter={'float_kind': lambda x: "%.6f" % x}))
        text += '\nDIMENSIONS: {}'.format(
            np.array2string(dimensions))
        text += '\nVOX_SIZES: {}'.format(
            np.array2string(vox_sizes,
                            formatter={'float_kind': lambda x: "%.2f" % x}))
        text += '\nVOX_ORDER: {}'.format(vox_order)

        strs_size = self.header['NB_STREAMLINES']
        pts_size = self.header['NB_VERTICES']
        strs_len, pts_len = self._get_real_len()

        if strs_size != strs_len or pts_size != pts_len:
            text += '\nstreamline_size: {}'.format(strs_size)
            text += '\nvertex_size: {}'.format(pts_size)

        text += '\nstreamline_count: {}'.format(strs_len)
        text += '\nvertex_count: {}'.format(pts_len)
        text += '\ndata_per_vertex keys: {}'.format(
            list(self.data_per_vertex.keys()))
        text += '\ndata_per_streamline keys: {}'.format(
            list(self.data_per_streamline.keys()))

        text += '\ngroups keys: {}'.format(list(self.groups.keys()))
        for group_key in self.groups.keys():
            if group_key in self.data_per_group:
                text += '\ndata_per_groups ({}) keys: {}'.format(
                    group_key, list(self.data_per_group[group_key].keys()))

        text += '\ncopy_safe: {}'.format(self._copy_safe)

        return text
Example 10
def obliquity(affine):
    r"""
    Estimate the *obliquity* an affine's axes represent.
    The term *obliquity* is defined here as the rotation of those axes with
    respect to the cardinal axes.
    This implementation is inspired by `AFNI's implementation
    <https://github.com/afni/afni/blob/b6a9f7a21c1f3231ff09efbd861f8975ad48e525/src/thd_coords.c#L660-L698>`_.
    For further details about *obliquity*, check `AFNI's documentation
    <https://sscc.nimh.nih.gov/sscc/dglen/Obliquity>_.
    Parameters
    ----------
    affine : 2D array-like
        Affine transformation array.  Usually shape (4, 4), but can be any 2D
        array.
    Returns
    -------
    angles : 1D array-like
        The *obliquity* of each axis with respect to the cardinal axes, in radians.
    """
    vs = voxel_sizes(affine)
    best_cosines = np.abs(affine[:-1, :-1] / vs).max(axis=1)
    return np.arccos(best_cosines)
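For example, an affine rotated 10 degrees about the x axis keeps the first axis cardinal and tilts the other two by 10 degrees. A small sketch, assuming the obliquity function above is in scope:

import numpy as np
from nibabel.eulerangles import euler2mat

aff = np.eye(4)
aff[:3, :3] = euler2mat(x=np.radians(10))   # rotate 10 degrees about x
print(np.degrees(obliquity(aff)))           # -> approximately [ 0. 10. 10.]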
Example 11
    def __str__(self):
        """ Generate the string for printing """
        affine = np.array(self.voxel_to_rasmm, dtype=np.float32)
        dimensions = np.array(self.dimensions, dtype=np.uint16)
        vox_sizes = np.array(voxel_sizes(affine), dtype=np.float32)
        vox_order = ''.join(aff2axcodes(affine))

        text = 'VOXEL_TO_RASMM: \n{}'.format(
            np.array2string(affine,
                            formatter={'float_kind': lambda x: "%.6f" % x}))
        text += '\nDIMENSIONS: {}'.format(np.array2string(dimensions))
        text += '\nVOX_SIZES: {}'.format(
            np.array2string(vox_sizes,
                            formatter={'float_kind': lambda x: "%.2f" % x}))
        text += '\nVOX_ORDER: {}'.format(vox_order)

        text += '\nNB_STREAMLINES: {}'.format(self.nb_streamlines)
        text += '\nNB_POINTS: {}'.format(self.nb_points)

        text += '\n' + TreeViewer(self._zcontainer).__unicode__()

        return text
Example 12
    def __init__(self, volume, affine=None, title=None, cmap='gray', clim=None, alpha=1.):
        """
        Parameters
        ----------
        volume : array-like
            The data that will be displayed by the slicer. Should have 3
            dimensions.
        affine : array-like or None, optional
            Affine transform for the data. This is used to determine
            how the data should be sliced for plotting into the sagittal,
            coronal, and axial view axes. If None, identity is assumed.
            The aspect ratio of the data is inferred from the affine
            transform.
        title : str or None, optional
            The title to display. Can be None (default) to display no
            title.
        cmap : matplotlib colormap, optional
            Colormap to use for plotting. Default: 'gray'
        clim : [min, max] or None, optional
            Limits to use for plotting. Default: 1st and 99th percentiles
        alpha : float, optional
            Transparency value
        """
        # Use these late imports of matplotlib so that we have some hope that
        # the test functions are the first to set the matplotlib backend. The
        # tests set the backend to something that doesn't require a display.
        import matplotlib.pyplot as plt
        self._title = title
        self._closed = False
        self._cross = True

        volume = np.asanyarray(volume)
        if volume.ndim < 3:
            raise ValueError('volume must have at least 3 dimensions')
        if np.iscomplexobj(volume):
            raise TypeError("Complex data not supported")
        affine = np.array(affine, float) if affine is not None else np.eye(4)
        if affine.shape != (4, 4):
            raise ValueError('affine must be a 4x4 matrix')
        # determine our orientation
        self._affine = affine
        codes = axcodes2ornt(aff2axcodes(self._affine))
        self._order = np.argsort([c[0] for c in codes])
        self._flips = np.array([c[1] < 0 for c in codes])[self._order]
        self._flips = list(self._flips) + [False]  # add volume dim
        self._scalers = voxel_sizes(self._affine)
        self._inv_affine = np.linalg.inv(affine)
        # current volume info
        self._volume_dims = volume.shape[3:]
        if len(self._volume_dims) > 0:
            raise NotImplementedError('Cannot handle 4-D Datasets')
        self._volumes = []

        # ^ +---------+   ^ +---------+
        # | |         |   | |         |
        #   |   Sag   |     |   Cor   |
        # S |    0    |   S |    1    |
        #   |         |     |         |
        #   |         |     |         |
        #   +---------+     +---------+
        #        A  -->
        # ^ +---------+
        # | |         |
        #   |  Axial  |
        # A |    2    |
        #   |         |
        #   |         |
        #   +---------+
        #   <--  R
        fig, axes = plt.subplots(2, 2)
        fig.set_size_inches((8, 8), forward=True)
        self._axes = [axes[0, 0], axes[0, 1], axes[1, 0]]
        plt.tight_layout(pad=0.1)
        fig.delaxes(axes[1, 1])
        if self._title is not None:
            fig.canvas.set_window_title(str(title))

        # Start midway through each axis, idx is current slice number
        self._ims, self._data_idx = list(), list()

        # set up axis crosshairs
        self._crosshairs = [None] * 3
        r = [self._scalers[self._order[2]] / self._scalers[self._order[1]],
             self._scalers[self._order[2]] / self._scalers[self._order[0]],
             self._scalers[self._order[1]] / self._scalers[self._order[0]]]
        self._sizes = [volume.shape[order] for order in self._order]
        for ii, xax, yax, ratio, label in zip([0, 1, 2], [1, 0, 0], [2, 2, 1],
                                              r, ('SAIP', 'SRIL', 'ARPL')):
            ax = self._axes[ii]
            vert = ax.plot([0] * 2, [-0.5, self._sizes[yax] - 0.5],
                           color=(0, 1, 0), linestyle='-')[0]
            horiz = ax.plot([-0.5, self._sizes[xax] - 0.5], [0] * 2,
                            color=(0, 1, 0), linestyle='-')[0]
            self._crosshairs[ii] = dict(vert=vert, horiz=horiz)
            # add text labels (top, right, bottom, left)
            lims = [0, self._sizes[xax], 0, self._sizes[yax]]
            bump = 0.01
            poss = [[lims[1] / 2., lims[3]],
                    [(1 + bump) * lims[1], lims[3] / 2.],
                    [lims[1] / 2., 0],
                    [lims[0] - bump * lims[1], lims[3] / 2.]]
            anchors = [['center', 'bottom'], ['left', 'center'],
                       ['center', 'top'], ['right', 'center']]
            for pos, anchor, lab in zip(poss, anchors, label):
                ax.text(pos[0], pos[1], lab,
                        horizontalalignment=anchor[0],
                        verticalalignment=anchor[1])
            ax.axis(lims)
            ax.set_aspect(ratio)
            ax.patch.set_visible(False)
            ax.set_frame_on(False)
            ax.axes.get_yaxis().set_visible(False)
            ax.axes.get_xaxis().set_visible(False)
            self._data_idx.append(0)
        self._data_idx.append(-1)  # volume

        self._figs = set([a.figure for a in self._axes])
        for fig in self._figs:
            fig.canvas.mpl_connect('scroll_event', self._on_scroll)
            fig.canvas.mpl_connect('motion_notify_event', self._on_mouse)
            fig.canvas.mpl_connect('button_press_event', self._on_mouse)

        # actually set data meaningfully
        self.add_overlay(volume, cmap=cmap, clim=clim, alpha=alpha, draw=False)
        self._position = np.zeros(4)
        self._position[3] = 1.  # convenience for affine multiplication
        self._changing = False  # keep track of status to avoid loops
        plt.draw()
        for fig in self._figs:
            fig.canvas.draw_idle()
            fig.canvas.draw()
        plt.pause(1e-3) # give a little bit of time for the renderer (needed on MacOS)
        self._set_position(0., 0., 0.)
        self._draw()
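A hypothetical usage sketch, assuming this __init__ belongs to an orthogonal-slice viewer in the spirit of nibabel's OrthoSlicer3D (the class name and the show() call are assumptions not shown in the excerpt):

import numpy as np

vol = np.random.RandomState(0).rand(32, 32, 32)   # synthetic 3-D volume
viewer = OrthoSlicer3D(vol, affine=np.diag([2., 2., 2., 1.]), title='demo')
viewer.show()   # requires an interactive matplotlib backend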
Example 13
    def to_filename(self, filename, fmt='X5', moving=None):
        """Store the transform in BIDS-Transforms HDF5 file format (.x5)."""
        if fmt.lower() in ['itk', 'ants', 'elastix']:
            with open(filename, 'w') as f:
                f.write('#Insight Transform File V1.0\n')

                for i in range(self.matrix.shape[0]):
                    parameters = LPS.dot(self.matrix[i].dot(LPS))
                    parameters = parameters[:3, :3].reshape(-1).tolist() + \
                        parameters[:3, 3].tolist()
                    itkfmt = """\
#Transform {0}
Transform: MatrixOffsetTransformBase_double_3_3
Parameters: {1}
FixedParameters: 0 0 0\n""".format
                    f.write(itkfmt(i,
                                   ' '.join(['%g' % p for p in parameters])))
            return filename

        if fmt.lower() == 'afni':
            from math import pi

            if moving and isinstance(moving, (str, bytes, Path)):
                moving = loadimg(str(moving))

            T = self.matrix.copy()
            pre = LPS
            post = LPS
            if (obliquity(self.reference.affine).min() * 180 / pi
                    > OBLIQUITY_THRESHOLD_DEG):
                print('Reference affine axes are oblique.')
                M = self.reference.affine
                A = shape_zoom_affine(self.reference.shape,
                                      voxel_sizes(M),
                                      x_flip=False,
                                      y_flip=False)
                pre = M.dot(np.linalg.inv(A)).dot(LPS)

                if not moving:
                    moving = self.reference

            if moving and (obliquity(moving.affine).min() * 180 / pi
                           > OBLIQUITY_THRESHOLD_DEG):
                print('Moving affine axes are oblique.')
                M2 = moving.affine
                A2 = shape_zoom_affine(moving.shape,
                                       voxel_sizes(M2),
                                       x_flip=True,
                                       y_flip=True)
                post = A2.dot(np.linalg.inv(M2))

            # swapaxes is necessary, as axis 0 encodes series of transforms
            parameters = np.swapaxes(post.dot(self.matrix.copy().dot(pre)),
                                     0, 1)
            parameters = parameters[:, :3, :].reshape((T.shape[0], -1))
            np.savetxt(filename,
                       parameters,
                       delimiter='\t',
                       header="""\
3dvolreg matrices (DICOM-to-DICOM, row-by-row):""",
                       fmt='%g')
            return filename

        if fmt.lower() == 'fsl':
            if not moving:
                moving = self.reference

            if isinstance(moving, str):
                moving = loadimg(moving)

            # Adjust for reference image offset and orientation
            refswp, refspc = _fsl_aff_adapt(self.reference)
            pre = self.reference.affine.dot(
                np.linalg.inv(refspc).dot(np.linalg.inv(refswp)))

            # Adjust for moving image offset and orientation
            movswp, movspc = _fsl_aff_adapt(moving)
            post = np.linalg.inv(movswp).dot(
                movspc.dot(np.linalg.inv(moving.affine)))

            # Compose FSL transform
            mat = np.linalg.inv(
                np.swapaxes(post.dot(self.matrix.dot(pre)), 0, 1))

            if self.matrix.shape[0] > 1:
                for i in range(self.matrix.shape[0]):
                    np.savetxt('%s.%03d' % (filename, i),
                               mat[i],
                               delimiter=' ',
                               fmt='%g')
            else:
                np.savetxt(filename, mat[0], delimiter=' ', fmt='%g')
            return filename
        return super(Affine, self).to_filename(filename, fmt=fmt)
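For reference, the 'itk'/'ants'/'elastix' branch above writes a plain-text Insight Transform File. For a stack holding a single identity transform, the file would read roughly as follows (sketched from the format string above, not captured from a run):

#Insight Transform File V1.0
#Transform 0
Transform: MatrixOffsetTransformBase_double_3_3
Parameters: 1 0 0 0 1 0 0 0 1 0 0 0
FixedParameters: 0 0 0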