Example #1
    def _run_interface(self, runtime):
        dx, dy, dz = get_data_dims(self.inputs.image_file)
        vx, vy, vz = get_vox_dims(self.inputs.image_file)
        image_file = nb.load(self.inputs.image_file)
        affine = image_file.get_affine()
        out_filename = op.abspath(self.inputs.out_filename)

        # Reads MRTrix tracks
        header, streamlines = read_mrtrix_tracks(self.inputs.in_file, as_generator=True)
        iflogger.info("MRTrix Header:")
        iflogger.info(header)
        # Writes to Trackvis
        trk_header = nb.trackvis.empty_header()
        trk_header["dim"] = [dx, dy, dz]
        trk_header["voxel_size"] = [vx, vy, vz]
        trk_header["n_count"] = header["count"]

        if isdefined(self.inputs.matrix_file) and isdefined(self.inputs.registration_image_file):
            iflogger.info("Applying transformation from matrix file {m}".format(m=self.inputs.matrix_file))
            xfm = np.genfromtxt(self.inputs.matrix_file)
            iflogger.info(xfm)
            registration_image_file = nb.load(self.inputs.registration_image_file)
            reg_affine = registration_image_file.get_affine()
            r_dx, r_dy, r_dz = get_data_dims(self.inputs.registration_image_file)
            r_vx, r_vy, r_vz = get_vox_dims(self.inputs.registration_image_file)
            iflogger.info("Using affine from registration image file {r}".format(r=self.inputs.registration_image_file))
            iflogger.info(reg_affine)
            trk_header["vox_to_ras"] = reg_affine
            trk_header["dim"] = [r_dx, r_dy, r_dz]
            trk_header["voxel_size"] = [r_vx, r_vy, r_vz]

            affine = np.dot(affine, np.diag(1.0 / np.array([vx, vy, vz, 1])))
            transformed_streamlines = transform_to_affine(streamlines, trk_header, affine)

            aff = affine_from_fsl_mat_file(xfm, [vx, vy, vz], [r_vx, r_vy, r_vz])
            iflogger.info(aff)

            axcode = aff2axcodes(reg_affine)
            trk_header["voxel_order"] = axcode[0] + axcode[1] + axcode[2]

            final_streamlines = move_streamlines(transformed_streamlines, aff)
            trk_tracks = ((ii, None, None) for ii in final_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info("Saving transformed Trackvis file as {out}".format(out=out_filename))
            iflogger.info("New TrackVis Header:")
            iflogger.info(trk_header)
        else:
            iflogger.info(
                "Applying transformation from scanner coordinates to {img}".format(img=self.inputs.image_file)
            )
            axcode = aff2axcodes(affine)
            trk_header["voxel_order"] = axcode[0] + axcode[1] + axcode[2]
            trk_header["vox_to_ras"] = affine
            transformed_streamlines = transform_to_affine(streamlines, trk_header, affine)
            trk_tracks = ((ii, None, None) for ii in transformed_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info("Saving Trackvis file as {out}".format(out=out_filename))
            iflogger.info("TrackVis Header:")
            iflogger.info(trk_header)
        return runtime
Example #2
def test_conform():
    anat = nib.load(pjoin(DATA_DIR, 'anatomical.nii'))

    # Test with default arguments.
    c = conform(anat)
    assert c.shape == (256, 256, 256)
    assert c.header.get_zooms() == (1, 1, 1)
    assert c.dataobj.dtype.type == anat.dataobj.dtype.type
    assert aff2axcodes(c.affine) == ('R', 'A', 'S')
    assert isinstance(c, Nifti1Image)

    # Test with non-default arguments.
    c = conform(anat, out_shape=(100, 100, 200), voxel_size=(2, 2, 1.5),
        orientation="LPI", out_class=Nifti2Image)
    assert c.shape == (100, 100, 200)
    assert c.header.get_zooms() == (2, 2, 1.5)
    assert c.dataobj.dtype.type == anat.dataobj.dtype.type
    assert aff2axcodes(c.affine) == ('L', 'P', 'I')
    assert isinstance(c, Nifti2Image)

    # TODO: support nD images in `conform` in the future, but for now, test that we get
    # errors on non-3D images.
    func = nib.load(pjoin(DATA_DIR, 'functional.nii'))
    with pytest.raises(ValueError):
        conform(func)
    with pytest.raises(ValueError):
        conform(anat, out_shape=(100, 100))
    with pytest.raises(ValueError):
        conform(anat, voxel_size=(2, 2))
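
The assertions above lean on nibabel's aff2axcodes, which reads the anatomical axis codes straight off an affine. A minimal stand-alone sketch (not part of the original test) of what it returns:

import numpy as np
from nibabel.orientations import aff2axcodes

print(aff2axcodes(np.eye(4)))                          # ('R', 'A', 'S')
print(aff2axcodes(np.diag([-1.0, -1.0, -1.0, 1.0])))   # ('L', 'P', 'I')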
Example #3
    def _run_interface(self, runtime):
        dx, dy, dz = get_data_dims(self.inputs.image_file)
        vx, vy, vz = get_vox_dims(self.inputs.image_file)
        image_file = nb.load(self.inputs.image_file)
        affine = image_file.get_affine()
        out_filename = op.abspath(self.inputs.out_filename)

        #Reads MRTrix tracks
        header, streamlines = read_mrtrix_tracks(self.inputs.in_file, as_generator=True)
        iflogger.info('MRTrix Header:')
        iflogger.info(header)
        # Writes to Trackvis
        trk_header = nb.trackvis.empty_header()
        trk_header['dim'] = [dx,dy,dz]
        trk_header['voxel_size'] = [vx,vy,vz]
        trk_header['n_count'] = header['count']

        if isdefined(self.inputs.matrix_file) and isdefined(self.inputs.registration_image_file):
            iflogger.info('Applying transformation from matrix file {m}'.format(m=self.inputs.matrix_file))
            xfm = np.genfromtxt(self.inputs.matrix_file)
            iflogger.info(xfm)
            registration_image_file = nb.load(self.inputs.registration_image_file)
            reg_affine = registration_image_file.get_affine()
            r_dx, r_dy, r_dz = get_data_dims(self.inputs.registration_image_file)
            r_vx, r_vy, r_vz = get_vox_dims(self.inputs.registration_image_file)
            iflogger.info('Using affine from registration image file {r}'.format(r=self.inputs.registration_image_file))
            iflogger.info(reg_affine)
            trk_header['vox_to_ras'] = reg_affine
            trk_header['dim'] = [r_dx,r_dy,r_dz]
            trk_header['voxel_size'] = [r_vx,r_vy,r_vz]

            affine = np.dot(affine,np.diag(1./np.array([vx, vy, vz, 1])))
            transformed_streamlines = transform_to_affine(streamlines, trk_header, affine)

            aff = affine_from_fsl_mat_file(xfm, [vx,vy,vz], [r_vx,r_vy,r_vz])
            iflogger.info(aff)

            axcode = aff2axcodes(reg_affine)
            trk_header['voxel_order'] = axcode[0]+axcode[1]+axcode[2]

            final_streamlines = move_streamlines(transformed_streamlines, aff)
            trk_tracks = ((ii,None,None) for ii in final_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info('Saving transformed Trackvis file as {out}'.format(out=out_filename))
            iflogger.info('New TrackVis Header:')
            iflogger.info(trk_header)
        else:
            iflogger.info('Applying transformation from scanner coordinates to {img}'.format(img=self.inputs.image_file))
            axcode = aff2axcodes(affine)
            trk_header['voxel_order'] = axcode[0]+axcode[1]+axcode[2]
            trk_header['vox_to_ras'] = affine
            transformed_streamlines = transform_to_affine(streamlines, trk_header, affine)
            trk_tracks = ((ii,None,None) for ii in transformed_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info('Saving Trackvis file as {out}'.format(out=out_filename))
            iflogger.info('TrackVis Header:')
            iflogger.info(trk_header)
        return runtime
Example #4
def calc_centroids(msk, decimals=1, world=False):
    """Gets the centroids from a nifti mask by calculating the centers of mass of each vertebra
    
    Parameters:
    ----------
    msk: nibabel nifti mask
    decimals: number of decimal digits to round the coordinates to
    world: if True, return the centroids in world (mm) coordinates instead of voxel indices
    
    Returns:
    ----------
    ctd_list: list of centroids 
    
    """
    msk_data = np.asanyarray(msk.dataobj, dtype=msk.dataobj.dtype)
    axc = nio.aff2axcodes(msk.affine)
    ctd_list = [axc]
    verts = np.unique(msk_data)[1:]
    verts = verts[~np.isnan(verts)]  # remove NaN values
    for i in verts:
        msk_temp = np.zeros(msk_data.shape, dtype=bool)
        msk_temp[msk_data == i] = True
        ctr_mass = center_of_mass(msk_temp)
        if world:
            ctr_mass = msk.affine[:3, :3].dot(ctr_mass) + msk.affine[:3, 3]
            ctr_mass = ctr_mass.tolist()
        ctd_list.append([i] + [round(x, decimals) for x in ctr_mass])
    return ctd_list
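
A hypothetical quick check of calc_centroids on a tiny in-memory mask, assuming the function above and the imports it relies on (numpy as np, nibabel as nib, nibabel.orientations as nio, and scipy.ndimage's center_of_mass) are available:

import numpy as np
import nibabel as nib

data = np.zeros((5, 5, 5), dtype=np.uint8)
data[1, 1, 1] = 1  # voxel labelled as vertebra 1
data[3, 3, 3] = 2  # voxel labelled as vertebra 2
msk = nib.Nifti1Image(data, np.eye(4))
print(calc_centroids(msk))
# first entry is the axis codes ('R', 'A', 'S'), then [label, x, y, z] for each vertebra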
Example #5
def main():
    args, parser = parse_args()

    try:
        nii = nib.load(args.anatomy)
    except Exception:
        parser.error("Expecting anatomical image as first agument.")

    for tractogram in args.tractograms:
        tractogram_format = nib.streamlines.detect_format(tractogram)
        if tractogram_format is not nib.streamlines.TckFile:
            print("Skipping non TCK file: '{}'".format(tractogram))
            continue

        filename, _ = os.path.splitext(tractogram)
        output_filename = filename + '.trk'
        if os.path.isfile(output_filename) and not args.force:
            msg = "Skipping existing file: '{}'. Use -f to overwrite."
            print(msg.format(output_filename))
            continue

        # Build the header using info from the anatomical image.
        header = {}
        header[Field.VOXEL_TO_RASMM] = nii.affine.copy()
        header[Field.VOXEL_SIZES] = nii.header.get_zooms()[:3]
        header[Field.DIMENSIONS] = nii.shape[:3]
        header[Field.VOXEL_ORDER] = "".join(aff2axcodes(nii.affine))

        tck = nib.streamlines.load(tractogram)
        nib.streamlines.save(tck.tractogram, output_filename, header=header)
Example #6
def save_trk(fname, streamlines, affine, vox_size=None, shape=None, header=None):
    """ Saves tractogram files (*.trk)

    Parameters
    ----------
    fname : str
        output trk filename
    streamlines : list of 2D arrays, generator or ArraySequence
        Each 2D array represents a sequence of 3D points (points, 3).
    affine : array_like (4, 4)
        The mapping from voxel coordinates to streamline points.
    vox_size : array_like (3,), optional
        The sizes of the voxels in the reference image (default: None)
    shape : array, shape (dim,), optional
        The shape of the reference image (default: None)
    header : dict, optional
        Metadata associated to the tractogram file(*.trk). (default: None)
    """
    if vox_size is not None and shape is not None:
        if not isinstance(header, dict):
            header = {}
        header[Field.VOXEL_TO_RASMM] = affine.copy()
        header[Field.VOXEL_SIZES] = vox_size
        header[Field.DIMENSIONS] = shape
        header[Field.VOXEL_ORDER] = "".join(aff2axcodes(affine))

    tractogram = nib.streamlines.Tractogram(streamlines)
    tractogram.affine_to_rasmm = affine
    trk_file = nib.streamlines.TrkFile(tractogram, header=header)
    nib.streamlines.save(trk_file, fname)
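
A hypothetical call to save_trk, assuming the function above and the names it uses (nibabel as nib, numpy as np, Field, aff2axcodes) are in scope; the streamline and reference geometry are made up:

import numpy as np

streamlines = [np.array([[0., 0., 0.], [1., 1., 1.], [2., 2., 2.]], dtype=np.float32)]
affine = np.diag([2., 2., 2., 1.])  # voxel -> RAS+ mm mapping of the reference image
save_trk('example.trk', streamlines, affine,
         vox_size=(2., 2., 2.), shape=(10, 10, 10))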
Example #7
    def _run_interface(self, runtime):

        from nibabel.streamlines import Field
        from nibabel.orientations import aff2axcodes
        print('-> Load nifti and copy header')
        nii = nib.load(self.inputs.in_image)

        header = {
            Field.VOXEL_TO_RASMM: nii.affine.copy(),
            Field.VOXEL_SIZES: nii.header.get_zooms()[:3],
            Field.DIMENSIONS: nii.shape[:3],
            Field.VOXEL_ORDER: "".join(aff2axcodes(nii.affine))
        }

        if nib.streamlines.detect_format(
                self.inputs.in_tracks) is not nib.streamlines.TckFile:
            print("Skipping non TCK file: '{}'".format(self.inputs.in_tracks))
        else:
            tck = nib.streamlines.load(self.inputs.in_tracks)
            self.out_tracks = self.inputs.out_tracks
            nib.streamlines.save(tck.tractogram,
                                 self.out_tracks,
                                 header=header)

        return runtime
Example #8
def save_trk_heavy_duty(fname,
                        streamlines,
                        affine,
                        vox_size=None,
                        shape=None,
                        header=None):
    """ Saves tractogram files (*.trk)

    Parameters
    ----------
    fname : str
        output trk filename
    streamlines : list of 2D arrays, generator or ArraySequence
        Each 2D array represents a sequence of 3D points (points, 3).
    affine : array_like (4, 4)
        The mapping from voxel coordinates to streamline points.
    vox_size : array_like (3,), optional
        The sizes of the voxels in the reference image (default: None)
    shape : array, shape (dim,), optional
        The shape of the reference image (default: None)
    header : dict, optional
        Metadata associated to the tractogram file(*.trk). (default: None)
    """
    if vox_size is not None and shape is not None:
        if not isinstance(header, dict):
            header = {}
        header[Field.VOXEL_TO_RASMM] = affine.copy()
        header[Field.VOXEL_SIZES] = vox_size
        header[Field.DIMENSIONS] = shape
        header[Field.VOXEL_ORDER] = "".join(aff2axcodes(affine))

    tractogram = nib.streamlines.LazyTractogram(streamlines)
    tractogram.affine_to_rasmm = affine
    trk_file = nib.streamlines.TrkFile(tractogram, header=header)
    nib.streamlines.save(trk_file, fname)
Example #9
def get_plane(img_path):
    """Gets the plane of the highest resolution from a nifti file
    
    Parameters:
    ----------
    img_path: the full path to the nifti file
    
    Returns:
    ----------
    plane: a string corresponding to the plane of highest resolution
    
    """
    plane_dict = {
        'S': 'ax', 'I': 'ax', 'L': 'sag', 'R': 'sag', 'A': 'cor', 'P': 'cor'}
    img = nib.load(str(img_path))
    axc = np.array(nio.aff2axcodes(img.affine))
    zms = np.around(img.header.get_zooms(), 1)
    ix_max = np.array(zms == np.amax(zms))
    num_max = np.count_nonzero(ix_max)
    if num_max == 2:
        plane = plane_dict[axc[~ix_max][0]]
    elif num_max == 1:
        plane = plane_dict[axc[ix_max][0]]
    else:
        plane = 'iso'
    return plane
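
A quick hypothetical check of get_plane, assuming the function above and the imports it relies on (nibabel as nib, numpy as np, nibabel.orientations as nio) are available; the image is a synthetic axial stack with 3 mm slice spacing:

import os
import tempfile

import numpy as np
import nibabel as nib

data = np.zeros((10, 10, 10), dtype=np.uint8)
affine = np.diag([0.5, 0.5, 3.0, 1.0])  # thick spacing along the S/I axis
img = nib.Nifti1Image(data, affine)
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, 'vol.nii.gz')
    nib.save(img, path)
    print(get_plane(path))  # -> 'ax' (the largest zoom is along S/I)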
Example #10
def main():
    parser = build_argparser()
    args = parser.parse_args()

    try:
        nii = nib.load(args.anatomy)
    except:
        parser.error("Expecting anatomy image as first agument.")

    for tractogram in args.tractograms:
        if nib.streamlines.detect_format(
                tractogram) is not nib.streamlines.TckFile:
            print("Skipping non TCK file: '{}'".format(tractogram))
            continue

        output_filename = tractogram[:-4] + '.trk'
        if os.path.isfile(output_filename) and not args.force:
            print("Skipping existing file: '{}'. Use -f to overwrite.".format(
                output_filename))
            continue

        header = {}
        header[Field.VOXEL_TO_RASMM] = nii.affine.copy()
        header[Field.VOXEL_SIZES] = nii.header.get_zooms()[:3]
        header[Field.DIMENSIONS] = nii.shape[:3]
        header[Field.VOXEL_ORDER] = "".join(aff2axcodes(nii.affine))

        tck = nib.streamlines.load(tractogram)
        nib.streamlines.save(tck.tractogram, output_filename, header=header)
Example #12
def save_tractogram(fname,
                    streamlines,
                    affine,
                    vox_size=None,
                    shape=None,
                    header=None,
                    reduce_memory_usage=False,
                    tractogram_file=None):
    """ Saves tractogram files (*.trk or *.tck or *.dpy)

    Parameters
    ----------
    fname : str
        output trk filename
    streamlines : list of 2D arrays, generator or ArraySequence
        Each 2D array represents a sequence of 3D points (points, 3).
    affine : array_like (4, 4)
        The mapping from voxel coordinates to streamline points.
    vox_size : array_like (3,), optional
        The sizes of the voxels in the reference image (default: None)
    shape : array, shape (dim,), optional
        The shape of the reference image (default: None)
    header : dict, optional
        Metadata associated to the tractogram file(*.trk). (default: None)
    reduce_memory_usage : {False, True}, optional
        If True, save streamlines in a lazy manner i.e. they will not be kept
        in memory. Otherwise, keep all streamlines in memory until saving.
    tractogram_file : class TractogramFile, optional
        Define tractogram class type (TrkFile vs TckFile)
        Default is None which means auto detect format
    """
    if 'dpy' in os.path.splitext(fname)[1].lower():
        dpw = Dpy(fname, 'w')
        dpw.write_tracks(Streamlines(streamlines))
        dpw.close()
        return

    tractogram_file = tractogram_file or detect_format(fname)
    if tractogram_file is None:
        raise ValueError("Unknown format for 'fname': {}".format(fname))

    if vox_size is not None and shape is not None:
        if not isinstance(header, dict):
            header = {}
        header[Field.VOXEL_TO_RASMM] = affine.copy()
        header[Field.VOXEL_SIZES] = vox_size
        header[Field.DIMENSIONS] = shape
        header[Field.VOXEL_ORDER] = "".join(aff2axcodes(affine))

    if reduce_memory_usage and not callable(streamlines):
        sg = lambda: (s for s in streamlines)
    else:
        sg = streamlines

    tractogram_loader = LazyTractogram if reduce_memory_usage else Tractogram
    tractogram = tractogram_loader(sg)
    tractogram.affine_to_rasmm = affine
    track_file = tractogram_file(tractogram, header=header)
    nib.streamlines.save(track_file, fname)
Example #13
def get_affine_trackvis_to_rasmm(header):
    """ Get affine mapping trackvis voxelmm space to RAS+ mm space

    The streamlines in a trackvis file are in 'voxelmm' space, where the
    coordinates refer to the corner of the voxel.

    Compute the affine matrix that will bring them back to RAS+ mm space, where
    the coordinates refer to the center of the voxel.

    Parameters
    ----------
    header : dict or ndarray
        Dict or numpy structured array containing trackvis header.

    Returns
    -------
    aff_tv2ras : shape (4, 4) array
        Affine array mapping coordinates in 'voxelmm' space to RAS+ mm space.
    """
    # TRK's streamlines are in 'voxelmm' space, we will compute the
    # affine matrix that will bring them back to RAS+ and mm space.
    affine = np.eye(4)

    # The affine matrix found in the TRK header requires the points to
    # be in the voxel space.
    # voxelmm -> voxel
    scale = np.eye(4)
    scale[range(3), range(3)] /= header[Field.VOXEL_SIZES]
    affine = np.dot(scale, affine)

    # TrackVis considers coordinate (0,0,0) to be the corner of the
    # voxel whereas streamlines returned assumes (0,0,0) to be the
    # center of the voxel. Thus, streamlines are shifted by half a voxel.
    offset = np.eye(4)
    offset[:-1, -1] -= 0.5
    affine = np.dot(offset, affine)

    # If the voxel order implied by the affine does not match the voxel
    # order in the TRK header, change the orientation.
    # voxel (header) -> voxel (affine)
    vox_order = header[Field.VOXEL_ORDER]
    # Input header can be dict or structured array
    if hasattr(vox_order, 'item'):  # structured array
        vox_order = header[Field.VOXEL_ORDER].item()
    affine_ornt = "".join(aff2axcodes(header[Field.VOXEL_TO_RASMM]))
    header_ornt = axcodes2ornt(vox_order.decode('latin1').upper())
    affine_ornt = axcodes2ornt(affine_ornt)
    ornt = nib.orientations.ornt_transform(header_ornt, affine_ornt)
    M = nib.orientations.inv_ornt_aff(ornt, header[Field.DIMENSIONS])
    affine = np.dot(M, affine)

    # Apply the affine found in the TRK header.
    # voxel -> rasmm
    voxel_to_rasmm = header[Field.VOXEL_TO_RASMM]
    affine_voxmm_to_rasmm = np.dot(voxel_to_rasmm, affine)
    return affine_voxmm_to_rasmm.astype(np.float32)
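
A small sketch (an assumption, not from the original project) of feeding a hand-built TRK-style header to get_affine_trackvis_to_rasmm, assuming the function and the names it uses (numpy as np, nib, Field, aff2axcodes, axcodes2ornt) are in scope:

import numpy as np

header = {
    Field.VOXEL_TO_RASMM: np.diag([2.0, 2.0, 2.0, 1.0]),
    Field.VOXEL_SIZES: np.array([2.0, 2.0, 2.0]),
    Field.DIMENSIONS: np.array([10, 10, 10]),
    Field.VOXEL_ORDER: b"RAS",
}
aff = get_affine_trackvis_to_rasmm(header)
# voxelmm -> voxel (divide by voxel size), shift by half a voxel, then voxel -> RAS+ mm,
# which here collapses to an identity scaling with a -1 mm offset on each axis.
print(aff)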
Example #15
 def test_no_op(self):
     vox_order = ''.join(aff2axcodes(self.affine))
     (vox_array, affine, aff_trans,
      ornt_trans) = dcmstack.reorder_voxels(self.vox_array, self.affine,
                                            vox_order)
     ok_((vox_array == self.vox_array).all())
     ok_((affine == self.affine).all())
     ok_((aff_trans == np.eye(4)).all())
     ok_(np.allclose(ornt_trans, [[0, 1], [1, 1], [2, 1]]))
     eq_(np.may_share_memory(affine, self.affine), False)
Example #16
def convert_tck_to_trk(tracts_tck: Path, dwi_file: Path, tracts_trk: Path):
    nii = nib.load(dwi_file)
    header = {}
    header[Field.VOXEL_TO_RASMM] = nii.affine.copy()
    header[Field.VOXEL_SIZES] = nii.header.get_zooms()[:3]
    header[Field.DIMENSIONS] = nii.shape[:3]
    header[Field.VOXEL_ORDER] = "".join(aff2axcodes(nii.affine))
    tck = nib.streamlines.load(tracts_tck)
    nib.streamlines.save(tck.tractogram, str(tracts_trk), header=header)
    return tracts_trk
Example #17
def save_tractogram(fname, streamlines, affine, vox_size=None, shape=None,
                    header=None, reduce_memory_usage=False,
                    tractogram_file=None):
    """ Saves tractogram files (*.trk or *.tck or *.dpy)

    Parameters
    ----------
    fname : str
        output trk filename
    streamlines : list of 2D arrays, generator or ArraySequence
        Each 2D array represents a sequence of 3D points (points, 3).
    affine : array_like (4, 4)
        The mapping from voxel coordinates to streamline points.
    vox_size : array_like (3,), optional
        The sizes of the voxels in the reference image (default: None)
    shape : array, shape (dim,), optional
        The shape of the reference image (default: None)
    header : dict, optional
        Metadata associated to the tractogram file(*.trk). (default: None)
    reduce_memory_usage : {False, True}, optional
        If True, save streamlines in a lazy manner i.e. they will not be kept
        in memory. Otherwise, keep all streamlines in memory until saving.
    tractogram_file : class TractogramFile, optional
        Define tractogram class type (TrkFile vs TckFile)
        Default is None which means auto detect format
    """
    if 'dpy' in os.path.splitext(fname)[1].lower():
        dpw = Dpy(fname, 'w')
        dpw.write_tracks(Streamlines(streamlines))
        dpw.close()
        return

    tractogram_file = tractogram_file or detect_format(fname)
    if tractogram_file is None:
        raise ValueError("Unknown format for 'fname': {}".format(fname))

    if vox_size is not None and shape is not None:
        if not isinstance(header, dict):
            header = {}
        header[Field.VOXEL_TO_RASMM] = affine.copy()
        header[Field.VOXEL_SIZES] = vox_size
        header[Field.DIMENSIONS] = shape
        header[Field.VOXEL_ORDER] = "".join(aff2axcodes(affine))

    if reduce_memory_usage and not callable(streamlines):
        sg = lambda: (s for s in streamlines)
    else:
        sg = streamlines

    tractogram_loader = LazyTractogram if reduce_memory_usage else Tractogram
    tractogram = tractogram_loader(sg)
    tractogram.affine_to_rasmm = affine
    track_file = tractogram_file(tractogram, header=header)
    nib.streamlines.save(track_file, fname)
Example #18
def tck2trk(tck_fn, nii_fn, out_fn=None):
    nii = nib.load(nii_fn)
    header = {}
    header[Field.VOXEL_TO_RASMM] = nii.affine.copy()
    header[Field.VOXEL_SIZES] = nii.header.get_zooms()[:3]
    header[Field.DIMENSIONS] = nii.shape[:3]
    header[Field.VOXEL_ORDER] = "".join(aff2axcodes(nii.affine))

    tck = nib.streamlines.load(tck_fn)
    if out_fn is None:
        out_fn = tck_fn[:-4] + '.trk'
    nib.streamlines.save(tck.tractogram, out_fn, header=header)
Example #19
 def test_no_op(self):
     vox_order = ''.join(aff2axcodes(self.affine))
     (vox_array, 
      affine, 
      aff_trans, 
      ornt_trans) = dcmstack.reorder_voxels(self.vox_array, 
                                            self.affine, 
                                            vox_order)
     ok_((vox_array == self.vox_array).all())
     ok_((affine == self.affine).all())
     ok_((aff_trans == np.eye(4)).all())
     ok_(np.allclose(ornt_trans, [[0, 1], [1, 1], [2, 1]]))
     eq_(np.may_share_memory(affine, self.affine), False)
Example #20
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()
    for param in ['theta', 'curvature']:
        # Default was removed for consistency.
        if param not in args:
            setattr(args, param, None)

    assert_inputs_exist(parser, [args.sh_file, args.seed_file, args.mask_file])
    assert_outputs_exists(parser, args, [args.output_file])

    np.random.seed(args.seed)

    mask_img = nib.load(args.mask_file)
    mask_data = mask_img.get_data()

    seeds = random_seeds_from_mask(
        nib.load(args.seed_file).get_data(),
        seeds_count=args.nts if 'nts' in args else args.npv,
        seed_count_per_voxel='nts' not in args)

    # Tracking is performed in voxel space
    streamlines = LocalTracking(_get_direction_getter(args, mask_data),
                                BinaryTissueClassifier(mask_data),
                                seeds,
                                np.eye(4),
                                step_size=args.step_size,
                                max_cross=1,
                                maxlen=int(args.max_len / args.step_size) + 1,
                                fixedstep=True,
                                return_all=True)

    filtered_streamlines = (s for s in streamlines
                            if args.min_len <= length(s) <= args.max_len)
    if args.compress_streamlines:
        filtered_streamlines = (compress_streamlines(s, args.tolerance_error)
                                for s in filtered_streamlines)

    tractogram = LazyTractogram(lambda: filtered_streamlines,
                                affine_to_rasmm=mask_img.affine)

    # Header with the affine/shape from mask image
    header = {
        Field.VOXEL_TO_RASMM: mask_img.affine.copy(),
        Field.VOXEL_SIZES: mask_img.header.get_zooms(),
        Field.DIMENSIONS: mask_img.shape,
        Field.VOXEL_ORDER: ''.join(aff2axcodes(mask_img.affine))
    }

    # Use generator to save the streamlines on-the-fly
    nib.streamlines.save(tractogram, args.output_file, header=header)
Example #21
def save_slices(subject, fname, x, y, z, modality='mri'):
    """ Function to display row of image slices """
    header = nib.load(fname)
    affine = np.array(header.affine, float)
    data = header.get_data()
    images_fol = op.join(MMVT_DIR, subject, 'figures', 'slices')
    utils.make_dir(images_fol)

    clim = np.percentile(data, (1., 99.))
    codes = axcodes2ornt(aff2axcodes(affine))
    order = np.argsort([c[0] for c in codes])
    flips = np.array([c[1] < 0 for c in codes])[order]
    sizes = [data.shape[order] for order in order]
    scalers = voxel_sizes(affine)
    coordinates = np.array([x, y, z])[order].astype(int)

    r = [
        scalers[order[2]] / scalers[order[1]],
        scalers[order[2]] / scalers[order[0]],
        scalers[order[1]] / scalers[order[0]]
    ]
    for ii, xax, yax, ratio, prespective in zip(
        [0, 1, 2], [1, 0, 0], [2, 2, 1], r, ['Sagital', 'Coronal', 'Axial']):
        fig = plt.figure()
        fig.set_size_inches(1. * sizes[xax] / sizes[yax], 1, forward=False)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)

        d = get_image_data(data, order, flips, ii, coordinates)
        ax.imshow(d,
                  vmin=clim[0],
                  vmax=clim[1],
                  aspect=1,
                  cmap='gray',
                  interpolation='nearest',
                  origin='lower')
        lims = [0, sizes[xax], 0, sizes[yax]]
        ax.axis(lims)
        ax.set_aspect(ratio)
        ax.patch.set_visible(False)
        ax.set_frame_on(False)
        ax.axes.get_yaxis().set_visible(False)
        ax.axes.get_xaxis().set_visible(False)

        x, y, z = coordinates
        image_fname = op.join(
            images_fol, '{}_{}_{}_{}_{}.png'.format(modality, prespective, x,
                                                    y, z))
        print('Saving {}'.format(image_fname))
        plt.savefig(image_fname, dpi=sizes[xax])
Example #22
    def to_sft(self, resize=False):
        """ Convert a TrxFile to a valid StatefulTractogram (in RAM) """
        affine = np.array(self.header['VOXEL_TO_RASMM'], dtype=np.float32)
        dimensions = np.array(self.header['DIMENSIONS'], dtype=np.uint16)
        vox_sizes = np.array(voxel_sizes(affine), dtype=np.float32)
        vox_order = ''.join(aff2axcodes(affine))
        space_attributes = (affine, dimensions, vox_sizes, vox_order)

        if resize:
            self.resize()
        sft = StatefulTractogram(self.streamlines, space_attributes, Space.RASMM,
                                 data_per_point=self.data_per_vertex,
                                 data_per_streamline=self.data_per_streamline)

        return sft
Example #23
    def to_sft(self):
        """ Convert a TrxFile to a valid StatefulTractogram """
        affine = self.voxel_to_rasmm
        dimensions = self.dimensions
        vox_sizes = np.array(voxel_sizes(affine), dtype=np.float32)
        vox_order = ''.join(aff2axcodes(affine))
        space_attributes = (affine, dimensions, vox_sizes, vox_order)

        sft = StatefulTractogram(
            self.streamlines,
            space_attributes,
            Space.RASMM,
            data_per_point=self.consolidate_data_per_point(),
            data_per_streamline=self.consolidate_data_per_streamline())

        return sft
Example #24
def convert_tck_to_trk(tracts_tck: str, dwi_file: str):
    from nibabel.streamlines import Field
    from nibabel.orientations import aff2axcodes

    dwi_file = convert_to_mif(dwi_file, dwi_file.replace("mif", "nii"))
    out_file = tracts_tck.replace("tck", "trk")
    nii = nib.load(dwi_file)
    header = {}
    header[Field.VOXEL_TO_RASMM] = nii.affine.copy()
    header[Field.VOXEL_SIZES] = nii.header.get_zooms()[:3]
    header[Field.DIMENSIONS] = nii.shape[:3]
    header[Field.VOXEL_ORDER] = "".join(aff2axcodes(nii.affine))
    tck = nib.streamlines.load(tracts_tck)
    nib.streamlines.save(tck.tractogram, out_file, header=header)
    os.remove(dwi_file)
    return out_file
Example #25
    def _run_interface(self, runtime):
        from nibabel.streamlines import Field
        from nibabel.orientations import aff2axcodes

        tck = nib.streamlines.load(self.inputs.input_tck)

        nii = nib.load(self.inputs.input_ref)

        header = {}
        header[Field.VOXEL_TO_RASMM] = nii.affine.copy()
        header[Field.VOXEL_SIZES] = nii.header.get_zooms()[:3]
        header[Field.DIMENSIONS] = nii.shape[:3]
        header[Field.VOXEL_ORDER] = "".join(aff2axcodes(nii.affine))

        self._tractogram = tck.tractogram
        self._header = header

        return runtime
Example #26
def save_streamlines(base_dir, streamlines, append, spacing=(1.0, 1.0, 1.0)):
    # Test fibers are already in voxmm but we still need to align them to
    # corner
    affine = np.eye(4)
    affine[:3, 3] = 0.5

    header = {
        Field.VOXEL_TO_RASMM: affine.copy(),
        Field.VOXEL_SIZES: spacing,
        Field.DIMENSIONS: (5, 5, 5),
        Field.VOXEL_ORDER: ''.join(aff2axcodes(affine))
    }

    save_to = os.path.join(
        base_dir, 'fake_streamlines_{}.trk'.format(append))
    tractogram = nib.streamlines.Tractogram(
        streamlines, affine_to_rasmm=np.eye(4))
    nib.streamlines.save(tractogram, save_to, header=header)
    return save_to
Example #27
    def __str__(self):
        """ Generate the string for printing """
        affine = np.array(self.header['VOXEL_TO_RASMM'], dtype=np.float32)
        dimensions = np.array(self.header['DIMENSIONS'], dtype=np.uint16)
        vox_sizes = np.array(voxel_sizes(affine), dtype=np.float32)
        vox_order = ''.join(aff2axcodes(affine))

        text = 'VOXEL_TO_RASMM: \n{}'.format(
            np.array2string(affine,
                            formatter={'float_kind': lambda x: "%.6f" % x}))
        text += '\nDIMENSIONS: {}'.format(
            np.array2string(dimensions))
        text += '\nVOX_SIZES: {}'.format(
            np.array2string(vox_sizes,
                            formatter={'float_kind': lambda x: "%.2f" % x}))
        text += '\nVOX_ORDER: {}'.format(vox_order)

        strs_size = self.header['NB_STREAMLINES']
        pts_size = self.header['NB_VERTICES']
        strs_len, pts_len = self._get_real_len()

        if strs_size != strs_len or pts_size != pts_len:
            text += '\nstreamline_size: {}'.format(strs_size)
            text += '\nvertex_size: {}'.format(pts_size)

        text += '\nstreamline_count: {}'.format(strs_len)
        text += '\nvertex_count: {}'.format(pts_len)
        text += '\ndata_per_vertex keys: {}'.format(
            list(self.data_per_vertex.keys()))
        text += '\ndata_per_streamline keys: {}'.format(
            list(self.data_per_streamline.keys()))

        text += '\ngroups keys: {}'.format(list(self.groups.keys()))
        for group_key in self.groups.keys():
            if group_key in self.data_per_group:
                text += '\ndata_per_groups ({}) keys: {}'.format(
                    group_key, list(self.data_per_group[group_key].keys()))

        text += '\ncopy_safe: {}'.format(self._copy_safe)

        return text
Example #28
    def __str__(self):
        """ Generate the string for printing """
        affine = np.array(self.voxel_to_rasmm, dtype=np.float32)
        dimensions = np.array(self.dimensions, dtype=np.uint16)
        vox_sizes = np.array(voxel_sizes(affine), dtype=np.float32)
        vox_order = ''.join(aff2axcodes(affine))

        text = 'VOXEL_TO_RASMM: \n{}'.format(
            np.array2string(affine,
                            formatter={'float_kind': lambda x: "%.6f" % x}))
        text += '\nDIMENSIONS: {}'.format(np.array2string(dimensions))
        text += '\nVOX_SIZES: {}'.format(
            np.array2string(vox_sizes,
                            formatter={'float_kind': lambda x: "%.2f" % x}))
        text += '\nVOX_ORDER: {}'.format(vox_order)

        text += '\nNB_STREAMLINES: {}'.format(self.nb_streamlines)
        text += '\nNB_POINTS: {}'.format(self.nb_points)

        text += '\n' + TreeViewer(self._zcontainer).__unicode__()

        return text
Example #29
def reorient_centroids_to(ctd_list, img, decimals=1, verb=False):
    """reorient centroids to image orientation
    
    Parameters:
    ----------
    ctd_list: list of centroids
    img: nibabel image 
    decimals: rounding decimal digits
    
    Returns:
    ----------
    out_list: reoriented list of centroids 
    
    """
    ctd_arr = np.transpose(np.asarray(ctd_list[1:]))
    if len(ctd_arr) == 0:
        print("[#] No centroids present") 
        return ctd_list
    v_list = ctd_arr[0].astype(int).tolist()  # vertebral labels
    ctd_arr = ctd_arr[1:]
    ornt_fr = nio.axcodes2ornt(ctd_list[0])  # original centroid orientation
    axcodes_to = nio.aff2axcodes(img.affine)
    ornt_to = nio.axcodes2ornt(axcodes_to)
    trans = nio.ornt_transform(ornt_fr, ornt_to).astype(int)
    perm = trans[:, 0].tolist()
    shp = np.asarray(img.dataobj.shape)
    ctd_arr[perm] = ctd_arr.copy()
    for ax in trans:
        if ax[1] == -1:
            size = shp[ax[0]]
            ctd_arr[ax[0]] = np.around(size - ctd_arr[ax[0]], decimals)
    out_list = [axcodes_to]
    ctd_list = np.transpose(ctd_arr).tolist()
    for v, ctd in zip(v_list, ctd_list):
        out_list.append([v] + ctd)
    if verb:
        print("[*] Centroids reoriented from", nio.ornt2axcodes(ornt_fr), "to", axcodes_to)
    return out_list
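
The reorientation above is built from three nibabel.orientations helpers; a minimal stand-alone sketch of how they combine:

import nibabel.orientations as nio

ornt_fr = nio.axcodes2ornt(('R', 'A', 'S'))   # source orientation
ornt_to = nio.axcodes2ornt(('L', 'P', 'S'))   # target orientation
trans = nio.ornt_transform(ornt_fr, ornt_to).astype(int)
print(trans)
# [[ 0 -1]
#  [ 1 -1]
#  [ 2  1]]  -> same axis order, with the first two axes flipped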
Example #30
def save_trk_n(fname,
               streamlines,
               affine,
               vox_size=None,
               shape=None,
               header=None):
    """ function Helper for saving trk files.

    Parameters
    ----------
    fname : str
        output trk filename
    streamlines : list of 2D arrays
        Each 2D array represents a sequence of 3D points (points, 3).
    affine : array_like (4, 4)
        The mapping from voxel coordinates to streamline points.
    vox_size : array_like (3,)
        The sizes of the voxels in the reference image.
    shape : array, shape (dim,)
        The shape of the reference image.
    header : dict
        header from a trk file

    """
    if vox_size and shape:
        if not isinstance(header, dict):
            header = {}
        header[Field.VOXEL_TO_RASMM] = affine.copy()
        header[Field.VOXEL_SIZES] = vox_size
        header[Field.DIMENSIONS] = shape
        header[Field.VOXEL_ORDER] = "".join(aff2axcodes(affine))

    tractogram = nib.streamlines.Tractogram(streamlines)
    tractogram.affine_to_rasmm = affine
    trk_file = nib.streamlines.TrkFile(tractogram, header=header)
    nib.streamlines.save(trk_file, fname)
Example #31
    def _read_header(fileobj):
        """ Reads a TRK header from a file.

        Parameters
        ----------
        fileobj : string or file-like object
            If string, a filename; otherwise an open file-like object
            pointing to TRK file (and ready to read from the beginning
            of the TRK header). Note that calling this function
            does not change the file position.

        Returns
        -------
        header : dict
            Metadata associated with this tractogram file.
        """
        # Record start position if this is a file-like object
        start_position = fileobj.tell() if hasattr(fileobj, 'tell') else None

        with Opener(fileobj) as f:
            # Reading directly from a file into a (mutable) bytearray enables a zero-copy
            # cast to a mutable numpy object with frombuffer
            header_buf = bytearray(header_2_dtype.itemsize)
            f.readinto(header_buf)
            header_rec = np.frombuffer(buffer=header_buf, dtype=header_2_dtype)
            # Check endianness
            endianness = native_code
            if header_rec['hdr_size'] != TrkFile.HEADER_SIZE:
                endianness = swapped_code

                # Swap byte order
                header_rec = header_rec.newbyteorder()
                if header_rec['hdr_size'] != TrkFile.HEADER_SIZE:
                    msg = "Invalid hdr_size: {0} instead of {1}"
                    raise HeaderError(msg.format(header_rec['hdr_size'],
                                                 TrkFile.HEADER_SIZE))

            if header_rec['version'] == 1:
                # There is no 4x4 matrix for voxel to RAS transformation.
                header_rec[Field.VOXEL_TO_RASMM] = np.zeros((4, 4))
            elif header_rec['version'] == 2:
                pass  # Nothing more to do.
            else:
                raise HeaderError('NiBabel only supports versions 1 and 2 of '
                                  'the Trackvis file format')

            # Convert the first record of `header_rec` into a dictionary
            header = dict(zip(header_rec.dtype.names, header_rec[0]))
            header[Field.ENDIANNESS] = endianness

            # If vox_to_ras[3][3] is 0, it means the matrix is not recorded.
            if header[Field.VOXEL_TO_RASMM][3][3] == 0:
                header[Field.VOXEL_TO_RASMM] = np.eye(4, dtype=np.float32)
                warnings.warn(("Field 'vox_to_ras' in the TRK's header was"
                               " not recorded. Will continue assuming it's"
                               " the identity."), HeaderWarning)

            # Check that the 'vox_to_ras' affine is valid, i.e. should be
            # able to determine the axis directions.
            axcodes = aff2axcodes(header[Field.VOXEL_TO_RASMM])
            if None in axcodes:
                msg = ("The 'vox_to_ras' affine is invalid! Could not"
                       " determine the axis directions from it.\n{0}"
                       ).format(header[Field.VOXEL_TO_RASMM])
                raise HeaderError(msg)

            # By default, the voxel order is LPS.
            # http://trackvis.org/blog/forum/diffusion-toolkit-usage/interpretation-of-track-point-coordinates
            if header[Field.VOXEL_ORDER] == b"":
                msg = ("Voxel order is not specified, will assume 'LPS' since"
                       " it is Trackvis software's default.")
                warnings.warn(msg, HeaderWarning)
                header[Field.VOXEL_ORDER] = b"LPS"

            # Keep the file position where the data begin.
            header['_offset_data'] = f.tell()

        # Set the file position where it was, if it was previously open.
        if start_position is not None:
            fileobj.seek(start_position, os.SEEK_SET)

        return header
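
The header validation above relies on aff2axcodes returning None for axes it cannot resolve; a quick illustration (an assumption for demonstration, not part of nibabel's code) of what a degenerate vox_to_ras produces:

import numpy as np
from nibabel.orientations import aff2axcodes

bad = np.zeros((4, 4))   # e.g. a TRK header whose rotation block carries no information
bad[3, 3] = 1.0
print(aff2axcodes(bad))  # -> (None, None, None), so _read_header raises HeaderError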
Example #32
def aff_to_hdr(affine, trk_hdr, pos_vox=None, set_order=None):
    ''' Set affine `affine` into trackvis header `trk_hdr`

    Affine is the mapping from voxel space to the NIfTI RAS+ output coordinate
    system convention; x: Left -> Right, y: Posterior -> Anterior, z:
    Inferior -> Superior.  Sets the affine if possible, as well as the voxel
    sizes and voxel axis ordering.

    Parameters
    ----------
    affine : (4,4) array-like
       Affine voxel to mm transformation
    trk_hdr : mapping
       Mapping implementing __setitem__
    pos_vox : None or bool
        If None, currently defaults to False - this will change in future
        versions of nibabel.  If False, allow negative voxel sizes in header to
        record axis flips.  Negative voxels cause problems for trackvis (the
        application).  If True, enforce positive voxel sizes.
    set_order : None or bool
        If None, currently defaults to False - this will change in future
        versions of nibabel.  If False, do not set ``voxel_order`` field in
        `trk_hdr`.  If True, calculate ``voxel_order`` from `affine` and set
        into `trk_hdr`.

    Returns
    -------
    None

    Notes
    -----
    version 2 of the trackvis header has a dedicated field for the nifti RAS
    affine. In theory trackvis 1 has enough information to store an affine, with
    the fields 'origin', 'voxel_size' and 'image_orientation_patient'.
    Unfortunately, to be able to store any affine, we'd need to be able to set
    negative voxel sizes, to encode axis flips. This is because
    'image_orientation_patient' is only two columns of the 3x3 rotation matrix,
    and we need to know the number of flips to reconstruct the third column
    reliably.  It turns out that negative flips upset trackvis (the
    application).  The application also ignores the origin field, and may not
    use the 'image_orientation_patient' field.
    '''
    if pos_vox is None:
        warnings.warn(
            'Default for ``pos_vox`` will change to True in '
            'future versions of nibabel',
            FutureWarning,
            stacklevel=2)
        pos_vox = False
    if set_order is None:
        warnings.warn(
            'Default for ``set_order`` will change to True in '
            'future versions of nibabel',
            FutureWarning,
            stacklevel=2)
        set_order = False
    try:
        version = trk_hdr['version']
    except (KeyError, ValueError):  # dict or structured array
        version = 2
    if version == 2:
        trk_hdr['vox_to_ras'] = affine
    if set_order:
        trk_hdr['voxel_order'] = ''.join(aff2axcodes(affine))
    # Now on dodgy ground with DICOM fields in header
    # RAS to DPCS output
    affine = np.dot(DPCS_TO_TAL, affine)
    trans = affine[:3, 3]
    # Get zooms
    RZS = affine[:3, :3]
    zooms = np.sqrt(np.sum(RZS * RZS, axis=0))
    RS = RZS / zooms
    # If you said we could, adjust zooms to make RS correspond (below) to a true
    # rotation matrix.  We need to set the sign of one of the zooms to deal with
    # this.  Trackvis (the application) doesn't like negative zooms at all, so
    # you might want to disallow this with the pos_vox option.
    if not pos_vox and npl.det(RS) < 0:
        zooms[0] *= -1
        RS[:, 0] *= -1
    # retrieve rotation matrix from RS with polar decomposition.
    # Discard shears because we cannot store them.
    P, S, Qs = npl.svd(RS)
    R = np.dot(P, Qs)
    # it's an orthogonal matrix
    assert np.allclose(np.dot(R, R.T), np.eye(3))
    # set into header
    trk_hdr['origin'] = trans
    trk_hdr['voxel_size'] = zooms
    trk_hdr['image_orientation_patient'] = R[:, 0:2].T.ravel()
Example #33
    def __init__(self, volume, affine=None, title=None, cmap='gray', clim=None, alpha=1.):
        """
        Parameters
        ----------
        volume : array-like
            The data that will be displayed by the slicer. Should have 3
            dimensions.
        affine : array-like or None, optional
            Affine transform for the data. This is used to determine
            how the data should be sliced for plotting into the sagittal,
            coronal, and axial view axes. If None, identity is assumed.
            The aspect ratio of the data is inferred from the affine
            transform.
        title : str or None, optional
            The title to display. Can be None (default) to display no
            title.
        cmap: matplotlib colormap, optional
            Colormap to use for plotting. Default: 'gray'
        clim: [min, max] or None
            Limits to use for plotting. Default: 1 and 99th percentiles
        alpha: float
            Transparency value
        """
        # Use these late imports of matplotlib so that we have some hope that
        # the test functions are the first to set the matplotlib backend. The
        # tests set the backend to something that doesn't require a display.
        self._title = title
        self._closed = False
        self._cross = True

        volume = np.asanyarray(volume)
        if volume.ndim < 3:
            raise ValueError('volume must have at least 3 dimensions')
        if np.iscomplexobj(volume):
            raise TypeError("Complex data not supported")
        affine = np.array(affine, float) if affine is not None else np.eye(4)
        if affine.shape != (4, 4):
            raise ValueError('affine must be a 4x4 matrix')
        # determine our orientation
        self._affine = affine
        codes = axcodes2ornt(aff2axcodes(self._affine))
        self._order = np.argsort([c[0] for c in codes])
        self._flips = np.array([c[1] < 0 for c in codes])[self._order]
        self._flips = list(self._flips) + [False]  # add volume dim
        self._scalers = voxel_sizes(self._affine)
        self._inv_affine = np.linalg.inv(affine)
        # current volume info
        self._volume_dims = volume.shape[3:]
        if len(self._volume_dims) > 0:
            raise NotImplementedError('Cannot handle 4-D Datasets')
        self._volumes = []

        # ^ +---------+   ^ +---------+
        # | |         |   | |         |
        #   |   Sag   |     |   Cor   |
        # S |    0    |   S |    1    |
        #   |         |     |         |
        #   |         |     |         |
        #   +---------+     +---------+
        #        A  -->
        # ^ +---------+
        # | |         |
        #   |  Axial  |
        # A |    2    |
        #   |         |
        #   |         |
        #   +---------+
        #   <--  R
        fig, axes = plt.subplots(2, 2)
        fig.set_size_inches((8, 8), forward=True)
        self._axes = [axes[0, 0], axes[0, 1], axes[1, 0]]
        plt.tight_layout(pad=0.1)
        fig.delaxes(axes[1, 1])
        if self._title is not None:
            fig.canvas.set_window_title(str(title))

        # Start midway through each axis, idx is current slice number
        self._ims, self._data_idx = list(), list()

        # set up axis crosshairs
        self._crosshairs = [None] * 3
        r = [self._scalers[self._order[2]] / self._scalers[self._order[1]],
             self._scalers[self._order[2]] / self._scalers[self._order[0]],
             self._scalers[self._order[1]] / self._scalers[self._order[0]]]
        self._sizes = [volume.shape[order] for order in self._order]
        for ii, xax, yax, ratio, label in zip([0, 1, 2], [1, 0, 0], [2, 2, 1],
                                              r, ('SAIP', 'SRIL', 'ARPL')):
            ax = self._axes[ii]
            vert = ax.plot([0] * 2, [-0.5, self._sizes[yax] - 0.5],
                           color=(0, 1, 0), linestyle='-')[0]
            horiz = ax.plot([-0.5, self._sizes[xax] - 0.5], [0] * 2,
                            color=(0, 1, 0), linestyle='-')[0]
            self._crosshairs[ii] = dict(vert=vert, horiz=horiz)
            # add text labels (top, right, bottom, left)
            lims = [0, self._sizes[xax], 0, self._sizes[yax]]
            bump = 0.01
            poss = [[lims[1] / 2., lims[3]],
                    [(1 + bump) * lims[1], lims[3] / 2.],
                    [lims[1] / 2., 0],
                    [lims[0] - bump * lims[1], lims[3] / 2.]]
            anchors = [['center', 'bottom'], ['left', 'center'],
                       ['center', 'top'], ['right', 'center']]
            for pos, anchor, lab in zip(poss, anchors, label):
                ax.text(pos[0], pos[1], lab,
                        horizontalalignment=anchor[0],
                        verticalalignment=anchor[1])
            ax.axis(lims)
            ax.set_aspect(ratio)
            ax.patch.set_visible(False)
            ax.set_frame_on(False)
            ax.axes.get_yaxis().set_visible(False)
            ax.axes.get_xaxis().set_visible(False)
            self._data_idx.append(0)
        self._data_idx.append(-1)  # volume

        self._figs = set([a.figure for a in self._axes])
        for fig in self._figs:
            fig.canvas.mpl_connect('scroll_event', self._on_scroll)
            fig.canvas.mpl_connect('motion_notify_event', self._on_mouse)
            fig.canvas.mpl_connect('button_press_event', self._on_mouse)

        # actually set data meaningfully
        self.add_overlay(volume, cmap=cmap, clim=clim, alpha=alpha, draw=False)
        self._position = np.zeros(4)
        self._position[3] = 1.  # convenience for affine multiplication
        self._changing = False  # keep track of status to avoid loops
        plt.draw()
        for fig in self._figs:
            fig.canvas.draw_idle()
            fig.canvas.draw()
        plt.pause(1e-3) # give a little bit of time for the renderer (needed on MacOS)
        self._set_position(0., 0., 0.)
        self._draw()
Example #34
def main():
    parser = build_parser()
    args = parser.parse_args()
    print(args)

    if min(args.keep_top) < 0:
        parser.error("--keep-top must be between in [0, 1].")

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    # Load experiments hyperparameters
    try:
        hyperparams = smartutils.load_dict_from_json_file(
            pjoin(experiment_path, "hyperparams.json"))
    except FileNotFoundError:
        hyperparams = smartutils.load_dict_from_json_file(
            pjoin(experiment_path, "..", "hyperparams.json"))

    # Use this for hyperparams added in a new version, but nonexistent from older versions
    retrocompatibility_defaults = {
        'feed_previous_direction': False,
        'predict_offset': False,
        'normalize': False,
        'keep_step_size': False,
        'sort_streamlines': False
    }
    for new_hyperparams, default_value in retrocompatibility_defaults.items():
        if new_hyperparams not in hyperparams:
            hyperparams[new_hyperparams] = default_value

    with Timer("Loading signal data and tractogram", newline=True):
        volume_manager = VolumeManager()
        dataset = datasets.load_tractography_dataset_from_dwi_and_tractogram(
            args.signal,
            args.tractogram,
            volume_manager,
            use_sh_coeffs=hyperparams['use_sh_coeffs'],
            bvals=args.bvals,
            bvecs=args.bvecs,
            step_size=args.step_size)
        print("Dataset size:", len(dataset))

        if vizu_available and args.vizu:
            vizu.check_dataset_integrity(dataset, subset=0.2)

    with Timer("Loading model"):
        loss_type = args.loss_type
        model = None
        if hyperparams['model'] == 'gru_regression':
            from learn2track.models import GRU_Regression
            model = GRU_Regression.create(experiment_path,
                                          volume_manager=volume_manager)
        elif hyperparams['model'] == 'gru_mixture':
            from learn2track.models import GRU_Mixture
            model = GRU_Mixture.create(experiment_path,
                                       volume_manager=volume_manager)
        elif hyperparams['model'] == 'gru_multistep':
            from learn2track.models import GRU_Multistep_Gaussian
            model = GRU_Multistep_Gaussian.create(
                experiment_path, volume_manager=volume_manager)
            model.k = 1
            model.m = 1
        elif hyperparams['model'] == 'ffnn_regression':
            from learn2track.models import FFNN_Regression
            model = FFNN_Regression.create(experiment_path,
                                           volume_manager=volume_manager)

            if loss_type in ['l2_sum', 'l2_mean']:
                loss_type = "expected_value"

        else:
            raise NameError("Unknown model: {}".format(hyperparams['model']))

    with Timer("Building evaluation function"):
        # Override K for gru_multistep
        if 'k' in hyperparams:
            hyperparams['k'] = 1

        batch_scheduler = batch_scheduler_factory(
            hyperparams,
            dataset,
            use_data_augment=False,  # Otherwise it doubles the number of losses
            train_mode=False,
            batch_size_override=args.batch_size)
        loss = loss_factory(hyperparams, model, dataset, loss_type=loss_type)
        l2_error = views.LossView(loss=loss, batch_scheduler=batch_scheduler)

    with Timer("Scoring...", newline=True):
        dummy_status = Status()  # Forces recomputing results
        losses = l2_error.losses.view(dummy_status)

        if hyperparams['model'] == 'ffnn_regression':
            _losses = dataset.streamlines.copy()
            _losses._data = losses.copy()
            _losses._lengths -= 1
            _losses._offsets -= np.arange(len(dataset.streamlines))

            if args.loss_type == 'l2_sum':
                losses = np.asarray([l.sum() for l in _losses])
            elif args.loss_type == 'l2_mean':
                losses = np.asarray([l.mean() for l in _losses])

        mean = float(l2_error.mean.view(dummy_status))
        stderror = float(l2_error.stderror.view(dummy_status))

        print("Loss: {:.4f} ± {:.4f}".format(mean, stderror))
        print("Min: {:.4f}".format(losses.min()))
        print("Max: {:.4f}".format(losses.max()))
        print("Percentiles: {}".format(
            np.percentile(losses, [0, 25, 50, 75, 100])))

    with Timer("Saving streamlines"):
        nii = dataset.subjects[0].signal
        tractogram = nib.streamlines.Tractogram(
            dataset.streamlines[batch_scheduler.indices],
            affine_to_rasmm=nii.affine)
        tractogram.data_per_streamline['loss'] = losses

        header = {}
        header[Field.VOXEL_TO_RASMM] = nii.affine.copy()
        header[Field.VOXEL_SIZES] = nii.header.get_zooms()[:3]
        header[Field.DIMENSIONS] = nii.shape[:3]
        header[Field.VOXEL_ORDER] = "".join(aff2axcodes(nii.affine))

        nib.streamlines.save(tractogram.copy(), args.out, header=header)

    if len(args.keep_top) > 0:
        for keep_top in args.keep_top:
            with Timer("Saving top {}% streamlines".format(keep_top)):
                idx = np.argsort(losses)
                idx = idx[:int(keep_top * len(losses))]
                print("Keeping {}/{} streamlines".format(
                    len(idx), len(losses)))
                sub_tractogram = tractogram[idx]
                out_filename = args.out[:-4] + "_top{}.tck".format(keep_top)
                nib.streamlines.save(sub_tractogram, out_filename)
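
The keep-top step above boils down to sorting streamlines by their per-streamline loss and keeping the smallest fraction. A minimal, self-contained sketch of that step, assuming only numpy and nibabel and a hypothetical scored tractogram on disk:

import numpy as np
import nibabel as nib

# Hypothetical input: a tractogram whose per-streamline loss was stored
# in data_per_streamline, as done by the script above.
trk = nib.streamlines.load("scored_bundle.trk")
tractogram = trk.tractogram
losses = np.asarray(tractogram.data_per_streamline["loss"]).ravel()

keep_top = 0.25  # keep the 25% of streamlines with the smallest loss
idx = np.argsort(losses)[:int(keep_top * len(losses))]
print("Keeping {}/{} streamlines".format(len(idx), len(losses)))

# Tractogram supports fancy indexing, so data_per_streamline follows along.
nib.streamlines.save(tractogram[idx], "scored_bundle_top25.tck")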
Example #35
0
def aff_to_hdr(affine, trk_hdr, pos_vox=None, set_order=None):
    ''' Set affine `affine` into trackvis header `trk_hdr`

    Affine is the mapping from voxel space to the Nifti (RAS) output coordinate
    system convention; x: Left -> Right, y: Posterior -> Anterior, z:
    Inferior -> Superior.  Sets the affine if possible, as well as the voxel
    sizes and voxel axis ordering.

    Parameters
    ----------
    affine : (4,4) array-like
       Affine voxel to mm transformation
    trk_hdr : mapping
       Mapping implementing __setitem__
    pos_vox : None or bool
        If None, currently defaults to False - this will change in future
        versions of nibabel.  If False, allow negative voxel sizes in the header
        to record axis flips.  Negative voxel sizes cause problems for trackvis
        (the application).  If True, enforce positive voxel sizes.
    set_order : None or bool
        If None, currently defaults to False - this will change in future
        versions of nibabel.  If False, do not set ``voxel_order`` field in
        `trk_hdr`.  If True, calculate ``voxel_order`` from `affine` and set
        into `trk_hdr`.

    Returns
    -------
    None

    Notes
    -----
    version 2 of the trackvis header has a dedicated field for the nifti RAS
    affine. In theory trackvis 1 has enough information to store an affine, with
    the fields 'origin', 'voxel_size' and 'image_orientation_patient'.
    Unfortunately, to be able to store any affine, we'd need to be able to set
    negative voxel sizes, to encode axis flips. This is because
    'image_orientation_patient' is only two columns of the 3x3 rotation matrix,
    and we need to know the number of flips to reconstruct the third column
    reliably.  It turns out that negative voxel sizes upset trackvis (the
    application).  The application also ignores the origin field, and may not
    use the 'image_orientation_patient' field.
    '''
    if pos_vox is None:
        warnings.warn('Default for ``pos_vox`` will change to True in '
                      'future versions of nibabel',
                      FutureWarning,
                      stacklevel=2)
        pos_vox = False
    if set_order is None:
        warnings.warn('Default for ``set_order`` will change to True in '
                      'future versions of nibabel',
                      FutureWarning,
                      stacklevel=2)
        set_order = False
    try:
        version = trk_hdr['version']
    except (KeyError, ValueError): # dict or structured array
        version = 2
    if version == 2:
        trk_hdr['vox_to_ras'] = affine
    if set_order:
        trk_hdr['voxel_order'] = ''.join(aff2axcodes(affine))
    # Now on dodgy ground with DICOM fields in header
    # RAS to DPCS output
    affine = np.dot(DPCS_TO_TAL, affine)
    trans = affine[:3, 3]
    # Get zooms
    RZS = affine[:3, :3]
    zooms = np.sqrt(np.sum(RZS * RZS, axis=0))
    RS = RZS / zooms
    # If you said we could, adjust zooms to make RS correspond (below) to a true
    # rotation matrix.  We need to set the sign of one of the zooms to deal with
    # this.  Trackvis (the application) doesn't like negative zooms at all, so
    # you might want to disallow this with the pos_vox option.
    if not pos_vox and npl.det(RS) < 0:
        zooms[0] *= -1
        RS[:,0] *= -1
    # retrieve rotation matrix from RS with polar decomposition.
    # Discard shears because we cannot store them.
    P, S, Qs = npl.svd(RS)
    R = np.dot(P, Qs)
    # it's an orthogonal matrix
    assert np.allclose(np.dot(R, R.T), np.eye(3))
    # set into header
    trk_hdr['origin'] = trans
    trk_hdr['voxel_size'] = zooms
    trk_hdr['image_orientation_patient'] = R[:,0:2].T.ravel()
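
A short usage sketch, assuming the older nibabel.trackvis module that this function appears to belong to (deprecated and removed in newer nibabel releases); the affine is made up:

import numpy as np
from nibabel.trackvis import empty_header, aff_to_hdr, aff_from_hdr
from nibabel.orientations import aff2axcodes

affine = np.diag([2.0, 2.0, 2.0, 1.0])   # made-up 2 mm isotropic affine
affine[:3, 3] = [-90.0, -126.0, -72.0]

hdr = empty_header(version=2)
aff_to_hdr(affine, hdr, pos_vox=True, set_order=True)

print(hdr['voxel_size'])    # zooms recovered from the affine
print(hdr['voxel_order'])   # axis codes derived from the affine, e.g. b'RAS'
print(aff2axcodes(aff_from_hdr(hdr, atleast_v2=True)))  # round trip back to an affine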
Example #36
0
    def __init__(self, data, affine=None, axes=None, title=None):
        """
        Parameters
        ----------
        data : array-like
            The data that will be displayed by the slicer. Should have 3+
            dimensions.
        affine : array-like or None, optional
            Affine transform for the data. This is used to determine
            how the data should be sliced for plotting into the sagittal,
            coronal, and axial view axes. If None, identity is assumed.
            The aspect ratio of the data is inferred from the affine
            transform.
        axes : tuple of mpl.Axes or None, optional
            3 or 4 axes instances for the 3 slices plus volumes,
            or None (default).
        title : str or None, optional
            The title to display. Can be None (default) to display no
            title.
        """
        # Use these late imports of matplotlib so that we have some hope that
        # the test functions are the first to set the matplotlib backend. The
        # tests set the backend to something that doesn't require a display.
        self._plt = plt = optional_package('matplotlib.pyplot')[0]
        mpl_patch = optional_package('matplotlib.patches')[0]
        self._title = title
        self._closed = False

        data = np.asanyarray(data)
        if data.ndim < 3:
            raise ValueError('data must have at least 3 dimensions')
        if np.iscomplexobj(data):
            raise TypeError("Complex data not supported")
        affine = np.array(affine, float) if affine is not None else np.eye(4)
        if affine.shape != (4, 4):
            raise ValueError('affine must be a 4x4 matrix')
        # determine our orientation
        self._affine = affine
        codes = axcodes2ornt(aff2axcodes(self._affine))
        self._order = np.argsort([c[0] for c in codes])
        self._flips = np.array([c[1] < 0 for c in codes])[self._order]
        self._flips = list(self._flips) + [False]  # add volume dim
        self._scalers = voxel_sizes(self._affine)
        self._inv_affine = np.linalg.inv(affine)
        # current volume info
        self._volume_dims = data.shape[3:]
        self._current_vol_data = data[:, :, :, 0] if data.ndim > 3 else data
        self._data = data
        self._clim = np.percentile(data, (1., 99.))
        del data

        if axes is None:  # make the axes
            # ^ +---------+   ^ +---------+
            # | |         |   | |         |
            #   |   Sag   |     |   Cor   |
            # S |    0    |   S |    1    |
            #   |         |     |         |
            #   |         |     |         |
            #   +---------+     +---------+
            #        A  -->     <--  R
            # ^ +---------+     +---------+
            # | |         |     |         |
            #   |  Axial  |     |   Vol   |
            # A |    2    |     |    3    |
            #   |         |     |         |
            #   |         |     |         |
            #   +---------+     +---------+
            #   <--  R          <--  t  -->

            fig, axes = plt.subplots(2, 2)
            fig.set_size_inches((8, 8), forward=True)
            self._axes = [axes[0, 0], axes[0, 1], axes[1, 0], axes[1, 1]]
            plt.tight_layout(pad=0.1)
            if self.n_volumes <= 1:
                fig.delaxes(self._axes[3])
                self._axes.pop(-1)
            if self._title is not None:
                fig.canvas.set_window_title(str(title))
        else:
            self._axes = [axes[0], axes[1], axes[2]]
            if len(axes) > 3:
                self._axes.append(axes[3])

        # Start midway through each axis, idx is current slice number
        self._ims, self._data_idx = list(), list()

        # set up axis crosshairs
        self._crosshairs = [None] * 3
        r = [self._scalers[self._order[2]] / self._scalers[self._order[1]],
             self._scalers[self._order[2]] / self._scalers[self._order[0]],
             self._scalers[self._order[1]] / self._scalers[self._order[0]]]
        self._sizes = [self._data.shape[order] for order in self._order]
        for ii, xax, yax, ratio, label in zip([0, 1, 2], [1, 0, 0], [2, 2, 1],
                                              r, ('SAIP', 'SLIR', 'ALPR')):
            ax = self._axes[ii]
            d = np.zeros((self._sizes[yax], self._sizes[xax]))
            im = self._axes[ii].imshow(
                d, vmin=self._clim[0], vmax=self._clim[1], aspect=1,
                cmap='gray', interpolation='nearest', origin='lower')
            self._ims.append(im)
            vert = ax.plot([0] * 2, [-0.5, self._sizes[yax] - 0.5],
                           color=(0, 1, 0), linestyle='-')[0]
            horiz = ax.plot([-0.5, self._sizes[xax] - 0.5], [0] * 2,
                            color=(0, 1, 0), linestyle='-')[0]
            self._crosshairs[ii] = dict(vert=vert, horiz=horiz)
            # add text labels (top, right, bottom, left)
            lims = [0, self._sizes[xax], 0, self._sizes[yax]]
            bump = 0.01
            poss = [[lims[1] / 2., lims[3]],
                    [(1 + bump) * lims[1], lims[3] / 2.],
                    [lims[1] / 2., 0],
                    [lims[0] - bump * lims[1], lims[3] / 2.]]
            anchors = [['center', 'bottom'], ['left', 'center'],
                       ['center', 'top'], ['right', 'center']]
            for pos, anchor, lab in zip(poss, anchors, label):
                ax.text(pos[0], pos[1], lab,
                        horizontalalignment=anchor[0],
                        verticalalignment=anchor[1])
            ax.axis(lims)
            ax.set_aspect(ratio)
            ax.patch.set_visible(False)
            ax.set_frame_on(False)
            ax.axes.get_yaxis().set_visible(False)
            ax.axes.get_xaxis().set_visible(False)
            self._data_idx.append(0)
        self._data_idx.append(-1)  # volume

        # Set up volumes axis
        if self.n_volumes > 1 and len(self._axes) > 3:
            ax = self._axes[3]
            try:
                ax.set_facecolor('k')
            except AttributeError:  # old mpl
                ax.set_axis_bgcolor('k')
            ax.set_title('Volumes')
            y = np.zeros(self.n_volumes + 1)
            x = np.arange(self.n_volumes + 1) - 0.5
            step = ax.step(x, y, where='post', color='y')[0]
            ax.set_xticks(np.unique(np.linspace(0, self.n_volumes - 1,
                                                5).astype(int)))
            ax.set_xlim(x[0], x[-1])
            yl = [self._data.min(), self._data.max()]
            yl = [l + s * np.diff(lims)[0] for l, s in zip(yl, [-1.01, 1.01])]
            patch = mpl_patch.Rectangle([-0.5, yl[0]], 1., np.diff(yl)[0],
                                        fill=True, facecolor=(0, 1, 0),
                                        edgecolor=(0, 1, 0), alpha=0.25)
            ax.add_patch(patch)
            ax.set_ylim(yl)
            self._volume_ax_objs = dict(step=step, patch=patch)

        self._figs = set([a.figure for a in self._axes])
        for fig in self._figs:
            fig.canvas.mpl_connect('scroll_event', self._on_scroll)
            fig.canvas.mpl_connect('motion_notify_event', self._on_mouse)
            fig.canvas.mpl_connect('button_press_event', self._on_mouse)
            fig.canvas.mpl_connect('key_press_event', self._on_keypress)
            fig.canvas.mpl_connect('close_event', self._cleanup)

        # actually set data meaningfully
        self._position = np.zeros(4)
        self._position[3] = 1.  # convenience for affine multiplication
        self._changing = False  # keep track of status to avoid loops
        self._links = []  # other viewers this one is linked to
        self._plt.draw()
        for fig in self._figs:
            fig.canvas.draw()
        self._set_volume_index(0, update_slices=False)
        self._set_position(0., 0., 0.)
        self._draw()
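
This constructor matches nibabel's OrthoSlicer3D viewer; assuming that is the class in question, a minimal usage sketch (the file name is hypothetical):

import nibabel as nib
from nibabel.viewers import OrthoSlicer3D

img = nib.load('anatomical.nii')   # hypothetical file
viewer = OrthoSlicer3D(img.dataobj, img.affine, title='anatomical')
viewer.show()                      # blocks until the figure is closed

# The same viewer is available as a convenience method on the image:
# img.orthoview()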
Example #37
0
    # determinant. We try pure rotation first
    R = np.c_[iop, np.cross(*iop.T)]
    vox = trk_hdr['voxel_size']
    aff[:3,:3] = R * vox
    aff[:3,3] = trk_hdr['origin']
    aff = np.dot(DPCS_TO_TAL, aff)
    # Next we check against the 'voxel_order' field if present and not empty.
    try:
        voxel_order = asstr(np.asscalar(trk_hdr['voxel_order']))
    except (KeyError, ValueError):  # dict or structured array
        voxel_order = ''
    if voxel_order == '':
        return aff
    # If the voxel_order conflicts with the affine by one flip, this may have
    # been a negative determinant affine saved with positive voxel sizes
    exp_order = ''.join(aff2axcodes(aff))
    if voxel_order != exp_order:
        # If first pass doesn't match, try flipping the (estimated) third column
        aff[:,2] *= -1
        exp_order = ''.join(aff2axcodes(aff))
        if voxel_order != exp_order:
            raise HeaderError('Estimate of header affine does not match '
                              'voxel_order of %s' % exp_order)
    return aff


def aff_to_hdr(affine, trk_hdr, pos_vox=None, set_order=None):
    ''' Set affine `affine` into trackvis header `trk_hdr`

    Affine is the mapping from voxel space to the Nifti (RAS) output coordinate
    system convention; x: Left -> Right, y: Posterior -> Anterior, z:
Example #39
0
    def _read_header(fileobj):
        """ Reads a TRK header from a file.

        Parameters
        ----------
        fileobj : string or file-like object
            If string, a filename; otherwise an open file-like object
            pointing to TRK file (and ready to read from the beginning
            of the TRK header). Note that calling this function
            does not change the file position.

        Returns
        -------
        header : dict
            Metadata associated with this tractogram file.
        """
        # Record start position if this is a file-like object
        start_position = fileobj.tell() if hasattr(fileobj, 'tell') else None

        with Opener(fileobj) as f:

            # Read the header in one block.
            header_str = f.read(header_2_dtype.itemsize)
            header_rec = np.fromstring(string=header_str, dtype=header_2_dtype)

            # Check endianness
            endianness = native_code
            if header_rec['hdr_size'] != TrkFile.HEADER_SIZE:
                endianness = swapped_code

                # Swap byte order
                header_rec = header_rec.newbyteorder()
                if header_rec['hdr_size'] != TrkFile.HEADER_SIZE:
                    msg = "Invalid hdr_size: {0} instead of {1}"
                    raise HeaderError(
                        msg.format(header_rec['hdr_size'],
                                   TrkFile.HEADER_SIZE))

            if header_rec['version'] == 1:
                header_rec = np.fromstring(
                    string=header_str, dtype=header_1_dtype)
            elif header_rec['version'] == 2:
                pass  # Nothing more to do.
            else:
                raise HeaderError('NiBabel only supports versions 1 and 2 of '
                                  'the Trackvis file format')

            # Convert the first record of `header_rec` into a dictionary
            header = dict(zip(header_rec.dtype.names, header_rec[0]))
            header[Field.ENDIANNESS] = endianness

            # If vox_to_ras[3][3] is 0, it means the matrix is not recorded.
            if header[Field.VOXEL_TO_RASMM][3][3] == 0:
                header[Field.VOXEL_TO_RASMM] = np.eye(4, dtype=np.float32)
                warnings.warn(("Field 'vox_to_ras' in the TRK's header was"
                               " not recorded. Will continue assuming it's"
                               " the identity."), HeaderWarning)

            # Check that the 'vox_to_ras' affine is valid, i.e. should be
            # able to determine the axis directions.
            axcodes = aff2axcodes(header[Field.VOXEL_TO_RASMM])
            if None in axcodes:
                msg = ("The 'vox_to_ras' affine is invalid! Could not"
                       " determine the axis directions from it.\n{0}").format(
                           header[Field.VOXEL_TO_RASMM])
                raise HeaderError(msg)

            # By default, the voxel order is LPS.
            # http://trackvis.org/blog/forum/diffusion-toolkit-usage/interpretation-of-track-point-coordinates
            if header[Field.VOXEL_ORDER] == b"":
                msg = ("Voxel order is not specified, will assume 'LPS' since"
                       "it is Trackvis software's default.")
                warnings.warn(msg, HeaderWarning)
                header[Field.VOXEL_ORDER] = b"LPS"

            # Keep the file position where the data begin.
            header['_offset_data'] = f.tell()

        # Set the file position where it was, if it was previously open
        if start_position is not None:
            fileobj.seek(start_position, os.SEEK_SET)

        return header
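
A usage sketch, assuming this is the TRK reader from nibabel.streamlines: in practice the header is reached through the public loading API rather than by calling _read_header directly (file name hypothetical):

import nibabel as nib
from nibabel.orientations import aff2axcodes

# lazy_load avoids reading the streamline data just to inspect the header
trk = nib.streamlines.load('bundle.trk', lazy_load=True)
hdr = trk.header

print(hdr['voxel_order'], hdr['dimensions'], hdr['voxel_sizes'])
print(aff2axcodes(hdr['voxel_to_rasmm']))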
Example #40
0
import nibabel as nb
import numpy as np
from nibabel.orientations import aff2axcodes
from nibabel.streamlines.header import Field
from nibabel.streamlines.tractogram import Tractogram

streamlines = np.load(
    '/media/localadmin/HagmannHDD/Seb/testPFT/diffusion_preproc_resampled_streamlines.npy'
)

imref = nb.load('/media/localadmin/HagmannHDD/Seb/testPFT/shore_gfa.nii.gz')

affine = imref.affine.copy()

print(imref.affine.copy())
print(affine)

header = {}
header[Field.ORIGIN] = affine[:3, 3]
header[Field.VOXEL_TO_RASMM] = affine
header[Field.VOXEL_SIZES] = imref.header.get_zooms()[:3]
header[Field.DIMENSIONS] = imref.shape[:3]
header[Field.VOXEL_ORDER] = "".join(aff2axcodes(affine))

for i, streamline in enumerate(streamlines):
    for j, voxel in enumerate(streamline):
        streamlines[i][j] = streamlines[i][j] - imref.affine.copy()[:3, 3]

print(header[Field.VOXEL_ORDER])
tractogram = Tractogram(streamlines=streamlines, affine_to_rasmm=affine)
out_fname = '/media/localadmin/HagmannHDD/Seb/testPFT/track_nib1.trk'
nb.streamlines.save(tractogram, out_fname, header=header)
Example #41
0
parser.add_argument("--min-signal",
                    type=float,
                    default=1.0,
                    help="default: 1.0")
parser.add_argument("--tc-threshold",
                    type=float,
                    default=0.1,
                    help="default: 0.1")
parser.add_argument("--step-size",
                    type=float,
                    default=0.5,
                    help="default: 0.5")
args = parser.parse_args()

img = get_img(args.nifti_file)
voxel_order = "".join(aff2axcodes(img.affine))
gtab = get_gtab(args.bvals, args.bvecs)
mask = get_img(args.mask_nifti)
data = img.get_fdata()

# resample mask if necessary
if mask.shape != data.shape:
    from dipy.align.imaffine import AffineMap
    identity = np.eye(4)
    affine_map = AffineMap(identity, img.shape[:3], img.affine, mask.shape[:3],
                           mask.affine)
    mask = affine_map.transform(mask.get_fdata())
    #mask = np.round(mask)
else:
    mask = mask.get_fdata()
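
The mask-resampling branch above uses dipy's AffineMap with an identity transform to pull the mask into the diffusion grid. A hedged, standalone sketch of the same idea, with hypothetical file names, nearest-neighbour interpolation, and an explicit threshold to keep the result binary:

import numpy as np
import nibabel as nib
from dipy.align.imaffine import AffineMap

dwi = nib.load('dwi.nii.gz')        # hypothetical files
mask_img = nib.load('mask.nii.gz')

if mask_img.shape[:3] != dwi.shape[:3]:
    # Identity transform: only the sampling grids differ.
    affine_map = AffineMap(np.eye(4),
                           dwi.shape[:3], dwi.affine,
                           mask_img.shape[:3], mask_img.affine)
    mask = affine_map.transform(mask_img.get_fdata(), interpolation='nearest')
else:
    mask = mask_img.get_fdata()

mask = mask > 0                     # ensure the mask is boolean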
Example #42
0
    def _run_interface(self, runtime):
        from dipy.tracking.utils import move_streamlines, \
            affine_from_fsl_mat_file
        dx, dy, dz = get_data_dims(self.inputs.image_file)
        vx, vy, vz = get_vox_dims(self.inputs.image_file)
        image_file = nb.load(self.inputs.image_file)
        affine = image_file.affine
        out_filename = op.abspath(self.inputs.out_filename)

        # Reads MRTrix tracks
        header, streamlines = read_mrtrix_tracks(
            self.inputs.in_file, as_generator=True)
        iflogger.info('MRTrix Header:')
        iflogger.info(header)
        # Writes to Trackvis
        trk_header = nb.trackvis.empty_header()
        trk_header['dim'] = [dx, dy, dz]
        trk_header['voxel_size'] = [vx, vy, vz]
        trk_header['n_count'] = header['count']

        if isdefined(self.inputs.matrix_file) and isdefined(
                self.inputs.registration_image_file):
            iflogger.info('Applying transformation from matrix file %s',
                          self.inputs.matrix_file)
            xfm = np.genfromtxt(self.inputs.matrix_file)
            iflogger.info(xfm)
            registration_image_file = nb.load(
                self.inputs.registration_image_file)
            reg_affine = registration_image_file.affine
            r_dx, r_dy, r_dz = get_data_dims(
                self.inputs.registration_image_file)
            r_vx, r_vy, r_vz = get_vox_dims(
                self.inputs.registration_image_file)
            iflogger.info('Using affine from registration image file %s',
                          self.inputs.registration_image_file)
            iflogger.info(reg_affine)
            trk_header['vox_to_ras'] = reg_affine
            trk_header['dim'] = [r_dx, r_dy, r_dz]
            trk_header['voxel_size'] = [r_vx, r_vy, r_vz]

            affine = np.dot(affine, np.diag(1. / np.array([vx, vy, vz, 1])))
            transformed_streamlines = transform_to_affine(
                streamlines, trk_header, affine)

            aff = affine_from_fsl_mat_file(xfm, [vx, vy, vz],
                                           [r_vx, r_vy, r_vz])
            iflogger.info(aff)

            axcode = aff2axcodes(reg_affine)
            trk_header['voxel_order'] = axcode[0] + axcode[1] + axcode[2]

            final_streamlines = move_streamlines(transformed_streamlines, aff)
            trk_tracks = ((ii, None, None) for ii in final_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info('Saving transformed Trackvis file as %s',
                          out_filename)
            iflogger.info('New TrackVis Header:')
            iflogger.info(trk_header)
        else:
            iflogger.info(
                'Applying transformation from scanner coordinates to %s',
                self.inputs.image_file)
            axcode = aff2axcodes(affine)
            trk_header['voxel_order'] = axcode[0] + axcode[1] + axcode[2]
            trk_header['vox_to_ras'] = affine
            transformed_streamlines = transform_to_affine(
                streamlines, trk_header, affine)
            trk_tracks = ((ii, None, None) for ii in transformed_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info('Saving Trackvis file as %s', out_filename)
            iflogger.info('TrackVis Header:')
            iflogger.info(trk_header)
        return runtime
Example #43
0
def _check_hdr_points_space(hdr, points_space):
    """ Check header `hdr` for consistency with transform `points_space`

    Parameters
    ----------
    hdr : ndarray
        trackvis header as structured ndarray
    points_space : {None, 'voxmm', 'voxel', 'rasmm'}
        Nature of transform that we will (elsewhere) apply to streamlines paired
        with `hdr`.  None or 'voxmm' means pass through with no further checks.
        'voxel' checks ``hdr['voxel_size']``, raising an error if any value is
        negative or all values are zero, and warning if any value is zero.
        'rasmm' checks for the presence of a non-zero affine in
        ``hdr['vox_to_ras']``, and that this affine is consistent with
        ``hdr['voxel_order']`` and ``hdr['voxel_size']`` - raising an error
        otherwise.

    Returns
    -------
    None

    Notes
    -----
    """
    if points_space is None or points_space == 'voxmm':
        return
    if points_space == 'voxel':
        voxel_size = hdr['voxel_size']
        if np.any(voxel_size < 0):
            raise HeaderError('Negative voxel sizes %s not valid for voxel - '
                              'voxmm conversion' % voxel_size)
        if np.all(voxel_size == 0):
            raise HeaderError('Cannot convert between voxels and voxmm when '
                              '"voxel_sizes" all 0')
        if np.any(voxel_size == 0):
            warnings.warn('zero values in "voxel_size" - %s' % voxel_size)
        return
    elif points_space == 'rasmm':
        try:
            affine = hdr['vox_to_ras']
        except ValueError:
            raise HeaderError('Need "vox_to_ras" field to get '
                              'affine with which to convert points; '
                              'this is present for headers >= version 2')
        if np.all(affine == 0) or affine[3,3] == 0:
            raise HeaderError('Need non-zero affine to convert between '
                              'rasmm points and voxmm')
        zooms = hdr['voxel_size']
        aff_zooms = np.sqrt(np.sum(affine[:3,:3]**2,axis=0))
        if not np.allclose(aff_zooms, zooms):
            raise HeaderError('Affine zooms %s differ from voxel_size '
                              'field value %s' % (aff_zooms, zooms))
        aff_order = ''.join(aff2axcodes(affine))
        voxel_order = asstr(np.asscalar(hdr['voxel_order']))
        if voxel_order == '':
            voxel_order = 'LPS' # trackvis default
        if not voxel_order == aff_order:
            raise HeaderError('Affine implies voxel_order %s but '
                              'header voxel_order is %s' %
                              (aff_order, voxel_order))
    else:
        raise ValueError('Painfully confusing "points_space" value of "%s"'
                         % points_space)
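
A small sketch of how the 'rasmm' branch behaves, again assuming the older nibabel.trackvis module: an affine written with aff_to_hdr passes the check, while a header whose voxel_size no longer matches that affine raises a HeaderError. Note that _check_hdr_points_space is a private helper, so this is for illustration only.

import numpy as np
import nibabel.trackvis as tv

hdr = tv.empty_header(version=2)
affine = np.diag([2.0, 2.0, 2.0, 1.0])   # made-up 2 mm isotropic affine
tv.aff_to_hdr(affine, hdr, pos_vox=True, set_order=True)

tv._check_hdr_points_space(hdr, 'rasmm')   # passes: affine, zooms and order agree

hdr['voxel_size'] = [1.0, 1.0, 1.0]        # zooms now contradict the affine
try:
    tv._check_hdr_points_space(hdr, 'rasmm')
except tv.HeaderError as err:
    print(err)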
Example #44
0
    def loadDataFunc(self):
        #Function for loading image file
        self.ui.autogray.setCheckState(2)
        self.autoGrayFlag = 1

        getFileNAME = QFileDialog.getOpenFileName()

        if str(getFileNAME)[-4:] == '.avw': #if avw file, do conversion, and then work with nifti file
            MR = loadAVW(str(getFileNAME))
            avw2nifti(str(getFileNAME[:-4]) + 'avw', MR, seg=None)
            getFileNAME = str(getFileNAME[:-4]) + 'avw.nii.gz'
        
        #print getFileNAME
        self.getFileNAME = getFileNAME
        self.ui.displayFileName.setText(str(os.path.basename(str(self.getFileNAME)))) 

        self.z =0
        self.z1 = 0
        self.z2 = 0
        self.rotD = -90
        imgObj2= nib.load(str(getFileNAME))
        imgObj1 = imgObj2
        self.affine2 = imgObj2.get_affine()     
        self.PSx = imgObj1.get_header()['pixdim'][1]
        self.PSy = imgObj1.get_header()['pixdim'][2]
        self.PSz = imgObj1.get_header()['pixdim'][3]
        (x,y,z) = orx.aff2axcodes(self.affine2)
        self.Orx = x
        self.Ory = y
        self.Orz = z
        ornt = orx.axcodes2ornt((x,y,z))  
        refOrnt = orx.axcodes2ornt(('R','S','A')) #was 'R', 'A', 'S'
        newOrnt = orx.ornt_transform(ornt,refOrnt)
        self.ornt = ornt
        self.refOrnt = refOrnt

        self.img_data2 = imgObj2.get_data()       

        self.img_data2 = orx.apply_orientation(self.img_data2,newOrnt)
        
        self.img_data2 = np.fliplr(np.rot90(self.img_data2,1))
        self.img_data1 = self.img_data2
        
        self.imageFile2 = str(getFileNAME) #changed self.ui.T2Image.currentText() to getFileNAME
        self.imageFile1 = self.imageFile2
        indx2 = self.imageFile2.rfind('/')
        indx1 = indx2
        self.filePath1 = self.imageFile1[0:indx1+1]
        self.fileName1 = self.imageFile1[indx1+1:]
        self.filePath2 = self.imageFile2[0:indx2+1]
        self.fileName2 = self.imageFile2[indx2+1:]
        
 
#        sizeT1C = self.img_data1.shape
        try:
            (x1,y1,z1) = self.img_data1.shape
            (x2,y2,z2) = self.img_data2.shape
        except ValueError:
            (x1,y1,z1,d1) = self.img_data1.shape
            (x2,y2,z2,d1) = self.img_data2.shape
            self.img_data1 = self.img_data1[:,:,:,0]
            self.img_data2 = self.img_data2[:,:,:,0]
            
        self.sliceNum1 = z1
        self.sliceNum2 = z2
        
        self.shape = self.img_data2.shape

        self.img1 = self.img_data1[:,:,self.z]
        self.img2 = self.img_data2[:,:,self.z]
        
        self.segImg = self.img_data2 * 0
        
        self.imshowFunc()  
        
        (x,y,z) = self.shape

        self.ui.figure3.canvas.ax.clear()
#        self.ui.figure3.canvas.ax.imshow(((self.img_data2[:,round(x/2),:])),cmap=plt.cm.gray)
        self.ui.figure3.canvas.ax.imshow(((self.img_data2[:,round(x/2),:])),cmap=plt.cm.gray)
        #self.ui.figure3.canvas.ax.set_aspect('auto')
        self.ui.figure3.canvas.ax.get_xaxis().set_visible(False)
        self.ui.figure3.canvas.ax.get_yaxis().set_visible(False)
        #self.ui.figure3.canvas.ax.set_title('Sagittal View', color = 'white') #this is where had sagittal view
        self.ui.figure3.canvas.draw()
        
        self.ui.figure4.canvas.ax.clear()
        self.ui.figure4.canvas.ax.imshow(np.rot90((self.img_data2[round(y/2),:,:]),1),cmap=plt.cm.gray)
        #self.ui.figure4.canvas.ax.set_aspect('auto')
        self.ui.figure4.canvas.ax.get_xaxis().set_visible(False)
        self.ui.figure4.canvas.ax.get_yaxis().set_visible(False)
        #self.ui.figure4.canvas.ax.set_title('Axial View', color = 'white')
        self.ui.figure4.canvas.draw()
        
#        self.imhistFunc()
        self.ui.imageSlider.setMinimum(0)
        self.ui.imageSlider.setMaximum(z2-1)
        self.ui.imageSlider.setSingleStep(1)

        self.maxSlice = z2 - 1
            
        (row,col,dep) = self.img_data2.shape
        self.overlayImgAX = np.zeros((row,col))
        
        return 
Example #45
0
def MICOS(fileNAME):

	tic = time.perf_counter()

	selfz =0
	selfz1 = 0
	selfz2 = 0
	selfrotD = -90
	imgObj2= nib.load(str(fileNAME))
	imgObj1 = imgObj2
	im = imgObj2
	selfaffine2 = imgObj2.get_affine()  
	selfheaderdtype = imgObj2.get_data_dtype()
	selfPSx = imgObj1.get_header()['pixdim'][1]
	selfPSy = imgObj1.get_header()['pixdim'][2]
	selfPSz = imgObj1.get_header()['pixdim'][3]
	(x,y,z) = orx.aff2axcodes(selfaffine2)
	selfOrx = x
	selfOry = y
	selfOrz = z
	ornt = orx.axcodes2ornt((x,y,z))  
	refOrnt = orx.axcodes2ornt(('R','S','A')) #was 'R', 'A', 'S'
	newOrnt = orx.ornt_transform(ornt,refOrnt)
	selfornt = ornt
	selfrefOrnt = refOrnt

	selfimg_data2 = imgObj2.get_data()       

	selfimg_data2 = orx.apply_orientation(selfimg_data2,newOrnt)

	selfimg_data2 = np.fliplr(np.rot90(selfimg_data2,1))
	im_data = selfimg_data2

	[x_si,y_si,z_si] = np.shape(im_data)

	#do 99% norm to 1000 
	im_data = np.array(im_data,dtype='float')
	im_data = im_data * 1000/np.percentile(im_data,99)
	#print np.shape(im_data)
	initialSeg = im_data.copy() * 0

	#begin user roi drawing...
	#go from middle up...
	for i in range(z_si // 2, z_si, 3):

		img = (im_data[:,:,i])

		# show the image
		if i > np.round(z_si/2):
			plt.figure(figsize=(ROI1.figwidth,ROI1.figheight))
		plt.imshow(img,cmap='gray')
		plt.colorbar()
		plt.title("outline one kidney, slice = " + str(i))

		# let user draw first ROI
		ROI1 = polydraw(roicolor='r') #let user draw first ROI

		# show the image with the first ROI
		plt.figure(figsize=(ROI1.figwidth,ROI1.figheight))
		plt.imshow(img,cmap='gray')
		plt.colorbar()
		ROI1.displayROI()
		plt.title("outline other kidney, slice = " + str(i))

		# let user draw second ROI
		ROI2 = polydraw(roicolor='b') #let user draw ROI

		initialSeg[:,:,i] = ROI1.getMask(img) + ROI2.getMask(img)


	#go from middle up...
	for i in range(z_si // 2 - 1, 0, -3):

		img = (im_data[:,:,i])

		# show the image
		plt.figure(figsize=(ROI1.figwidth,ROI1.figheight))
		plt.imshow(img,cmap='gray')
		plt.colorbar()
		plt.title("outline one kidney, slice = " + str(i))

		# let user draw first ROI
		ROI1 = polydraw(roicolor='r') #let user draw first ROI

		# show the image with the first ROI
		plt.figure(figsize=(ROI1.figwidth,ROI1.figheight))
		plt.imshow(img,cmap='gray')
		plt.colorbar()
		ROI1.displayROI()
		plt.title("outline other kidney, slice = " + str(i))

		# let user draw second ROI
		ROI2 = polydraw(roicolor='b') #let user draw ROI

		initialSeg[:,:,i] = ROI1.getMask(img) + ROI2.getMask(img)

	toc = time.perf_counter()


	#save out drawn polygon
	aff = selfaffine2
	outImage = deepcopy(initialSeg)#np.rot90(np.fliplr(self.segImg),-1)
	[x_si,y_si,z_si] = np.shape(outImage)
	#print np.shape(outImage)
	#This method works (for fastsegs)... but need more robust
	#for i in range(0,z_si):
	#    outImage[:,:,i] = np.rot90(self.segImg[:,:,z_si-1-i],-1)

	#try new method (more robust to header and affine mix-ups)
	ornt = orx.axcodes2ornt((selfOrx,selfOry,selfOrz))  
	refOrnt = orx.axcodes2ornt(('R','S','A'))
	newOrnt = orx.ornt_transform(refOrnt,ornt) #reversed these
	outImage= orx.apply_orientation(np.rot90(np.fliplr(outImage),-1),newOrnt)  
	#outImage = orx.apply_orientation(outImage,newOrnt)    
	#outImage = np.rot90(np.fliplr(outImage),-1)
	#print np.shape(outImage)
	#outImage = np.array(outImage,dtype=selfheaderdtype)
	new_image = nib.Nifti1Image(outImage,aff)

	nib.save(new_image,fileNAME[:-7]+'_polygon_MICOS.nii.gz')

	# Dilate and fill in missing slices
	initialSeg = dilation(initialSeg,iterations = 1)
	finalSeg = initialSeg.copy() * 0

	# Now try a convex hull method instead to better approximate missing slices (previous method is above).
	# This works but takes far too long.  It would also likely need object finding first, so that the
	# convex hull is computed for each kidney separately.
	while 0:
		xlist,ylist,zlist = find_3D_object_voxel_list(initialSeg)
		voxlist = np.zeros(shape=(np.shape(xlist)[0],3),dtype='int16')
		voxlist[:,0] = xlist
		voxlist[:,1] = ylist
		voxlist[:,2] = zlist
		tri = dtri(voxlist)

		# construct full voxel list
		xxlist,yylist,zzlist = find_3D_object_voxel_list((initialSeg+1)>0)

		fullvoxlist = np.zeros(shape=(np.shape(xxlist)[0],3),dtype='int16')
		fullvoxlist[:,0] = xxlist
		fullvoxlist[:,1] = yylist
		fullvoxlist[:,2] = zzlist

		finalSeg = np.array(in_hull(fullvoxlist,tri),dtype=float)
		finalSeg = np.reshape(finalSeg,(x_si,y_si,z_si))

	# Now do gaussian blur of polygon to smooth
	initialSeg = (filt.gaussian_filter(initialSeg.copy()*255,sigma=[3,3,1])) > 100

	#Begin optimized method...
	for i in range(z_si):
		img = (im_data[:,:,i])
		if np.max(initialSeg[:,:,i]>0):

			mgac = []
			gI = msnake.gborders(img,alpha=1E5,sigma=3.0) # increasing sigma allows more changes in contour
			mgac = msnake.MorphGAC(gI,smoothing=3,threshold=0.01,balloon=0.0) #was 2.5
			mgac.levelset = initialSeg[:,:,i]>0.5
			for ijk123 in range(100):
				mgac.step()
			finalSeg[:,:,i] = mgac.levelset
		#print i

	# Now do gaussian blur and threshold to finalize segmentation...
	finalSeg = (filt.gaussian_filter(finalSeg.copy()*255,sigma=[3,3,1])) > 100
	#using this helps with single slice errors of the active contour

	# Try adding now narrow band sobel/watershed technique.

	for i in range(z_si):
		img = (im_data[:,:,i])
		segslice = finalSeg[:,:,i]
		if np.max(finalSeg[:,:,i]>0):

			erodeimg = erosion(segslice.copy(),iterations=1)
			dilateimg = dilation(segslice.copy(),iterations=1)

			seeds = img * 0
			seeds[:] = 1
			seeds[dilateimg>0] = 0
			seeds[erodeimg>0] = 2
			sobelFilt = sobel(np.array(img.copy(),dtype='int16'))
			mgac = watershed(sobelFilt,seeds)>1

			finalSeg[:,:,i] = mgac>0

	#save out segmentation
	aff = selfaffine2
	outImage = deepcopy(finalSeg)#np.rot90(np.fliplr(self.segImg),-1)
	outImage = np.array(outImage,dtype='float')
	[x_si,y_si,z_si] = np.shape(outImage)

	#This method works (for fastsegs)... but need more robust
	#for i in range(0,z_si):
	#    outImage[:,:,i] = np.rot90(self.segImg[:,:,z_si-1-i],-1)

	#try new method (more robust to header and affine mix-ups)
	ornt = orx.axcodes2ornt((selfOrx,selfOry,selfOrz))  
	refOrnt = orx.axcodes2ornt(('R','S','A'))
	newOrnt = orx.ornt_transform(refOrnt,ornt) #reversed these
	outImage= orx.apply_orientation(np.rot90(np.fliplr(outImage),-1),newOrnt)  
	#outImage = orx.apply_orientation(outImage,newOrnt)    
	#outImage = np.rot90(np.fliplr(outImage),-1)
	new_image = nib.Nifti1Image(outImage,aff)
	nib.save(new_image,fileNAME[:-7]+'_FASTseg_MICOS.nii.gz')

	print('time =', toc - tic)

	return (fileNAME[:-7]+'_FASTseg_MICOS.nii.gz')
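
Both MICOS and loadDataFunc above rely on the same reorientation round trip: read the axis codes of the image affine, reorient the array to a chosen working orientation, operate on it, then apply the inverse transform before saving. A condensed, hedged sketch of that pattern using nibabel.orientations (file names hypothetical):

import nibabel as nib
import nibabel.orientations as orx

img = nib.load('kidney.nii.gz')                 # hypothetical file
codes = orx.aff2axcodes(img.affine)             # e.g. ('L', 'A', 'S')

ornt = orx.axcodes2ornt(codes)
ref_ornt = orx.axcodes2ornt(('R', 'S', 'A'))    # working orientation used above

to_ref = orx.ornt_transform(ornt, ref_ornt)
data_ref = orx.apply_orientation(img.get_fdata(), to_ref)

# ... segment or edit data_ref here ...

back = orx.ornt_transform(ref_ornt, ornt)       # inverse transform
data_native = orx.apply_orientation(data_ref, back)
nib.save(nib.Nifti1Image(data_native, img.affine), 'kidney_seg.nii.gz')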