Example #1
File: convert.py Project: Conxz/nipype
def transform_to_affine(streams, header, affine):
    rotation, scale = np.linalg.qr(affine)
    streams = move_streamlines(streams, rotation)
    scale[0:3, 0:3] = np.dot(scale[0:3, 0:3], np.diag(1. / header['voxel_size']))
    scale[0:3, 3] = abs(scale[0:3, 3])
    streams = move_streamlines(streams, scale)
    return streams
Example #2
File: mriutil.py Project: sbrambati/toad
def transform_to_affine(streams, header, affine):
    from dipy.tracking.utils import move_streamlines
    rotation, scale = numpy.linalg.qr(affine)
    streams = move_streamlines(streams, rotation)
    scale[0:3,0:3] = numpy.dot(scale[0:3,0:3], numpy.diag(1./header['voxel_size']))
    scale[0:3,3] = abs(scale[0:3,3])
    streams = move_streamlines(streams, scale)
    return streams
Example #3
def test_voxel_ornt():
    sh = (40, 40, 40)
    sz = (1, 2, 3)
    I4 = np.eye(4)

    ras = orientation_from_string('ras')
    sra = orientation_from_string('sra')
    lpi = orientation_from_string('lpi')
    srp = orientation_from_string('srp')

    affine = reorder_voxels_affine(ras, ras, sh, sz)
    assert_array_equal(affine, I4)
    affine = reorder_voxels_affine(sra, sra, sh, sz)
    assert_array_equal(affine, I4)
    affine = reorder_voxels_affine(lpi, lpi, sh, sz)
    assert_array_equal(affine, I4)
    affine = reorder_voxels_affine(srp, srp, sh, sz)
    assert_array_equal(affine, I4)

    streamlines = make_streamlines()
    box = np.array(sh)*sz

    sra_affine = reorder_voxels_affine(ras, sra, sh, sz)
    toras_affine = reorder_voxels_affine(sra, ras, sh, sz)
    assert_array_equal(np.dot(toras_affine, sra_affine), I4)
    expected_sl = (sl[:, [2, 0, 1]] for sl in streamlines)
    test_sl = move_streamlines(streamlines, sra_affine)
    for ii in range(len(streamlines)):
        assert_array_equal(next(test_sl), next(expected_sl))

    lpi_affine = reorder_voxels_affine(ras, lpi, sh, sz)
    toras_affine = reorder_voxels_affine(lpi, ras, sh, sz)
    assert_array_equal(np.dot(toras_affine, lpi_affine), I4)
    expected_sl = (box - sl for sl in streamlines)
    test_sl = move_streamlines(streamlines, lpi_affine)
    for ii in range(len(streamlines)):
        assert_array_equal(next(test_sl), next(expected_sl))

    srp_affine = reorder_voxels_affine(ras, srp, sh, sz)
    toras_affine = reorder_voxels_affine(srp, ras, (40, 40, 40), (3, 1, 2))
    assert_array_equal(np.dot(toras_affine, srp_affine), I4)
    expected_sl = [sl.copy() for sl in streamlines]
    for sl in expected_sl:
        sl[:, 1] = box[1] - sl[:, 1]
    expected_sl = (sl[:, [2, 0, 1]] for sl in expected_sl)
    test_sl = move_streamlines(streamlines, srp_affine)
    for ii in range(len(streamlines)):
        assert_array_equal(next(test_sl), next(expected_sl))
Example #4
File: trackvis.py Project: MPDean/dipy
def save_trk(filename, points, vox_to_ras, shape):
    """A temporary helper function for saving trk files.

    This function will soon be replaced by better trk file support in nibabel.
    """
    voxel_order = nib.orientations.aff2axcodes(vox_to_ras)
    voxel_order = "".join(voxel_order)

    # Compute the vox_to_ras of "trackvis space"
    zooms = np.sqrt((vox_to_ras * vox_to_ras).sum(0))
    vox_to_trk = np.diag(zooms)
    vox_to_trk[3, 3] = 1
    vox_to_trk[:3, 3] = zooms[:3] / 2.

    points = utils.move_streamlines(points,
                                    input_space=vox_to_ras,
                                    output_space=vox_to_trk)

    data = ((p, None, None) for p in points)

    hdr = nib.trackvis.empty_header()
    hdr['dim'] = shape
    hdr['voxel_order'] = voxel_order
    hdr['voxel_size'] = zooms[:3]

    nib.trackvis.write(filename, data, hdr)
Example #5
File: convert.py Project: forgit/nipype
    def _run_interface(self, runtime):
        dx, dy, dz = get_data_dims(self.inputs.image_file)
        vx, vy, vz = get_vox_dims(self.inputs.image_file)
        image_file = nb.load(self.inputs.image_file)
        affine = image_file.get_affine()
        out_filename = op.abspath(self.inputs.out_filename)

        # Reads MRTrix tracks
        header, streamlines = read_mrtrix_tracks(self.inputs.in_file, as_generator=True)
        iflogger.info("MRTrix Header:")
        iflogger.info(header)
        # Writes to Trackvis
        trk_header = nb.trackvis.empty_header()
        trk_header["dim"] = [dx, dy, dz]
        trk_header["voxel_size"] = [vx, vy, vz]
        trk_header["n_count"] = header["count"]

        if isdefined(self.inputs.matrix_file) and isdefined(self.inputs.registration_image_file):
            iflogger.info("Applying transformation from matrix file {m}".format(m=self.inputs.matrix_file))
            xfm = np.genfromtxt(self.inputs.matrix_file)
            iflogger.info(xfm)
            registration_image_file = nb.load(self.inputs.registration_image_file)
            reg_affine = registration_image_file.get_affine()
            r_dx, r_dy, r_dz = get_data_dims(self.inputs.registration_image_file)
            r_vx, r_vy, r_vz = get_vox_dims(self.inputs.registration_image_file)
            iflogger.info("Using affine from registration image file {r}".format(r=self.inputs.registration_image_file))
            iflogger.info(reg_affine)
            trk_header["vox_to_ras"] = reg_affine
            trk_header["dim"] = [r_dx, r_dy, r_dz]
            trk_header["voxel_size"] = [r_vx, r_vy, r_vz]

            affine = np.dot(affine, np.diag(1.0 / np.array([vx, vy, vz, 1])))
            transformed_streamlines = transform_to_affine(streamlines, trk_header, affine)

            aff = affine_from_fsl_mat_file(xfm, [vx, vy, vz], [r_vx, r_vy, r_vz])
            iflogger.info(aff)

            axcode = aff2axcodes(reg_affine)
            trk_header["voxel_order"] = axcode[0] + axcode[1] + axcode[2]

            final_streamlines = move_streamlines(transformed_streamlines, aff)
            trk_tracks = ((ii, None, None) for ii in final_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info("Saving transformed Trackvis file as {out}".format(out=out_filename))
            iflogger.info("New TrackVis Header:")
            iflogger.info(trk_header)
        else:
            iflogger.info(
                "Applying transformation from scanner coordinates to {img}".format(img=self.inputs.image_file)
            )
            axcode = aff2axcodes(affine)
            trk_header["voxel_order"] = axcode[0] + axcode[1] + axcode[2]
            trk_header["vox_to_ras"] = affine
            transformed_streamlines = transform_to_affine(streamlines, trk_header, affine)
            trk_tracks = ((ii, None, None) for ii in transformed_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info("Saving Trackvis file as {out}".format(out=out_filename))
            iflogger.info("TrackVis Header:")
            iflogger.info(trk_header)
        return runtime
Example #6
def write_trk(fname, streamlines, affine=None, shape=None):
    """
    Write out a .trk file

    Parameters
    ----------
    fname : str
        Full path to save the file into
    streamlines : list
        A list of arrays of 3D coordinates
    affine : array (4,4), optional.
        An affine transformation associated with the streamlines. Defaults to
        identity.
    shape : 3-element tuple, optional
        Spatial dimensions of an image associated with the streamlines.
        Defaults to not be set in the file header.
    """
    if affine is None:
        affine = np.eye(4)

    zooms = np.sqrt((affine * affine).sum(0))
    streamlines = move_streamlines(streamlines, affine)
    data = ((s, None, None) for s in streamlines)

    voxel_order = nib.orientations.aff2axcodes(affine)
    voxel_order = "".join(voxel_order)

    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = zooms[:3]
    hdr['voxel_order'] = voxel_order
    hdr['vox_to_ras'] = affine
    if shape is not None:
        hdr['dim'] = shape
    trackvis.write(fname, data, hdr, points_space="rasmm")
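Below is a minimal usage sketch for the write_trk helper above; it is not part of the original listing. The streamlines, affine, output path, and shape are hypothetical, and it assumes numpy plus an older nibabel that still ships the trackvis module (which the function itself requires).

import numpy as np

# Two short streamlines in voxel coordinates; write_trk maps them through the
# affine before saving, since it writes with points_space="rasmm".
streamlines = [np.array([[0., 0., 0.], [1., 1., 1.], [2., 2., 2.]]),
               np.array([[0., 0., 0.], [0., 1., 0.]])]
affine = np.diag([2., 2., 2., 1.])  # assumed 2 mm isotropic voxels
write_trk('example_streamlines.trk', streamlines,
          affine=affine, shape=(90, 108, 90))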
Example #7
File: trackvis.py Project: MarcCote/dipy
def save_trk(filename, points, vox_to_ras, shape):
    """A temporary helper function for saving trk files.

    This function will soon be replaced by better trk file support in nibabel.
    """
    warnings.warn("The `dipy.io.trackvis.save_trk` function is deprecated as of version" +
                  " 0.14 of Dipy and will be removed in a future " +
                  "version. Please use `dipy.io.streamline.save_trk` function instead",
                  DeprecationWarning)

    voxel_order = nib.orientations.aff2axcodes(vox_to_ras)
    voxel_order = "".join(voxel_order)

    # Compute the vox_to_ras of "trackvis space"
    zooms = np.sqrt((vox_to_ras * vox_to_ras).sum(0))
    vox_to_trk = np.diag(zooms)
    vox_to_trk[3, 3] = 1
    vox_to_trk[:3, 3] = zooms[:3] / 2.

    points = utils.move_streamlines(points,
                                    input_space=vox_to_ras,
                                    output_space=vox_to_trk)

    data = ((p, None, None) for p in points)

    hdr = nib.trackvis.empty_header()
    hdr['dim'] = shape
    hdr['voxel_order'] = voxel_order
    hdr['voxel_size'] = zooms[:3]

    nib.trackvis.write(filename, data, hdr)
Example #8
def test_move_streamlines():
    streamlines = make_streamlines()
    affine = np.eye(4)
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i])

    affine[:3,3] += (4,5,6)
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i]+(4, 5, 6))

    affine = np.eye(4)
    affine = affine[[2,1,0,3]]
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i][:, [2, 1, 0]])
Example #9
def march_the_cubes(data, aff):
    from skimage.measure import marching_cubes
    import numpy as np
    from dipy.tracking.utils import move_streamlines
    verts, faces = marching_cubes(data, level=0.5)
    verts = np.asarray(list(move_streamlines(verts, aff)))
    #TODO: apply affine here
    return verts, faces
Example #10
    def _run_interface(self, runtime):
        dx, dy, dz = get_data_dims(self.inputs.image_file)
        vx, vy, vz = get_vox_dims(self.inputs.image_file)
        image_file = nb.load(self.inputs.image_file)
        affine = image_file.get_affine()
        out_filename = op.abspath(self.inputs.out_filename)

        # Reads MRTrix tracks
        header, streamlines = read_mrtrix_tracks(self.inputs.in_file, as_generator=True)
        iflogger.info('MRTrix Header:')
        iflogger.info(header)
        # Writes to Trackvis
        trk_header = nb.trackvis.empty_header()
        trk_header['dim'] = [dx, dy, dz]
        trk_header['voxel_size'] = [vx, vy, vz]
        trk_header['n_count'] = header['count']

        if isdefined(self.inputs.matrix_file) and isdefined(self.inputs.registration_image_file):
            iflogger.info('Applying transformation from matrix file {m}'.format(m=self.inputs.matrix_file))
            xfm = np.genfromtxt(self.inputs.matrix_file)
            iflogger.info(xfm)
            registration_image_file = nb.load(self.inputs.registration_image_file)
            reg_affine = registration_image_file.get_affine()
            r_dx, r_dy, r_dz = get_data_dims(self.inputs.registration_image_file)
            r_vx, r_vy, r_vz = get_vox_dims(self.inputs.registration_image_file)
            iflogger.info('Using affine from registration image file {r}'.format(r=self.inputs.registration_image_file))
            iflogger.info(reg_affine)
            trk_header['vox_to_ras'] = reg_affine
            trk_header['dim'] = [r_dx, r_dy, r_dz]
            trk_header['voxel_size'] = [r_vx, r_vy, r_vz]

            affine = np.dot(affine, np.diag(1. / np.array([vx, vy, vz, 1])))
            transformed_streamlines = transform_to_affine(streamlines, trk_header, affine)

            aff = affine_from_fsl_mat_file(xfm, [vx, vy, vz], [r_vx, r_vy, r_vz])
            iflogger.info(aff)

            axcode = aff2axcodes(reg_affine)
            trk_header['voxel_order'] = axcode[0]+axcode[1]+axcode[2]

            final_streamlines = move_streamlines(transformed_streamlines, aff)
            trk_tracks = ((ii, None, None) for ii in final_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info('Saving transformed Trackvis file as {out}'.format(out=out_filename))
            iflogger.info('New TrackVis Header:')
            iflogger.info(trk_header)
        else:
            iflogger.info('Applying transformation from scanner coordinates to {img}'.format(img=self.inputs.image_file))
            axcode = aff2axcodes(affine)
            trk_header['voxel_order'] = axcode[0]+axcode[1]+axcode[2]
            trk_header['vox_to_ras'] = affine
            transformed_streamlines = transform_to_affine(streamlines, trk_header, affine)
            trk_tracks = ((ii, None, None) for ii in transformed_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info('Saving Trackvis file as {out}'.format(out=out_filename))
            iflogger.info('TrackVis Header:')
            iflogger.info(trk_header)
        return runtime
Example #11
def test_target():
    streamlines = [np.array([[0., 0., 0.],
                             [1., 0., 0.],
                             [2., 0., 0.]]),
                   np.array([[0., 0., 0],
                             [0, 1., 1.],
                             [0, 2., 2.]])
                  ]
    affine = np.eye(4)
    mask = np.zeros((4, 4, 4), dtype=bool)
    mask[0, 0, 0] = True

    # Both pass though
    new = list(target(streamlines, mask, affine=affine))
    assert_equal(len(new), 2)
    new = list(target(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 0)

    # only first
    mask[:] = False
    mask[1, 0, 0] = True
    new = list(target(streamlines, mask, affine=affine))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[0])
    new = list(target(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[1])

    # Test that bad points raise a value error
    bad_sl = [ np.array([[10., 10., 10.]])]
    new = target(bad_sl, mask, affine=affine)
    assert_raises(ValueError, list, new)
    bad_sl = [-np.array([[10., 10., 10.]])]
    new = target(bad_sl, mask, affine=affine)
    assert_raises(ValueError, list, new)

    # Test smaller voxels
    affine = np.random.random((4, 4)) - .5
    affine[3] = [0, 0, 0, 1]
    streamlines = list(move_streamlines(streamlines, affine))
    new = list(target(streamlines, mask, affine=affine))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[0])
    new = list(target(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[1])

    # Test that changing mask and affine do not break target
    include = target(streamlines, mask, affine=affine)
    exclude = target(streamlines, mask, affine=affine, include=False)
    affine[:] = np.eye(4)
    mask[:] = False
    include = list(include)
    exclude = list(exclude)
    assert_equal(len(include), 1)
    assert_true(include[0] is streamlines[0])
    assert_equal(len(exclude), 1)
    assert_true(exclude[0] is streamlines[1])
Example #12
def test_move_streamlines():
    streamlines = make_streamlines()
    affine = np.eye(4)
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i])

    affine[:3, 3] += (4, 5, 6)
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i]+(4, 5, 6))

    affine = np.eye(4)
    affine = affine[[2, 1, 0, 3]]
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i][:, [2, 1, 0]])

    affine[:3, 3] += (4, 5, 6)
    new_streamlines = move_streamlines(streamlines, affine)
    undo_affine = move_streamlines(new_streamlines, np.eye(4),
                                   input_space=affine)
    for i, test_sl in enumerate(undo_affine):
        assert_array_almost_equal(test_sl, streamlines[i])

    # Test that changing affine does affect moving streamlines
    affineA = affine.copy()
    affineB = affine.copy()
    streamlinesA = move_streamlines(streamlines, affineA)
    streamlinesB = move_streamlines(streamlines, affineB)
    affineB[:] = 0
    for (a, b) in zip(streamlinesA, streamlinesB):
        assert_array_equal(a, b)
Example #13
def test_move_streamlines():
    streamlines = make_streamlines()
    affine = np.eye(4)
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i])

    affine[:3, 3] += (4, 5, 6)
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i] + (4, 5, 6))

    affine = np.eye(4)
    affine = affine[[2, 1, 0, 3]]
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        assert_array_equal(test_sl, streamlines[i][:, [2, 1, 0]])

    affine[:3, 3] += (4, 5, 6)
    new_streamlines = move_streamlines(streamlines, affine)
    undo_affine = move_streamlines(new_streamlines,
                                   np.eye(4),
                                   input_space=affine)
    for i, test_sl in enumerate(undo_affine):
        assert_array_almost_equal(test_sl, streamlines[i])

    # Test that changing affine does affect moving streamlines
    affineA = affine.copy()
    affineB = affine.copy()
    streamlinesA = move_streamlines(streamlines, affineA)
    streamlinesB = move_streamlines(streamlines, affineB)
    affineB[:] = 0
    for (a, b) in zip(streamlinesA, streamlinesB):
        assert_array_equal(a, b)
Example #14
File: eudx.py Project: MPDean/dipy
def __iter__(self):
    if self.seed_list is not None:
        inv = np.linalg.inv(self.affine)
        seed_voxels = np.dot(self.seed_list, inv[:3, :3].T)
        seed_voxels += inv[:3, 3]
    else:
        seed_voxels = None
    voxel_tracks = self._voxel_tracks(seed_voxels)
    return utils.move_streamlines(voxel_tracks, self.affine)
Example #15
def convert_to_indices(streamline, papaya_aff, aff, img):
    #print(streamline)
    topoints = lambda x : np.array([[m["x"], m["y"], m["z"]] for m in x["world_coor"]])
    points_orig = topoints(streamline)
    points_nifti_space = list(utils.move_streamlines([points_orig], aff, input_space=papaya_aff))[0]
    from dipy.tracking._utils import _to_voxel_coordinates, _mapping_to_voxel
    lin_T, offset = _mapping_to_voxel(aff, None)
    idx = _to_voxel_coordinates(points_orig, lin_T, offset)
    return points_nifti_space, idx
Example #17
def test_eudx_further():
    """ Cause we love testin.. ;-)
    """

    fimg, fbvals, fbvecs = get_data('small_101D')

    img = ni.load(fimg)
    affine = img.affine
    data = img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    x, y, z = data.shape[:3]
    seeds = np.zeros((10**4, 3))
    for i in range(10**4):
        rx = (x-1)*np.random.rand()
        ry = (y-1)*np.random.rand()
        rz = (z-1)*np.random.rand()
        seeds[i] = np.ascontiguousarray(np.array([rx, ry, rz]),
                                        dtype=np.float64)

    sphere = get_sphere('symmetric724')

    ind = quantize_evecs(ten.evecs)
    eu = EuDX(a=ten.fa, ind=ind, seeds=seeds,
              odf_vertices=sphere.vertices, a_low=.2)
    T = [e for e in eu]

    # check that there are no negative elements
    for t in T:
        assert_equal(np.sum(t.ravel() < 0), 0)

    # Test eudx with affine
    def random_affine(seeds):
        affine = np.eye(4)
        affine[:3, :] = np.random.random((3, 4))
        seeds = np.dot(seeds, affine[:3, :3].T)
        seeds += affine[:3, 3]
        return affine, seeds

    # Make two random affines and move seeds
    affine1, seeds1 = random_affine(seeds)
    affine2, seeds2 = random_affine(seeds)

    # Make tracks using different affines
    eu1 = EuDX(a=ten.fa, ind=ind, odf_vertices=sphere.vertices,
               seeds=seeds1, a_low=.2, affine=affine1)
    eu2 = EuDX(a=ten.fa, ind=ind, odf_vertices=sphere.vertices,
               seeds=seeds2, a_low=.2, affine=affine2)

    # Move from eu2 affine2 to affine1
    eu2_to_eu1 = utils.move_streamlines(eu2, output_space=affine1,
                                        input_space=affine2)
    # Check that the tracks are the same
    for sl1, sl2 in zip(eu1, eu2_to_eu1):
        assert_array_almost_equal(sl1, sl2)
Example #18
def test_target():
    streamlines = [
        np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.]]),
        np.array([[0., 0., 0], [0, 1., 1.], [0, 2., 2.]])
    ]
    affine = np.eye(4)
    mask = np.zeros((4, 4, 4), dtype=bool)
    mask[0, 0, 0] = True

    # Both pass though
    new = list(target(streamlines, mask, affine=affine))
    assert_equal(len(new), 2)
    new = list(target(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 0)

    # only first
    mask[:] = False
    mask[1, 0, 0] = True
    new = list(target(streamlines, mask, affine=affine))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[0])
    new = list(target(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[1])

    # Test that bad points raise a value error
    bad_sl = [np.array([[10., 10., 10.]])]
    new = target(bad_sl, mask, affine=affine)
    assert_raises(ValueError, list, new)
    bad_sl = [-np.array([[10., 10., 10.]])]
    new = target(bad_sl, mask, affine=affine)
    assert_raises(ValueError, list, new)

    # Test smaller voxels
    affine = np.random.random((4, 4)) - .5
    affine[3] = [0, 0, 0, 1]
    streamlines = list(move_streamlines(streamlines, affine))
    new = list(target(streamlines, mask, affine=affine))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[0])
    new = list(target(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[1])

    # Test that changing mask and affine do not break target
    include = target(streamlines, mask, affine=affine)
    exclude = target(streamlines, mask, affine=affine, include=False)
    affine[:] = np.eye(4)
    mask[:] = False
    include = list(include)
    exclude = list(exclude)
    assert_equal(len(include), 1)
    assert_true(include[0] is streamlines[0])
    assert_equal(len(exclude), 1)
    assert_true(exclude[0] is streamlines[1])
Example #19
def test_eudx_further():
    """ Cause we love testin.. ;-)
    """

    fimg,fbvals,fbvecs=get_data('small_101D')

    img=ni.load(fimg)
    affine=img.get_affine()
    data=img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    x,y,z=data.shape[:3]
    seeds=np.zeros((10**4,3))
    for i in range(10**4):
        rx=(x-1)*np.random.rand()
        ry=(y-1)*np.random.rand()
        rz=(z-1)*np.random.rand()
        seeds[i]=np.ascontiguousarray(np.array([rx,ry,rz]),dtype=np.float64)

    sphere = get_sphere('symmetric724')

    ind = quantize_evecs(ten.evecs)
    eu=EuDX(a=ten.fa, ind=ind, seeds=seeds,
            odf_vertices=sphere.vertices, a_low=.2)
    T=[e for e in eu]

    #check that there are no negative elements
    for t in T:
        assert_equal(np.sum(t.ravel()<0),0)

    # Test eudx with affine
    def random_affine(seeds):
        affine = np.eye(4)
        affine[:3, :] = np.random.random((3, 4))
        seeds = np.dot(seeds, affine[:3, :3].T)
        seeds += affine[:3, 3]
        return affine, seeds

    # Make two random affines and move seeds
    affine1, seeds1 = random_affine(seeds)
    affine2, seeds2 = random_affine(seeds)

    # Make tracks using different affines
    eu1 = EuDX(a=ten.fa, ind=ind, odf_vertices=sphere.vertices,
               seeds=seeds1, a_low=.2, affine=affine1)
    eu2 = EuDX(a=ten.fa, ind=ind, odf_vertices=sphere.vertices,
               seeds=seeds2, a_low=.2, affine=affine2)

    # Move from eu2 affine2 to affine1
    eu2_to_eu1 = utils.move_streamlines(eu2, output_space=affine1,
                                        input_space=affine2)
    # Check that the tracks are the same
    for sl1, sl2 in zip(eu1, eu2_to_eu1):
        assert_array_almost_equal(sl1, sl2)
Example #20
File: test_utils.py Project: arokem/dipy
def _target(target_f, streamlines, voxel_both_true, voxel_one_true,
            test_bad_points):
    affine = np.eye(4)
    mask = np.zeros((4, 4, 4), dtype=bool)

    # Both pass though
    mask[voxel_both_true] = True
    new = list(target_f(streamlines, mask, affine=affine))
    npt.assert_equal(len(new), 2)
    new = list(target_f(streamlines, mask, affine=affine, include=False))
    npt.assert_equal(len(new), 0)

    # only first
    mask[:] = False
    mask[voxel_one_true] = True
    new = list(target_f(streamlines, mask, affine=affine))
    npt.assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[0])
    new = list(target_f(streamlines, mask, affine=affine, include=False))
    npt.assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[1])

    # Test that bad points raise a value error
    if test_bad_points:
        bad_sl = streamlines + [np.array([[10.0, 10.0, 10.0]])]
        new = target_f(bad_sl, mask, affine=affine)
        npt.assert_raises(ValueError, list, new)
        bad_sl = streamlines + [-np.array([[10.0, 10.0, 10.0]])]
        new = target_f(bad_sl, mask, affine=affine)
        npt.assert_raises(ValueError, list, new)

    # Test smaller voxels
    affine = np.array([[.3, 0, 0, 0],
                       [0, .2, 0, 0],
                       [0, 0, .4, 0],
                       [0, 0, 0, 1]])
    streamlines = list(move_streamlines(streamlines, affine))
    new = list(target_f(streamlines, mask, affine=affine))
    npt.assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[0])
    new = list(target_f(streamlines, mask, affine=affine, include=False))
    npt.assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[1])

    # Test that changing mask or affine does not break target/target_line_based
    include = target_f(streamlines, mask, affine=affine)
    exclude = target_f(streamlines, mask, affine=affine, include=False)
    affine[:] = np.eye(4)
    mask[:] = False
    include = list(include)
    exclude = list(exclude)
    npt.assert_equal(len(include), 1)
    assert_true(include[0] is streamlines[0])
    npt.assert_equal(len(exclude), 1)
    assert_true(exclude[0] is streamlines[1])
Example #21
def main(args):
    streamlines = load_trk(args.input)
    offset = np.array(streamlines[1]["dimensions"])
    vx_size = np.array(streamlines[1]["voxel_sizes"])

    FS_xfm = [[1.25, 0.000, 0.000, (-90) / 2],
              [0.000, 1.25, 0.000, (-126) / 2],
              [0.000, 0.000, 1.25, (-72) / 2],
              [0.000, 0.000, 0.000, 1.000]]

    vox_to_ras = np.array([[-1.25, 0., 0., 90.], [0., 1.25, 0., -126.],
                           [0., 0., 1.25, -72.], [0., 0., 0., 1.]])
    zooms = np.sqrt((vox_to_ras * vox_to_ras).sum(0))
    vox_to_trk = np.diag(zooms)
    vox_to_trk[3, 3] = 1
    vox_to_trk[:3, 3] = zooms[:3] / 2.

    #FS_xfm = Affine().from_matrix44(FS_xfm).inv().as_affine()
    #FS_xfm = inv_affine(np.array(FS_xfm))

    #new_streamlines = transform_streamlines(streamlines[0],FS_xfm)
    new_streamlines = utils.move_streamlines(streamlines[0],
                                             input_space=vox_to_trk,
                                             output_space=np.eye(4))
    new_streamlines = utils.move_streamlines(new_streamlines,
                                             input_space=np.eye(4),
                                             output_space=FS_xfm)
    #new_streamlines = utils.move_streamlines(
    #  streamlines[0],
    #  input_space=vox_to_trk,
    #  output_space=vox_to_ras
    #)
    #new_streamlines = utils.move_streamlines(
    #  new_streamlines,
    #  input_space=vox_to_ras,
    #  output_space=np.eye(4)
    #)
    #new_streamlines = []
    #for idx in range(len(streamlines[0])):
    #  s = np.array(streamlines[0][idx]) - offset
    #  new_streamlines.append(s.tolist())
    #save_trk( args.output, new_streamlines, vox_to_ras, shape=offset)
    save_trk(args.output, new_streamlines, np.eye(4), shape=[0, 0, 0])
Example #22
File: mriutil.py Project: bpinsard/toad
def transform_to_trackvis_voxmm(streams, header):
    from nibabel.volumeutils import rec2dict
    from nibabel.streamlines.trk import get_affine_rasmm_to_trackvis
    from dipy.tracking.utils import move_streamlines
    header = rec2dict(header)
    header['voxel_sizes'] = header['voxel_size']
    header['voxel_to_rasmm'] = header['vox_to_ras']
    header['dimensions'] = header['dim']
    affine = get_affine_rasmm_to_trackvis(header)
    streams = move_streamlines(streams, affine)
    return streams
Example #23
def convert_to_indices(streamline, papaya_aff, aff, img):
    #print(streamline)
    topoints = lambda x: np.array([[m["x"], m["y"], m["z"]]
                                   for m in x["world_coor"]])
    points_orig = topoints(streamline)
    points_nifti_space = list(
        utils.move_streamlines([points_orig], aff, input_space=papaya_aff))[0]
    from dipy.tracking._utils import _to_voxel_coordinates, _mapping_to_voxel
    lin_T, offset = _mapping_to_voxel(aff, None)
    idx = _to_voxel_coordinates(points_orig, lin_T, offset)
    return points_nifti_space, idx
Example #24
File: mriutil.py Project: arnaudbore/toad
def transform_to_trackvis_voxmm(streams, header):
    from nibabel.volumeutils import rec2dict
    from nibabel.streamlines.trk import get_affine_rasmm_to_trackvis
    from dipy.tracking.utils import move_streamlines
    header = rec2dict(header)
    header['voxel_sizes'] = header['voxel_size']
    header['voxel_to_rasmm'] = header['vox_to_ras']
    header['dimensions'] = header['dim']
    affine = get_affine_rasmm_to_trackvis(header)
    streams = move_streamlines(streams, affine)
    return streams
Example #25
def _target(target_f, streamlines, voxel_both_true, voxel_one_true,
            test_bad_points):
    affine = np.eye(4)
    mask = np.zeros((4, 4, 4), dtype=bool)

    # Both pass though
    mask[voxel_both_true] = True
    new = list(target_f(streamlines, mask, affine=affine))
    assert_equal(len(new), 2)
    new = list(target_f(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 0)

    # only first
    mask[:] = False
    mask[voxel_one_true] = True
    new = list(target_f(streamlines, mask, affine=affine))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[0])
    new = list(target_f(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[1])

    # Test that bad points raise a value error
    if test_bad_points:
        bad_sl = streamlines + [np.array([[10.0, 10.0, 10.0]])]
        new = target_f(bad_sl, mask, affine=affine)
        assert_raises(ValueError, list, new)
        bad_sl = streamlines + [-np.array([[10.0, 10.0, 10.0]])]
        new = target_f(bad_sl, mask, affine=affine)
        assert_raises(ValueError, list, new)

    # Test smaller voxels
    affine = np.random.random((4, 4)) - .5
    affine[3] = [0, 0, 0, 1]
    streamlines = list(move_streamlines(streamlines, affine))
    new = list(target_f(streamlines, mask, affine=affine))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[0])
    new = list(target_f(streamlines, mask, affine=affine, include=False))
    assert_equal(len(new), 1)
    assert_true(new[0] is streamlines[1])

    # Test that changing mask or affine does not break target/target_line_based
    include = target_f(streamlines, mask, affine=affine)
    exclude = target_f(streamlines, mask, affine=affine, include=False)
    affine[:] = np.eye(4)
    mask[:] = False
    include = list(include)
    exclude = list(exclude)
    assert_equal(len(include), 1)
    assert_true(include[0] is streamlines[0])
    assert_equal(len(exclude), 1)
    assert_true(exclude[0] is streamlines[1])
Example #26
def append_to_trk(
  streamlines,
  filename,
  update_num_trk=False,
  vox_to_ras=None,
  send_to_voxmm=None
  ):

  if not os.path.isfile(filename):
    raise FileNotFoundError("append_to_trk called on non-existent file")

  if send_to_voxmm is None:
    raise RuntimeError("append_to_trk:send_to_voxmm not set.\nsend_to_voxmm must be explicitly True or False (otherwise bad things could happen in the trk).")

  if send_to_voxmm and vox_to_ras is not None:
    #this clones behavior from 
    ## https://github.com/nipy/dipy/blob/master/dipy/io/trackvis.py#L27
    zooms = np.sqrt((vox_to_ras * vox_to_ras).sum(0))
    vox_to_trk = np.diag(zooms)
    vox_to_trk[3, 3] = 1
    vox_to_trk[:3, 3] = zooms[:3] / 2.
    streamlines = list(utils.move_streamlines(
      streamlines,
      input_space=vox_to_ras,
      output_space=vox_to_trk
    ))
  elif send_to_voxmm:
    raise RuntimeError("send_to_voxmm set, but no transform passed.")

  with open(filename, mode='r+b') as file:
    ##get header
    hdr = file.read(1000)

    size_idx = 1000-12
    num_trks = struct.unpack("i", hdr[size_idx:(size_idx+4)])[0]

    if num_trks == 0 and update_num_trk:
      print("Error, num_trks set to zero, cannot update num, aborting")
      exit(1)
    elif update_num_trk:
      #move to size_idx in hdr
      file.seek(size_idx,0)
      file.write(struct.pack("i", num_trks + len(streamlines[0])))

    #move to end of file
    file.seek(0,2)

    #iterate through streams and output
    for stream in np.array(streamlines):
      file.write(struct.pack("i",len(stream)))
      for row in stream:
        file.write(struct.pack("fff",row[0],row[1],row[2]))
Example #27
def test_streamline_registration():
    sl1 = [
        np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 1], [0, 0, 1.5]]),
        np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 1, 1]])
    ]
    affine = np.eye(4)
    affine[:3, 3] = np.random.randn(3)
    sl2 = list(move_streamlines(sl1, affine))
    aligned, matrix = streamline_registration(sl2, sl1)
    npt.assert_almost_equal(matrix, np.linalg.inv(affine))
    npt.assert_almost_equal(aligned[0], sl1[0])
    npt.assert_almost_equal(aligned[1], sl1[1])

    # We assume the two tracks come from the same space, but it might have
    # some affine associated with it:
    base_aff = np.eye(4) * np.random.rand()
    base_aff[:3, 3] = np.array([1, 2, 3])
    base_aff[3, 3] = 1

    with nbtmp.InTemporaryDirectory() as tmpdir:
        for use_aff in [None, base_aff]:
            fname1 = op.join(tmpdir, 'sl1.trk')
            fname2 = op.join(tmpdir, 'sl2.trk')
            if use_aff is not None:
                # Move the streamlines to this other space, and report it:
                write_trk(fname1, move_streamlines(sl1,
                                                   np.linalg.inv(use_aff)),
                          use_aff)
                write_trk(fname2, move_streamlines(sl2,
                                                   np.linalg.inv(use_aff)),
                          use_aff)
            else:
                write_trk(fname1, sl1)
                write_trk(fname2, sl2)

            aligned, matrix = streamline_registration(fname2, fname1)
            npt.assert_almost_equal(aligned[0], sl1[0], decimal=5)
            npt.assert_almost_equal(aligned[1], sl1[1], decimal=5)
Example #28
def test_streamline_registration():
    sl1 = [np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 1], [0, 0, 1.5]]),
           np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 1, 1]])]
    affine = np.eye(4)
    affine[:3, 3] = np.random.randn(3)
    sl2 = list(move_streamlines(sl1, affine))
    aligned, matrix = streamline_registration(sl2, sl1)
    npt.assert_almost_equal(matrix, np.linalg.inv(affine))
    npt.assert_almost_equal(aligned[0], sl1[0])
    npt.assert_almost_equal(aligned[1], sl1[1])

    # We assume the two tracks come from the same space, but it might have
    # some affine associated with it:
    base_aff = np.eye(4) * np.random.rand()
    base_aff[:3, 3] = np.array([1, 2, 3])
    base_aff[3, 3] = 1

    with nbtmp.InTemporaryDirectory() as tmpdir:
        for use_aff in [None, base_aff]:
            fname1 = op.join(tmpdir, 'sl1.trk')
            fname2 = op.join(tmpdir, 'sl2.trk')
            if use_aff is not None:
                # Move the streamlines to this other space, and report it:
                write_trk(fname1,
                          move_streamlines(sl1, np.linalg.inv(use_aff)),
                          use_aff)
                write_trk(fname2,
                          move_streamlines(sl2, np.linalg.inv(use_aff)),
                          use_aff)
            else:
                write_trk(fname1, sl1)
                write_trk(fname2, sl2)

            aligned, matrix = streamline_registration(fname2, fname1)
            npt.assert_almost_equal(aligned[0], sl1[0], decimal=5)
            npt.assert_almost_equal(aligned[1], sl1[1], decimal=5)
Example #29
def streamline_registration(moving,
                            static,
                            n_points=100,
                            native_resampled=False):
    """
    Register two collections of streamlines ('bundles') to each other

    Parameters
    ----------
    moving, static : lists of 3 by n, or str
        The two bundles to be registered. Given either as lists of arrays with
        3D coordinates, or strings containing full paths to these files.

    n_points : int, optional
        How many points to resample to. Default: 100.

    native_resampled : bool, optional
        Whether to return the moving bundle in the original space, but
        resampled in the static space to n_points.

    Returns
    -------
    aligned : list
        Streamlines from the moving group, moved to be closely matched to
        the static group.

    matrix : array (4, 4)
        The affine transformation that takes us from 'moving' to 'static'
    """
    # Load the streamlines, if you were given a file-name
    if isinstance(moving, str):
        moving = sut.read_trk(moving)
    if isinstance(static, str):
        static = sut.read_trk(static)

    srr = StreamlineLinearRegistration()
    srm = srr.optimize(static=set_number_of_points(static, n_points),
                       moving=set_number_of_points(moving, n_points))

    aligned = srm.transform(moving)
    if native_resampled:
        aligned = set_number_of_points(aligned, n_points)
        aligned = move_streamlines(aligned, np.linalg.inv(srm.matrix))

    return aligned, srm.matrix
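A hypothetical sketch of calling the streamline_registration function above, mirroring the translation-only check used in the test examples earlier in this listing (Examples #27 and #28). It assumes numpy and dipy's move_streamlines are importable and that streamline_registration is in scope; the bundles and shift are made up for illustration.

import numpy as np
from dipy.tracking.utils import move_streamlines

static = [np.array([[0., 0., 0.], [0., 0., 1.], [0., 0., 2.]]),
          np.array([[0., 0., 0.], [0., 1., 1.], [0., 2., 2.]])]
shift = np.eye(4)
shift[:3, 3] = [2., 0., 0.]  # hypothetical translation between the two bundles
moving = list(move_streamlines(static, shift))

aligned, matrix = streamline_registration(moving, static)
# matrix should be close to np.linalg.inv(shift), and aligned close to static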
Example #30
def test_fit_data():
    fdata, fbval, fbvec = dpd.get_fnames('small_25')
    fstreamlines = dpd.get_fnames('small_25_streamlines')
    gtab = grad.gradient_table(fbval, fbvec)
    ni_data = nib.load(fdata)
    data = ni_data.get_data()
    tensor_streamlines = nib.streamlines.load(fstreamlines).streamlines
    tensor_streamlines = move_streamlines(tensor_streamlines, np.eye(4),
                                          ni_data.affine)
    life_model = life.FiberModel(gtab)
    life_fit = life_model.fit(data, tensor_streamlines)
    model_error = life_fit.predict() - life_fit.data
    model_rmse = np.sqrt(np.mean(model_error**2, -1))
    matlab_rmse, matlab_weights = dpd.matlab_life_results()
    # Lower error than the matlab implementation for these data:
    npt.assert_(np.median(model_rmse) < np.median(matlab_rmse))
    # And a moderate correlation with the Matlab implementation weights:
    npt.assert_(np.corrcoef(matlab_weights, life_fit.beta)[0, 1] > 0.6)
Example #31
def detect_FP(df, data, aff, report):
    fp = df[df.annotation == "FP"][["x", "y", "z"]].values
    to_indices = np.round(
        np.asarray(
            list(utils.move_streamlines(fp.tolist(),
                                        np.linalg.inv(aff))))).astype(int)

    for j, idx in enumerate(to_indices):
        entry = {}
        val = data[idx[0], idx[1], idx[2]]
        entry["world"] = fp.tolist()[j]
        entry["ijk"] = idx.tolist()
        entry["caught"] = False
        if val:
            entry["caught"] = True
            entry["size"] = float(np.sum(data == val))
            data[data == val] = 0

        else:
            searchR = 1

            for i in range(3):
                new_idx = deepcopy(idx)
                new_idx[i] += searchR
                #print("trying", new_idx)
                val = data[new_idx[0], new_idx[1], new_idx[2]]
                if val != 0:
                    break
                new_idx = deepcopy(idx)
                new_idx[i] -= searchR
                val = data[new_idx[0], new_idx[1], new_idx[2]]
                if val != 0:
                    break
            if val == 0:
                #print("ERROR, no lesion here!!!", idx) #TODO: draw a 2 vox box around this coordinate and look for val
                pass
            else:
                #print("FOUND VALS AROUND POINT", new_idx)
                entry["ijk"] = new_idx.tolist()
                entry["caught"] = True
                entry["size"] = float(np.sum(data == val))
                data[data == val] = 0
        report["FP"].append(entry)
    return report, data
Example #32
def get_streamlines_plot(path, ref_img_path, subsampling=10):
    affine = nib.load(ref_img_path).affine

    streams, hdr = trackvis.read(path)
    streamlines = [s[0] for s in streams]
    streamlines = list(move_streamlines(streamlines, np.linalg.inv(affine)))

    traces = []
    for sl in streamlines[::subsampling]:
        color = get_voxelwise_orientation_colormap([sl],
                                                   orientation="saggital")[0]
        x, y, z, = zip(*sl)
        trace = go.Scatter3d(x=x,
                             y=y,
                             z=z,
                             line=dict(color=color, width=2),
                             mode="lines")
        traces.append(trace)
    return traces
Example #33
def streamline_registration(moving, static, n_points=100,
                            native_resampled=False):
    """
    Register two collections of streamlines ('bundles') to each other

    Parameters
    ----------
    moving, static : lists of 3 by n, or str
        The two bundles to be registered. Given either as lists of arrays with
        3D coordinates, or strings containing full paths to these files.

    n_points : int, optional
        How many points to resample to. Default: 100.

    native_resampled : bool, optional
        Whether to return the moving bundle in the original space, but
        resampled in the static space to n_points.

    Returns
    -------
    aligned : list
        Streamlines from the moving group, moved to be closely matched to
        the static group.

    matrix : array (4, 4)
        The affine transformation that takes us from 'moving' to 'static'
    """
    # Load the streamlines, if you were given a file-name
    if isinstance(moving, str):
        moving = sut.read_trk(moving)
    if isinstance(static, str):
        static = sut.read_trk(static)

    srr = StreamlineLinearRegistration()
    srm = srr.optimize(static=set_number_of_points(static, n_points),
                       moving=set_number_of_points(moving, n_points))

    aligned = srm.transform(moving)
    if native_resampled:
        aligned = set_number_of_points(aligned, n_points)
        aligned = move_streamlines(aligned, np.linalg.inv(srm.matrix))

    return aligned, srm.matrix
Example #34
File: test_utils.py Project: mbeyeler/dipy
def test_move_streamlines():
    streamlines, seeds = make_streamlines(True)
    affine = np.eye(4)
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        npt.assert_array_equal(test_sl, streamlines[i])

    affine[:3, 3] += (4, 5, 6)
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        npt.assert_array_equal(test_sl, streamlines[i] + (4, 5, 6))

    affine = np.eye(4)
    affine = affine[[2, 1, 0, 3]]
    new_streamlines = move_streamlines(streamlines, affine)
    for i, test_sl in enumerate(new_streamlines):
        npt.assert_array_equal(test_sl, streamlines[i][:, [2, 1, 0]])

    affine[:3, 3] += (4, 5, 6)
    new_streamlines = move_streamlines(streamlines, affine)
    undo_affine = move_streamlines(new_streamlines,
                                   np.eye(4),
                                   input_space=affine)
    for i, test_sl in enumerate(undo_affine):
        npt.assert_array_almost_equal(test_sl, streamlines[i])

    # Test that changing affine does affect moving streamlines
    affineA = affine.copy()
    affineB = affine.copy()
    streamlinesA = move_streamlines(streamlines, affineA)
    streamlinesB = move_streamlines(streamlines, affineB)
    affineB[:] = 0
    for (a, b) in zip(streamlinesA, streamlinesB):
        npt.assert_array_equal(a, b)

    # Test that seeds are also moved
    streamlinesA, seedsA = zip(
        *move_streamlines(streamlines, affineA, seeds=seeds))
    for (seed, seedA) in zip(seeds, seedsA):
        npt.assert_raises(AssertionError, npt.assert_array_equal, seed, seedA)
Example #35
def convert_fibs(inp, outp, parcellation):
    label_nii  = nib.load(parcellation)
    label_data = label_nii.get_data()

    fiber_npz = np.load(inp)
    fibers = fiber_npz[list(fiber_npz.keys())[0]]  # list() so the lookup also works on Python 3

    voxel_size = label_nii.header.get_zooms()
    shape = label_data.shape
    affine = label_nii.affine

    trackvis_header = nib.trackvis.empty_header()
    trackvis_header['voxel_size'] = voxel_size
    trackvis_header['dim'] = shape
    trackvis_header['voxel_order'] = "RAS"

    trackvis_point_space = utils.affine_for_trackvis(voxel_size)
    trk = utils.move_streamlines(fibers, trackvis_point_space, input_space=np.eye(4))
    trk = list(trk)

    for_save = [(sl, None, None) for sl in trk]
    nib.trackvis.write(outp, for_save, trackvis_header)
Example #36
def test_read_write_trk():
    sl = [np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 1], [0, 0, 1.5]]),
          np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 1, 1]])]

    with nbtmp.InTemporaryDirectory() as tmpdir:
        fname = op.join(tmpdir, 'sl.trk')
        aus.write_trk(fname, sl)
        new_sl = aus.read_trk(fname)
        npt.assert_equal(list(new_sl), sl)

        # What happens if this set of streamlines has some funky affine
        # associated with it?
        aff = np.eye(4) * np.random.rand()
        aff[:3, 3] = np.array([1, 2, 3])
        aff[3, 3] = 1
        # We move the streamlines, and report the inverse of the affine:
        aus.write_trk(fname, move_streamlines(sl, aff),
                      affine=np.linalg.inv(aff))
        # When we read this, we get back what we put in:
        new_sl = aus.read_trk(fname)
        # Compare each streamline:
        for new, old in zip(new_sl, sl):
            npt.assert_almost_equal(new, old, decimal=4)
Example #37
File: api.py Project: akeshavan/pyAFQ
def _export_bundles(row,
                    wm_labels,
                    bundle_dict,
                    reg_template,
                    odf_model="DTI",
                    directions="det",
                    n_seeds=2,
                    random_seeds=False,
                    force_recompute=False):

    for func, folder in zip([_clean_bundles, _bundles],
                            ['clean_bundles', 'bundles']):
        bundles_file = func(row,
                            wm_labels,
                            bundle_dict,
                            reg_template,
                            odf_model=odf_model,
                            directions=directions,
                            n_seeds=n_seeds,
                            random_seeds=random_seeds,
                            force_recompute=force_recompute)

        bundles_dir = op.join(row['results_dir'], folder)
        os.makedirs(bundles_dir, exist_ok=True)
        trk = nib.streamlines.load(bundles_file)
        tg = trk.tractogram
        streamlines = tg.streamlines
        for bundle in bundle_dict:
            uid = bundle_dict[bundle]['uid']
            idx = np.where(tg.data_per_streamline['bundle'] == uid)[0]
            this_sl = (streamlines[idx])
            fname = op.join(bundles_dir, '%s.trk' % bundle)
            aus.write_trk(fname,
                          dtu.move_streamlines(
                              this_sl, np.linalg.inv(row['dwi_affine'])),
                          affine=row['dwi_affine'])
Example #38
def generate_streamlines(self):
    streamlines = parfor(self._track, self.seeds, n_jobs=self.n_jobs,
                         backend=self.backend, engine=self.engine)
    streamlines = list(chain(*streamlines))
    return dtu.move_streamlines(streamlines,
                                self.affine)
Example #39
def test_AFQ_data2():
    """
    Test with some actual data again, this time for track segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    preafq_path = op.join(tmpdir.name, 'stanford_hardi',
                          'derivatives', 'preafq')
    myafq = api.AFQ(preafq_path=preafq_path,
                    sub_prefix='sub',
                    bundle_list=["SLF", "ARC", "CST", "FP"])

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.move_streamlines([s for s in streamlines if s.shape[0] > 100],
                             np.linalg.inv(myafq.dwi_affine[0])))

    sl_file = op.join(myafq.data_frame.results_dir[0],
                      'sub-01_sess-01_dwiDTI_det_streamlines.trk')
    aus.write_trk(sl_file, streamlines, affine=myafq.dwi_affine[0])

    mapping_file = op.join(myafq.data_frame.results_dir[0],
                           'sub-01_sess-01_dwi_mapping.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(myafq.data_frame.results_dir[0], 'sub-01_sess-01_dwi_reg_prealign.npy')
    np.save(reg_prealign_file, np.eye(4))

    tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
    npt.assert_equal(len(bundles['CST_R']), 2)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'ROIs',
                     'CST_R_roi1_include.nii.gz'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'bundles',
                     'CST_R.trk'))

    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)


    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                  'bundles'))

    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                  'ROIs'))

    # Test the CLI:
    print("Running the CLI:")
    cmd = "pyAFQ " + preafq_path
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)

    # Make sure the CLI did indeed generate these:
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'ROIs',
                     'CST_R_roi1_include.nii.gz'))

    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'bundles',
                     'CST_R.trk'))
Example #40
    dti_params = dti.fit_dti(hardi_fdata, hardi_fbval, hardi_fbvec,
                             out_dir='.')
else:
    dti_params = {'FA': './dti_FA.nii.gz',
                  'params': './dti_params.nii.gz'}

print("Tracking...")
if not op.exists('dti_streamlines.trk'):
    streamlines = list(aft.track(dti_params['params']))
    aus.write_trk('./dti_streamlines.trk', streamlines, affine=img.affine)
else:
    tg = nib.streamlines.load('./dti_streamlines.trk').tractogram
    streamlines = tg.apply_affine(np.linalg.inv(img.affine)).streamlines

streamlines = dts.Streamlines(dtu.move_streamlines(
    [s for s in streamlines if s.shape[0] > 100],
    np.linalg.inv(img.affine)))

templates = afd.read_templates()
bundle_names = ["CST", "ILF"]

bundles = {}
for name in bundle_names:
    for hemi in ['_R', '_L']:
        bundles[name + hemi] = {
            'ROIs': [templates[name + '_roi1' + hemi],
                     templates[name + '_roi2' + hemi]],
            'rules': [True, True],
            'prob_map': templates[name + hemi + '_prob_map'],
            'cross_midline': False}
Example #41
# Save density map
dm_img = nib.Nifti1Image(dm.astype("int16"), hardi_img.affine)
dm_img.to_filename("lr-superiorfrontal-dm.nii.gz")

# Make a trackvis header so we can save streamlines
voxel_size = labels_img.header.get_zooms()
trackvis_header = nib.trackvis.empty_header()
trackvis_header['voxel_size'] = voxel_size
trackvis_header['dim'] = shape
trackvis_header['voxel_order'] = "RAS"

# Move streamlines to "trackvis space"
trackvis_point_space = utils.affine_for_trackvis(voxel_size)
lr_sf_trk = utils.move_streamlines(lr_superiorfrontal_track,
                                   trackvis_point_space,
                                   input_space=affine)
lr_sf_trk = list(lr_sf_trk)

# Save streamlines
for_save = [(sl, None, None) for sl in lr_sf_trk]
nib.trackvis.write("lr-superiorfrontal.trk", for_save, trackvis_header)
"""
Let's take a moment here to consider the representation of streamlines used in
dipy. Streamlines are paths through the 3D space of an image, represented by a
set of points. For these points to have a meaningful interpretation, they must
be given in a known coordinate system. The ``affine`` attribute of the
``streamline_generator`` object specifies the coordinate system of the points
with respect to the voxel indices of the input data. ``trackvis_point_space``
specifies the trackvis coordinate system with respect to the same indices. The
``move_streamlines`` function returns a new set of streamlines expressed in the
target (trackvis) coordinate system.
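As a short, hypothetical sketch of the mapping described above (added here for illustration; it assumes an older dipy in which ``utils.affine_for_trackvis`` and ``utils.move_streamlines`` are still available, as in the code above):

import numpy as np
from dipy.tracking import utils

voxel_size = (2., 2., 2.)  # assumed 2 mm isotropic voxels
trackvis_point_space = utils.affine_for_trackvis(voxel_size)

# One streamline whose points are voxel indices; move_streamlines re-expresses
# those points in trackvis point space using the affine computed above.
streamlines_vox = [np.array([[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]])]
streamlines_trk = list(utils.move_streamlines(streamlines_vox,
                                              trackvis_point_space,
                                              input_space=np.eye(4)))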
Example #42
    def _run_interface(self, runtime):
        from dipy.tracking.utils import move_streamlines, \
            affine_from_fsl_mat_file
        dx, dy, dz = get_data_dims(self.inputs.image_file)
        vx, vy, vz = get_vox_dims(self.inputs.image_file)
        image_file = nb.load(self.inputs.image_file)
        affine = image_file.affine
        out_filename = op.abspath(self.inputs.out_filename)

        # Reads MRTrix tracks
        header, streamlines = read_mrtrix_tracks(
            self.inputs.in_file, as_generator=True)
        iflogger.info('MRTrix Header:')
        iflogger.info(header)
        # Writes to Trackvis
        trk_header = nb.trackvis.empty_header()
        trk_header['dim'] = [dx, dy, dz]
        trk_header['voxel_size'] = [vx, vy, vz]
        trk_header['n_count'] = header['count']

        if isdefined(self.inputs.matrix_file) and isdefined(
                self.inputs.registration_image_file):
            iflogger.info('Applying transformation from matrix file %s',
                          self.inputs.matrix_file)
            xfm = np.genfromtxt(self.inputs.matrix_file)
            iflogger.info(xfm)
            registration_image_file = nb.load(
                self.inputs.registration_image_file)
            reg_affine = registration_image_file.affine
            r_dx, r_dy, r_dz = get_data_dims(
                self.inputs.registration_image_file)
            r_vx, r_vy, r_vz = get_vox_dims(
                self.inputs.registration_image_file)
            iflogger.info('Using affine from registration image file %s',
                          self.inputs.registration_image_file)
            iflogger.info(reg_affine)
            trk_header['vox_to_ras'] = reg_affine
            trk_header['dim'] = [r_dx, r_dy, r_dz]
            trk_header['voxel_size'] = [r_vx, r_vy, r_vz]

            affine = np.dot(affine, np.diag(1. / np.array([vx, vy, vz, 1])))
            transformed_streamlines = transform_to_affine(
                streamlines, trk_header, affine)

            aff = affine_from_fsl_mat_file(xfm, [vx, vy, vz],
                                           [r_vx, r_vy, r_vz])
            iflogger.info(aff)

            axcode = aff2axcodes(reg_affine)
            trk_header['voxel_order'] = axcode[0] + axcode[1] + axcode[2]

            final_streamlines = move_streamlines(transformed_streamlines, aff)
            trk_tracks = ((ii, None, None) for ii in final_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info('Saving transformed Trackvis file as %s',
                          out_filename)
            iflogger.info('New TrackVis Header:')
            iflogger.info(trk_header)
        else:
            iflogger.info(
                'Applying transformation from scanner coordinates to %s',
                self.inputs.image_file)
            axcode = aff2axcodes(affine)
            trk_header['voxel_order'] = axcode[0] + axcode[1] + axcode[2]
            trk_header['vox_to_ras'] = affine
            transformed_streamlines = transform_to_affine(
                streamlines, trk_header, affine)
            trk_tracks = ((ii, None, None) for ii in transformed_streamlines)
            trk.write(out_filename, trk_tracks, trk_header)
            iflogger.info('Saving Trackvis file as %s', out_filename)
            iflogger.info('TrackVis Header:')
            iflogger.info(trk_header)
        return runtime
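
The ``voxel_order`` fields above are derived from an affine with nibabel's ``aff2axcodes``. A tiny, standalone illustration of that call (standard nibabel API, with an identity affine as a toy input):

import numpy as np
from nibabel.orientations import aff2axcodes

print("".join(aff2axcodes(np.eye(4))))   # 'RAS' for an identity affine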
예제 #43
0
def test_MarkovIntegrator():
    class KeepGoing(MarkovIntegrator):
        def _next_step(self, location, prev_step):
            if prev_step is None:
                return np.array([[1., 0, 0], [0, 1., 0], [0, 0., 1]])
            if not self._mask[location]:
                return None
            else:
                return prev_step

    data = np.ones((10, 10, 10, 65))
    data_interp = NearestNeighborInterpolator(data, (1, 1, 1))

    seeds = [np.array([5.2, 5.2, 5.2])]
    stepper = FixedSizeStepper(.5)
    mask = np.ones((10, 10, 10), 'bool')
    gen = KeepGoing(model=None,
                    interpolator=data_interp,
                    mask=mask,
                    take_step=stepper,
                    angle_limit=0.,
                    seeds=seeds)
    streamlines = list(gen)
    assert_equal(len(streamlines), 3)

    expected = np.zeros((20, 3))
    for i in range(3):
        expected[:] = 5.2
        expected[:, i] = np.arange(.2, 10, .5)
        assert_array_almost_equal(streamlines[i], expected)

    # Track only the first (largest) peak for each seed
    gen = KeepGoing(model=None,
                    interpolator=data_interp,
                    mask=mask,
                    take_step=stepper,
                    angle_limit=0.,
                    seeds=seeds,
                    max_cross=1)
    streamlines = list(gen)
    assert_equal(len(streamlines), 1)

    expected = np.zeros((20, 3))
    expected[:] = 5.2
    expected[:, 0] = np.arange(.2, 10, .5)
    assert_array_almost_equal(streamlines[0], expected)

    mask = np.ones((20, 20, 20), 'bool')
    gen = KeepGoing(model=None,
                    interpolator=data_interp,
                    mask=mask,
                    take_step=stepper,
                    angle_limit=0.,
                    seeds=seeds,
                    max_cross=1,
                    mask_voxel_size=(.5, .5, .5))
    streamlines = list(gen)
    assert_equal(len(streamlines), 1)
    assert_array_almost_equal(streamlines[0], expected)

    # Test tracking with affine
    affine = np.eye(4)
    affine[:3, :] = np.random.random((3, 4)) - .5

    seeds = [np.dot(affine[:3, :3], seeds[0] - .5) + affine[:3, 3]]
    sl_affine = KeepGoing(model=None,
                          interpolator=data_interp,
                          mask=mask,
                          take_step=stepper,
                          angle_limit=0.,
                          seeds=seeds,
                          max_cross=1,
                          mask_voxel_size=(.5, .5, .5),
                          affine=affine)

    default = np.eye(4)
    default[:3, 3] = .5
    sl_default = list(utils.move_streamlines(sl_affine, default, affine))

    assert_equal(len(sl_default), 1)
    assert_array_almost_equal(sl_default[0], expected)
예제 #44
0
def correct_lesions(in_csv, lesion_file, ratio_file, ants_seg, dist_radius=5):

    # Initialize report, load csv data and lesion seg data
    report = dict(FP=[],
                  FN=[],
                  base_csv=in_csv,
                  ratio_file=ratio_file,
                  dist_radius=dist_radius)
    df = pd.read_csv(in_csv)
    img = nib.load(lesion_file)
    data, aff = img.get_data(), img.affine
    report["orig_lesion_volume"] = fslstats(lesion_file)
    report["orig_num_lesions"] = num_lesions(data)
    # detect false positives and return if none are detected
    # probably a coordinate system error, or clicks are bad
    report, data = detect_FP(df, data, aff, report)
    num_success = len([r for r in report["FP"] if r["caught"]])
    # coordinate system error ??
    if len(report["FP"]) and not num_success:
        print(
            "Coordinate system error? or can't find any FP -- is this labelled correctly?",
            len(report["FP"]), num_success)
        return None, None, None

    # Write the lesion file w/ the false positives removed.
    out_path = os.path.join(
        os.path.split(os.path.split(in_csv)[0])[0], "lst_edits")
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path, "no_FP_" + lesion_file.split("/")[-1])
    nib.Nifti1Image(data, aff).to_filename(out_file)

    # Load the ratio image
    ratio_img = nib.load(ratio_file)
    ratio, raffine = ratio_img.get_data(), ratio_img.affine

    # Get the tissue segmentation in the same space as the ratio file
    # Mask to exclude CSF
    def chopper(in_file, ratio_file):
        import tempfile
        from subprocess import check_call
        out_file = os.path.join(
            os.path.split(in_csv)[0],
            "antsSeg.nii.gz")  #tempfile.mktemp(suffix=".nii.gz")
        if not os.path.exists(out_file):
            cmd = "mri_convert -i {} -o {} --like {}".format(
                in_file, out_file, ratio_file)
            check_call(cmd.split(" "))
        return out_file

    ants_chopped = chopper(ants_seg, ratio_file)  # resample the segmentation into the ratio image's space
    ants_img = nib.load(ants_chopped)
    ants_data = ants_img.get_data()
    wm_mask = ants_data >= 1  # tissue mask (labels >= 1); unclear whether this truly excludes CSF
    ratio[wm_mask == 0] = 0

    # Prepare false negatives and find them
    fn = df[df.annotation == "FN"][["x", "y", "z"]].values
    to_indices = np.round(
        np.asarray(
            list(utils.move_streamlines(fn.tolist(),
                                        np.linalg.inv(aff))))).astype(int)
    entries, fn_image = find_FN(ratio, to_indices, fn, dist_radius)
    print(ratio.shape, data.shape)
    report["FN"] += entries
    data = data + fn_image  # add the found false negatives to data

    # Score the false negatives, reject if less than 40% are found
    report_file = fname_presuffix(
        in_csv,
        prefix="report_dr{}_".format(dist_radius),
        newpath=out_path,
        use_ext=False,
        suffix="_{}.json".format(
            os.path.split(ratio_file)[-1]))  # long name for provenance
    stats = report_stats(report)
    num_success = len([r for r in report["FN"] if r["caught"]])
    total = len(report["FN"])
    if float(total) > 0:
        print("success score", num_success / float(total))
        if num_success / float(total) < 0.4:
            print("notgood enough")
            if os.path.exists(report_file):
                os.remove(report_file)  #this is an old report file. remove it
            return None, None, None

    # Write the final image, save the report
    out_file = os.path.join(
        out_path, "no_FP_filled_FN_dr{}_".format(dist_radius) +
        ratio_file.split("/")[-1])
    nib.Nifti1Image(data, aff).to_filename(out_file)

    report["final_lesion_vol"] = fslstats(out_file)
    report["final_lesion_count"] = num_lesions(data)
    save_json(report_file, report)
    print("\n\n\n", "OUTPUT:", out_file, "\n\n\n")
    return out_file, report, stats
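
The ``move_streamlines`` call above treats each false-negative click as a tiny one-point "streamline" and applies the inverse affine to map world coordinates to voxel indices. A minimal sketch of that conversion, assuming the older dipy API (the affine and the point are made up):

import numpy as np
from dipy.tracking import utils

aff = np.diag([2., 2., 2., 1.])            # hypothetical voxel-to-world affine
world_pts = [np.array([[4., 6., 8.]])]     # one click as a one-point "streamline"
vox = list(utils.move_streamlines(world_pts, np.linalg.inv(aff)))
idx = np.round(vox[0]).astype(int)         # -> array([[2, 3, 4]])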
예제 #45
0
def test_MarkovIntegrator():

    class KeepGoing(MarkovIntegrator):
        def _next_step(self, location, prev_step):
            if prev_step is None:
                return np.array([[1., 0, 0],
                                 [0, 1., 0],
                                 [0, 0., 1]])
            if not self._mask[location]:
                return None
            else:
                return prev_step

    data = np.ones((10, 10, 10, 65))
    data_interp = NearestNeighborInterpolator(data, (1, 1, 1))

    seeds = [np.array([5.2, 5.2, 5.2])]
    stepper = FixedSizeStepper(.5)
    mask = np.ones((10, 10, 10), 'bool')
    gen = KeepGoing(model=None, interpolator=data_interp, mask=mask,
                    take_step=stepper, angle_limit=0., seeds=seeds)
    streamlines = list(gen)
    assert_equal(len(streamlines), 3)

    expected = np.zeros((20, 3))
    for i in range(3):
        expected[:] = 5.2
        expected[:, i] = np.arange(.2, 10, .5)
        assert_array_almost_equal(streamlines[i], expected)

    # Track only the first (largest) peak for each seed
    gen = KeepGoing(model=None, interpolator=data_interp, mask=mask,
                    take_step=stepper, angle_limit=0., seeds=seeds,
                    max_cross=1)
    streamlines = list(gen)
    assert_equal(len(streamlines), 1)

    expected = np.zeros((20, 3))
    expected[:] = 5.2
    expected[:, 0] = np.arange(.2, 10, .5)
    assert_array_almost_equal(streamlines[0], expected)

    mask = np.ones((20, 20, 20), 'bool')
    gen = KeepGoing(model=None, interpolator=data_interp, mask=mask,
                    take_step=stepper, angle_limit=0., seeds=seeds,
                    max_cross=1, mask_voxel_size=(.5, .5, .5))
    streamlines = list(gen)
    assert_equal(len(streamlines), 1)
    assert_array_almost_equal(streamlines[0], expected)

    # Test tracking with affine
    affine = np.eye(4)
    affine[:3, :] = np.random.random((3, 4)) - .5

    seeds = [np.dot(affine[:3, :3], seeds[0] - .5) + affine[:3, 3]]
    sl_affine = KeepGoing(model=None, interpolator=data_interp, mask=mask,
                    take_step=stepper, angle_limit=0., seeds=seeds,
                    max_cross=1, mask_voxel_size=(.5, .5, .5), affine=affine)

    default = np.eye(4)
    default[:3, 3] = .5
    sl_default = list(utils.move_streamlines(sl_affine, default, affine))

    assert_equal(len(sl_default), 1)
    assert_array_almost_equal(sl_default[0], expected)
예제 #46
0
# Enables/disables interactive visualization
interactive = False

"""
To speed up visualization, we will select a random subset of streamlines to
display. This is particularly important if you track from seeds throughout the
entire white matter, generating many streamlines. In this case, for
demonstration purposes, we subselect 900 streamlines.
"""

from dipy.tracking.streamline import select_random_set_of_streamlines
plot_streamlines = select_random_set_of_streamlines(streamlines, 900)

streamlines_actor = actor.streamtube(
    list(move_streamlines(plot_streamlines, inv(t1_aff))),
         cmap.line_colors(streamlines), linewidth=0.1)

vol_actor = actor.slicer(t1_data)

vol_actor.display(40, None, None)
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)

ren = window.Renderer()
ren.add(streamlines_actor)
ren.add(vol_actor)
ren.add(vol_actor2)

window.record(ren, out_path='sfm_streamlines.png', size=(800, 800))
if interactive:
예제 #47
0
def test_values_from_volume():
    decimal = 4
    data3d = np.arange(2000).reshape(20, 10, 10)
    # Test two cases of 4D data (handled differently)
    # One where the last dimension is length 3:
    data4d_3vec = np.arange(6000).reshape(20, 10, 10, 3)
    # The other where the last dimension is not 3:
    data4d_2vec = np.arange(4000).reshape(20, 10, 10, 2)
    for dt in [np.float32, np.float64]:
        for data in [data3d, data4d_3vec, data4d_2vec]:
            sl1 = [np.array([[1, 0, 0],
                             [1.5, 0, 0],
                             [2, 0, 0],
                             [2.5, 0, 0]]).astype(dt),
                   np.array([[2, 0, 0],
                             [3.1, 0, 0],
                             [3.9, 0, 0],
                             [4.1, 0, 0]]).astype(dt)]

            ans1 = [[data[1, 0, 0],
                     data[1, 0, 0] + (data[2, 0, 0] - data[1, 0, 0]) / 2,
                     data[2, 0, 0],
                     data[2, 0, 0] + (data[3, 0, 0] - data[2, 0, 0]) / 2],
                    [data[2, 0, 0],
                     data[3, 0, 0] + (data[4, 0, 0] - data[3, 0, 0]) * 0.1,
                     data[3, 0, 0] + (data[4, 0, 0] - data[3, 0, 0]) * 0.9,
                     data[4, 0, 0] + (data[5, 0, 0] - data[4, 0, 0]) * 0.1]]

            vv = values_from_volume(data, sl1)
            npt.assert_almost_equal(vv, ans1, decimal=decimal)

            vv = values_from_volume(data, np.array(sl1))
            npt.assert_almost_equal(vv, ans1, decimal=decimal)

            affine = np.eye(4)
            affine[:, 3] = [-100, 10, 1, 1]
            x_sl1 = ut.move_streamlines(sl1, affine)
            x_sl2 = ut.move_streamlines(sl1, affine)

            vv = values_from_volume(data, x_sl1, affine=affine)
            npt.assert_almost_equal(vv, ans1, decimal=decimal)

            # The generator has already been consumed so needs to be
            # regenerated:
            x_sl1 = list(ut.move_streamlines(sl1, affine))
            vv = values_from_volume(data, x_sl1, affine=affine)
            npt.assert_almost_equal(vv, ans1, decimal=decimal)

            # Test that the streamlines haven't mutated:
            l_sl2 = list(x_sl2)
            npt.assert_equal(x_sl1, l_sl2)

            vv = values_from_volume(data, np.array(x_sl1), affine=affine)
            npt.assert_almost_equal(vv, ans1, decimal=decimal)
            npt.assert_equal(np.array(x_sl1), np.array(l_sl2))

            # Test for lists of streamlines with different numbers of nodes:
            sl2 = [sl1[0][:-1], sl1[1]]
            ans2 = [ans1[0][:-1], ans1[1]]
            vv = values_from_volume(data, sl2)
            for ii, v in enumerate(vv):
                npt.assert_almost_equal(v, ans2[ii], decimal=decimal)

    # We raise an error if the streamlines fed don't make sense. In this
    # case, a tuple instead of a list, generator or array
    nonsense_sl = (np.array([[1, 0, 0],
                             [1.5, 0, 0],
                             [2, 0, 0],
                             [2.5, 0, 0]]),
                   np.array([[2, 0, 0],
                             [3.1, 0, 0],
                             [3.9, 0, 0],
                             [4.1, 0, 0]]))

    npt.assert_raises(RuntimeError, values_from_volume, data, nonsense_sl)

    # For some use-cases we might have singleton streamlines (with only one
    # node each):
    data3D = np.ones((2, 2, 2))
    streamlines = np.ones((10, 1, 3))
    npt.assert_equal(values_from_volume(data3D, streamlines).shape, (10, 1))
    data4D = np.ones((2, 2, 2, 2))
    streamlines = np.ones((10, 1, 3))
    npt.assert_equal(values_from_volume(data4D, streamlines).shape, (10, 1, 2))
예제 #48
0
def label_streamlines_density(streamlines, labels, affine, f_name, img,
                              label_img):
    """
    .. figure:: connectivity.png
       :align: center
    
       **Connectivity of Corpus Callosum**
    
    .. include:: ../links_names.inc
    
    """

    shape = labels.shape
    dm = utils.density_map(streamlines, shape, affine=affine)
    # Mean streamline density over the non-zero voxels of the density map
    nonzero = dm[dm != 0]
    density = nonzero.sum() * 1.0 / nonzero.size
    print(density)
    """
    
    To do that, we will use tools available in [nibabel](http://nipy.org/nibabel)
    """

    # Save density map
    dm_img = nib.Nifti1Image(dm.astype("int16"), img.get_affine())
    dm_img.to_filename(f_name + "-dm.nii.gz")

    # Make a trackvis header so we can save streamlines
    voxel_size = label_img.get_header().get_zooms()
    trackvis_header = nib.trackvis.empty_header()
    trackvis_header['voxel_size'] = voxel_size
    trackvis_header['dim'] = shape
    trackvis_header['voxel_order'] = "RAS"

    # Move streamlines to "trackvis space"
    trackvis_point_space = utils.affine_for_trackvis(voxel_size)
    lr_sf_trk = utils.move_streamlines(streamlines,
                                       trackvis_point_space,
                                       input_space=affine)
    lr_sf_trk = list(lr_sf_trk)
    """
    # Save streamlines
    for_save = [(sl, None, None) for sl in lr_sf_trk]
    
    nib.trackvis.write(f_name+"_label1.trk", for_save, trackvis_header)
    """
    """
    import tractconverter as tc
    density_file = f_name+"_label1.trk"
    input_format=tc.detect_format(density_file)
    input=input_format(density_file)
    output=tc.FORMATS['vtk'].create(density_file+".vtk",input.hdr)
    tc.convert(input,output)
    """
    """
    Let's take a moment here to consider the representation of streamlines used in
    dipy. Streamlines are paths through the 3D space of an image, represented by a
    set of points. For these points to have a meaningful interpretation, they
    must be given in a known coordinate system. The ``affine`` attribute of
    the ``streamline_generator`` object specifies the coordinate system of the
    points with respect to the voxel indices of the input data.
    ``trackvis_point_space`` specifies the trackvis coordinate system with respect
    to the same indices. The ``move_streamlines`` function returns a new set of
    streamlines from an existing set of streamlines in the target space. The
    target space and the input space must be specified as affine transformations
    with respect to the same reference [#]_. If no input space is given, the input
    space will be the same as the current representation of the streamlines, in
    other words the input space is assumed to be ``np.eye(4)``, the 4-by-4 identity
    matrix.
    
    All of the functions above that allow streamlines to interact with volumes take
    an affine argument. This argument allows these functions to work with
    streamlines regardless of their coordinate system. For example even though we
    moved our streamlines to "trackvis space", we can still compute the density map
    as long as we specify the right coordinate system.
    """

    dm_trackvis = utils.density_map(lr_sf_trk,
                                    shape,
                                    affine=trackvis_point_space)
    assert np.all(dm == dm_trackvis)

    return dm, density
    """
    This means that streamlines can interact with any image volume, for example a
    high resolution structural image, as long as one can register that image to
    the diffusion images and calculate the coordinate system with respect to that
    image.
    """
    """
예제 #49
0
import nibabel as nib

# Save density map
dm_img = nib.Nifti1Image(dm.astype("int16"), hardi_img.get_affine())
dm_img.to_filename("lr-superiorfrontal-dm.nii.gz")

# Make a trackvis header so we can save streamlines
voxel_size = labels_img.get_header().get_zooms()
trackvis_header = nib.trackvis.empty_header()
trackvis_header['voxel_size'] = voxel_size
trackvis_header['dim'] = shape
trackvis_header['voxel_order'] = "RAS"

# Move streamlines to "trackvis space"
trackvis_point_space = utils.affine_for_trackvis(voxel_size)
lr_sf_trk = utils.move_streamlines(lr_superiorfrontal_track,
                                   trackvis_point_space, input_space=affine)
lr_sf_trk = list(lr_sf_trk)

# Save streamlines
for_save = [(sl, None, None) for sl in lr_sf_trk]
nib.trackvis.write("lr-superiorfrontal.trk", for_save, trackvis_header)

"""
Let's take a moment here to consider the representation of streamlines used in
dipy. Streamlines are paths through the 3D space of an image, represented by a
set of points. For these points to have a meaningful interpretation, they must
be given in a known coordinate system. The ``affine`` attribute of the
``streamline_generator`` object specifies the coordinate system of the points
with respect to the voxel indices of the input data.
``trackvis_point_space`` specifies the trackvis coordinate system with respect
to the same indices. The ``move_streamlines`` function returns a new set of
예제 #50
0
def segment(fdata, fbval, fbvec, streamlines, bundles,
            reg_template=None, mapping=None, as_generator=True, **reg_kwargs):
    """

    as_generator : bool
        Whether to return the selected streamlines as generators (True) or as
        fully generated lists (False).

    reg_template : template to use for registration (defaults to the MNI T2)

    bundles: dict
        The format is something like::

             {'name': {'ROIs':[img, img], 'rules':[True, True]}}


    """
    img, data, gtab, mask = ut.prepare_data(fdata, fbval, fbvec)
    xform_sl = [s for s in dtu.move_streamlines(streamlines,
                                                np.linalg.inv(img.affine))]

    if reg_template is None:
        reg_template = dpd.read_mni_template()

    if mapping is None:
        mapping = reg.syn_register_dwi(fdata, gtab, template=reg_template,
                                       **reg_kwargs)

    if isinstance(mapping, str) or isinstance(mapping, nib.Nifti1Image):
        mapping = reg.read_mapping(mapping, img, reg_template)

    fiber_groups = {}
    for bundle in bundles:
        select_sl = xform_sl
        for ROI, rule in zip(bundles[bundle]['ROIs'],
                             bundles[bundle]['rules']):
            data = ROI.get_data()
            warped_ROI = patch_up_roi(mapping.transform_inverse(
                data,
                interpolation='nearest'))
            # This function requires lists as inputs:
            select_sl = dts.select_by_rois(select_sl,
                                           [warped_ROI.astype(bool)],
                                           [rule])
        # Next, we reorient each streamline according to an ARBITRARY, but
        # CONSISTENT order. To do this, we use the first ROI for which the rule
        # is True as the first one to pass through, and the last ROI for which
        # the rule is True as the last one to pass through:

        # Indices where the 'rule' is True:
        idx = np.where(bundles[bundle]['rules'])

        orient_ROIs = [bundles[bundle]['ROIs'][idx[0][0]],
                       bundles[bundle]['ROIs'][idx[0][-1]]]

        select_sl = dts.orient_by_rois(select_sl,
                                       orient_ROIs[0].get_data(),
                                       orient_ROIs[1].get_data(),
                                       in_place=True)
        if as_generator:
            fiber_groups[bundle] = select_sl
        else:
            fiber_groups[bundle] = list(select_sl)

    return fiber_groups
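
For reference, here is a small, self-contained sketch of the ``dipy.tracking.streamline.select_by_rois`` call used above, with toy data (older dipy API; the coordinates and ROI shape are made up):

import numpy as np
import dipy.tracking.streamline as dts

sl = [np.array([[1., 1., 1.], [1., 1., 4.]]),
      np.array([[6., 6., 6.], [6., 6., 7.]])]
roi = np.zeros((8, 8, 8), dtype=bool)
roi[1, 1, :] = True                                  # ROI along one column of voxels
kept = list(dts.select_by_rois(sl, [roi], [True]))   # only the first streamline survives
assert len(kept) == 1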
예제 #51
0
# Enables/disables interactive visualization
interactive = False

"""
To speed up visualization, we will select a random subset of streamlines to
display. This is particularly important if you track from seeds throughout the
entire white matter, generating many streamlines. In this case, for
demonstration purposes, we subselect 900 streamlines.
"""

from dipy.tracking.streamline import select_random_set_of_streamlines
plot_streamlines = select_random_set_of_streamlines(streamlines, 900)

streamlines_actor = actor.streamtube(
    list(move_streamlines(plot_streamlines, inv(t1_aff))),
         cmap.line_colors(streamlines), linewidth=0.1)

vol_actor = actor.slicer(t1_data)

vol_actor.display(40, None, None)
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)

ren = window.Renderer()
ren.add(streamlines_actor)
ren.add(vol_actor)
ren.add(vol_actor2)

window.record(ren, out_path='sfm_streamlines.png', size=(800, 800))
if interactive:
예제 #52
0
def test_values_from_volume():
    decimal = 4
    data3d = np.arange(2000).reshape(20, 10, 10)
    # Test two cases of 4D data (handled differently)
    # One where the last dimension is length 3:
    data4d_3vec = np.arange(6000).reshape(20, 10, 10, 3)
    # The other where the last dimension is not 3:
    data4d_2vec = np.arange(4000).reshape(20, 10, 10, 2)
    for dt in [np.float32, np.float64]:
        for data in [data3d, data4d_3vec, data4d_2vec]:
            sl1 = [
                np.array([[1, 0, 0], [1.5, 0, 0], [2, 0, 0], [2.5, 0,
                                                              0]]).astype(dt),
                np.array([[2, 0, 0], [3.1, 0, 0], [3.9, 0, 0], [4.1, 0,
                                                                0]]).astype(dt)
            ]

            ans1 = [[
                data[1, 0,
                     0], data[1, 0, 0] + (data[2, 0, 0] - data[1, 0, 0]) / 2,
                data[2, 0,
                     0], data[2, 0, 0] + (data[3, 0, 0] - data[2, 0, 0]) / 2
            ],
                    [
                        data[2, 0, 0],
                        data[3, 0, 0] + (data[4, 0, 0] - data[3, 0, 0]) * 0.1,
                        data[3, 0, 0] + (data[4, 0, 0] - data[3, 0, 0]) * 0.9,
                        data[4, 0, 0] + (data[5, 0, 0] - data[4, 0, 0]) * 0.1
                    ]]

            vv = values_from_volume(data, sl1)
            npt.assert_almost_equal(vv, ans1, decimal=decimal)

            vv = values_from_volume(data, np.array(sl1))
            npt.assert_almost_equal(vv, ans1, decimal=decimal)

            affine = np.eye(4)
            affine[:, 3] = [-100, 10, 1, 1]
            x_sl1 = ut.move_streamlines(sl1, affine)
            x_sl2 = ut.move_streamlines(sl1, affine)

            vv = values_from_volume(data, x_sl1, affine=affine)
            npt.assert_almost_equal(vv, ans1, decimal=decimal)

            # The generator has already been consumed so needs to be
            # regenerated:
            x_sl1 = list(ut.move_streamlines(sl1, affine))
            vv = values_from_volume(data, x_sl1, affine=affine)
            npt.assert_almost_equal(vv, ans1, decimal=decimal)

            # Test that the streamlines haven't mutated:
            l_sl2 = list(x_sl2)
            npt.assert_equal(x_sl1, l_sl2)

            vv = values_from_volume(data, np.array(x_sl1), affine=affine)
            npt.assert_almost_equal(vv, ans1, decimal=decimal)
            npt.assert_equal(np.array(x_sl1), np.array(l_sl2))

            # Test for lists of streamlines with different numbers of nodes:
            sl2 = [sl1[0][:-1], sl1[1]]
            ans2 = [ans1[0][:-1], ans1[1]]
            vv = values_from_volume(data, sl2)
            for ii, v in enumerate(vv):
                npt.assert_almost_equal(v, ans2[ii], decimal=decimal)

    # We raise an error if the streamlines fed don't make sense. In this
    # case, a tuple instead of a list, generator or array
    nonsense_sl = (np.array([[1, 0, 0], [1.5, 0, 0], [2, 0, 0], [2.5, 0, 0]]),
                   np.array([[2, 0, 0], [3.1, 0, 0], [3.9, 0, 0], [4.1, 0,
                                                                   0]]))

    npt.assert_raises(RuntimeError, values_from_volume, data, nonsense_sl)

    # For some use-cases we might have singleton streamlines (with only one
    # node each):
    data3D = np.ones((2, 2, 2))
    streamlines = np.ones((10, 1, 3))
    npt.assert_equal(values_from_volume(data3D, streamlines).shape, (10, 1))
    data4D = np.ones((2, 2, 2, 2))
    streamlines = np.ones((10, 1, 3))
    npt.assert_equal(values_from_volume(data4D, streamlines).shape, (10, 1, 2))
예제 #53
0
t1_data = t1.get_data()
t1_aff = t1.get_affine()
color = line_colors(streamlines)

"""
To speed up visualization, we will select a random subset of streamlines to
display. This is particularly important if you track from seeds throughout the
entire white matter, generating many streamlines. In this case, for
demonstration purposes, we subselect 900 streamlines.
"""

from dipy.tracking.streamline import select_random_set_of_streamlines

plot_streamlines = select_random_set_of_streamlines(streamlines, 900)

streamlines_actor = fvtk.streamtube(list(move_streamlines(plot_streamlines, inv(t1_aff))), line_colors(streamlines))

vol_actor = fvtk.slicer(t1_data, voxsz=(1.0, 1.0, 1.0), plane_i=[40], plane_j=None, plane_k=[35], outline=False)

ren = fvtk.ren()
fvtk.add(ren, streamlines_actor)
fvtk.add(ren, vol_actor)
fvtk.record(ren, n_frames=1, out_path="sfm_streamlines.png", size=(800, 800))

"""
.. figure:: sfm_streamlines.png
   :align: center

   **Sparse Fascicle Model tracks**

Finally, we can save these streamlines to a 'trk' file, for use in other
예제 #54
0
def segment(fdata, fbval, fbvec, streamlines, bundles,
            reg_template=None, mapping=None, prob_threshold=0,
            **reg_kwargs):
    """
    Segment streamlines into bundles based on inclusion ROIs.

    Parameters
    ----------
    fdata, fbval, fbvec : str
        Full path to data, bvals, bvecs

    streamlines : list of 2D arrays
        Each array is a streamline, shape (N, 3).

    bundles: dict
        The format is something like::

            {'name': {'ROIs': [img1, img2],
                      'rules': [True, True],
                      'prob_map': img3,
                      'cross_midline': False}}

    reg_template : str or nib.Nifti1Image, optional.
        Template to use for registration (defaults to the MNI T2)

    mapping : DiffeomorphicMap object, str or nib.Nifti1Image, optional
        A mapping between DWI space and a template. Defaults to generate
        this.

    prob_threshold : float.
        Initial cleaning of fiber groups is done using probability maps from
        [Hua2008]_. Here, we choose an average probability that needs to be
        exceeded for an individual streamline to be retained. Default: 0.

    References
    ----------
    .. [Hua2008] Hua K, Zhang J, Wakana S, Jiang H, Li X, et al. (2008)
       Tract probability maps in stereotaxic spaces: analyses of white
       matter anatomy and tract-specific quantification. Neuroimage 39:
       336-347
    """
    img, _, gtab, _ = ut.prepare_data(fdata, fbval, fbvec)
    tol = dts.dist_to_corner(img.affine)

    xform_sl = dts.Streamlines(dtu.move_streamlines(streamlines,
                                                    np.linalg.inv(img.affine)))

    if reg_template is None:
        reg_template = dpd.read_mni_template()

    if mapping is None:
        mapping = reg.syn_register_dwi(fdata, gtab, template=reg_template,
                                       **reg_kwargs)

    if isinstance(mapping, str) or isinstance(mapping, nib.Nifti1Image):
        mapping = reg.read_mapping(mapping, img, reg_template)

    fiber_probabilities = np.zeros((len(xform_sl), len(bundles)))

    # For expedience, we approximate each streamline as a 100 point curve:
    fgarray = _resample_bundle(xform_sl, 100)
    streamlines_in_bundles = np.zeros((len(xform_sl), len(bundles)))
    min_dist_coords = np.zeros((len(xform_sl), len(bundles), 2))

    fiber_groups = {}

    for bundle_idx, bundle in enumerate(bundles):
        # Get the ROI coordinates:
        ROI0 = bundles[bundle]['ROIs'][0]
        ROI1 = bundles[bundle]['ROIs'][1]
        if not isinstance(ROI0, np.ndarray):
            ROI0 = ROI0.get_data()

        warped_ROI0 = patch_up_roi(
            mapping.transform_inverse(
                ROI0,
                interpolation='nearest')).astype(bool)

        if not isinstance(ROI1, np.ndarray):
            ROI1 = ROI1.get_data()

        warped_ROI1 = patch_up_roi(
            mapping.transform_inverse(
                ROI1,
                interpolation='nearest')).astype(bool)

        roi_coords0 = np.array(np.where(warped_ROI0)).T
        roi_coords1 = np.array(np.where(warped_ROI1)).T

        crosses_midline = bundles[bundle]['cross_midline']

        # If no probability map is provided, fall back to all ones with the
        # same shape as the ROIs:
        prob_map = bundles[bundle].get('prob_map', np.ones(ROI0.shape))
        if not isinstance(prob_map, np.ndarray):
            prob_map = prob_map.get_data()
        warped_prob_map = mapping.transform_inverse(prob_map,
                                                    interpolation='nearest')
        fiber_probabilities = dts.values_from_volume(warped_prob_map,
                                                     fgarray)
        fiber_probabilities = np.mean(fiber_probabilities, -1)

        for sl_idx, sl in enumerate(xform_sl):
            if fiber_probabilities[sl_idx] > prob_threshold:
                if crosses_midline is not None:
                    if (np.any(sl[:, 0] > img.shape[0] // 2) and
                            np.any(sl[:, 0] < img.shape[0] // 2)):
                        # This means that the streamline does
                        # cross the midline:
                        if crosses_midline:
                            # This is what we want, keep going
                            pass
                        else:
                            # This is not what we want, skip to next streamline
                            continue
                dist0 = cdist(sl, roi_coords0, 'euclidean')
                if np.min(dist0) <= tol:
                    dist1 = cdist(sl, roi_coords1, 'euclidean')
                    if np.min(dist1) <= tol:
                        min_dist_coords[sl_idx, bundle_idx, 0] =\
                            np.argmin(dist0, 0)[0]
                        min_dist_coords[sl_idx, bundle_idx, 1] =\
                            np.argmin(dist1, 0)[0]
                        streamlines_in_bundles[sl_idx, bundle_idx] =\
                            fiber_probabilities[sl_idx]

    # Eliminate any fibers not selected using the plane ROIs:
    possible_fibers = np.sum(streamlines_in_bundles, -1) > 0
    xform_sl = xform_sl[possible_fibers]
    streamlines_in_bundles = streamlines_in_bundles[possible_fibers]
    min_dist_coords = min_dist_coords[possible_fibers]
    bundle_choice = np.argmax(streamlines_in_bundles, -1)

    for bundle_idx, bundle in enumerate(bundles):
        print(bundle)
        select_idx = np.where(bundle_choice == bundle_idx)
        # Use a list here, because Streamlines don't support item assignment:
        select_sl = list(xform_sl[select_idx])
        # Sub-sample min_dist_coords:
        min_dist_coords_bundle = min_dist_coords[select_idx]
        if len(select_sl) == 0:
            fiber_groups[bundle] = dts.Streamlines([])
            # There's nothing here, move to the next bundle:
            continue

        for idx in range(len(select_sl)):
            min0 = min_dist_coords_bundle[idx, bundle_idx, 0]
            min1 = min_dist_coords_bundle[idx, bundle_idx, 1]
            if min0 > min1:
                select_sl[idx] = select_sl[idx][::-1]
        # We'll set this to Streamlines object for the next steps (e.g.,
        # cleaning) because these objects support indexing with arrays:
        select_sl = dts.Streamlines(select_sl)
        fiber_groups[bundle] = select_sl

    return fiber_groups
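
The probability-map gating described in the docstring boils down to averaging the warped probability map along each streamline and comparing that mean to ``prob_threshold``. A hedged toy sketch of that step (older dipy API; the map, streamline and threshold are made up, and this is not the pyAFQ code itself):

import numpy as np
import dipy.tracking.streamline as dts

prob_map = np.zeros((10, 10, 10))
prob_map[:, 5, 5] = 0.8                    # high probability along one column
sl = [np.array([[1., 5., 5.], [2., 5., 5.], [3., 5., 5.]])]
mean_prob = np.mean(dts.values_from_volume(prob_map, sl), -1)
keep = mean_prob > 0.0                     # with prob_threshold=0 this streamline is kept
assert bool(keep[0])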
예제 #55
0
def _extract_vals(data, streamlines, affine=None, threedvec=False):
    """
    Helper function for use with `values_from_volume`.

    Parameters
    ----------
    data : 3D or 4D array
        Scalar (for 3D) and vector (for 4D) values to be extracted. For 4D
        data, interpolation will be done on the 3 spatial dimensions in each
        volume.

    streamlines : ndarray or list
        If array, of shape (n_streamlines, n_nodes, 3)
        If list, len(n_streamlines) with (n_nodes, 3) array in
        each element of the list.

    affine : ndarray, shape (4, 4)
        Affine transformation from voxels (image coordinates) to streamlines.
        Default: identity.

    threedvec : bool
        Whether the last dimension has length 3. This is a special case in
        which we can use :func:`vfu.interpolate_vector_3d` for the
        interpolation of 4D volumes without looping over the elements of the
        last dimension.

    Returns
    -------
    array or list (depending on the input) : values interpolated at each
        coordinate along the length of each streamline
    """
    data = data.astype(np.float)
    if (isinstance(streamlines, list) or
            isinstance(streamlines, types.GeneratorType)):
        if affine is not None:
            streamlines = ut.move_streamlines(streamlines,
                                              np.linalg.inv(affine))

        vals = []
        for sl in streamlines:
            if threedvec:
                vals.append(list(vfu.interpolate_vector_3d(data,
                                 sl.astype(np.float))[0]))
            else:
                vals.append(list(vfu.interpolate_scalar_3d(data,
                                 sl.astype(np.float))[0]))

    elif isinstance(streamlines, np.ndarray):
        sl_shape = streamlines.shape
        sl_cat = streamlines.reshape(sl_shape[0] *
                                     sl_shape[1], 3).astype(np.float)

        if affine is not None:
            inv_affine = np.linalg.inv(affine)
            sl_cat = (np.dot(sl_cat, inv_affine[:3, :3]) +
                      inv_affine[:3, 3])

        # So that we can index in one operation:
        if threedvec:
            vals = np.array(vfu.interpolate_vector_3d(data, sl_cat)[0])
        else:
            vals = np.array(vfu.interpolate_scalar_3d(data, sl_cat)[0])
        vals = np.reshape(vals, (sl_shape[0], sl_shape[1], -1))
        if vals.shape[-1] == 1:
            vals = np.reshape(vals, vals.shape[:-1])
    else:
        raise RuntimeError("Extracting values from a volume ",
                           "requires streamlines input as an array, ",
                           "a list of arrays, or a streamline generator.")

    return vals
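
As a small check of the affine convention documented above (the affine maps voxel indices to the streamlines' coordinate system, so the helper applies its inverse), here is a toy sketch assuming the older dipy API:

import numpy as np
from dipy.tracking.streamline import values_from_volume

data = np.arange(27.).reshape(3, 3, 3)
sl_vox = [np.array([[1., 1., 1.], [2., 2., 2.]])]   # streamline in voxel indices
affine = np.diag([2., 2., 2., 1.])                  # voxel -> world scaling
sl_world = [s * 2. for s in sl_vox]                 # same streamline in world space
assert np.allclose(values_from_volume(data, sl_world, affine=affine),
                   values_from_volume(data, sl_vox))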
예제 #56
0
def _extract_vals(data, streamlines, affine=None, threedvec=False):
    """
    Helper function for use with `values_from_volume`.

    Parameters
    ----------
    data : 3D or 4D array
        Scalar (for 3D) and vector (for 4D) values to be extracted. For 4D
        data, interpolation will be done on the 3 spatial dimensions in each
        volume.

    streamlines : ndarray or list
        If array, of shape (n_streamlines, n_nodes, 3)
        If list, len(n_streamlines) with (n_nodes, 3) array in
        each element of the list.

    affine : ndarray, shape (4, 4)
        Affine transformation from voxels (image coordinates) to streamlines.
        Default: identity.

    threedvec : bool
        Whether the last dimension has length 3. This is a special case in
        which we can use :func:`vfu.interpolate_vector_3d` for the
        interpolation of 4D volumes without looping over the elements of the
        last dimension.

    Returns
    -------
    array or list (depending on the input) : values interpolated at each
        coordinate along the length of each streamline
    """
    data = data.astype(np.float)
    if (isinstance(streamlines, list)
            or isinstance(streamlines, types.GeneratorType)):
        if affine is not None:
            streamlines = ut.move_streamlines(streamlines,
                                              np.linalg.inv(affine))

        vals = []
        for sl in streamlines:
            if threedvec:
                vals.append(
                    list(
                        vfu.interpolate_vector_3d(data,
                                                  sl.astype(np.float))[0]))
            else:
                vals.append(
                    list(
                        vfu.interpolate_scalar_3d(data,
                                                  sl.astype(np.float))[0]))

    elif isinstance(streamlines, np.ndarray):
        sl_shape = streamlines.shape
        sl_cat = streamlines.reshape(sl_shape[0] * sl_shape[1],
                                     3).astype(np.float)

        if affine is not None:
            inv_affine = np.linalg.inv(affine)
            sl_cat = (np.dot(sl_cat, inv_affine[:3, :3]) + inv_affine[:3, 3])

        # So that we can index in one operation:
        if threedvec:
            vals = np.array(vfu.interpolate_vector_3d(data, sl_cat)[0])
        else:
            vals = np.array(vfu.interpolate_scalar_3d(data, sl_cat)[0])
        vals = np.reshape(vals, (sl_shape[0], sl_shape[1], -1))
        if vals.shape[-1] == 1:
            vals = np.reshape(vals, vals.shape[:-1])
    else:
        raise RuntimeError("Extracting values from a volume ",
                           "requires streamlines input as an array, ",
                           "a list of arrays, or a streamline generator.")

    return vals
예제 #57
0
def test_AFQ_data_planes():
    """
    Test with some actual data again, this time for track segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    dmriprep_path = op.join(tmpdir.name, 'stanford_hardi', 'derivatives',
                            'dmriprep')
    seg_algo = "planes"
    bundle_names = ["SLF", "ARC", "CST", "FP"]
    myafq = api.AFQ(dmriprep_path=dmriprep_path,
                    sub_prefix='sub',
                    seg_algo=seg_algo,
                    bundle_names=bundle_names,
                    odf_model="DTI")

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.move_streamlines([s for s in streamlines if s.shape[0] > 100],
                             np.linalg.inv(myafq.dwi_affine[0])))

    sl_file = op.join(myafq.data_frame.results_dir[0],
                      'sub-01_sess-01_dwiDTI_det_streamlines.trk')
    aus.write_trk(sl_file, streamlines, affine=myafq.dwi_affine[0])

    mapping_file = op.join(myafq.data_frame.results_dir[0],
                           'sub-01_sess-01_dwi_mapping.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(myafq.data_frame.results_dir[0],
                                'sub-01_sess-01_dwi_reg_prealign.npy')
    np.save(reg_prealign_file, np.eye(4))

    tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
    npt.assert_(len(bundles['CST_L']) > 0)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'CST_R_roi1_include.nii.gz'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'bundles', 'CST_R.trk'))

    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)

    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'bundles'))

    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'ROIs'))

    # Test the CLI:
    print("Running the CLI:")
    cmd = "pyAFQ " + dmriprep_path
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)

    # Make sure the CLI did indeed generate these:
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'CST_R_roi1_include.nii.gz'))

    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'bundles', 'CST_R.trk'))


# def test_AFQ_data_recobundles():
#     tmpdir = nbtmp.InTemporaryDirectory()
#     afd.fetch_hcp(["100206"], hcp_bucket='hcp-openaccess', profile_name="hcp",
#                   path=tmpdir.name)
#     dmriprep_path = op.join(tmpdir.name, 'HCP', 'derivatives', 'dmriprep')
#     seg_algo = "recobundles"
#     bundle_names = ["F", "CST", "AF", "CC_ForcepsMajor"]
#     myafq = api.AFQ(dmriprep_path=dmriprep_path,
#                     sub_prefix='sub',
#                     seg_algo=seg_algo,
#                     bundle_names=bundle_names,
#                     odf_model="DTI",
#                     b0_threshold=15)

#     # Replace the streamlines with precomputed:
#     path_to_trk = dpd.fetcher.fetch_target_tractogram_hcp()
#     path_to_trk = dpd.fetcher.get_target_tractogram_hcp()
#     sl_file = op.join(myafq.data_frame.results_dir[0], 'sub-100206_sess-01_dwiDTI_det_streamlines.trk')
#     shutil.copy(path_to_trk, sl_file)
#     myafq.data_frame["streamlines_file"] = sl_file
#     print("here")
#     tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
#     print("here")
#     bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
#     npt.assert_(len(bundles['CST_L']) > 0)
예제 #58
0
    def __iter__(self):
        # Make tracks, move them to point space and return
        track = self._generate_streamlines()
        return utils.move_streamlines(track, self.affine)
예제 #59
0
def track(peaks,
          seed_image,
          max_nr_fibers=2000,
          smooth=None,
          compress=0.1,
          bundle_mask=None,
          start_mask=None,
          end_mask=None,
          dilation=1,
          nr_cpus=-1,
          verbose=True):
    """
    A great speedup was achieved by:
    - only seeding in bundle_mask instead of the entire image (seeding took very long)
    - calculating fiber length on the fly instead of using an extra function that
      has to iterate over the entire fiber a second time

    Args:
        peaks:
        seed_image:
        max_nr_fibers:
        peak_threshold:
        smooth:
        compress:
        bundle_mask:
        start_mask:
        end_mask:
        dilation:
        nr_cpus:
        verbose:

    Returns:

    """
    peaks[:, :, :, 0] *= -1  # flip along the x axis so the peaks have the expected orientation
    if dilation > 0:
        # Add +1 dilation for start and end mask to be more robust
        start_mask = binary_dilation(start_mask,
                                     iterations=dilation + 1).astype(np.uint8)
        end_mask = binary_dilation(end_mask,
                                   iterations=dilation + 1).astype(np.uint8)
        bundle_mask = binary_dilation(bundle_mask,
                                      iterations=dilation).astype(np.uint8)

    global _PEAKS
    _PEAKS = peaks
    global _BUNDLE_MASK
    _BUNDLE_MASK = bundle_mask
    global _START_MASK
    _START_MASK = start_mask
    global _END_MASK
    _END_MASK = end_mask

    # Get list of coordinates of each voxel in mask to seed from those
    mask_coords = np.array(np.where(bundle_mask == 1)).transpose()
    nr_voxels = mask_coords.shape[0]
    spacing = seed_image.header.get_zooms()[0]

    max_nr_seeds = 250 * max_nr_fibers  # after how many seeds to abort (to avoid endless runtime)
    # How many seeds to process in each pool.map iteration
    seeds_per_batch = 5000

    if nr_cpus == -1:
        nr_processes = psutil.cpu_count()
    else:
        nr_processes = nr_cpus

    streamlines = []
    fiber_ctr = 0
    seed_ctr = 0
    # Process seeds in batches so we can stop once we have reached the desired number of streamlines. Not ideal;
    #   could be optimised by someone more familiar with multiprocessing.
    while fiber_ctr < max_nr_fibers:
        pool = multiprocessing.Pool(processes=nr_processes)
        streamlines_tmp = pool.map(
            partial(process_seedpoint, spacing=spacing),
            seed_generator(mask_coords, seeds_per_batch))
        # streamlines_tmp = [process_seedpoint(seed, spacing=spacing) for seed in
        #                    seed_generator(mask_coords, seeds_per_batch)] # single threaded for debug
        pool.close()
        pool.join()

        streamlines_tmp = [sl for sl in streamlines_tmp
                           if len(sl) > 0]  # filter empty
        streamlines += streamlines_tmp
        fiber_ctr = len(streamlines)
        if verbose:
            print("nr_fibs: {}".format(fiber_ctr))
        seed_ctr += seeds_per_batch
        if seed_ctr > max_nr_seeds:
            if verbose:
                print("Early stopping because max nr of seeds reached.")
            break

    if verbose:
        print("final nr streamlines: {}".format(len(streamlines)))

    streamlines = streamlines[:max_nr_fibers]  # remove surplus of fibers (comes from multiprocessing)
    streamlines = Streamlines(streamlines)  # Generate streamlines object

    # Move the origin from the edge of the voxel to the center of the voxel. Otherwise the
    # tractogram and mask do not align perfectly when viewed in MITK, but are slightly offset.
    # A few fibers may still lie slightly outside of the mask because of the big step size (no resegmenting is done).
    streamlines = fiber_utils.add_to_each_streamline(streamlines, -0.5)

    # move streamlines to coordinate space
    streamlines = list(
        move_streamlines(streamlines, output_space=seed_image.get_affine()))

    if smooth:
        streamlines = fiber_utils.smooth_streamlines(streamlines,
                                                     smoothing_factor=smooth)

    if compress:
        streamlines = fiber_utils.compress_streamlines(streamlines,
                                                       error_threshold=0.1,
                                                       nr_cpus=nr_cpus)

    return streamlines