Example No. 1
def bench_length():
    repeat = 10
    nb_streamlines = DATA['nb_streamlines']
    streamlines = DATA["streamlines"]  # Streamlines as a list of ndarrays.

    print("Timing length() with {0:,} streamlines.".format(nb_streamlines))
    python_time = measure("[length_python(s) for s in streamlines]", repeat)
    print("Python time: {0:.2}sec".format(python_time))

    cython_time = measure("length(streamlines)", repeat)
    print("Cython time: {0:.3}sec".format(cython_time))
    print("Speed up of {0:.2f}x".format(python_time / cython_time))

    # Make sure it produces the same results.
    assert_array_equal([length_python(s) for s in DATA["streamlines"]],
                       length(DATA["streamlines"]))

    streamlines = DATA['streamlines_arrseq']
    cython_time_arrseq = measure("length(streamlines)", repeat)
    print("Cython time (ArrSeq): {0:.3}sec".format(cython_time_arrseq))
    print("Speed up of {0:.2f}x".format(python_time / cython_time_arrseq))

    # Make sure it produces the same results.
    assert_array_equal(length(DATA["streamlines"]),
                       length(DATA["streamlines_arrseq"]))
Example No. 2
def test_length_memory_leaks():
    # Test some dtypes
    dtypes = [np.float32, np.float64, np.int32, np.int64]
    for dtype in dtypes:
        rng = np.random.RandomState(1234)
        NB_STREAMLINES = 10000
        streamlines = [rng.randn(rng.randint(10, 100), 3).astype(dtype)
                       for _ in range(NB_STREAMLINES)]

        list_refcount_before = get_type_refcount()["list"]

        lengths = length(streamlines)
        list_refcount_after = get_type_refcount()["list"]

        # Calling `length` shouldn't increase the refcount of `list`
        # since the return value is a numpy array.
        assert_equal(list_refcount_after, list_refcount_before)

    # Test mixed dtypes
    rng = np.random.RandomState(1234)
    NB_STREAMLINES = 10000
    streamlines = []
    for i in range(NB_STREAMLINES):
        dtype = dtypes[i % len(dtypes)]
        streamlines.append(rng.randn(rng.randint(10, 100), 3).astype(dtype))

    list_refcount_before = get_type_refcount()["list"]

    lengths = length(streamlines)
    list_refcount_after = get_type_refcount()["list"]

    # Calling `length` shouldn't increase the refcount of `list`
    # since the return value is a numpy array.
    assert_equal(list_refcount_after, list_refcount_before)
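`get_type_refcount` is a test helper (in dipy it lives in `dipy.testing.memory`); roughly, it counts live objects per type name via the garbage collector. A simplified sketch, assuming only the behavior the assertions above rely on:

import gc
from collections import defaultdict

def get_type_refcount():
    # Count live objects per type name, as seen by the garbage collector.
    gc.collect()
    counts = defaultdict(int)
    for obj in gc.get_objects():
        counts[type(obj).__name__] += 1
    return counts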
Example No. 3
def test_feature_arclength():
    from dipy.tracking.streamline import length

    # Test subclassing Feature
    class ArcLengthFeature(dipymetric.Feature):
        def __init__(self):
            super(ArcLengthFeature, self).__init__(is_order_invariant=True)

        def infer_shape(self, streamline):
            return (1, 1)

        def extract(self, streamline):
            return length(streamline)[None, None]

    for feature in [dipymetric.ArcLengthFeature(), ArcLengthFeature()]:
        for s in [s1, s2, s3, s4]:
            # Test method infer_shape
            assert_equal(feature.infer_shape(s), (1, 1))

            # Test method extract
            features = feature.extract(s)
            assert_equal(features.shape, (1, 1))
            assert_array_almost_equal(features, length(s)[None, None])

        # This feature type is order invariant
        assert_true(feature.is_order_invariant)
        for s in [s1, s2, s3, s4]:
            features = feature.extract(s)
            features_flip = feature.extract(s[::-1])
            assert_array_almost_equal(features, features_flip)
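`dipymetric` and the fixtures `s1`..`s4` come from the test module's setup (`dipymetric` is presumably `dipy.segment.metric`). Hypothetical fixtures sufficient to exercise the loop above:

import numpy as np
import dipy.segment.metric as dipymetric

rng = np.random.RandomState(42)
# Streamlines with different numbers of points, in float32.
s1 = rng.rand(10, 3).astype(np.float32)
s2 = rng.rand(20, 3).astype(np.float32)
s3 = rng.rand(30, 3).astype(np.float32)
s4 = rng.rand(40, 3).astype(np.float32)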
Example No. 4
def get_endpoints_density_map(streamlines, dimensions, point_to_select=1):
    """
    Compute an endpoints density map, supports selecting more than one points
    at each end.
    Parameters
    ----------
    streamlines: list of ndarray
        The list of streamlines to compute endpoints density from.
    dimensions: tuple
        The shape of the reference volume for the streamlines.
    point_to_select: int
        Instead of computing the density based on the first and last points,
        select more than one at each end. To support compressed streamlines,
        a resampling to 0.5mm per segment is performed.
    Returns
    -------
    ndarray: A ndarray where voxel values represent the density of endpoints.
    """
    endpoints_map = np.zeros(dimensions)
    for streamline in streamlines:
        streamline = set_number_of_points(streamline,
                                          int(length(streamline)) * 2)
        points_list = list(streamline[0:point_to_select, :].astype(int))
        points_list.extend(
            streamline[-(point_to_select + 1):-1, :].astype(int))
        for xyz in points_list:
            x_val = int(np.clip(xyz[0], 0, dimensions[0] - 1))
            y_val = int(np.clip(xyz[1], 0, dimensions[1] - 1))
            z_val = int(np.clip(xyz[2], 0, dimensions[2] - 1))
            endpoints_map[x_val, y_val, z_val] += 1

    return endpoints_map
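A hypothetical call, assuming the function's own imports (`length` and `set_number_of_points` from `dipy.tracking.streamline`) are in scope:

import numpy as np

rng = np.random.RandomState(0)
# Random walks with roughly unit steps, so int(length(s)) * 2 stays >= 2.
streamlines = [np.cumsum(rng.rand(20, 3), axis=0) for _ in range(3)]
density = get_endpoints_density_map(streamlines, (25, 25, 25),
                                    point_to_select=2)
print(int(density.sum()))  # 3 streamlines x 4 selected points each = 12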
Example No. 5
def main():
    parser = build_parser()
    args = parser.parse_args()

    for f in args.datasets:
        with Timer("Normalizing step size of dataset '{}'".format(f)):
            tractography_data = neurotools.TractographyData.load(f)

            t = nib.streamlines.Tractogram(tractography_data.streamlines)
            t.apply_affine(
                tractography_data.signal.affine)  # Bring streamlines to RAS+mm

            streamlines = t.streamlines
            streamlines._lengths = streamlines._lengths.astype(int)
            streamlines._offsets = streamlines._offsets.astype(int)
            lengths = length(streamlines)
            nb_points = np.ceil(lengths / args.step_size).astype(int)

            new_streamlines = (set_number_of_points(s, n)
                               for s, n in zip(streamlines, nb_points))

            t = nib.streamlines.Tractogram(new_streamlines)
            t.apply_affine(np.linalg.inv(tractography_data.signal.affine))
            t.affine_to_rasmm = np.eye(4)
            tractography_data.streamlines = t.streamlines

        filename = f[:-4] + "_" + str(args.step_size) + "mm" + f[-4:]
        tractography_data.save(filename)
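The core of this script is the resampling rule nb_points = ceil(length / step_size); a quick numeric illustration:

import numpy as np

lengths = np.array([30.0, 7.2, 0.9])        # streamline lengths in mm
print(np.ceil(lengths / 0.75).astype(int))  # 0.75 mm step -> [40 10  2]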
Example No. 6
    def test_length(self):
        # Test length of only one streamline
        length_streamline_cython = dipystreamline.length(self.streamline)
        length_streamline_python = length_python(self.streamline)
        assert_equal(length_streamline_cython, length_streamline_python)

        length_streamline_cython = dipystreamline.length(self.streamline_64bit)
        length_streamline_python = length_python(self.streamline_64bit)
        assert_equal(length_streamline_cython, length_streamline_python)

        # Test computing length of multiple streamlines of different nb_points
        length_streamlines_cython = dipystreamline.length(self.streamlines)

        for i, s in enumerate(self.streamlines):
            length_streamline_python = length_python(s)
            assert_array_almost_equal(length_streamlines_cython[i], length_streamline_python)

        length_streamlines_cython = dipystreamline.length(self.streamlines_64bit)

        for i, s in enumerate(self.streamlines_64bit):
            length_streamline_python = length_python(s)
            assert_array_almost_equal(length_streamlines_cython[i], length_streamline_python)

        # Test streamlines having mixed dtype
        streamlines = [self.streamline, self.streamline.astype(np.float64)]
        assert_raises(ValueError, dipystreamline.length, streamlines)

        # Test streamlines with different shapes
        length_streamlines_cython = dipystreamline.length(self.heterogeneous_streamlines)

        for i, s in enumerate(self.heterogeneous_streamlines):
            length_streamline_python = length_python(s)
            assert_array_almost_equal(length_streamlines_cython[i], length_streamline_python)

        # Test streamline having integer dtype
        length_streamline = dipystreamline.length(self.streamline.astype('int'))
        assert_true(length_streamline.dtype == np.float64)

        # Test empty list
        assert_equal(dipystreamline.length([]), 0.0)

        # Test streamline having only one point
        assert_equal(dipystreamline.length(np.array([[1, 2, 3]])), 0.0)

        # We do not support list of lists, it should be numpy ndarray.
        streamline = [[1, 2, 3], [4, 5, 5], [2, 1, 3], [4, 2, 1]]
        assert_raises(AttributeError, dipystreamline.length, streamline)
Example No. 7
def bench_length():
    repeat = 10
    nb_streamlines = DATA['nb_streamlines']
    streamlines = DATA['streamlines']  # streamlines as a list of ndarrays

    msg = "Timing length() with {0:,} streamlines."
    print(msg.format(nb_streamlines * repeat))
    python_time = measure("[length_python(s) for s in streamlines]", repeat)
    print("Python time: {0:.2f} sec".format(python_time))

    cython_time = measure("length(streamlines)", repeat)
    print("Cython time: {0:.3f} sec".format(cython_time))
    print("Speed up of {0:.2f}x".format(python_time/cython_time))

    # Make sure it produces the same results.
    assert_array_almost_equal([length_python(s) for s in DATA["streamlines"]],
                              length(DATA["streamlines"]))

    streamlines = DATA['streamlines_arrseq']
    cython_time_arrseq = measure("length(streamlines)", repeat)
    print("Cython time (ArrSeq): {0:.3f} sec".format(cython_time_arrseq))
    print("Speed up of {0:.2f}x".format(python_time/cython_time_arrseq))

    # Make sure it produces the same results.
    assert_array_equal(length(DATA["streamlines"]),
                       length(DATA["streamlines_arrseq"]))
Example No. 8
def test_length_memory_leaks():
    # Test some dtypes
    dtypes = [np.float32, np.float64, np.int32, np.int64]
    for dtype in dtypes:
        rng = np.random.RandomState(1234)
        NB_STREAMLINES = 10000
        streamlines = [
            rng.randn(rng.randint(10, 100), 3).astype(dtype)
            for _ in range(NB_STREAMLINES)
        ]

        list_refcount_before = get_type_refcount()["list"]

        lengths = length(streamlines)
        list_refcount_after = get_type_refcount()["list"]

        # Calling `length` shouldn't increase the refcount of `list`
        # since the return value is a numpy array.
        assert_equal(list_refcount_after, list_refcount_before)

    # Test mixed dtypes
    rng = np.random.RandomState(1234)
    NB_STREAMLINES = 10000
    streamlines = []
    for i in range(NB_STREAMLINES):
        dtype = dtypes[i % len(dtypes)]
        streamlines.append(rng.randn(rng.randint(10, 100), 3).astype(dtype))

    list_refcount_before = get_type_refcount()["list"]

    lengths = length(streamlines)
    list_refcount_after = get_type_refcount()["list"]

    # Calling `length` shouldn't increase the refcount of `list`
    # since the return value is a numpy array.
    assert_equal(list_refcount_after, list_refcount_before)
Example No. 9
def embed_flattened_plus_flipped_plus_length_plus_curvature(streamlines):
    lengths = length(streamlines)
    # Mean curvature of the streamline (TO BE CHECKED!):
    # curvature = np.vstack([np.linalg.norm(np.gradient(s, axis=0), axis=0) for s in streamlines])
    curvature = np.array([frenet_serret(s)[3].squeeze() for s in streamlines])
    torsion = np.array([frenet_serret(s)[4].squeeze() for s in streamlines])
    X = embed_flattened_plus_flipped(streamlines)
    X = np.concatenate([
        X,
        np.concatenate([lengths, lengths])[:, None],
        np.vstack([curvature, curvature[::-1]]),
        np.vstack([torsion, torsion[::-1]]),
    ], axis=1)
    return X
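`frenet_serret` is presumably `dipy.tracking.metrics.frenet_serret`, which returns the tangent, normal and binormal frames plus per-point curvature and torsion; the indices [3] and [4] above pick out curvature and torsion. A quick shape check on a synthetic helix:

import numpy as np
from dipy.tracking.metrics import frenet_serret

theta = np.linspace(0, 4 * np.pi, 100)
helix = np.column_stack([np.cos(theta), np.sin(theta), 0.1 * theta])
T, N, B, k, t = frenet_serret(helix)
print(k.shape, t.shape)  # one curvature and one torsion value per point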
Example No. 10
def get_head_tail_density_maps(streamlines, dimensions, point_to_select=1):
    """
    Compute two separate endpoints density maps for the head and tail
    Parameters
    ----------
    streamlines: list of ndarray
        The list of streamlines to compute endpoints density from.
    dimensions: tuple
        The shape of the reference volume for the streamlines.
    point_to_select: int
        Instead of computing the density based on the first and last points,
        select more than one at each end. To support compressed streamlines,
        a resampling to 0.5mm per segment is performed.
    Returns
    -------
    A tuple containing
        ndarray: A ndarray where voxel values represent the density of
            head endpoints.
        ndarray: A ndarray where voxel values represent the density of
            tail endpoints.
    """
    endpoints_map_head = np.zeros(dimensions)
    endpoints_map_tail = np.zeros(dimensions)
    for streamline in streamlines:
        nb_point = max(2, int(length(streamline)) * 2)
        streamline = set_number_of_points(streamline, nb_point)
        points_list_head = list(streamline[0:point_to_select, :])
        points_list_tail = list(streamline[-point_to_select:, :])
        for xyz in points_list_head:
            x_val = np.clip(xyz[0], 0, dimensions[0]-1).astype(int)
            y_val = np.clip(xyz[1], 0, dimensions[1]-1).astype(int)
            z_val = np.clip(xyz[2], 0, dimensions[2]-1).astype(int)
            endpoints_map_head[x_val, y_val, z_val] += 1
        for xyz in points_list_tail:
            x_val = np.clip(xyz[0], 0, dimensions[0]-1).astype(int)
            y_val = np.clip(xyz[1], 0, dimensions[1]-1).astype(int)
            z_val = np.clip(xyz[2], 0, dimensions[2]-1).astype(int)
            endpoints_map_tail[x_val, y_val, z_val] += 1
    return endpoints_map_head, endpoints_map_tail
Example No. 11
def computeStats(subjectID,reference,streamlines,classification,outdir):

	avg_length = []
	avg_curv = []
	stream_count = []
	mean_x = []
	mean_y = []
	mean_z = []
	num_vox = []

	tract_index_labels = np.trim_zeros(np.unique(classification['index'].tolist()))

	qb = QuickBundles(np.inf)

	for i in tract_index_labels:
		indices = [ t for t in range(len(classification['index'].tolist())) if classification['index'].tolist()[int(t)] == i ]
		avg_length.append(np.mean(length(streamlines[indices])))
		
		clusters = qb.cluster(streamlines[indices])
		avg_curv.append(mean_curvature(clusters.centroids[0]))
		orientation = mean_orientation(clusters.centroids[0])
		mean_x.append(orientation[0])
		mean_y.append(orientation[1])
		mean_z.append(orientation[2])
		stream_count.append(len(indices))
		denmap = density_map(streamlines[indices],reference.affine,reference.shape)
		num_vox.append(len(denmap[denmap>0]))

	df = pd.DataFrame([],dtype=object)
	df['subjectID'] = [ subjectID for f in range(len(classification['names'].tolist())) ]
	df['structureID'] = [ f for f in classification['names'].tolist() ]
	df['nodeID'] = [ 1 for f in range(len(df['structureID'])) ]
	df['streamline_count'] = stream_count
	df['average_length'] = avg_length
	df['average_curvature'] = avg_curv
	df['voxel_count'] = num_vox
	df['centroid_x'] = mean_x
	df['centroid_y'] = mean_y
	df['centroid_z'] = mean_z

	df.to_csv('%s/output_FiberStats.csv' %outdir,index=False)
Example No. 12
def rois_fiberlen_cellinput(N, cell_streamlines):
    # input:
    #        N: # of ROIs
    #        cell_streamlines: streamlines connecting ith and jth ROIs
    #        voxel_size, affine: parameters to transform streamlines to voxel space

    connectcm_len = np.zeros([N, N])

    idx = 0
    for i in range(1, N):
        for j in range(i + 1, N):
            tmp_streamlines = cell_streamlines[idx]
            idx = idx + 1

            tmp_len = 0
            for sl in tmp_streamlines:
                flen = length(sl)
                tmp_len = tmp_len + flen

            if (len(tmp_streamlines) > 0):
                connectcm_len[i, j] = tmp_len / len(tmp_streamlines)

    return connectcm_len
Example No. 13
 def extract(self, streamline):
     """ Extracts features from `streamline`. """
     # return np.sum(np.sqrt(np.sum((streamline[1:] - streamline[:-1]) ** 2, axis=1)))
     # or use Dipy's function that computes the arc length of a streamline.
     return length(streamline)
Example No. 14
    def build_scene(self):

        scene = window.Renderer()
        for (t, streamlines) in enumerate(self.tractograms):
            if self.random_colors:
                colors = self.prng.random_sample(3)
            else:
                colors = None

            if self.cluster:

                print(' Clustering threshold {} \n'.format(self.cluster_thr))
                clusters = qbx_and_merge(streamlines,
                                         [40, 30, 25, 20, self.cluster_thr])
                self.tractogram_clusters[t] = clusters
                centroids = clusters.centroids
                print(' Number of centroids is {}'.format(len(centroids)))
                sizes = np.array([len(c) for c in clusters])
                linewidths = np.interp(sizes,
                                       [sizes.min(), sizes.max()], [0.1, 2.])
                centroid_lengths = np.array([length(c) for c in centroids])

                print(' Minimum number of streamlines in cluster {}'.format(
                    sizes.min()))

                print(' Maximum number of streamlines in cluster {}'.format(
                    sizes.max()))

                print(' Construct cluster actors')
                for (i, c) in enumerate(centroids):

                    centroid_actor = actor.streamtube([c],
                                                      colors,
                                                      linewidth=linewidths[i],
                                                      lod=False)
                    scene.add(centroid_actor)

                    cluster_actor = actor.line(clusters[i], lod=False)
                    cluster_actor.GetProperty().SetRenderLinesAsTubes(1)
                    cluster_actor.GetProperty().SetLineWidth(6)
                    cluster_actor.GetProperty().SetOpacity(1)
                    cluster_actor.VisibilityOff()

                    scene.add(cluster_actor)

                    # Every centroid actor (cea) is paired to a cluster actor
                    # (cla).

                    self.cea[centroid_actor] = {
                        'cluster_actor': cluster_actor,
                        'cluster': i,
                        'tractogram': t,
                        'size': sizes[i],
                        'length': centroid_lengths[i],
                        'selected': 0,
                        'expanded': 0
                    }

                    self.cla[cluster_actor] = {
                        'centroid_actor': centroid_actor,
                        'cluster': i,
                        'tractogram': t,
                        'size': sizes[i],
                        'length': centroid_lengths[i],
                        'selected': 0
                    }
                    apply_shader(self, cluster_actor)
                    apply_shader(self, centroid_actor)

            else:

                streamline_actor = actor.line(streamlines, colors=colors)
                streamline_actor.GetProperty().SetEdgeVisibility(1)
                streamline_actor.GetProperty().SetRenderLinesAsTubes(1)
                streamline_actor.GetProperty().SetLineWidth(6)
                streamline_actor.GetProperty().SetOpacity(1)
                scene.add(streamline_actor)
        return scene
Example No. 15
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    if not os.path.isfile(args.tracts):
        parser.error("Tracts file: {0} does not exist.".format(args.tracts))

    if not os.path.isfile(args.aparc):
        parser.error("Label file: {0} does not exist.".format(args.aparc))

    if not os.path.isfile(args.labels):
        parser.error("Requested region file: {0} does not exist.".format(
            args.labels))

    if not os.path.isfile(args.lut):
        parser.error("Freesurfer LUT file: {0} does not exist.".format(
            args.lut))

    if not os.path.isfile(args.faimage):
        parser.error("FA Image file: {0} does not exist.".format(args.faimage))

    if not os.path.isfile(args.mdimage):
        parser.error("MD Image file: {0} does not exist.".format(args.mdimage))

    # Validate that tracts can be processed
    if not validate_coordinates(args.aparc, args.tracts, nifti_compliant=True):
        parser.error("The tracts file contains points that are invalid.\n" +
                     "Use the remove_invalid_coordinates.py script to clean.")

    # Load label image
    labels_img = nib.load(args.aparc)
    full_labels = labels_img.get_data().astype('int')

    # Load fibers
    tract_format = tc.detect_format(args.tracts)
    tract = tract_format(args.tracts, args.aparc)

    affine = compute_affine_for_dipy_functions(args.aparc, args.tracts)

    #load FA and MD image
    fa_img = nib.load(args.faimage)
    fa_data = fa_img.get_data()

    md_img = nib.load(args.mdimage)
    md_data = md_img.get_data()

    # ========= processing streamlines =================
    fiberlen_range = np.asarray([args.minlen, args.maxlen])

    streamlines = [t for t in tract]
    print "Subject " + args.sub_id + " has " + str(
        len(streamlines)) + " raw streamlines."

    f_streamlines = []  #filtered streamlines
    lenrecord = []
    idx = 0
    for sl in streamlines:
        # Avoid streamlines having only one point, as they crash the
        # Dipy connectivity matrix function.
        if sl.shape[0] > 1:
            flen = length(sl)
            # get fibers having length between 20mm and 200mm
            if (flen > fiberlen_range[0]) & (flen < fiberlen_range[1]):
                f_streamlines.append(sl)
                lenrecord.append(flen)
                idx = idx + 1

    print "Subject " + args.sub_id + " has " + str(
        idx) + " streamlines with lengths between " + str(
            args.minlen) + " and " + str(args.maxlen) + "."

    # ============= process the parcellation =====================
    dilation_para = np.array([args.dilation_dist, args.dilation_windsize])

    # Compute the mapping from label name to label id
    label_id_mapping = compute_labels_map(args.lut)

    # Find which labels were requested by the user.
    requested_labels_mapping = compute_requested_labels(
        args.labels, label_id_mapping)

    # Filter to keep only needed ones
    filtered_labels = np.zeros(full_labels.shape, dtype='int')
    for label_val in requested_labels_mapping:
        if np.count_nonzero(full_labels == label_val) == 0:
            print(label_val)
            print(requested_labels_mapping[label_val])

        filtered_labels[full_labels == label_val] = label_val

    #cortex band dilation
    dilated_labels = cortexband_dilation_wm(filtered_labels, full_labels,
                                            dilation_para)

    # Reduce the range of labels to avoid a sparse matrix,
    # because the ids of labels can range from 0 to the 12000's.
    reduced_labels, labels_lut = dpu.reduce_labels(filtered_labels)
    reduced_dilated_labels, labels_lut = dpu.reduce_labels(dilated_labels)

    # Compute connectivity matrix and extract the fibers
    M, grouping = nconnectivity_matrix(f_streamlines,
                                       reduced_dilated_labels,
                                       fiberlen_range,
                                       args.cnpoint,
                                       affine=affine,
                                       symmetric=True,
                                       return_mapping=True,
                                       mapping_as_streamlines=True)

    Msize = len(M)
    CM_before_outlierremove = M[1:, 1:]
    nstream_bf = np.sum(CM_before_outlierremove)
    print('{0} {1} streamlines in the connectivity matrix before outlier '
          'removal.'.format(args.sub_id, nstream_bf))

    #===================== process the streamlines =============
    print('Processing streamlines to remove outliers ..............')

    outlier_para = 3
    average_thrd = 8

    M_after_outlierremove = np.zeros((Msize, Msize))
    #downsample streamlines
    cell_streamlines = []
    cell_id = []
    for i in range(1, Msize):
        for j in range(i + 1, Msize):
            tmp_streamlines = grouping[i, j]
            tmp_streamlines = list(tmp_streamlines)
            #downsample
            tmp_streamlines_downsampled = [
                downsample(s, 100) for s in tmp_streamlines
            ]
            #remove outliers, we need to rewrite the QuickBundle method to speed up this process

            qb = QuickBundles(threshold=average_thrd)
            clusters = qb.cluster(tmp_streamlines_downsampled)
            outlier_clusters = clusters < outlier_para  #small clusters
            nonoutlier_clusters = clusters[np.logical_not(outlier_clusters)]

            tmp_nonoutlier_index = []
            for tmp_cluster in nonoutlier_clusters:
                tmp_nonoutlier_index = tmp_nonoutlier_index + tmp_cluster.indices

            clean_streamline_downsampled = [
                tmp_streamlines_downsampled[ind]
                for ind in tmp_nonoutlier_index
            ]
            cell_streamlines.append(clean_streamline_downsampled)
            cell_id.append([i, j])
            M_after_outlierremove[i, j] = len(clean_streamline_downsampled)

    CM_after_outlierremove = M_after_outlierremove[1:, 1:]
    nstream_bf = np.sum(CM_after_outlierremove)
    print('{0} {1} streamlines in the connectivity matrix after outlier '
          'removal.'.format(args.sub_id, nstream_bf))

    #save streamlines and count matrix

    cmCountMatrix_fname = args.sub_id + "_" + args.pre + "_cm_count_raw.mat"
    cmCountMatrix_processed_fname = args.sub_id + "_" + args.pre + "_cm_count_processed.mat"
    cmStreamlineMatrix_fname = args.sub_id + "_" + args.pre + "_cm_streamlines.mat"
    reduced_labels_fname = args.sub_id + "_" + args.pre + "_reduced_labels.nii.gz"
    dilated_labels_fname = args.sub_id + "_" + args.pre + "_dilated_labels.nii.gz"
    RoiInfo_fname = args.sub_id + "_" + args.pre + "_RoiInfo.mat"

    # save the raw count matrix
    CM = M[1:, 1:]
    sio.savemat(cmCountMatrix_fname, {'cm': CM})
    sio.savemat(cmCountMatrix_processed_fname, {'cm': CM_after_outlierremove})

    # save the streamline matrix
    sio.savemat(cmStreamlineMatrix_fname, {'slines': cell_streamlines})
    sio.savemat(RoiInfo_fname, {'ROIinfo': cell_id})
    print(args.sub_id + ': cell_streamlines.mat and ROIinfo.mat have been saved.')

    filtered_labels_img = nib.Nifti1Image(filtered_labels,
                                          labels_img.get_affine(),
                                          labels_img.get_header())
    nib.save(filtered_labels_img, reduced_labels_fname)
    print(args.sub_id + ': filtered labels have been saved.')

    dilated_labels_img = nib.Nifti1Image(dilated_labels,
                                         labels_img.get_affine(),
                                         labels_img.get_header())
    nib.save(dilated_labels_img, dilated_labels_fname)
    print(args.sub_id + ': dilated labels have been saved.')

    # ===================== process the streamlines and extract features =============
    cm_fa_curve = fa_extraction_use_cellinput(cell_streamlines,
                                              cell_id,
                                              fa_data,
                                              Msize,
                                              affine=affine)
    (tmp_cm_fa_mean, tmp_cm_fa_max,
     cm_count) = fa_mean_extraction(cm_fa_curve, Msize)

    # extract MD values along the streamlines
    cm_md_curve = fa_extraction_use_cellinput(cell_streamlines,
                                              cell_id,
                                              md_data,
                                              Msize,
                                              affine=affine)
    (tmp_cm_md_mean, tmp_cm_md_max,
     testcm) = fa_mean_extraction(cm_md_curve, Msize)

    #connected surface area
    # extract the connective volume ratio
    (tmp_cm_volumn,
     tmp_cm_volumn_ratio) = rois_connectedvol_cellinput(reduced_labels,
                                                        Msize,
                                                        cell_streamlines,
                                                        cell_id,
                                                        affine=affine)

    #fiber length
    tmp_connectcm_len = rois_fiberlen_cellinput(Msize, cell_streamlines)

    #save cm features
    cm_md_mean = tmp_cm_md_mean[1:, 1:]
    cm_md_max = tmp_cm_md_max[1:, 1:]

    cm_fa_mean = tmp_cm_fa_mean[1:, 1:]
    cm_fa_max = tmp_cm_fa_max[1:, 1:]

    cm_volumn = tmp_cm_volumn[1:, 1:]
    cm_volumn_ratio = tmp_cm_volumn_ratio[1:, 1:]

    connectcm_len = tmp_connectcm_len[1:, 1:]

    sio.savemat(args.pre + "_cm_processed_mdmean_100.mat",
                {'cm_mdmean': cm_md_mean})
    sio.savemat(args.pre + "_cm_processed_mdmax_100.mat",
                {'cm_mdmax': cm_md_max})
    sio.savemat(args.pre + "_cm_processed_famean_100.mat",
                {'cm_famean': cm_fa_mean})
    sio.savemat(args.pre + "_cm_processed_famax_100.mat",
                {'cm_famax': cm_fa_max})
    sio.savemat(args.pre + "_cm_processed_volumn_100.mat",
                {'cm_volumn': cm_volumn})
    sio.savemat(args.pre + "_cm_processed_volumn_ratio_100.mat",
                {'cm_volumn_ratio': cm_volumn_ratio})
    sio.savemat(args.pre + "_cm_processed_volumn_ratio_100.mat",
                {'cm_len': connectcm_len})

    # save the diffusion functions matrix
    cell_fa = []
    for i in range(1, Msize):
        for j in range(i + 1, Msize):
            tmp_fa = cm_fa_curve[i, j]
            tmp_fa = list(tmp_fa)
            cell_fa.append(tmp_fa)

    sio.savemat(args.pre + "_cm_processed_sfa_100.mat", {'sfa': cell_fa})
    print('cell_fa.mat, fa_roiinfo.mat have been saved')

    cell_md = []
    for i in range(1, Msize):
        for j in range(i + 1, Msize):
            tmp_md = cm_md_curve[i, j]
            tmp_md = list(tmp_md)
            cell_md.append(tmp_md)

    sio.savemat(args.pre + "_cm_processed_smd_100.mat", {'smd': cell_md})
Example No. 16
    def add_cluster_actors(self, scene, tractograms,
                           threshold, enable_callbacks=True):
        """ Add streamline actors to the scene

        Parameters
        ----------
        scene : Scene
        tractograms : list
            list of tractograms
        threshold : float
            Cluster threshold
        enable_callbacks : bool
            Enable callbacks for selecting clusters
        """
        color_gen = distinguishable_colormap()
        for (t, sft) in enumerate(tractograms):
            streamlines = sft.streamlines

            if self.random_colors:
                colors = next(color_gen)
            else:
                colors = None

            if not self.world_coords:
                # TODO we need to read the affine of a tractogram
                # from a StatefulTractogram
                msg = 'Currently native coordinates are not supported'
                msg += ' for streamlines'
                raise ValueError(msg)

            if self.cluster:

                print(' Clustering threshold {} \n'.format(threshold))
                clusters = qbx_and_merge(streamlines,
                                         [40, 30, 25, 20, threshold])
                self.tractogram_clusters[t] = clusters
                centroids = clusters.centroids
                print(' Number of centroids is {}'.format(len(centroids)))
                sizes = np.array([len(c) for c in clusters])
                linewidths = np.interp(sizes,
                                       [sizes.min(), sizes.max()], [0.1, 2.])
                centroid_lengths = np.array([length(c) for c in centroids])

                print(' Minimum number of streamlines in cluster {}'
                      .format(sizes.min()))

                print(' Maximum number of streamlines in cluster {}'
                      .format(sizes.max()))

                print(' Construct cluster actors')
                for (i, c) in enumerate(centroids):

                    centroid_actor = actor.streamtube([c], colors,
                                                      linewidth=linewidths[i],
                                                      lod=False)
                    scene.add(centroid_actor)
                    self.mem.centroid_actors.append(centroid_actor)

                    cluster_actor = actor.line(clusters[i],
                                               lod=False)
                    cluster_actor.GetProperty().SetRenderLinesAsTubes(1)
                    cluster_actor.GetProperty().SetLineWidth(6)
                    cluster_actor.GetProperty().SetOpacity(1)
                    cluster_actor.VisibilityOff()

                    scene.add(cluster_actor)
                    self.mem.cluster_actors.append(cluster_actor)

                    # Every centroid actor (cea) is paired to a cluster actor
                    # (cla).

                    self.cea[centroid_actor] = {
                        'cluster_actor': cluster_actor,
                        'cluster': i, 'tractogram': t,
                        'size': sizes[i], 'length': centroid_lengths[i],
                        'selected': 0, 'expanded': 0}

                    self.cla[cluster_actor] = {
                        'centroid_actor': centroid_actor,
                        'cluster': i, 'tractogram': t,
                        'size': sizes[i], 'length': centroid_lengths[i],
                        'selected': 0, 'highlighted': 0}
                    apply_shader(self, cluster_actor)
                    apply_shader(self, centroid_actor)

            else:

                streamline_actor = actor.line(streamlines, colors=colors)
                streamline_actor.GetProperty().SetEdgeVisibility(1)
                streamline_actor.GetProperty().SetRenderLinesAsTubes(1)
                streamline_actor.GetProperty().SetLineWidth(6)
                streamline_actor.GetProperty().SetOpacity(1)
                scene.add(streamline_actor)
                self.mem.streamline_actors.append(streamline_actor)

        if not enable_callbacks:
            return

        def left_click_centroid_callback(obj, event):

            self.cea[obj]['selected'] = not self.cea[obj]['selected']
            self.cla[self.cea[obj]['cluster_actor']]['selected'] = \
                self.cea[obj]['selected']
            self.show_m.render()

        def left_click_cluster_callback(obj, event):

            if self.cla[obj]['selected']:
                self.cla[obj]['centroid_actor'].VisibilityOn()
                ca = self.cla[obj]['centroid_actor']
                self.cea[ca]['selected'] = 0
                obj.VisibilityOff()
                self.cea[ca]['expanded'] = 0

            self.show_m.render()

        for cl in self.cla:
            cl.AddObserver('LeftButtonPressEvent', left_click_cluster_callback,
                           1.0)
            self.cla[cl]['centroid_actor'].AddObserver(
                'LeftButtonPressEvent', left_click_centroid_callback, 1.0)
Example No. 17
def check_range(streamline, gt, lt):
    length_s = length(streamline)
    return gt < length_s < lt
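A hypothetical filtering pass built on `check_range`, with synthetic streamlines:

import numpy as np

rng = np.random.RandomState(0)
streamlines = [np.cumsum(rng.rand(50, 3), axis=0) for _ in range(100)]
kept = [s for s in streamlines if check_range(s, 20.0, 200.0)]
print("{0} of {1} streamlines kept".format(len(kept), len(streamlines)))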
Example No. 18
   [Figure: entire bundle with a specific color.]

Show every streamline of a bundle with a different color
========================================================

Let's make a colormap where every streamline of the bundle is colored by its
length.
"""

scene.clear()

from dipy.tracking.streamline import length

lengths = length(bundle_native)

hue = (0.5, 0.5)  # blue only
saturation = (0.0, 1.0)  # black to white

lut_cmap = actor.colormap_lookup_table(scale_range=(lengths.min(),
                                                    lengths.max()),
                                       hue_range=hue,
                                       saturation_range=saturation)

stream_actor5 = actor.line(bundle_native,
                           lengths,
                           linewidth=0.1,
                           lookup_colormap=lut_cmap)

scene.add(stream_actor5)
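A scalar bar for this length colormap can be attached the same way as in the renderer-based variant of this tutorial (Example No. 22 below):

bar = actor.scalar_bar(lut_cmap)
scene.add(bar)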
Example No. 19
 def extract(self, streamline):
     return length(streamline)[None, None]
Example No. 20
def embed_flattened_plus_flipped_plus_length(streamlines):
    lengths = length(streamlines)
    X = embed_flattened_plus_flipped(streamlines)
    X = np.concatenate([X, np.concatenate([lengths, lengths])[:, None]],
                       axis=1)
    return X
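`embed_flattened_plus_flipped` is not included in these snippets. A plausible sketch, assuming equal-length streamlines, that would match the duplicated `lengths` rows in the caller above:

import numpy as np

def embed_flattened_plus_flipped(streamlines):
    # Hypothetical helper: one row per streamline with its flattened
    # coordinates, followed by one row per reversed copy.
    X = np.vstack([s.ravel() for s in streamlines])
    X_flip = np.vstack([s[::-1].ravel() for s in streamlines])
    return np.vstack([X, X_flip])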
Example No. 21
def main():
    parser = _build_args_parser()
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)

    # make sure all the given files exist
    if not isfile(args.streamlines):
        parser.error('The file "{0}" must exist.'.format(args.streamlines))

    if not isfile(args.intersections):
        parser.error('The file "{0}" must exist.'.format(args.intersections))

    # make sure that files are not accidentally overwritten
    if isfile(args.output):
        if args.overwrite:
            logging.info('Overwriting "{0}".'.format(args.output))
        else:
            parser.error('The file "{0}" already exists. Use -f to overwrite it.'.format(args.output))

    if args.output_tracts is not None:
        if isfile(args.output_tracts):
            if args.overwrite:
                logging.info('Overwriting "{0}".'.format(args.output_tracts))
            else:
                parser.error('The file "{0}" already exists. Use -f to overwrite it.'.format(args.output_tracts))

    # load the surfaces
    logging.info('Loading tractography and intersections.')

    # load the intersections file
    intersections = np.load(args.intersections, allow_pickle=True)

    surf_ids0 = intersections['surf_ids0']
    surf_ids1 = intersections['surf_ids1']
    tri_ids0 = intersections['tri_ids0']
    tri_ids1 = intersections['tri_ids1']
    pts0 = intersections['pts0']
    pts1 = intersections['pts1']

    # load the streamlines
    streamlines = load_vtk_streamlines(args.streamlines)

    logging.info('Filtering streamlines with angles >= {0} and length outside range {1}-{2}mm.'.format(args.angle, args.min_length, args.max_length))

    n = len(tri_ids0)
    mask = np.zeros(n, dtype=bool)

    # create the mask for filtering
    for i, s in enumerate(streamlines):
        s_len = length(s)
        s_filter = (s_len <= args.max_length) & (s_len >= args.min_length)

        if s_filter:
            s_filter = (metrics.winding(s) < args.angle)

        mask[i] = s_filter

    # save the filtered tractography if requested
    if args.output_tracts is not None:
        filtered_tracts = [s for s, keep in zip(streamlines, mask) if keep]
        save_vtk_streamlines(filtered_tracts, args.output_tracts, binary=True)

    remaining = np.sum(mask)
    logging.info('Removed {0} streamlines with {1} remaining.'.format(n - remaining, remaining))

    # save the results
    np.savez_compressed(args.output,
                        pts0=pts0[mask],
                        tri_ids0=tri_ids0[mask],
                        surf_ids0=surf_ids0[mask],
                        pts1=pts1[mask],
                        tri_ids1=tri_ids1[mask],
                        surf_ids1=surf_ids1[mask])
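Since dipy's `length` accepts a whole sequence and returns an array (see Example No. 6), the length part of the mask above could be vectorized; a sketch reusing the names already in scope, with the winding test kept as a per-streamline loop:

lengths = length(streamlines)
len_ok = (lengths >= args.min_length) & (lengths <= args.max_length)
mask = np.array([ok and metrics.winding(s) < args.angle
                 for ok, s in zip(len_ok, streamlines)], dtype=bool)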
Example No. 22
   [Figure: entire bundle with a specific color.]

Show every streamline of a bundle with a different color
========================================================

Let's make a colormap where every streamline of the bundle is colored by its
length.
"""

renderer.clear()

from dipy.tracking.streamline import length

lengths = length(bundle_native)

hue = [0.5, 0.5]  # blue only
saturation = [0.0, 1.0]  # black to white

lut_cmap = actor.colormap_lookup_table(
    scale_range=(lengths.min(), lengths.max()), hue_range=hue, saturation_range=saturation
)

stream_actor5 = actor.line(bundle_native, lengths, linewidth=0.1, lookup_colormap=lut_cmap)

renderer.add(stream_actor5)
bar3 = actor.scalar_bar(lut_cmap)

renderer.add(bar3)
Example No. 23
def load_tractography_dataset_from_dwi_and_tractogram(dwi,
                                                      tractogram,
                                                      volume_manager,
                                                      use_sh_coeffs=False,
                                                      bvals=None,
                                                      bvecs=None,
                                                      step_size=None,
                                                      mean_centering=True):
    # Load signal
    signal = nib.load(dwi)
    signal.get_data()  # Forces loading volume in-memory.
    basename = re.sub(r'(\.gz|\.nii\.gz)$', '', dwi)
    bvals = basename + '.bvals' if bvals is None else bvals
    bvecs = basename + '.bvecs' if bvecs is None else bvecs

    gradients = gradient_table(bvals, bvecs)
    tracto_data = TractographyData(signal, gradients)

    # Load streamlines
    tfile = nib.streamlines.load(tractogram)
    tractogram = tfile.tractogram

    # Resample streamline to have a fixed step size, if needed.
    if step_size is not None:
        print("Resampling streamlines to have a step size of {}mm".format(
            step_size))
        streamlines = tractogram.streamlines
        streamlines._lengths = streamlines._lengths.astype(int)
        streamlines._offsets = streamlines._offsets.astype(int)
        lengths = length(streamlines)
        nb_points = np.ceil(lengths / step_size).astype(int)
        new_streamlines = (set_number_of_points(s, n)
                           for s, n in zip(streamlines, nb_points))
        tractogram = nib.streamlines.Tractogram(new_streamlines,
                                                affine_to_rasmm=np.eye(4))

    # Compute matrix that brings streamlines back to diffusion voxel space.
    rasmm2vox_affine = np.linalg.inv(signal.affine)
    tractogram.apply_affine(rasmm2vox_affine)

    # Add streamlines to the TractogramData
    tracto_data.add(tractogram.streamlines, "tractogram")

    dwi = tracto_data.signal
    bvals = tracto_data.gradients.bvals
    bvecs = tracto_data.gradients.bvecs

    if use_sh_coeffs:
        # Use 45 spherical harmonic coefficients to represent the diffusion signal.
        volume = neurotools.get_spherical_harmonics_coefficients(
            dwi, bvals, bvecs,
            mean_centering=mean_centering).astype(np.float32)
    else:
        # Resample the diffusion signal to have 100 directions.
        volume = neurotools.resample_dwi(dwi,
                                         bvals,
                                         bvecs,
                                         mean_centering=mean_centering).astype(
                                             np.float32)

    # Free some memory as we don't need the original signal.
    tracto_data.signal.uncache()
    subject_id = volume_manager.register(volume)
    tracto_data.subject_id = subject_id

    return TractographyDataset([tracto_data], "dataset", keep_on_cpu=True)
Example No. 24
    def check_range(streamline, gt=greater_than, lt=less_than):

        return gt < length(streamline) < lt
Example No. 25
File: app.py Project: grlee77/dipy
    def build_scene(self):

        scene = window.Renderer()
        for (t, streamlines) in enumerate(self.tractograms):
            if self.random_colors:
                colors = self.prng.random_sample(3)
            else:
                colors = None

            if self.cluster:

                print(' Clustering threshold {} \n'.format(self.cluster_thr))
                clusters = qbx_and_merge(streamlines,
                                         [40, 30, 25, 20, self.cluster_thr])
                self.tractogram_clusters[t] = clusters
                centroids = clusters.centroids
                print(' Number of centroids is {}'.format(len(centroids)))
                sizes = np.array([len(c) for c in clusters])
                linewidths = np.interp(sizes,
                                       [sizes.min(), sizes.max()], [0.1, 2.])
                centroid_lengths = np.array([length(c) for c in centroids])

                print(' Minimum number of streamlines in cluster {}'
                      .format(sizes.min()))

                print(' Maximum number of streamlines in cluster {}'
                      .format(sizes.max()))

                print(' Construct cluster actors')
                for (i, c) in enumerate(centroids):

                    centroid_actor = actor.streamtube([c], colors,
                                                      linewidth=linewidths[i],
                                                      lod=False)
                    scene.add(centroid_actor)

                    cluster_actor = actor.line(clusters[i],
                                               lod=False)
                    cluster_actor.GetProperty().SetRenderLinesAsTubes(1)
                    cluster_actor.GetProperty().SetLineWidth(6)
                    cluster_actor.GetProperty().SetOpacity(1)
                    cluster_actor.VisibilityOff()

                    scene.add(cluster_actor)

                    # Every centroid actor (cea) is paired to a cluster actor
                    # (cla).

                    self.cea[centroid_actor] = {
                        'cluster_actor': cluster_actor,
                        'cluster': i, 'tractogram': t,
                        'size': sizes[i], 'length': centroid_lengths[i],
                        'selected': 0, 'expanded': 0}

                    self.cla[cluster_actor] = {
                        'centroid_actor': centroid_actor,
                        'cluster': i, 'tractogram': t,
                        'size': sizes[i], 'length': centroid_lengths[i],
                        'selected': 0}
                    apply_shader(self, cluster_actor)
                    apply_shader(self, centroid_actor)

            else:

                streamline_actor = actor.line(streamlines, colors=colors)
                streamline_actor.GetProperty().SetEdgeVisibility(1)
                streamline_actor.GetProperty().SetRenderLinesAsTubes(1)
                streamline_actor.GetProperty().SetLineWidth(6)
                streamline_actor.GetProperty().SetOpacity(1)
                scene.add(streamline_actor)
        return scene
Example No. 26
def streamline_connectcut_returnfulllength(streamline, streamline_labels,
                                           npoints, fiberlen_range):
    """ cut streamline into streamlines that connecting different rois, here we try to return
        the full length of the fiber. Note in the function of streamline_connectcut, we only 
        return the intermedia part of a fiber between two rois. Here we retrun strealimes within
        the ROIs. 

    # input: streamline: one streamline
    #        streamline_label: the labels along streamline
    #        npoints: threthhold
    """
    unq_label = np.unique(streamline_labels)
    Nrois = len(streamline_labels)
    num_sl = 0
    new_streamlines = []
    new_streamlines_startlabel = []
    new_streamlines_endlabel = []

    # case 1: the streamline stays in the WM or connects only two ROIs; return it as-is
    if (len(unq_label) == 1):
        num_sl = num_sl + 1
        new_streamlines_startlabel.append(unq_label[0])
        new_streamlines_endlabel.append(unq_label[0])
        return streamline, num_sl, new_streamlines_startlabel, new_streamlines_endlabel

    if (len(unq_label) == 2):
        new_streamlines_startlabel.append(streamline_labels[0])
        new_streamlines_endlabel.append(streamline_labels[-1])
        num_sl = num_sl + 1
        return streamline, num_sl, new_streamlines_startlabel, new_streamlines_endlabel

    # case 2: the streamline connects multiple ROIs
    ct = Counter(streamline_labels)
    passed_roi = []
    for t in ct:
        if ((t != 0) & (ct[t] > npoints)):
            passed_roi.append(t)

    #cut the streamline into nchoose(len(passed_roi),2) pieces
    for i in range(0, len(passed_roi)):
        for j in range(i + 1, len(passed_roi)):
            roia = passed_roi[i]
            roib = passed_roi[j]
            #find the part connects roia and roib
            label_roia = np.squeeze(
                np.asarray(np.where(streamline_labels == roia)))
            label_roib = np.squeeze(
                np.asarray(np.where(streamline_labels == roib)))
            if (label_roia[0] < label_roib[0]):  # if roia is in front of roib
                startidx = label_roia[1]  # start index
                endidx = label_roib[-1]  #
                tmpsl = streamline[startidx:endidx]
                # keep streamlines longer than the minimum length
                if length(tmpsl) > fiberlen_range[0]:
                    new_streamlines.append(tmpsl)
                    new_streamlines_startlabel.append(roia)
                    new_streamlines_endlabel.append(roib)
                    num_sl = num_sl + 1
            else:
                startidx = label_roib[1]
                endidx = label_roia[-1]  # can be improved here
                tmpsl = streamline[startidx:endidx]
                # keep streamlines longer than the minimum length
                if length(tmpsl) > fiberlen_range[0]:
                    new_streamlines.append(tmpsl)
                    new_streamlines_startlabel.append(roib)
                    new_streamlines_endlabel.append(roia)
                    num_sl = num_sl + 1

    return new_streamlines, num_sl, new_streamlines_startlabel, new_streamlines_endlabel
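A hypothetical worked example, assuming the module's own imports (`length` from `dipy.tracking.streamline`, `collections.Counter`) are in scope: a 60-point streamline whose ends sit in ROIs 3 and 7, separated by unlabeled (0) points:

import numpy as np

streamline = np.cumsum(np.full((60, 3), 0.5), axis=0)
labels = np.array([3] * 10 + [0] * 40 + [7] * 10)
pieces, n, starts, ends = streamline_connectcut_returnfulllength(
    streamline, labels, npoints=5, fiberlen_range=[20, 200])
print(n, starts, ends)  # expect one piece connecting ROI 3 to ROI 7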
Example No. 27
def main():
    parser = build_argparser()
    args = parser.parse_args()

    tracto_data = None

    if args.signal_source == "raw_signal":
        signal = nib.load(args.signal)
        signal.get_data()  # Forces loading volume in-memory.
        basename = re.sub(r'(\.gz|\.nii\.gz)$', '', args.signal)

        try:
            bvals = basename + '.bvals' if args.bvals is None else args.bvals
            bvecs = basename + '.bvecs' if args.bvecs is None else args.bvecs
            gradients = gradient_table(bvals, bvecs)
        except FileNotFoundError:
            try:
                bvals = basename + '.bval' if args.bvals is None else args.bvals
                bvecs = basename + '.bvec' if args.bvecs is None else args.bvecs
                gradients = gradient_table(bvals, bvecs)
            except FileNotFoundError as e:
                print("Could not find .bvals/.bvecs or .bval/.bvec files...")
                raise e

        tracto_data = TractographyData(signal, gradients)
    elif args.signal_source == "processed_signal":
        loaded_tracto_data = TractographyData.load(args.tracto_data)
        tracto_data = TractographyData(loaded_tracto_data.signal,
                                       loaded_tracto_data.gradients)

    # Compute matrix that brings streamlines back to diffusion voxel space.
    rasmm2vox_affine = np.linalg.inv(tracto_data.signal.affine)

    # Retrieve data.
    with Timer("Retrieving data", newline=args.verbose):
        for filename in sorted(args.bundles):
            if args.verbose:
                print("{}".format(filename))

            # Load streamlines
            tfile = nib.streamlines.load(filename)
            tractogram = tfile.tractogram

            original_streamlines = tractogram.streamlines
            lengths = length(original_streamlines)
            streamlines = [
                s for (s, l) in zip(original_streamlines, lengths)
                if l >= args.min_length
            ]

            # Make sure file is not empty
            if len(streamlines) > 0:
                if args.subsample_streamlines:
                    output_streamlines = subsample_streamlines(
                        streamlines, args.clustering_threshold,
                        args.removal_distance)

                    print("Total difference: {} / {}".format(
                        len(original_streamlines), len(output_streamlines)))
                    new_tractogram = nib.streamlines.Tractogram(
                        output_streamlines,
                        affine_to_rasmm=tractogram.affine_to_rasmm)
                    tractogram = new_tractogram

                tractogram.apply_affine(rasmm2vox_affine)

                # Add streamlines to the TractogramData
                bundle_name = os.path.splitext(os.path.basename(filename))[0]
                tracto_data.add(tractogram.streamlines, bundle_name)

    if args.verbose:
        diff = tracto_data.streamlines._data - tracto_data.streamlines._data.astype(
            args.dtype)
        precision_error = np.sum(np.sqrt(np.sum(diff**2, axis=1)))
        avg_precision_error = precision_error / len(
            tracto_data.streamlines._data)
        print("Precision error: {} (avg. {})".format(precision_error,
                                                     avg_precision_error))

    # Save streamlines coordinates using either float16 or float32.
    tracto_data.streamlines._data = tracto_data.streamlines._data.astype(
        args.dtype)

    # Save dataset
    tracto_data.save(args.out)
Example No. 28
def main():
    parser = _build_args_parser()
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)

    # make sure all the given files exist
    if not isfile(args.surfaces):
        parser.error('The file "{0}" must exist.'.format(args.surfaces))

    if not isfile(args.surface_map):
        parser.error('The file "{0}" must exist.'.format(args.surface_map))

    if not isfile(args.streamlines):
        parser.error('The file "{0}" must exist.'.format(args.streamlines))

    # make sure that files are not accidentally overwritten
    if isfile(args.output):
        if args.overwrite:
            logging.info('Overwriting "{0}".'.format(args.output))
        else:
            parser.error(
                'The file "{0}" already exists. Use -f to overwrite it.'.
                format(args.output))

    if isfile(args.out_tracts):
        if args.overwrite:
            logging.info('Overwriting "{0}".'.format(args.out_tracts))
        else:
            parser.error(
                'The file "{0}" already exists. Use -f to overwrite it.'.
                format(args.out_tracts))

    # load the surfaces
    logging.info('Loading .vtk surfaces and streamlines.')
    all_surfaces = load_vtk(args.surfaces)

    # load surface map
    surface_map = np.load(args.surface_map)

    # load mask for intersections
    surface_mask = np.load(args.surface_mask)

    # find triangles with any vertex within the mask
    vertices = ns.vtk_to_numpy(all_surfaces.GetPolys().GetData())
    triangles = np.vstack([vertices[1::4], vertices[2::4], vertices[3::4]]).T

    surface_mask = surface_mask[triangles]
    surface_mask = np.all(surface_mask, axis=1)
    surface_map = surface_map[triangles[:, 0]]

    # locator for quickly finding intersections
    locator = vtk.vtkOBBTree()
    locator.SetDataSet(all_surfaces)
    locator.BuildLocator()

    # load the streamlines
    streamlines = load_vtk_streamlines(args.streamlines)

    # load label images
    label_img = nib.load(args.aparc)
    label_data = label_img.get_data().astype('int')
    voxel_dim = label_img.get_header()['pixdim'][1:4]

    # calculate the transform from mm to voxel coordinates
    affine = np.array(label_img.affine, dtype=float)
    affine = np.linalg.inv(affine)
    transform = affine[:3, :3].T
    offset = affine[:3, 3] + 0.5
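    # (+0.5 so that truncation with astype(int) below rounds points to the
    # nearest voxel rather than flooring)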

    logging.info('Trimming, splitting, and filtering {0} streamlines.'.format(
        len(streamlines)))
    print(args.rois)

    new_streamlines = ROI_Streamlines([], [], [], [], [], [], [])

    for i in range(len(streamlines)):
        # Streamlines with fewer than 3 points hold at most one segment;
        # filter them out as errors.
        if len(streamlines[i]) < 3:
            continue

        # trim and split cortical intersections
        trimmed_streamlines, tri_in, tri_out = trim_cortical_streamline(
            streamlines[i], i, locator, surface_mask, surface_map)

        # split subcortical intersections
        for j in range(len(trimmed_streamlines)):
            # resample streamline to allow for fine level
            # intersections with subcortical regions
            fiber_len = length(trimmed_streamlines[j])
            n_points = int(fiber_len / SAMPLE_SIZE)

            if n_points < 3:
                continue

            resampled_streamline = set_number_of_points(
                trimmed_streamlines[j], n_points)

            # find voxels that the streamline passes through
            inds = np.dot(resampled_streamline, transform)
            inds = inds + offset

            if inds.min().round(decimals=6) < 0:
                logging.error(
                    'Streamline has points that map to negative voxel indices')

            ii, jj, kk = inds.astype(int).T
            sl_labels = label_data[ii, jj, kk]

            # split fibers among intersecting regions and return all intersections
            split_streamlines = split_subcortical_streamline(
                resampled_streamline, sl_labels, args.rois, tri_in[j],
                tri_out[j])

            # fill the results arrays
            new_streamlines.extend(split_streamlines)

    logging.info('Saving {0} final streamlines.'.format(
        len(new_streamlines.streamlines)))

    # save the results
    new_streamlines.save_streamlines(args.out_tracts)
    new_streamlines.save_intersections(args.output)
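
The voxel-index mapping above (the inverse affine split into a 3x3 part and an offset, with a 0.5 shift before integer truncation) can be reproduced in isolation. A hedged sketch, assuming a hypothetical label volume "labels.nii.gz" and made-up points in mm:

import numpy as np
import nibabel as nib

img = nib.load("labels.nii.gz")                 # hypothetical input
labels = np.asanyarray(img.dataobj).astype(int)

inv_affine = np.linalg.inv(img.affine)          # mm (RAS) -> voxel
points_mm = np.array([[10.0, -20.0, 30.0],
                      [11.0, -19.5, 30.5]])     # made-up coordinates

# Row-vector form: x_vox = x_mm @ R.T + t; the +0.5 makes astype(int)
# behave like rounding to the nearest voxel.
vox = points_mm @ inv_affine[:3, :3].T + inv_affine[:3, 3] + 0.5
ii, jj, kk = vox.astype(int).T
print(labels[ii, jj, kk])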
Esempio n. 31
0
def test_length():
    # Test length of only one streamline
    length_streamline_cython = length(streamline)
    length_streamline_python = length_python(streamline)
    assert_almost_equal(length_streamline_cython, length_streamline_python)

    length_streamline_cython = length(streamline_64bit)
    length_streamline_python = length_python(streamline_64bit)
    assert_almost_equal(length_streamline_cython, length_streamline_python)

    # Test computing length of multiple streamlines of different nb_points
    length_streamlines_cython = length(streamlines)

    for i, s in enumerate(streamlines):
        length_streamline_python = length_python(s)
        assert_array_almost_equal(length_streamlines_cython[i],
                                  length_streamline_python)

    length_streamlines_cython = length(streamlines_64bit)

    for i, s in enumerate(streamlines_64bit):
        length_streamline_python = length_python(s)
        assert_array_almost_equal(length_streamlines_cython[i],
                                  length_streamline_python)

    # ArraySequence
    # Test length of only one streamline
    length_streamline_cython = length(streamline_64bit)
    length_streamline_arrseq = length(Streamlines([streamline]))
    assert_almost_equal(length_streamline_arrseq, length_streamline_cython)

    length_streamline_cython = length(streamline_64bit)
    length_streamline_arrseq = length(Streamlines([streamline_64bit]))
    assert_almost_equal(length_streamline_arrseq, length_streamline_cython)

    # Test computing length of multiple streamlines of different nb_points
    length_streamlines_cython = length(streamlines)
    length_streamlines_arrseq = length(Streamlines(streamlines))
    assert_array_almost_equal(length_streamlines_arrseq,
                              length_streamlines_cython)

    length_streamlines_cython = length(streamlines_64bit)
    length_streamlines_arrseq = length(Streamlines(streamlines_64bit))
    assert_array_almost_equal(length_streamlines_arrseq,
                              length_streamlines_cython)

    # Test on a sliced ArraySequence
    length_streamlines_cython = length(streamlines_64bit[::2])
    length_streamlines_arrseq = length(Streamlines(streamlines_64bit)[::2])
    assert_array_almost_equal(length_streamlines_arrseq,
                              length_streamlines_cython)
    length_streamlines_cython = length(streamlines[::-1])
    length_streamlines_arrseq = length(Streamlines(streamlines)[::-1])
    assert_array_almost_equal(length_streamlines_arrseq,
                              length_streamlines_cython)

    # Test streamlines having mixed dtype
    streamlines_mixed_dtype = [
        streamline,
        streamline.astype(np.float64),
        streamline.astype(np.int32),
        streamline.astype(np.int64)
    ]
    lengths_mixed_dtype = [length(s) for s in streamlines_mixed_dtype]
    assert_array_equal(length(streamlines_mixed_dtype), lengths_mixed_dtype)

    # Test streamlines with different shape
    length_streamlines_cython = length(heterogeneous_streamlines)

    for i, s in enumerate(heterogeneous_streamlines):
        length_streamline_python = length_python(s)
        assert_array_almost_equal(length_streamlines_cython[i],
                                  length_streamline_python)

    # Test streamline having integer dtype
    length_streamline = length(streamline.astype('int'))
    assert_equal(length_streamline.dtype, np.float64)

    # Test empty list
    assert_equal(length([]), 0.0)

    # Test streamline having only one point
    assert_equal(length(np.array([[1, 2, 3]])), 0.0)

    # We do not support lists of lists; each streamline must be a numpy ndarray.
    streamline_unsupported = [[1, 2, 3], [4, 5, 5], [2, 1, 3], [4, 2, 1]]
    assert_raises(AttributeError, length, streamline_unsupported)

    # Test computing the length of numpy arrays whose WRITEABLE flag is False
    streamlines_readonly = []
    for s in streamlines:
        streamlines_readonly.append(s.copy())
        streamlines_readonly[-1].setflags(write=False)

    assert_array_almost_equal(length(streamlines_readonly),
                              [length_python(s) for s in streamlines_readonly])
    streamlines_readonly = []
    for s in streamlines_64bit:
        streamlines_readonly.append(s.copy())
        streamlines_readonly[-1].setflags(write=False)

    assert_array_almost_equal(length(streamlines_readonly),
                              [length_python(s) for s in streamlines_readonly])
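
The pure-Python reference `length_python` is not shown in these examples; a plausible NumPy equivalent of the arc length being verified (an assumption about its behaviour, not the test suite's actual helper) is:

import numpy as np

def arc_length(streamline):
    # Sum of Euclidean distances between consecutive points.
    diffs = np.diff(np.asarray(streamline, dtype=np.float64), axis=0)
    return np.sum(np.sqrt(np.sum(diffs**2, axis=1)))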
Esempio n. 32
0
def check_range(streamline, gt, lt):
    length_s = length(streamline)
    return (length_s > gt) & (length_s < lt)
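
A quick usage sketch for this helper, with fake streamlines and illustrative bounds in mm (assuming `length` comes from dipy.tracking.streamline, as in the surrounding examples):

import numpy as np
from dipy.tracking.streamline import length

rng = np.random.RandomState(0)
streamlines = [np.cumsum(rng.randn(50, 3), axis=0) for _ in range(10)]

kept = [s for s in streamlines if check_range(s, 20, 200)]
print(len(kept), "of", len(streamlines), "streamlines kept")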
Esempio n. 33
0
def test_length():
    # Test length of only one streamline
    length_streamline_cython = length(streamline)
    length_streamline_python = length_python(streamline)
    assert_almost_equal(length_streamline_cython, length_streamline_python)

    length_streamline_cython = length(streamline_64bit)
    length_streamline_python = length_python(streamline_64bit)
    assert_almost_equal(length_streamline_cython, length_streamline_python)

    # Test computing length of multiple streamlines of different nb_points
    length_streamlines_cython = length(streamlines)

    for i, s in enumerate(streamlines):
        length_streamline_python = length_python(s)
        assert_array_almost_equal(length_streamlines_cython[i],
                                  length_streamline_python)

    length_streamlines_cython = length(streamlines_64bit)

    for i, s in enumerate(streamlines_64bit):
        length_streamline_python = length_python(s)
        assert_array_almost_equal(length_streamlines_cython[i],
                                  length_streamline_python)

    # ArraySequence
    # Test length of only one streamline
    length_streamline_cython = length(streamline_64bit)
    length_streamline_arrseq = length(Streamlines([streamline]))
    assert_almost_equal(length_streamline_arrseq, length_streamline_cython)

    length_streamline_cython = length(streamline_64bit)
    length_streamline_arrseq = length(Streamlines([streamline_64bit]))
    assert_almost_equal(length_streamline_arrseq, length_streamline_cython)

    # Test computing length of multiple streamlines of different nb_points
    length_streamlines_cython = length(streamlines)
    length_streamlines_arrseq = length(Streamlines(streamlines))
    assert_array_almost_equal(length_streamlines_arrseq,
                              length_streamlines_cython)

    length_streamlines_cython = length(streamlines_64bit)
    length_streamlines_arrseq = length(Streamlines(streamlines_64bit))
    assert_array_almost_equal(length_streamlines_arrseq,
                              length_streamlines_cython)

    # Test on a sliced ArraySequence
    length_streamlines_cython = length(streamlines_64bit[::2])
    length_streamlines_arrseq = length(Streamlines(streamlines_64bit)[::2])
    assert_array_almost_equal(length_streamlines_arrseq,
                              length_streamlines_cython)
    length_streamlines_cython = length(streamlines[::-1])
    length_streamlines_arrseq = length(Streamlines(streamlines)[::-1])
    assert_array_almost_equal(length_streamlines_arrseq,
                              length_streamlines_cython)

    # Test streamlines having mixed dtype
    streamlines_mixed_dtype = [streamline,
                               streamline.astype(np.float64),
                               streamline.astype(np.int32),
                               streamline.astype(np.int64)]
    lengths_mixed_dtype = [length(s)
                           for s in streamlines_mixed_dtype]
    assert_array_equal(length(streamlines_mixed_dtype),
                       lengths_mixed_dtype)

    # Test streamlines with different shape
    length_streamlines_cython = length(
        heterogeneous_streamlines)

    for i, s in enumerate(heterogeneous_streamlines):
        length_streamline_python = length_python(s)
        assert_array_almost_equal(length_streamlines_cython[i],
                                  length_streamline_python)

    # Test streamline having integer dtype
    length_streamline = length(streamline.astype('int'))
    assert_true(length_streamline.dtype == np.float64)

    # Test empty list
    assert_equal(length([]), 0.0)

    # Test streamline having only one point
    assert_equal(length(np.array([[1, 2, 3]])), 0.0)

    # We do not support lists of lists; each streamline must be a numpy ndarray.
    streamline_unsupported = [[1, 2, 3], [4, 5, 5], [2, 1, 3], [4, 2, 1]]
    assert_raises(AttributeError, length,
                  streamline_unsupported)

    # Test computing the length of numpy arrays whose WRITEABLE flag is False
    streamlines_readonly = []
    for s in streamlines:
        streamlines_readonly.append(s.copy())
        streamlines_readonly[-1].setflags(write=False)

    assert_array_almost_equal(length(streamlines_readonly),
                              [length_python(s) for s in streamlines_readonly])
    streamlines_readonly = []
    for s in streamlines_64bit:
        streamlines_readonly.append(s.copy())
        streamlines_readonly[-1].setflags(write=False)

    assert_array_almost_equal(length(streamlines_readonly),
                              [length_python(s) for s in streamlines_readonly])

Esempio n. 34
0
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    if not os.path.isfile(args.tracts):
        parser.error("Tracts file: {0} does not exist.".format(args.tracts))

    if not os.path.isfile(args.org_aparc):
        parser.error("Original label file: {0} does not exist.".format(
            args.org_aparc))

    if not os.path.isfile(args.dilated_aparc):
        parser.error("Dilated label file: {0} does not exist.".format(
            args.dilated_aparc))

    if not os.path.isfile(args.subcortical_labels):
        parser.error("Requested region file: {0} does not exist.".format(
            args.subcortical_labels))

    if not os.path.isfile(args.lut):
        parser.error("Freesurfer LUT file: {0} does not exist.".format(
            args.lut))

    if not os.path.isfile(args.faimage):
        parser.error("FA Image file: {0} does not exist.".format(args.faimage))

    if not os.path.isfile(args.mdimage):
        parser.error("MD Image file: {0} does not exist.".format(args.mdimage))

    # Validate that tracts can be processed
    if not validate_coordinates(
            args.org_aparc, args.tracts, nifti_compliant=True):
        parser.error("The tracts file contains points that are invalid.\n" +
                     "Use the remove_invalid_coordinates.py script to clean.")

    # Load label images
    org_labels_img = nib.load(args.org_aparc)
    org_labels_data = np.asanyarray(org_labels_img.dataobj).astype('int')

    dilated_labels_img = nib.load(args.dilated_aparc)
    dilated_labels_data = np.asanyarray(dilated_labels_img.dataobj).astype('int')

    # Load fibers
    tract_format = tc.detect_format(args.tracts)
    tract = tract_format(args.tracts, args.org_aparc)
    affine = compute_affine_for_dipy_functions(args.org_aparc, args.tracts)

    # load FA and MD images
    fa_img = nib.load(args.faimage)
    fa_data = np.asanyarray(fa_img.dataobj)

    md_img = nib.load(args.mdimage)
    md_data = np.asanyarray(md_img.dataobj)

    # ========= processing streamlines =================
    fiberlen_range = np.asarray([args.minlen, args.maxlen])

    streamlines = [t for t in tract]
    print("Subject " + args.sub_id + " has " + str(len(streamlines)) +
          " streamlines.")

    f_streamlines = []  # filtered streamlines
    lenrecord = []
    for sl in streamlines:
        # Avoid streamlines having only one point, as they crash the
        # Dipy connectivity matrix function.
        if sl.shape[0] > 1:
            flen = length(sl)
            # keep fibers whose length lies within [minlen, maxlen]
            if (flen > fiberlen_range[0]) & (flen < fiberlen_range[1]):
                f_streamlines.append(sl)
                lenrecord.append(flen)

    print("Subject " + args.sub_id + " has " + str(len(f_streamlines)) +
          " streamlines after filtering.")

    # ============= process the parcellation =====================

    # Compute the mapping from label name to label id
    label_id_mapping = compute_labels_map(args.lut)

    # Find which labels were requested by the user.
    requested_labels_mapping = compute_requested_labels(
        args.subcortical_labels, label_id_mapping)

    # Overlay the requested subcortical labels onto the dilated parcellation
    # 17 LH_Hippocampus
    # 53 RH_Hippocampus
    # 11 LH_Caudate
    # 50 RH_Caudate
    # 12 LH_Putamen
    # 51 RH_Putamen
    # 13 LH_Pallidum
    # 52 RH_Pallidum
    # 18 LH_Amygdala
    # 54 RH_Amygdala
    # 26 LH_Accumbens
    # 58 RH_Accumbens
    # 10 LH_Thalamus-Proper
    # 49 RH_Thalamus-Proper
    # 4 LH_Lateral-Ventricle
    # 43 RH_Lateral-Ventricle
    # 8 LH_Cerebellum-Cortex
    # 47 RH_Cerebellum-Cortex
    #
    # 16 _Brain-Stem (# 7,8 LH_Cerebellum) (# 41 RH_Cerebellum)

    sub_cortical_labels = [
        17, 53, 11, 50, 12, 51, 13, 52, 18, 54, 26, 58, 10, 49, 4, 43, 8, 47
    ]  # 18 labels; 16 (brain stem) is handled separately below
    Brain_Stem_cerebellum = [16]  # 1 label

    aparc_filtered_labels = dilated_labels_data
    for label_val in requested_labels_mapping:
        if np.count_nonzero(org_labels_data == label_val) == 0:
            print(label_val)
            print(requested_labels_mapping[label_val])

        aparc_filtered_labels[org_labels_data == label_val] = label_val

    for brain_stem_id in Brain_Stem_cerebellum:
        if np.count_nonzero(org_labels_data == brain_stem_id) == 0:
            print('no labels of')
            print(brain_stem_id)
        # let the brain stem's label be 99
        aparc_filtered_labels[org_labels_data == brain_stem_id] = 99

    # Reduce the range of labels to avoid a sparse matrix,
    # because the ids of labels can range from 0 to the 12000's.
    reduced_dilated_labels, labels_lut = dpu.reduce_labels(
        aparc_filtered_labels)

    #dilated_labels_fname = args.sub_id + "_" + args.pre + "_dilated_allbrain_labels.nii.gz"
    #dilated_labels_img = nib.Nifti1Image(aparc_filtered_labels, org_labels_img.get_affine(),org_labels_img.get_header())
    #nib.save(dilated_labels_img,dilated_labels_fname)
    #print args.sub_id + 'dilated labels have saved'
    #pdb.set_trace()

    # Compute connectivity matrix and extract the fibers
    M, grouping = nconnectivity_matrix(f_streamlines,
                                       reduced_dilated_labels,
                                       fiberlen_range,
                                       args.cnpoint,
                                       affine=affine,
                                       symmetric=True,
                                       return_mapping=True,
                                       mapping_as_streamlines=True,
                                       keepfiberinroi=True)

    Msize = len(M)
    CM_before_outlierremove = M[1:, 1:]
    nstream_bf = np.sum(CM_before_outlierremove)
    print(args.sub_id + ' ' + str(nstream_bf) +
          ' streamlines in the connectivity matrix before outlier removal.')

    # ===================== process the streamlines =============
    print('Processing streamlines to remove outliers ..............')

    outlier_para = 3
    average_thrd = 8
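    # (outlier_para: clusters with fewer streamlines than this are dropped as
    # outliers; average_thrd: QuickBundles distance threshold, in mm)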

    M_after_outlierremove = np.zeros((Msize, Msize))
    # downsample streamlines
    cell_streamlines = []
    cell_id = []
    for i in range(1, Msize):
        for j in range(i + 1, Msize):
            tmp_streamlines = grouping[i, j]
            tmp_streamlines = list(tmp_streamlines)
            # downsample
            tmp_streamlines_downsampled = [
                downsample(s, 100) for s in tmp_streamlines
            ]
            # remove outliers (the QuickBundles step below is slow and could
            # be rewritten for speed)

            qb = QuickBundles(threshold=average_thrd)
            clusters = qb.cluster(tmp_streamlines_downsampled)
            outlier_clusters = clusters < outlier_para  # small clusters
            nonoutlier_clusters = clusters[np.logical_not(outlier_clusters)]

            tmp_nonoutlier_index = []
            for tmp_cluster in nonoutlier_clusters:
                tmp_nonoutlier_index = tmp_nonoutlier_index + tmp_cluster.indices

            clean_streamline_downsampled = [
                tmp_streamlines_downsampled[ind]
                for ind in tmp_nonoutlier_index
            ]
            cell_streamlines.append(clean_streamline_downsampled)
            cell_id.append([i, j])
            M_after_outlierremove[i, j] = len(clean_streamline_downsampled)

    CM_after_outlierremove = M_after_outlierremove[1:, 1:]
    nstream_af = np.sum(CM_after_outlierremove)
    print(args.sub_id + ' ' + str(nstream_af) +
          ' streamlines in the connectivity matrix after outlier removal.')

    # ===================== save the data =======================

    if args.saving_indicator == 1:  # save the whole-brain connectivity

        cmCountMatrix_fname = args.sub_id + "_" + args.pre + "_allbrain" + "_cm_count_raw.mat"
        cmCountMatrix_processed_fname = args.sub_id + "_" + args.pre + "_allbrain" + "_cm_count_processed.mat"
        cmStreamlineMatrix_fname = args.sub_id + "_" + args.pre + "_allbrain" + "_cm_streamlines.mat"
        reduced_dilated_labels_fname = args.sub_id + "_" + args.pre + "_allbrain" + "_reduced_dilated_labels.nii.gz"
        RoiInfo_fname = args.sub_id + "_" + args.pre + "_allbrain_RoiInfo.mat"

        # save the raw count matrix
        CM = M[1:, 1:]
        sio.savemat(cmCountMatrix_fname, {'cm': CM})
        sio.savemat(cmCountMatrix_processed_fname,
                    {'cm': CM_after_outlierremove})

        # save the streamline matrix
        sio.savemat(cmStreamlineMatrix_fname, {'slines': cell_streamlines})
        sio.savemat(RoiInfo_fname, {'ROIinfo': cell_id})
        print(args.sub_id + ' cell_streamlines.mat, ROIinfo.mat have been saved')

        filtered_labels_img = nib.Nifti1Image(aparc_filtered_labels,
                                              org_labels_img.affine,
                                              org_labels_img.header)
        nib.save(filtered_labels_img, reduced_dilated_labels_fname)
        print(args.sub_id + ' all brain dilated labels have been saved')

        # ===================== process the streamlines and extract features =============
        cm_fa_curve = fa_extraction_use_cellinput(cell_streamlines,
                                                  cell_id,
                                                  fa_data,
                                                  Msize,
                                                  affine=affine)
        (tmp_cm_fa_mean, tmp_cm_fa_max,
         cm_count) = fa_mean_extraction(cm_fa_curve, Msize)

        # extract MD values along the streamlines
        cm_md_curve = fa_extraction_use_cellinput(cell_streamlines,
                                                  cell_id,
                                                  md_data,
                                                  Msize,
                                                  affine=affine)
        (tmp_cm_md_mean, tmp_cm_md_max,
         testcm) = fa_mean_extraction(cm_md_curve, Msize)

        # connected surface area
        # extract the connective volume ratio
        (tmp_cm_volumn, tmp_cm_volumn_ratio) = rois_connectedvol_cellinput(
            reduced_dilated_labels,
            Msize,
            cell_streamlines,
            cell_id,
            affine=affine)

        # fiber length
        tmp_connectcm_len = rois_fiberlen_cellinput(Msize, cell_streamlines)

        # save cm features
        cm_md_mean = tmp_cm_md_mean[1:, 1:]
        cm_md_max = tmp_cm_md_max[1:, 1:]

        cm_fa_mean = tmp_cm_fa_mean[1:, 1:]
        cm_fa_max = tmp_cm_fa_max[1:, 1:]

        cm_volumn = tmp_cm_volumn[1:, 1:]
        cm_volumn_ratio = tmp_cm_volumn_ratio[1:, 1:]

        connectcm_len = tmp_connectcm_len[1:, 1:]

        sio.savemat(args.pre + "_allbrain" + "_cm_processed_mdmean_100.mat",
                    {'cm_mdmean': cm_md_mean})
        sio.savemat(args.pre + "_allbrain" + "_cm_processed_mdmax_100.mat",
                    {'cm_mdmax': cm_md_max})
        sio.savemat(args.pre + "_allbrain" + "_cm_processed_famean_100.mat",
                    {'cm_famean': cm_fa_mean})
        sio.savemat(args.pre + "_allbrain" + "_cm_processed_famax_100.mat",
                    {'cm_famax': cm_fa_max})
        sio.savemat(args.pre + "_allbrain" + "_cm_processed_volumn_100.mat",
                    {'cm_volumn': cm_volumn})
        sio.savemat(
            args.pre + "_allbrain" + "_cm_processed_volumn_ratio_100.mat",
            {'cm_volumn_ratio': cm_volumn_ratio})
        sio.savemat(args.pre + "_allbrain" + "_cm_processed_fiberlen_100.mat",
                    {'cm_len': connectcm_len})

        # save the streamline matrix
        cell_fa = []
        for i in range(1, Msize):
            for j in range(i + 1, Msize):
                tmp_fa = cm_fa_curve[i, j]
                tmp_fa = list(tmp_fa)
                cell_fa.append(tmp_fa)

        sio.savemat(args.pre + "_allbrain" + "_cm_processed_sfa_100.mat",
                    {'sfa': cell_fa})
        print(args.pre + "_allbrain_cm_processed_sfa_100.mat has been saved")

        cell_md = []
        for i in range(1, Msize):
            for j in range(i + 1, Msize):
                tmp_md = cm_md_curve[i, j]
                tmp_md = list(tmp_md)
                cell_md.append(tmp_md)

        sio.savemat(args.pre + "_allbrain" + "_cm_processed_smd_100.mat",
                    {'smd': cell_md})
        print(args.pre + "_allbrain_cm_processed_smd_100.mat has been saved")
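
        # (All of the .mat files written above can be read back with
        # scipy.io.loadmat, e.g. loadmat(fname)["cm_famean"].)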

    if args.saving_indicator == 0:
        # save only part of the connectome: connections involving the
        # subcortical regions

        Nsubcortical_reg = len(sub_cortical_labels) + 1  # should be 19

        cmCountMatrix_fname = args.sub_id + "_" + args.pre + "_partbrain_subcort" + "_cm_count_raw.mat"
        cmCountMatrix_processed_fname = args.sub_id + "_" + args.pre + "_partbrain_subcort" + "_cm_count_processed.mat"
        cmStreamlineMatrix_fname = args.sub_id + "_" + args.pre + "_partbrain_subcort" + "_cm_streamlines.mat"
        reduced_dilated_labels_fname = args.sub_id + "_" + args.pre + "_partbrain_subcort" + "_reduced_dilated_labels.nii.gz"
        subcortical_RoiInfo_fname = args.sub_id + "_" + args.pre + "_partbrain_subcort_RoiInfo.mat"

        # save the raw count matrix
        CM = M[1:, 1:]
        sio.savemat(cmCountMatrix_fname, {'cm': CM})
        sio.savemat(cmCountMatrix_processed_fname,
                    {'cm': CM_after_outlierremove})

        filtered_labels_img = nib.Nifti1Image(aparc_filtered_labels,
                                              org_labels_img.affine,
                                              org_labels_img.header)
        nib.save(filtered_labels_img, reduced_dilated_labels_fname)
        print(args.sub_id + ' all brain dilated labels have been saved')

        # ===================== process the streamlines and extract features =============
        cm_fa_curve = fa_extraction_use_cellinput(cell_streamlines,
                                                  cell_id,
                                                  fa_data,
                                                  Msize,
                                                  affine=affine)
        (tmp_cm_fa_mean, tmp_cm_fa_max,
         cm_count) = fa_mean_extraction(cm_fa_curve, Msize)

        # extract MD values along the streamlines
        cm_md_curve = fa_extraction_use_cellinput(cell_streamlines,
                                                  cell_id,
                                                  md_data,
                                                  Msize,
                                                  affine=affine)
        (tmp_cm_md_mean, tmp_cm_md_max,
         testcm) = fa_mean_extraction(cm_md_curve, Msize)

        # connected surface area
        # extract the connective volume ratio
        (tmp_cm_volumn, tmp_cm_volumn_ratio) = rois_connectedvol_cellinput(
            reduced_dilated_labels,
            Msize,
            cell_streamlines,
            cell_id,
            affine=affine)

        # fiber length
        tmp_connectcm_len = rois_fiberlen_cellinput(Msize, cell_streamlines)

        # save cm features
        cm_md_mean = tmp_cm_md_mean[1:, 1:]
        cm_md_max = tmp_cm_md_max[1:, 1:]

        cm_fa_mean = tmp_cm_fa_mean[1:, 1:]
        cm_fa_max = tmp_cm_fa_max[1:, 1:]

        cm_volumn = tmp_cm_volumn[1:, 1:]
        cm_volumn_ratio = tmp_cm_volumn_ratio[1:, 1:]

        connectcm_len = tmp_connectcm_len[1:, 1:]

        sio.savemat(
            args.pre + "_partbrain_subcort" + "_cm_processed_mdmean_100.mat",
            {'cm_mdmean': cm_md_mean})
        sio.savemat(
            args.pre + "_partbrain_subcort" + "_cm_processed_mdmax_100.mat",
            {'cm_mdmax': cm_md_max})
        sio.savemat(
            args.pre + "_partbrain_subcort" + "_cm_processed_famean_100.mat",
            {'cm_famean': cm_fa_mean})
        sio.savemat(
            args.pre + "_partbrain_subcort" + "_cm_processed_famax_100.mat",
            {'cm_famax': cm_fa_max})
        sio.savemat(
            args.pre + "_partbrain_subcort" + "_cm_processed_volumn_100.mat",
            {'cm_volumn': cm_volumn})
        sio.savemat(
            args.pre + "_partbrain_subcort" +
            "_cm_processed_volumn_ratio_100.mat",
            {'cm_volumn_ratio': cm_volumn_ratio})
        sio.savemat(
            args.pre + "_partbrain_subcort" + "_cm_processed_fiberlen_100.mat",
            {'cm_len': connectcm_len})

        # save the streamline matrix
        cell_fa = []
        cell_id = []
        for i in range(1, Nsubcortical_reg):
            for j in range(i + 1, Msize):
                tmp_fa = cm_fa_curve[i, j]
                tmp_fa = list(tmp_fa)
                cell_fa.append(tmp_fa)
                cell_id.append([i, j])

        sio.savemat(
            args.pre + "_partbrain_subcort" + "_cm_processed_sfa_100.mat",
            {'sfa': cell_fa})
        print(args.pre + '_partbrain_subcort_cm_processed_sfa_100.mat has been saved.')

        cell_md = []
        for i in range(1, Nsubcortical_reg):
            for j in range(i + 1, Msize):
                tmp_md = cm_md_curve[i, j]
                tmp_md = list(tmp_md)
                cell_md.append(tmp_md)

        sio.savemat(args.pre + "_partbrain" + "_cm_processed_smd_100.mat",
                    {'smd': cell_md})
        print(args.pre + '_partbrain_cm_processed_smd_100.mat has been saved.')

        # save the streamline matrix
        subcortical_cell_streamlines = []
        cell_id = []
        idx = 0
        for i in range(1, Nsubcortical_reg):
            for j in range(i + 1, Msize):
                tmp_sls = cell_streamlines[idx]
                idx = idx + 1
                subcortical_cell_streamlines.append(tmp_sls)
                cell_id.append([i, j])

        sio.savemat(cmStreamlineMatrix_fname,
                    {'slines': subcortical_cell_streamlines})
        sio.savemat(subcortical_RoiInfo_fname, {'ROIinfo': cell_id})
        print(cmStreamlineMatrix_fname + ' has been saved')
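
The outlier-removal loop above can be exercised in isolation. A self-contained sketch with fake data, using set_number_of_points in place of the older downsample helper (the thresholds mirror the values above and are illustrative, not prescriptive):

import numpy as np
from dipy.segment.clustering import QuickBundles
from dipy.tracking.streamline import set_number_of_points

rng = np.random.RandomState(0)
# Fake bundle: 50 random-walk streamlines resampled to 100 points each.
bundle = [set_number_of_points(np.cumsum(rng.randn(60, 3), axis=0), 100)
          for _ in range(50)]

clusters = QuickBundles(threshold=8.0).cluster(bundle)
# Keep only streamlines from clusters with at least 3 members.
keep = [i for c in clusters if len(c) >= 3 for i in c.indices]
clean = [bundle[i] for i in keep]
print(len(clean), "of", len(bundle), "streamlines kept")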
Esempio n. 35
0
    def check_range(streamline, gt=greater_than, lt=less_than):
        return (length(streamline) > gt) & (length(streamline) < lt)
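
This variant reads like a closure: greater_than and less_than are expected to be in the enclosing scope. A hedged sketch of how such a helper is typically embedded (the surrounding names here are assumptions, not the original's code):

from dipy.tracking.streamline import length

def filter_by_length(streamlines, greater_than=20.0, less_than=200.0):
    def check_range(streamline, gt=greater_than, lt=less_than):
        return (length(streamline) > gt) & (length(streamline) < lt)

    return [s for s in streamlines if check_range(s)]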
Esempio n. 36
0
 def extract(self, streamline):
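     # length() on a single streamline yields a scalar; indexing with
     # [None, None] promotes it to a (1, 1) feature array.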
     return length(streamline)[None, None]