Example #1
# Assumed imports for this snippet (not shown in the original):
import numpy as np
import numpy.testing as npt

from dipy.stats.analysis import afq_profile, gaussian_weights
from dipy.tracking.streamline import Streamlines


def test_afq_profile():
    data = np.ones((10, 10, 10))
    bundle = Streamlines()
    bundle.extend(np.array([[[0, 0., 0], [1, 0., 0.], [2, 0., 0.]]]))
    bundle.extend(np.array([[[0, 0., 0.], [1, 0., 0], [2, 0, 0.]]]))

    profile = afq_profile(data, bundle, np.eye(4))
    npt.assert_equal(profile, np.ones(100))

    profile = afq_profile(data, bundle, np.eye(4), n_points=10, weights=None)
    npt.assert_equal(profile, np.ones(10))

    profile = afq_profile(data,
                          bundle,
                          np.eye(4),
                          weights=gaussian_weights,
                          stat=np.median)

    npt.assert_equal(profile, np.ones(100))

    profile = afq_profile(data,
                          bundle,
                          np.eye(4),
                          orient_by=bundle[0],
                          weights=gaussian_weights,
                          stat=np.median)

    npt.assert_equal(profile, np.ones(100))

    profile = afq_profile(data, bundle, np.eye(4), n_points=10, weights=None)
    npt.assert_equal(profile, np.ones(10))

    profile = afq_profile(data,
                          bundle,
                          np.eye(4),
                          n_points=10,
                          weights=np.ones((2, 10)) * 0.5)
    npt.assert_equal(profile, np.ones(10))

    # Disallow setting weights that don't sum to 1 across fibers/nodes:
    npt.assert_raises(ValueError,
                      afq_profile,
                      data,
                      bundle,
                      np.eye(4),
                      n_points=10,
                      weights=np.ones((2, 10)) * 0.6)

    # Test using an affine:
    affine = np.eye(4)
    affine[:, 3] = [-1, 100, -20, 1]
    # Transform the streamlines:
    bundle._data = bundle._data + affine[:3, 3]
    profile = afq_profile(data, bundle, affine, n_points=10, weights=None)

    npt.assert_equal(profile, np.ones(10))

    # Test for error-handling:
    empty_bundle = Streamlines([])
    npt.assert_raises(ValueError, afq_profile, data, empty_bundle, np.eye(4))
Example #2
    def _tract_profiles(self, row, weighting=None):
        profiles_file = self._get_fname(row, '_profiles.csv')
        if self.force_recompute or not op.exists(profiles_file):
            bundles_file = self._clean_bundles(row)
            keys = []
            vals = []
            for k in self.bundle_dict.keys():
                if k != "whole_brain":
                    keys.append(self.bundle_dict[k]['uid'])
                    vals.append(k)
            reverse_dict = dict(zip(keys, vals))

            bundle_names = []
            profiles = []
            node_numbers = []
            scalar_names = []

            trk = nib.streamlines.load(bundles_file)
            for scalar in self.scalars:
                scalar_file = self._scalar_dict[scalar](self, row)
                scalar_data = nib.load(scalar_file).get_fdata()
                for b in np.unique(
                        trk.tractogram.data_per_streamline['bundle']):
                    idx = np.where(
                        trk.tractogram.data_per_streamline['bundle'] == b)[0]
                    this_sl = trk.streamlines[idx]
                    bundle_name = reverse_dict[b]
                    this_profile = afq_profile(scalar_data, this_sl,
                                               row["dwi_affine"])
                    nodes = list(np.arange(this_profile.shape[0]))
                    bundle_names.extend([bundle_name] * len(nodes))
                    node_numbers.extend(nodes)
                    scalar_names.extend([scalar] * len(nodes))
                    profiles.extend(list(this_profile))

            profile_dframe = pd.DataFrame(
                dict(profiles=profiles,
                     bundle=bundle_names,
                     node=node_numbers,
                     scalar=scalar_names))
            profile_dframe.to_csv(profiles_file)
            meta = dict(source=bundles_file,
                        parameters=get_default_args(afq_profile))
            meta_fname = profiles_file.split('.')[0] + '.json'
            afd.write_json(meta_fname, meta)

        return profiles_file
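
A downstream read of the CSV written above, as a hedged sketch (the bundle and scalar names are illustrative, not taken from the snippet):

import pandas as pd

df = pd.read_csv(profiles_file, index_col=0)
# One row per (bundle, node, scalar); e.g., pull one bundle/scalar profile:
cst_fa = df[(df["bundle"] == "CST_R") & (df["scalar"] == "fa")]["profiles"]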
Example #3
def get_tract_profile(bundle,
                      metric_img,
                      metric_affine,
                      use_weights=False,
                      flip=True,
                      num_points=100):
    '''
    This function reorients the streamlines and extracts the diffusion
    metrics along the tract. It essentially performs step 1. The default
    number of points along a tract is 100, which can be thought of as
    %-along a tract.

    The flip variable signals whether to flip the direction of the
    streamlines after reorientation. For example, if after reorientation all
    the streamlines ran motor cortex -> brainstem and you actually wanted
    brainstem -> motor cortex, you would set flip to True. The default is
    True because we generally see reorientation result in motor cortex ->
    brainstem. For the honours project, we were looking for the opposite.
    '''

    # Reorient all the streamlines so that they follow the same direction
    feature = ResampleFeature(nb_points=num_points)
    d_metric = AveragePointwiseEuclideanMetric(feature)
    qb = QuickBundles(np.inf, metric=d_metric)
    centroid_bundle = qb.cluster(bundle).centroids[0]
    oriented_bundle = orient_by_streamline(bundle, centroid_bundle)

    # Calculate weights for each streamline/node in a bundle, based on the
    # Mahalanobis distance from the core of the bundle at that node
    w_bundle = None
    if use_weights:
        w_bundle = gaussian_weights(oriented_bundle)

    # Sample the metric along the tract. The implementation of this function
    # is based on work by Yeatman et al. (2012)
    profile_bundle = afq_profile(metric_img,
                                 oriented_bundle,
                                 metric_affine,
                                 weights=w_bundle)

    # Reverse the profile bundle if the direction is not desired
    if flip:
        profile_bundle = np.flip(profile_bundle)

    return profile_bundle
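
A hypothetical call to the function above; `bundle` is a Streamlines object for one tract and `fa_img` is an illustrative FA volume, neither taken from the snippet:

import nibabel as nib

fa_img = nib.load('subject_fa.nii.gz')          # assumed FA map on disk
fa_profile = get_tract_profile(bundle,
                               fa_img.get_fdata(),  # scalar volume to sample
                               fa_img.affine,
                               use_weights=True,
                               flip=False)           # keep the QuickBundles orientation
# fa_profile holds num_points (default 100) values, one per node along the tract.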
Example #4
def test_segment():
    segmentation = seg.Segmentation()
    segmentation.segment(bundles,
                         tg,
                         hardi_fdata,
                         hardi_fbval,
                         hardi_fbvec,
                         mapping=mapping)
    fiber_groups = segmentation.fiber_groups

    # We asked for 2 fiber groups:
    npt.assert_equal(len(fiber_groups), 2)
    # Here's one of them:
    CST_R_sl = fiber_groups['CST_R']
    # Let's make sure there are streamlines in there:
    npt.assert_(len(CST_R_sl) > 0)
    # Calculate the tract profile for a volume of all-ones:
    tract_profile = afq_profile(np.ones(nib.load(hardi_fdata).shape[:3]),
                                CST_R_sl.streamlines, np.eye(4))
    npt.assert_almost_equal(tract_profile, np.ones(100))

    clean_sl = seg.clean_bundle(CST_R_sl)
    npt.assert_equal(len(clean_sl), len(CST_R_sl))

"""
Calculate weights for each bundle:
"""

import dipy.stats.analysis as dsa

w_cst_l = dsa.gaussian_weights(oriented_cst_l)
w_af_l = dsa.gaussian_weights(oriented_af_l)

"""
And then use the weights to calculate the tract profiles for each bundle
"""

profile_cst_l = dsa.afq_profile(fa, oriented_cst_l, affine=img.affine,
                                weights=w_cst_l)

profile_af_l = dsa.afq_profile(fa, oriented_af_l, affine=img.affine,
                               weights=w_af_l)

fig, (ax1, ax2) = plt.subplots(1, 2)

ax1.plot(profile_cst_l)
ax1.set_ylabel("Fractional anisotropy")
ax1.set_xlabel("Node along CST")
ax2.plot(profile_af_l)
ax2.set_xlabel("Node along AF")
fig.savefig("tract_profiles")

"""
.. figure:: tract_profiles.png
Example #6
##########################################################################
# Bundle profiles
# ---------------
# Streamlines are represented in the original diffusion space (`Space.VOX`) and
# scalar properties along the length of each bundle are queried from this
# scalar data. Here, the contribution of each streamline is weighted according
# to how representative this streamline is of the bundle overall.

print("Extracting tract profiles...")
for bundle in bundles:
    sft = load_tractogram(op.join(working_dir, f'{bundle}_afq.trk'),
                          img,
                          to_space=Space.VOX)
    fig, ax = plt.subplots(1)
    weights = gaussian_weights(sft.streamlines)
    profile = afq_profile(FA_data, sft.streamlines, np.eye(4), weights=weights)
    ax.plot(profile)
    ax.set_title(bundle)

plt.show()
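
# The weights computed above sum to 1 across streamlines at every node --
# the contract afq_profile enforces (compare the ValueError test in
# Example #1). A sanity-check sketch, reusing `sft` from the last loop
# iteration:
w = gaussian_weights(sft.streamlines)   # shape: (n_streamlines, 100)
assert np.allclose(w.sum(axis=0), 1.0)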

##########################################################################
# References:
# -------------------------
# .. [Yeatman2012] Jason D Yeatman, Robert F Dougherty, Nathaniel J Myall,
#                  Brian A Wandell, Heidi M Feldman, "Tract profiles of
#                  white matter properties: automating fiber-tract
#                  quantification", PloS One, 7: e49790
#
# .. [Yeatman2014] Jason D Yeatman, Brian A Wandell, Aviv A Mezer,
#                  "Lifespan maturation and degeneration of human brain white
Example #7
def test_segment():

    templates = afd.read_templates()
    bundles = {
        'CST_L': {
            'ROIs': [templates['CST_roi1_L'], templates['CST_roi2_L']],
            'rules': [True, True],
            'prob_map': templates['CST_L_prob_map'],
            'cross_midline': None
        },
        'CST_R': {
            'ROIs': [templates['CST_roi1_R'], templates['CST_roi1_R']],
            'rules': [True, True],
            'prob_map': templates['CST_R_prob_map'],
            'cross_midline': None
        }
    }

    segmentation = seg.Segmentation()
    segmentation.segment(bundles,
                         tg,
                         hardi_fdata,
                         hardi_fbval,
                         hardi_fbvec,
                         mapping=mapping)
    fiber_groups = segmentation.fiber_groups

    # We asked for 2 fiber groups:
    npt.assert_equal(len(fiber_groups), 2)
    # Here's one of them:
    CST_R_sl = fiber_groups['CST_R']
    # Let's make sure there are streamlines in there:
    npt.assert_(len(CST_R_sl) > 0)
    # Calculate the tract profile for a volume of all-ones:
    tract_profile = afq_profile(np.ones(nib.load(hardi_fdata).shape[:3]),
                                CST_R_sl.streamlines, np.eye(4))
    npt.assert_almost_equal(tract_profile, np.ones(100))

    clean_sl = seg.clean_bundle(CST_R_sl)
    npt.assert_equal(len(clean_sl), len(CST_R_sl))

    # What if you don't have probability maps?
    bundles = {
        'CST_L': {
            'ROIs': [templates['CST_roi1_L'], templates['CST_roi2_L']],
            'rules': [True, True],
            'cross_midline': False
        },
        'CST_R': {
            'ROIs': [templates['CST_roi1_R'], templates['CST_roi1_R']],
            'rules': [True, True],
            'cross_midline': False
        }
    }

    segmentation.segment(bundles,
                         tg,
                         hardi_fdata,
                         hardi_fbval,
                         hardi_fbvec,
                         mapping=mapping)
    fiber_groups = segmentation.fiber_groups

    # This condition should still hold
    npt.assert_equal(len(fiber_groups), 2)
    npt.assert_(len(fiber_groups['CST_R']) > 0)

    # Test with the return_idx kwarg set to True:
    segmentation = seg.Segmentation(return_idx=True)
    segmentation.segment(bundles,
                         tg,
                         hardi_fdata,
                         hardi_fbval,
                         hardi_fbvec,
                         mapping=mapping)
    fiber_groups = segmentation.fiber_groups

    npt.assert_equal(len(fiber_groups), 2)
    npt.assert_(len(fiber_groups['CST_R']['sl']) > 0)
    npt.assert_(len(fiber_groups['CST_R']['idx']) > 0)

    # get bundles for reco method
    bundles = afd.read_hcp_atlas_16_bundles()
    bundle_names = ['whole_brain', 'CST_R', 'CST_L']
    for key in list(bundles):
        if key not in bundle_names:
            bundles.pop(key, None)

    # Try recobundles method
    segmentation = seg.Segmentation(seg_algo='Reco',
                                    progressive=False,
                                    greater_than=10,
                                    rm_small_clusters=1,
                                    rng=np.random.RandomState(seed=8))
    fiber_groups = segmentation.segment(bundles, tg, hardi_fdata, hardi_fbval,
                                        hardi_fbvec)

    # This condition should still hold
    npt.assert_equal(len(fiber_groups), 2)
    npt.assert_(len(fiber_groups['CST_R']) > 0)

    # Test with the return_idx kwarg set to True:
    segmentation = seg.Segmentation(seg_algo='Reco',
                                    progressive=False,
                                    greater_than=10,
                                    rm_small_clusters=1,
                                    rng=np.random.RandomState(seed=8),
                                    return_idx=True)

    fiber_groups = segmentation.segment(bundles, tg, hardi_fdata, hardi_fbval,
                                        hardi_fbvec)
    fiber_groups = segmentation.fiber_groups

    npt.assert_equal(len(fiber_groups), 2)
    npt.assert_(len(fiber_groups['CST_R']['sl']) > 0)
    npt.assert_(len(fiber_groups['CST_R']['idx']) > 0)
Example #8
# to how representative this streamline is of the bundle overall.
#
# .. note::
#   As a sanity check, the tract profile of the anterior forceps should be
#   relatively symmetric.

print("Extracting tract profiles...")
for bundle in bundles:
    print(f"Extracting {bundle}...")
    tractogram = load_tractogram(op.join(working_dir, f'afq_{bundle}.trk'),
                                 img,
                                 to_space=Space.VOX)
    fig, ax = plt.subplots(1)
    weights = gaussian_weights(tractogram.streamlines)
    profile = afq_profile(FA_data,
                          tractogram.streamlines,
                          np.eye(4),
                          weights=weights)
    ax.plot(profile)
    ax.set_title(bundle)

# Save the figure before show(); after the window closes, savefig would
# write a blank figure.
plt.savefig(op.join(working_dir, 'AntFrontal_tractprofile.png'))
plt.show()

##########################################################################
# References:
# -------------------------
# .. [Yeatman2012] Jason D Yeatman, Robert F Dougherty, Nathaniel J Myall,
#                  Brian A Wandell, Heidi M Feldman, "Tract profiles of
#                  white matter properties: automating fiber-tract
#                  quantification", PloS One, 7: e49790
#
Example #9
    def tract_profiles(self,
                       data,
                       subject_label,
                       affine=np.eye(4),
                       method='afq',
                       metric='FA',
                       n_points=100,
                       weight=True):
        """
        Calculate a summarized profile of data for each bundle along
        its length.

        Follows the approach outlined in [Yeatman2012]_.

        Parameters
        ----------
        data : 3D volume
            The statistic to sample with the streamlines.

        subject_label : string
            String which identifies these bundles in the pandas dataframe.

        affine : array_like (4, 4), optional.
            The mapping from voxel coordinates to 'data' coordinates.
            Default: np.eye(4)

        method : string
            Method used to segment streamlines.
            Default: 'afq'

        metric : string
            Metric of statistic in data.
            Default: 'FA'

        n_points : int
            Number of points to resample to.
            Default: 100

        weight : boolean
            Whether to calculate gaussian weights before profiling.
            Default: True
        """
        self.to_space(Space.VOX)
        profiles = []
        for bundle_name, bundle in self.bundles.items():
            if weight:
                weights = gaussian_weights(bundle.streamlines,
                                           n_points=n_points)
            else:
                weights = None
            profile = afq_profile(data,
                                  bundle.streamlines,
                                  affine,
                                  weights=weights,
                                  n_points=n_points)
            for ii in range(len(profile)):
                # Subject, Bundle, node, method, metric (FA, MD), value
                profiles.append([
                    subject_label, bundle_name, ii, method, metric, profile[ii]
                ])
            logging.disable(level=logging.WARNING)
        logging.disable(logging.NOTSET)
        profiles = pd.DataFrame(
            data=profiles,
            columns=["Subject", "Bundle", "Node", "Method", "Metric", "Value"])
        return profiles
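
Since the method returns one row per (subject, bundle, node, metric), downstream summaries reduce to standard pandas operations; a hedged sketch:

# Illustrative only: mean profile value per bundle, node and metric.
summary = (profiles
           .groupby(["Bundle", "Node", "Metric"])["Value"]
           .mean()
           .reset_index())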
Example #10
def evaluate_along_streamlines(scalar_img,
                               streamlines,
                               beginnings,
                               nr_points,
                               dilate=0,
                               predicted_peaks=None,
                               affine=None):
    # Runtime:
    # - default:                2.7s (test),    56s (all),      10s (test 4 bundles, 100 points)
    # - map_coordinate order 1: 1.9s (test),    26s (all),       6s (test 4 bundles, 100 points)
    # - map_coordinate order 3: 2.2s (test),    33s (all),
    # - values_from_volume:     2.5s (test),    43s (all),
    # - AFQ:                      ?s (test),     ?s (all),      85s  (test 4 bundles, 100 points)
    # => AFQ a lot slower than others

    streamlines = list(
        transform_streamlines(streamlines, np.linalg.inv(affine)))

    for i in range(dilate):
        beginnings = binary_dilation(beginnings)
    beginnings = beginnings.astype(np.uint8)
    streamlines = _orient_to_same_start_region(streamlines, beginnings)
    if predicted_peaks is not None:
        # scalar img can also be orig peaks
        best_orig_peaks = fiber_utils.get_best_original_peaks(
            predicted_peaks, scalar_img, peak_len_thr=0.00001)
        scalar_img = np.linalg.norm(best_orig_peaks, axis=-1)

    algorithm = "distance_map"  # equal_dist | distance_map | cutting_plane | afq

    if algorithm == "equal_dist":
        ### Sampling ###
        streamlines = fiber_utils.resample_fibers(streamlines,
                                                  nb_points=nr_points)
        values = map_coordinates(scalar_img, np.array(streamlines).T, order=1)
        ### Aggregation ###
        values_mean = np.array(values).mean(axis=1)
        values_std = np.array(values).std(axis=1)
        return values_mean, values_std

    if algorithm == "distance_map":  # cKDTree

        ### Sampling ###
        streamlines = fiber_utils.resample_fibers(streamlines,
                                                  nb_points=nr_points)
        values = map_coordinates(scalar_img, np.array(streamlines).T, order=1)

        ### Aggregating by cKDTree approach ###
        metric = AveragePointwiseEuclideanMetric()
        qb = QuickBundles(threshold=100., metric=metric)
        clusters = qb.cluster(streamlines)
        centroids = Streamlines(clusters.centroids)
        if len(centroids) > 1:
            print("WARNING: number clusters > 1 ({})".format(len(centroids)))
        _, segment_idxs = cKDTree(centroids.data, 1,
                                  copy_data=True).query(streamlines,
                                                        k=1)  # (2000, 100)

        values_t = np.array(values).T  # (2000, 100)

        # If we want to take weighted mean like in AFQ:
        # weights = dsa.gaussian_weights(Streamlines(streamlines))
        # values_t = weights * values_t
        # return np.sum(values_t, 0), None

        results_dict = defaultdict(list)
        for idx, sl in enumerate(values_t):
            for jdx, seg in enumerate(sl):
                results_dict[segment_idxs[idx, jdx]].append(seg)

        if len(results_dict.keys()) < nr_points:
            print(
                "WARNING: found less than required points. Filling up with centroid values."
            )
            centroid_values = map_coordinates(scalar_img,
                                              np.array([centroids[0]]).T,
                                              order=1)
            for i in range(nr_points):
                if len(results_dict[i]) == 0:
                    results_dict[i].append(np.array(centroid_values).T[0, i])

        results_mean = []
        results_std = []
        for key in sorted(results_dict.keys()):
            value = results_dict[key]
            if len(value) > 0:
                results_mean.append(np.array(value).mean())
                results_std.append(np.array(value).std())
            else:
                print("WARNING: empty segment")
                results_mean.append(0)
                results_std.append(0)
        return results_mean, results_std

    elif algorithm == "cutting_plane":
        # This will resample all streamlines to have equally distant points (resulting in a different number of
        # points in each streamline). Then the "middle" of the tract is estimated by taking the middle element of
        # the centroid (estimated with QuickBundles). Then, for each streamline, the point closest to the "middle"
        # is found, and points are indexed for each streamline starting from the middle. Averaging across all
        # streamlines is then done by taking the mean of points with the same indices.

        ### Sampling ###
        streamlines = fiber_utils.resample_to_same_distance(
            streamlines, max_nr_points=nr_points)
        # map_coordinates does not allow streamlines with different lengths -> use values_from_volume
        values = np.array(
            values_from_volume(scalar_img, streamlines, affine=np.eye(4))).T

        ### Aggregating by Cutting Plane approach ###
        # Resample to all fibers having same number of points -> needed for QuickBundles
        streamlines_resamp = fiber_utils.resample_fibers(streamlines,
                                                         nb_points=nr_points)
        metric = AveragePointwiseEuclideanMetric()
        qb = QuickBundles(threshold=100., metric=metric)
        clusters = qb.cluster(streamlines_resamp)
        centroids = Streamlines(clusters.centroids)

        # index of the middle cluster
        middle_idx = int(nr_points / 2)
        middle_point = centroids[0][middle_idx]
        # For each streamline get idx for the point which is closest to the middle
        segment_idxs = fiber_utils.get_idxs_of_closest_points(
            streamlines, middle_point)

        # Align along the middle and assign indices
        segment_idxs_eqlen = []
        base_idx = 1000  # use higher index to avoid negative numbers for area below middle
        for idx, sl in enumerate(streamlines):
            sl_middle_pos = segment_idxs[idx]
            before_elems = sl_middle_pos
            after_elems = len(sl) - sl_middle_pos
            # indices for one streamline e.g. [998, 999, 1000, 1001, 1002, 1003]; 1000 is middle
            r = range((base_idx - before_elems), (base_idx + after_elems))
            segment_idxs_eqlen.append(r)
        segment_idxs = segment_idxs_eqlen

        # Calculate the maximum number of indices so as not to end up with more indices than nr_points.
        # (This could be the case if one streamline is very off-center and therefore has a lot of points only on
        # one side. In that case the values of this streamline that lie too far out will be cut off.)
        max_idx = base_idx + int(nr_points / 2)
        min_idx = base_idx - int(nr_points / 2)

        # Group by segment indices
        results_dict = defaultdict(list)
        for idx, sl in enumerate(values):
            for jdx, seg in enumerate(sl):
                current_idx = segment_idxs[idx][jdx]
                if current_idx >= min_idx and current_idx < max_idx:
                    results_dict[current_idx].append(seg)

        # If values missing fill up with centroid values
        if len(results_dict.keys()) < nr_points:
            print(
                "WARNING: found less than required points. Filling up with centroid values."
            )
            centroid_sl = [centroids[0]]
            centroid_sl = np.array(centroid_sl).T
            centroid_values = map_coordinates(scalar_img, centroid_sl, order=1)
            for idx, seg_idx in enumerate(range(min_idx, max_idx)):
                if len(results_dict[seg_idx]) == 0:
                    results_dict[seg_idx].append(
                        np.array(centroid_values).T[0, idx])

        # Aggregate by mean
        results_mean = []
        results_std = []
        for key in sorted(results_dict.keys()):
            value = results_dict[key]
            if len(value) > 0:
                results_mean.append(np.array(value).mean())
                results_std.append(np.array(value).std())
            else:
                print("WARNING: empty segment")
                results_mean.append(0)
                results_std.append(0)
        return results_mean, results_std

    elif algorithm == "afq":
        ### sampling + aggregation ###
        streamlines = fiber_utils.resample_fibers(streamlines,
                                                  nb_points=nr_points)
        streamlines = Streamlines(streamlines)
        weights = dsa.gaussian_weights(streamlines)
        results_mean = dsa.afq_profile(scalar_img,
                                       streamlines,
                                       affine=np.eye(4),
                                       weights=weights)
        results_std = np.zeros(nr_points)
        return results_mean, results_std
Example #11
def tract_profiles(subses_dict, clean_bundles_file, bundle_dict, scalar_dict,
                   profile_weights, dwi_affine, tracking_params,
                   segmentation_params):
    keys = []
    vals = []
    for k in bundle_dict.keys():
        if k != "whole_brain":
            keys.append(bundle_dict[k]['uid'])
            vals.append(k)
    reverse_dict = dict(zip(keys, vals))

    bundle_names = []
    node_numbers = []
    profiles = np.empty((len(scalar_dict), 0)).tolist()
    this_profile = np.zeros((len(scalar_dict), 100))

    trk = nib.streamlines.load(clean_bundles_file)
    for b in np.unique(trk.tractogram.data_per_streamline['bundle']):
        idx = np.where(trk.tractogram.data_per_streamline['bundle'] == b)[0]
        this_sl = trk.streamlines[idx]
        bundle_name = reverse_dict[b]
        for ii, (scalar, scalar_file) in enumerate(scalar_dict.items()):
            scalar_data = nib.load(scalar_file).get_fdata()
            if isinstance(profile_weights, str):
                if profile_weights == "gauss":
                    this_prof_weights = gaussian_weights(this_sl)
                elif profile_weights == "median":
                    # weight the bundle so that afq_profile returns the
                    # median value at each node
                    def _median_weight(bundle):
                        fgarray = set_number_of_points(bundle, 100)
                        values = np.array(
                            values_from_volume(scalar_data, fgarray,
                                               dwi_affine))
                        weights = np.zeros(values.shape)
                        for ii, jj in enumerate(
                                np.argsort(values,
                                           axis=0)[len(values) // 2, :]):
                            weights[jj, ii] = 1
                        return weights

                    this_prof_weights = _median_weight
            else:
                this_prof_weights = profile_weights
            this_profile[ii] = afq_profile(scalar_data,
                                           this_sl,
                                           dwi_affine,
                                           weights=this_prof_weights)
            profiles[ii].extend(list(this_profile[ii]))
        nodes = list(np.arange(this_profile[0].shape[0]))
        bundle_names.extend([bundle_name] * len(nodes))
        node_numbers.extend(nodes)

    profile_dict = dict()
    profile_dict["tractID"] = bundle_names
    profile_dict["nodeID"] = node_numbers
    for ii, scalar in enumerate(scalar_dict.keys()):
        profile_dict[scalar] = profiles[ii]

    profile_dframe = pd.DataFrame(profile_dict)
    meta = dict(source=clean_bundles_file,
                parameters=get_default_args(afq_profile))

    return profile_dframe, meta
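
The _median_weight closure above places a weight of 1 on the streamline whose value is the median at each node, so the weighted sum afq_profile computes reduces to a per-node median. A self-contained toy illustration of the same indexing (not from the snippet):

import numpy as np

values = np.array([[1., 5.],
                   [3., 4.],
                   [2., 6.]])   # 3 streamlines x 2 nodes
weights = np.zeros(values.shape)
for node, sl in enumerate(np.argsort(values, axis=0)[len(values) // 2, :]):
    weights[sl, node] = 1
# The weighted sum per node recovers the per-node median:
print((weights * values).sum(axis=0))   # [2. 5.]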