Example No. 1
def streamlines_in_mask(sft, target_mask, all_in=False):
    """
    Parameters
    ----------
    sft : StatefulTractogram
        StatefulTractogram containing the streamlines to segment.
    target_mask : numpy.ndarray
        Binary mask in which the streamlines should pass.
    all_in : bool
        If True, keep only the streamlines that lie entirely inside the mask.
        Defaults to False.
    Returns
    -------
    ids : list
        Ids of the streamlines passing through the mask.
    """
    sft.to_vox()
    sft.to_corner()
    # Copy-Paste from Dipy to get indices
    if all_in:
        target_mask = np.array(target_mask, dtype=bool, copy=True)
        target_mask = np.invert(target_mask)
        tractogram_mask = compute_tract_counts_map(sft.streamlines,
                                                   target_mask.shape)
        tractogram_mask[tractogram_mask > 0] = 1
        tmp_mask = tractogram_mask.astype(np.uint8) * target_mask.astype(
            np.uint8)
        streamlines_case = _streamlines_in_mask(list(sft.streamlines),
                                                tmp_mask, np.eye(3), [0, 0, 0])

        return np.where(streamlines_case == 0)[0].tolist()
    else:
        target_mask = np.array(target_mask, dtype=np.uint8, copy=True)
        streamlines_case = _streamlines_in_mask(list(sft.streamlines),
                                                target_mask, np.eye(3),
                                                [0, 0, 0])
        return np.where(streamlines_case == 1)[0].tolist()
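
A minimal usage sketch for streamlines_in_mask; the mask and tractogram paths are hypothetical, and any co-registered pair would do:

import nibabel as nib
from dipy.io.streamline import load_tractogram

mask = nib.load('mask.nii.gz').get_fdata() > 0  # hypothetical input
sft = load_tractogram('bundle.trk', 'same')     # hypothetical input

# Streamlines touching the mask at least once...
ids_any = streamlines_in_mask(sft, mask)
# ...versus streamlines lying entirely inside it.
ids_all = streamlines_in_mask(sft, mask, all_in=True)
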
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_bundle, optional=args.reference)
    assert_outputs_exist(parser, args, args.out_img)

    max_ = np.iinfo(np.int16).max
    if args.binary is not None and (args.binary <= 0 or args.binary > max_):
        parser.error(
            'The value of --binary ({}) '
            'must be greater than 0 and smaller or equal to {}'.format(
                args.binary, max_))

    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    sft.to_vox()
    sft.to_corner()
    streamlines = sft.streamlines
    transformation, dimensions, _, _ = sft.space_attributes

    streamline_count = compute_tract_counts_map(streamlines, dimensions)

    if args.binary is not None:
        streamline_count[streamline_count > 0] = args.binary

    nib.save(
        nib.Nifti1Image(streamline_count.astype(np.int16), transformation),
        args.out_img)
Example No. 3
def compute_voxel_measures(args):
    bundle_filename, bundle_reference = args[0]
    tracking_mask = args[1]
    gs_binary_3d = args[2]

    bundle_sft = load_tractogram(bundle_filename, bundle_reference)
    bundle_sft.to_vox()
    bundle_sft.to_corner()
    bundle_streamlines = bundle_sft.streamlines
    _, bundle_dimensions, _, _ = bundle_sft.space_attributes

    if not bundle_streamlines:
        logging.info('{} is empty'.format(bundle_filename))
        return None

    binary_3d = compute_tract_counts_map(bundle_streamlines, bundle_dimensions)
    binary_3d[binary_3d > 0] = 1

    binary_3d_indices = np.where(binary_3d.flatten() > 0)[0]
    gs_binary_3d_indices = np.where(gs_binary_3d.flatten() > 0)[0]

    voxels_binary = binary_classification(
        binary_3d_indices,
        gs_binary_3d_indices,
        int(np.prod(tracking_mask.shape)),
        mask_count=np.count_nonzero(tracking_mask))

    return dict(
        zip([
            'sensitivity_voxels', 'specificity_voxels', 'precision_voxels',
            'accuracy_voxels', 'dice_voxels', 'kappa_voxels', 'youden_voxels'
        ], voxels_binary))
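
For reference, a hedged sketch of the confusion-matrix arithmetic that a binary_classification-style helper performs on the two flat voxel index sets; the actual scilpy helper may differ (e.g. in how mask_count restricts the true-negative count):

import numpy as np

def binary_classification_sketch(pred_indices, gt_indices, voxel_count):
    # Confusion-matrix counts from flat voxel indices.
    tp = len(np.intersect1d(pred_indices, gt_indices))
    fp = len(np.setdiff1d(pred_indices, gt_indices))
    fn = len(np.setdiff1d(gt_indices, pred_indices))
    tn = voxel_count - tp - fp - fn
    sensitivity = tp / (tp + fn)
    specificity = tn / (tn + fp)
    precision = tp / (tp + fp)
    accuracy = (tp + tn) / voxel_count
    dice = 2 * tp / (2 * tp + fp + fn)
    return sensitivity, specificity, precision, accuracy, dice
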
def _average_wrapper(args):
    hdf5_filenames = args[0]
    key = args[1]
    binary = args[2]
    out_dir = args[3]

    hdf5_file_ref = h5py.File(hdf5_filenames[0], 'r')
    affine = hdf5_file_ref.attrs['affine']
    dimensions = hdf5_file_ref.attrs['dimensions']
    hdf5_file_ref.close()
    density_data = np.zeros(dimensions, dtype=np.float32)
    for hdf5_filename in hdf5_filenames:
        hdf5_file = h5py.File(hdf5_filename, 'r')

        if not (np.allclose(hdf5_file.attrs['affine'], affine)
                and np.allclose(hdf5_file.attrs['dimensions'], dimensions)):
            raise IOError('{} does not have a compatible header'.format(
                hdf5_filename))
        # scil_decompose_connectivity.py saves the streamlines in VOX/CORNER
        streamlines = reconstruct_streamlines_from_hdf5(hdf5_file, key)
        density = compute_tract_counts_map(streamlines, dimensions)
        hdf5_file.close()

        if binary:
            density_data[density > 0] += 1
        elif np.max(density) > 0:
            density_data += density / np.max(density)

    if np.max(density_data) > 0:
        density_data /= len(hdf5_filenames)

        nib.save(nib.Nifti1Image(density_data, affine),
                 os.path.join(out_dir, '{}.nii.gz'.format(key)))
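
The wrapper takes a single tuple so it can be fed to multiprocessing.Pool.map; a hedged usage sketch with hypothetical file names:

import itertools
import multiprocessing

hdf5_filenames = ['subj1.h5', 'subj2.h5']  # hypothetical inputs
keys = ['1_2', '1_3']                      # hypothetical connectivity keys
with multiprocessing.Pool(processes=2) as pool:
    pool.map(_average_wrapper,
             zip(itertools.repeat(hdf5_filenames), keys,
                 itertools.repeat(True),         # binary mode
                 itertools.repeat('out_dir')))   # hypothetical output dir
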
Example No. 5
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_bundles)
    output_streamlines_filename = '{}streamlines.trk'.format(
        args.output_prefix)
    output_voxels_filename = '{}voxels.nii.gz'.format(args.output_prefix)
    assert_outputs_exist(parser, args,
                         [output_voxels_filename, output_streamlines_filename])

    if not 0 <= args.ratio_voxels <= 1 or not 0 <= args.ratio_streamlines <= 1:
        parser.error('Ratios must be between 0 and 1.')

    fusion_streamlines = []
    if args.reference:
        reference_file = args.reference
    else:
        reference_file = args.in_bundles[0]
    sft_list = []
    for name in args.in_bundles:
        tmp_sft = load_tractogram_with_reference(parser, args, name)
        tmp_sft.to_vox()
        tmp_sft.to_corner()

        if not is_header_compatible(reference_file, tmp_sft):
            raise ValueError('Headers are not compatible.')
        sft_list.append(tmp_sft)
        fusion_streamlines.append(tmp_sft.streamlines)

    fusion_streamlines, _ = union_robust(fusion_streamlines)

    transformation, dimensions, _, _ = get_reference_info(reference_file)
    volume = np.zeros(dimensions)
    streamlines_vote = dok_matrix(
        (len(fusion_streamlines), len(args.in_bundles)))

    for i in range(len(args.in_bundles)):
        sft = sft_list[i]
        binary = compute_tract_counts_map(sft.streamlines, dimensions)
        volume[binary > 0] += 1

        if args.same_tractogram:
            _, indices = intersection_robust(
                [fusion_streamlines, sft.streamlines])
            streamlines_vote[list(indices), [i]] += 1

    if args.same_tractogram:
        ratio_value = int(args.ratio_streamlines * len(args.in_bundles))
        real_indices = np.where(
            np.sum(streamlines_vote, axis=1) >= ratio_value)[0]
        new_sft = StatefulTractogram.from_sft(fusion_streamlines[real_indices],
                                              sft_list[0])
        save_tractogram(new_sft, output_streamlines_filename)

    volume[volume < int(args.ratio_voxels * len(args.in_bundles))] = 0
    volume[volume > 0] = 1
    nib.save(nib.Nifti1Image(volume.astype(np.uint8), transformation),
             output_voxels_filename)
Example No. 6
def _compute_streamline_mean(cur_ind, cur_min, cur_max, data):
    # From the precomputed indices, compute the binary map
    # and use it to weight the metric data for this specific streamline.
    cur_range = tuple(cur_max - cur_min)
    streamline_density = compute_tract_counts_map(ArraySequence([cur_ind]),
                                                  cur_range)
    streamline_data = data[cur_min[0]:cur_max[0], cur_min[1]:cur_max[1],
                           cur_min[2]:cur_max[2]]
    streamline_average = np.average(streamline_data,
                                    weights=streamline_density)
    return streamline_average
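
A hedged sketch of how the precomputed inputs could be derived for one streamline already in voxel space with a corner origin; _precompute_streamline_bounds is hypothetical, not a scilpy function:

import numpy as np

def _precompute_streamline_bounds(streamline):
    # Bounding box of the streamline, in voxel coordinates.
    cur_min = np.min(streamline, axis=0).astype(int)
    cur_max = np.max(streamline, axis=0).astype(int) + 1
    # Shift the points so they index into the cropped sub-volume.
    cur_ind = streamline - cur_min
    return cur_ind, cur_min, cur_max
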
Example No. 7
def compute_gt_masks(gt_bundles, parser, args):
    """
    Compute ground-truth masks. If the ground-truth is
    already a mask, load it. If the ground-truth is a
    bundle, compute the mask.

    Parameters
    ----------
    gt_bundles: list
        List of either StatefulTractograms or niftis.
    parser: ArgumentParser
        Argument parser which handles the script's arguments.
    args: Namespace
        List of arguments passed to the script.

    Returns
    -------
    gt_bundle_masks: list of numpy.ndarray
        Binary masks of the ground-truth bundles.
    gt_bundle_inv_masks: list of numpy.ndarray
        Inverted (background) masks of the ground-truth bundles.
    affine: numpy.ndarray
        Affine of the last loaded ground truth.
    dimensions: tuple of int
        Dimensions of the last loaded ground truth.
    """

    gt_bundle_masks = []
    gt_bundle_inv_masks = []

    for gt_bundle in gt_bundles:
        # Support ground truth as streamlines or masks
        # Will be converted to binary masks immediately
        _, ext = split_name_with_nii(gt_bundle)
        if ext in ['.gz', '.nii.gz']:
            gt_img = nib.load(gt_bundle)
            gt_mask = get_data_as_mask(gt_img)
            affine = gt_img.affine
            dimensions = gt_mask.shape
        else:
            gt_sft = load_tractogram_with_reference(parser,
                                                    args,
                                                    gt_bundle,
                                                    bbox_check=False)
            gt_sft.to_vox()
            gt_sft.to_corner()
            affine, dimensions, _, _ = gt_sft.space_attributes
            gt_mask = compute_tract_counts_map(gt_sft.streamlines,
                                               dimensions).astype(np.int16)
        gt_inv_mask = np.zeros(dimensions, dtype=np.int16)
        gt_inv_mask[gt_mask == 0] = 1
        gt_mask[gt_mask > 0] = 1
        gt_bundle_masks.append(gt_mask)
        gt_bundle_inv_masks.append(gt_inv_mask)

    return gt_bundle_masks, gt_bundle_inv_masks, affine, dimensions
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser,
                        [args.in_bundle, args.in_centroid],
                        optional=args.reference)
    assert_outputs_exist(parser, args, args.out_map)

    sft_bundle = load_tractogram_with_reference(parser, args, args.in_bundle)
    sft_centroid = load_tractogram_with_reference(parser, args,
                                                  args.in_centroid)

    if not len(sft_bundle.streamlines):
        logging.error('Empty bundle file {}. '
                      'Skipping'.format(args.in_bundle))
        raise ValueError

    if not len(sft_centroid.streamlines):
        logging.error('Centroid file {} should contain one streamline. '
                      'Skipping'.format(args.in_centroid))
        raise ValueError

    sft_bundle.to_vox()
    bundle_streamlines_vox = sft_bundle.streamlines
    bundle_streamlines_vox._data *= args.upsample

    sft_centroid.to_vox()
    centroid_streamlines_vox = sft_centroid.streamlines
    centroid_streamlines_vox._data *= args.upsample

    upsampled_shape = [s * args.upsample for s in sft_bundle.dimensions]
    tdi_mask = compute_tract_counts_map(bundle_streamlines_vox,
                                        upsampled_shape) > 0

    tdi_mask_nzr = np.nonzero(tdi_mask)
    tdi_mask_nzr_ind = np.transpose(tdi_mask_nzr)

    min_dist_ind, _ = min_dist_to_centroid(tdi_mask_nzr_ind,
                                           centroid_streamlines_vox[0])

    # Save the (upscaled) labels mask
    labels_mask = np.zeros(tdi_mask.shape)
    labels_mask[tdi_mask_nzr] = min_dist_ind + 1  # 0 is background value
    rescaled_affine = sft_bundle.affine
    rescaled_affine[:3, :3] /= args.upsample
    labels_img = nib.Nifti1Image(labels_mask, rescaled_affine)
    upsampled_spacing = sft_bundle.voxel_sizes / args.upsample
    labels_img.header.set_zooms(upsampled_spacing)
    nib.save(labels_img, args.out_map)
Example No. 9
def get_binary_maps(streamlines, sft):
    """
    Extract a mask from a bundle

    Parameters
    ----------
    streamlines: list
        List of streamlines.
    sft : StatefulTractogram
        Reference tractogram, used for its dimensions and space attributes.

    Returns
    -------
    bundles_voxels: numpy.ndarray
        Mask representing the bundle volume.
    endpoints_voxels: numpy.ndarray
        Mask representing the bundle's endpoints.
    """
    dimensions = sft.dimensions
    if not len(streamlines):
        return np.zeros(dimensions), np.zeros(dimensions)
    elif len(streamlines) == 1:
        streamlines = [streamlines]
    tmp_sft = StatefulTractogram.from_sft(streamlines, sft)
    tmp_sft.to_vox()
    tmp_sft.to_corner()

    if len(tmp_sft) == 1:
        return np.zeros(dimensions), np.zeros(dimensions)

    bundles_voxels = compute_tract_counts_map(tmp_sft.streamlines,
                                              dimensions).astype(np.int16)

    endpoints_voxels = get_endpoints_density_map(tmp_sft.streamlines,
                                                 dimensions).astype(np.int16)

    bundles_voxels[bundles_voxels > 0] = 1
    endpoints_voxels[endpoints_voxels > 0] = 1

    return bundles_voxels, endpoints_voxels
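
A hedged usage sketch computing a Dice overlap between two subsets of the same tractogram; sft, ids_a and ids_b are hypothetical:

import numpy as np

mask_a, _ = get_binary_maps(sft.streamlines[ids_a], sft)
mask_b, _ = get_binary_maps(sft.streamlines[ids_b], sft)
overlap = np.count_nonzero(mask_a * mask_b)
dice = 2 * overlap / (np.count_nonzero(mask_a) + np.count_nonzero(mask_b))
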
Example No. 10
def plot_glass_brain(args, sft, img, output_filenames):
    sft.to_vox()
    sft.to_corner()
    _, dimensions, _, _ = sft.space_attributes
    data = compute_tract_counts_map(sft.streamlines, dimensions)
    data[data > 100] = 100
    img = nib.Nifti1Image(data, img.affine)

    axes = 'yz'
    if args.right:
        axes = 'r' + axes
    else:
        axes = 'l' + axes

    for i, axe in enumerate(axes):
        display = plotting.plot_glass_brain(img,
                                            black_bg=True,
                                            display_mode=axe,
                                            alpha=0.5)
        display.savefig(output_filenames[i], dpi=300)
def compute_measures(filename_tuple):
    sft = load_tractogram(filename_tuple[0], filename_tuple[1])
    _, dimensions, voxel_size, _ = sft.space_attributes

    nbr_streamlines = len(sft)
    if not nbr_streamlines:
        logging.warning('{} is empty'.format(filename_tuple[0]))
        return dict(
            zip([
                'volume', 'volume_endpoints', 'streamlines_count',
                'avg_length', 'std_length', 'min_length', 'max_length',
                'mean_curvature'
            ], [0, 0, 0, 0, 0, 0, 0, 0]))

    length_list = list(length(list(sft.streamlines)))
    length_avg = float(np.average(length_list))
    length_std = float(np.std(length_list))
    length_min = float(np.min(length_list))
    length_max = float(np.max(length_list))

    sft.to_vox()
    sft.to_corner()
    streamlines = sft.streamlines
    density = compute_tract_counts_map(streamlines, dimensions)
    endpoints_density = get_endpoints_density_map(streamlines, dimensions)

    curvature_list = np.zeros((nbr_streamlines, ))
    for i in range(nbr_streamlines):
        curvature_list[i] = mean_curvature(sft.streamlines[i])

    return dict(
        zip([
            'volume', 'volume_endpoints', 'streamlines_count', 'avg_length',
            'std_length', 'min_length', 'max_length', 'mean_curvature'
        ], [
            np.count_nonzero(density) * np.prod(voxel_size),
            np.count_nonzero(endpoints_density) * np.prod(voxel_size),
            nbr_streamlines, length_avg, length_std, length_min, length_max,
            float(np.mean(curvature_list))
        ]))
Example No. 12
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_bundle, optional=args.reference)

    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    sft.to_vox()
    sft.to_corner()

    bundle_name, _ = os.path.splitext(os.path.basename(args.in_bundle))
    stats = {bundle_name: {}}
    if len(sft.streamlines) == 0:
        stats[bundle_name]['volume'] = None
        print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
        return

    tdi = compute_tract_counts_map(sft.streamlines, tuple(sft.dimensions))
    voxel_volume = np.prod(sft.voxel_sizes)
    stats[bundle_name]['volume'] = np.count_nonzero(tdi) * voxel_volume

    print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
Example No. 13
def get_bundle_metrics_mean_std(streamlines,
                                metrics_files,
                                density_weighting=True):
    """
    Returns the mean value of each metric for the whole bundle, only
    considering voxels that are crossed by streamlines. By default, the mean
    values are weighted by the number of streamlines crossing each voxel.
    If density_weighting is False, every voxel traversed by a streamline has
    the same weight.

    Parameters
    ------------
    streamlines : list of numpy.ndarray
        Input streamlines under which to compute stats.
    metrics_files : sequence
        list of nibabel objects representing the metrics files
    density_weighting : bool
        If True, weigh the mean by the density of streamlines going through
        each voxel.

    Returns
    ---------
    stats : list
        list of tuples where the first element of the tuple is the mean
        of a metric, and the second element is the standard deviation, for each
        metric.
    """

    # Compute weighting matrix taking the possible compression into account
    anat_dim = metrics_files[0].header.get_data_shape()
    weights = compute_tract_counts_map(streamlines, anat_dim)

    if not density_weighting:
        weights = weights > 0

    return map(
        lambda metric_file: weighted_mean_stddev(
            weights,
            metric_file.get_fdata(dtype=np.float64)), metrics_files)
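
For clarity, a hedged sketch of what a weighted_mean_stddev-style helper could compute; the actual scilpy implementation may differ:

import numpy as np

def weighted_mean_stddev_sketch(weights, data):
    # Weighted mean and standard deviation over the voxels covered
    # by the bundle; voxels with weight 0 do not contribute.
    mean = np.average(data, weights=weights)
    std = np.sqrt(np.average((data - mean) ** 2, weights=weights))
    return mean, std
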
Example No. 14
def _processing_wrapper(args):
    hdf5_filename = args[0]
    labels_img = args[1]
    in_label, out_label = args[2]
    measures_to_compute = copy.copy(args[3])
    if args[4] is not None:
        similarity_directory = args[4][0]
    weighted = args[5]
    include_dps = args[6]
    min_lesion_vol = args[7]

    hdf5_file = h5py.File(hdf5_filename, 'r')
    key = '{}_{}'.format(in_label, out_label)
    if key not in hdf5_file:
        return
    streamlines = reconstruct_streamlines_from_hdf5(hdf5_file, key)
    if len(streamlines) == 0:
        return

    affine, dimensions, voxel_sizes, _ = get_reference_info(labels_img)
    measures_to_return = {}

    if not (np.allclose(hdf5_file.attrs['affine'], affine, atol=1e-03)
            and np.array_equal(hdf5_file.attrs['dimensions'], dimensions)):
        raise ValueError('Provided hdf5 file has an incompatible header.')

    # Precompute to save one transformation, insert later
    if 'length' in measures_to_compute:
        streamlines_copy = list(streamlines)
        # scil_decompose_connectivity.py requires isotropic voxels
        mean_length = np.average(length(streamlines_copy))*voxel_sizes[0]

    # If density is not required, do not compute it
    # Only required for volume, similarity and any metrics
    if not ((len(measures_to_compute) == 1 and
             ('length' in measures_to_compute or
              'streamline_count' in measures_to_compute)) or
            (len(measures_to_compute) == 2 and
             ('length' in measures_to_compute and
              'streamline_count' in measures_to_compute))):

        density = compute_tract_counts_map(streamlines,
                                           dimensions)

    if 'volume' in measures_to_compute:
        measures_to_return['volume'] = np.count_nonzero(density) * \
            np.prod(voxel_sizes)
        measures_to_compute.remove('volume')
    if 'streamline_count' in measures_to_compute:
        measures_to_return['streamline_count'] = len(streamlines)
        measures_to_compute.remove('streamline_count')
    if 'length' in measures_to_compute:
        measures_to_return['length'] = mean_length
        measures_to_compute.remove('length')
    if 'similarity' in measures_to_compute and similarity_directory:
        density_sim = load_node_nifti(similarity_directory,
                                      in_label, out_label,
                                      labels_img)
        if density_sim is None:
            ba_vox = 0
        else:
            ba_vox = compute_bundle_adjacency_voxel(density, density_sim)

        measures_to_return['similarity'] = ba_vox
        measures_to_compute.remove('similarity')

    for measure in measures_to_compute:
        # Maps
        if isinstance(measure, str) and os.path.isdir(measure):
            map_dirname = measure
            map_data = load_node_nifti(map_dirname,
                                       in_label, out_label,
                                       labels_img)
            measures_to_return[map_dirname] = np.average(
                map_data[map_data > 0])
        elif isinstance(measure, tuple):
            if not isinstance(measure[0], tuple) \
                    and os.path.isfile(measure[0]):
                metric_filename = measure[0]
                metric_img = measure[1]
                if not is_header_compatible(metric_img, labels_img):
                    logging.error('{} does not have a compatible header'.format(
                        metric_filename))
                    raise IOError

                metric_data = metric_img.get_fdata(dtype=np.float64)
                if weighted:
                    avg_value = np.average(metric_data, weights=density)
                else:
                    avg_value = np.average(metric_data[density > 0])
                measures_to_return[metric_filename] = avg_value
            # lesion
            else:
                lesion_filename = measure[0][0]
                computed_lesion_labels = measure[0][1]
                lesion_img = measure[1]
                if not is_header_compatible(lesion_img, labels_img):
                    logging.error('{} does not have a compatible header'.format(
                        lesion_filename))
                    raise IOError

                voxel_sizes = lesion_img.header.get_zooms()[0:3]
                lesion_img.set_filename('tmp.nii.gz')
                lesion_atlas = get_data_as_label(lesion_img)
                tmp_dict = compute_lesion_stats(
                    density.astype(bool), lesion_atlas,
                    voxel_sizes=voxel_sizes, single_label=True,
                    min_lesion_vol=min_lesion_vol,
                    precomputed_lesion_labels=computed_lesion_labels)

                tmp_ind = _streamlines_in_mask(list(streamlines),
                                               lesion_atlas.astype(np.uint8),
                                               np.eye(3), [0, 0, 0])
                streamlines_count = len(
                    np.where(tmp_ind == 1)[0].tolist())

                if tmp_dict:
                    measures_to_return[lesion_filename+'vol'] = \
                        tmp_dict['lesion_total_volume']
                    measures_to_return[lesion_filename+'count'] = \
                        tmp_dict['lesion_count']
                    measures_to_return[lesion_filename+'sc'] = \
                        streamlines_count
                else:
                    measures_to_return[lesion_filename+'vol'] = 0
                    measures_to_return[lesion_filename+'count'] = 0
                    measures_to_return[lesion_filename+'sc'] = 0

    if include_dps:
        for dps_key in hdf5_file[key].keys():
            if dps_key not in ['data', 'offsets', 'lengths']:
                out_file = os.path.join(include_dps, dps_key)
                if 'commit' in dps_key:
                    measures_to_return[out_file] = np.sum(
                        hdf5_file[key][dps_key])
                else:
                    measures_to_return[out_file] = np.average(
                        hdf5_file[key][dps_key])

    return {(in_label, out_label): measures_to_return}
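
For context, a hedged sketch of what a load_node_nifti-style helper might do: look up the per-node NIfTI file named after the label pair and return its data, or None when the file is absent. The real scilpy helper may differ, e.g. by validating the header against the reference image:

import os
import nibabel as nib
import numpy as np

def load_node_nifti_sketch(directory, in_label, out_label, ref_img):
    # ref_img mirrors the assumed signature; a real helper would use it
    # to check header compatibility.
    node_filename = os.path.join(directory,
                                 '{}_{}.nii.gz'.format(in_label, out_label))
    if os.path.isfile(node_filename):
        return nib.load(node_filename).get_fdata(dtype=np.float64)
    return None
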
Example No. 15
def compute_measures(filename_tuple):
    sft = load_tractogram(filename_tuple[0], filename_tuple[1])
    _, dimensions, voxel_size, _ = sft.space_attributes
    uniformize_bundle_sft(sft)
    nbr_streamlines = len(sft)
    if not nbr_streamlines:
        logging.warning('{} is empty'.format(filename_tuple[0]))
        return dict(
            zip([
                'volume', 'volume_endpoints', 'streamlines_count',
                'avg_length', 'std_length', 'min_length', 'max_length', 'span',
                'curl', 'diameter', 'elongation', 'surface_area',
                'end_surface_area_head', 'end_surface_area_tail',
                'radius_head', 'radius_tail', 'irregularity',
                'irregularity_of_end_surface_head',
                'irregularity_of_end_surface_tail', 'mean_curvature',
                'fractal_dimension'
            ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
                ]))

    streamline_coords = list(sft.streamlines)
    length_list = list(length(streamline_coords))
    length_avg = float(np.average(length_list))
    length_std = float(np.std(length_list))
    length_min = float(np.min(length_list))
    length_max = float(np.max(length_list))

    sft.to_vox()
    sft.to_corner()
    streamlines = sft.streamlines
    density = compute_tract_counts_map(streamlines, dimensions)
    endpoints_density = get_endpoints_density_map(streamlines, dimensions)

    span_list = list(map(compute_span, streamline_coords))
    span = float(np.average(span_list))
    curl = length_avg / span
    volume = np.count_nonzero(density) * np.prod(voxel_size)
    diameter = 2 * np.sqrt(volume / (np.pi * length_avg))
    elon = length_avg / diameter

    roi = np.where(density != 0, 1, density)
    surf_area = approximate_surface_node(roi) * (voxel_size[0]**2)
    irregularity = surf_area / (np.pi * diameter * length_avg)

    endpoints_map_head, endpoints_map_tail = \
        get_head_tail_density_maps(sft.streamlines, dimensions)
    endpoints_map_head_roi = \
        np.where(endpoints_map_head != 0, 1, endpoints_map_head)
    endpoints_map_tail_roi = \
        np.where(endpoints_map_tail != 0, 1, endpoints_map_tail)
    end_sur_area_head = \
        approximate_surface_node(endpoints_map_head_roi) * (voxel_size[0] ** 2)
    end_sur_area_tail = \
        approximate_surface_node(endpoints_map_tail_roi) * (voxel_size[0] ** 2)

    endpoints_coords_head = np.array(np.where(endpoints_map_head_roi)).T
    endpoints_coords_tail = np.array(np.where(endpoints_map_tail_roi)).T
    radius_head = 1.5 * np.average(
        np.sqrt(((endpoints_coords_head -
                  np.average(endpoints_coords_head, axis=0))**2).sum(axis=1)))
    radius_tail = 1.5 * np.average(
        np.sqrt(((endpoints_coords_tail -
                  np.average(endpoints_coords_tail, axis=0))**2).sum(axis=1)))
    end_irreg_head = (np.pi * radius_head**2) / end_sur_area_head
    end_irreg_tail = (np.pi * radius_tail**2) / end_sur_area_tail

    fractal_dimension = compute_fractal_dimension(density)

    curvature_list = np.zeros((nbr_streamlines, ))
    for i in range(nbr_streamlines):
        curvature_list[i] = mean_curvature(sft.streamlines[i])

    return dict(
        zip([
            'volume', 'volume_endpoints', 'streamlines_count', 'avg_length',
            'std_length', 'min_length', 'max_length', 'span', 'curl',
            'diameter', 'elongation', 'surface_area', 'end_surface_area_head',
            'end_surface_area_tail', 'radius_head', 'radius_tail',
            'irregularity', 'irregularity_of_end_surface_head',
            'irregularity_of_end_surface_tail', 'mean_curvature',
            'fractal_dimension'
        ], [
            volume,
            np.count_nonzero(endpoints_density) * np.prod(voxel_size),
            nbr_streamlines, length_avg, length_std, length_min, length_max,
            span, curl, diameter, elon, surf_area, end_sur_area_head,
            end_sur_area_tail, radius_head, radius_tail, irregularity,
            end_irreg_head, end_irreg_tail,
            float(np.mean(curvature_list)), fractal_dimension
        ]))
Example No. 16
def load_data_tmp_saving(args):
    filename = args[0]
    reference = args[1]
    init_only = args[2]
    disable_centroids = args[3]

    # Since data is often re-used when comparing multiple bundles, anything
    # that can be computed once is saved temporarily and loaded on demand.
    hash_tmp = hashlib.md5(filename.encode()).hexdigest()
    tmp_density_filename = os.path.join('tmp_measures/',
                                        '{}_density.nii.gz'.format(hash_tmp))
    tmp_endpoints_filename = os.path.join('tmp_measures/',
                                          '{}_endpoints.nii.gz'.format(hash_tmp))
    tmp_centroids_filename = os.path.join('tmp_measures/',
                                          '{}_centroids.trk'.format(hash_tmp))

    sft = load_tractogram(filename, reference)
    sft.to_vox()
    sft.to_corner()
    streamlines = sft.get_streamlines_copy()
    if not streamlines:
        if init_only:
            logging.warning('{} is empty'.format(filename))
        return None

    if os.path.isfile(tmp_density_filename) \
            and os.path.isfile(tmp_endpoints_filename) \
            and os.path.isfile(tmp_centroids_filename):
        # If initializing only, loading the data is useless
        if init_only:
            return None
        density = nib.load(tmp_density_filename).get_fdata(dtype=np.float32)
        endpoints_density = nib.load(tmp_endpoints_filename).get_fdata(dtype=np.float32)
        sft_centroids = load_tractogram(tmp_centroids_filename, reference)
        sft_centroids.to_vox()
        sft_centroids.to_corner()
        centroids = sft_centroids.get_streamlines_copy()
    else:
        transformation, dimensions, _, _ = sft.space_attributes
        density = compute_tract_counts_map(streamlines, dimensions)
        endpoints_density = get_endpoints_density_map(streamlines, dimensions,
                                                      point_to_select=3)
        thresholds = [32, 24, 12, 6]
        if disable_centroids:
            centroids = []
        else:
            centroids = qbx_and_merge(streamlines, thresholds,
                                      rng=RandomState(0),
                                      verbose=False).centroids

        # Saving tmp files to save on future computation
        nib.save(nib.Nifti1Image(density.astype(np.float32), transformation),
                 tmp_density_filename)
        nib.save(nib.Nifti1Image(endpoints_density.astype(np.int16),
                                 transformation),
                 tmp_endpoints_filename)

        # Saving in vox space and corner.
        centroids_sft = StatefulTractogram.from_sft(centroids, sft)
        save_tractogram(centroids_sft, tmp_centroids_filename)

    return density, endpoints_density, streamlines, centroids
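
Note that the function writes into a tmp_measures/ directory but does not create it; a hedged caller-side sketch with a hypothetical bundle path:

import os

os.makedirs('tmp_measures', exist_ok=True)
result = load_data_tmp_saving(['bundle.trk', 'same', False, False])
if result is not None:
    density, endpoints_density, streamlines, centroids = result
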
Example No. 17
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, args.in_bundles)
    assert_outputs_exist(parser, args, args.out_json)

    if (not args.streamlines_measures) and (not args.voxels_measures):
        parser.error('At least one of the two modes is needed')

    nbr_cpu = validate_nbr_processes(parser, args)

    all_binary_metrics = []
    bundles_references_tuple_extended = link_bundles_and_reference(
        parser, args, args.in_bundles)

    if args.streamlines_measures:
        # Gold standard related indices are computed once
        wb_sft = load_tractogram_with_reference(parser, args,
                                                args.streamlines_measures[1])
        wb_sft.to_vox()
        wb_sft.to_corner()
        wb_streamlines = wb_sft.streamlines

        gs_sft = load_tractogram_with_reference(parser, args,
                                                args.streamlines_measures[0])
        gs_sft.to_vox()
        gs_sft.to_corner()
        gs_streamlines = gs_sft.streamlines
        _, gs_dimensions, _, _ = gs_sft.space_attributes

        # Prepare the gold standard only once
        _, gs_streamlines_indices = perform_streamlines_operation(
            intersection, [wb_streamlines, gs_streamlines], precision=0)

        if nbr_cpu == 1:
            streamlines_dict = []
            for i in bundles_references_tuple_extended:
                streamlines_dict.append(
                    compute_streamlines_measures(
                        [i, wb_streamlines, gs_streamlines_indices]))
        else:
            pool = multiprocessing.Pool(nbr_cpu)
            streamlines_dict = pool.map(
                compute_streamlines_measures,
                zip(bundles_references_tuple_extended,
                    itertools.repeat(wb_streamlines),
                    itertools.repeat(gs_streamlines_indices)))
            pool.close()
            pool.join()
        all_binary_metrics.extend(streamlines_dict)

    if not args.voxels_measures:
        gs_binary_3d = compute_tract_counts_map(gs_streamlines, gs_dimensions)
        gs_binary_3d[gs_binary_3d > 0] = 1

        tracking_mask_data = compute_tract_counts_map(wb_streamlines,
                                                      gs_dimensions)
        tracking_mask_data[tracking_mask_data > 0] = 1
    else:
        gs_binary_3d = get_data_as_mask(nib.load(args.voxels_measures[0]))
        gs_binary_3d[gs_binary_3d > 0] = 1
        tracking_mask_data = get_data_as_mask(nib.load(
            args.voxels_measures[1]))
        tracking_mask_data[tracking_mask_data > 0] = 1

    if nbr_cpu == 1:
        voxels_dict = []
        for i in bundles_references_tuple_extended:
            voxels_dict.append(
                compute_voxel_measures([i, tracking_mask_data, gs_binary_3d]))
    else:
        pool = multiprocessing.Pool(nbr_cpu)
        voxels_dict = pool.map(
            compute_voxel_measures,
            zip(bundles_references_tuple_extended,
                itertools.repeat(tracking_mask_data),
                itertools.repeat(gs_binary_3d)))
        pool.close()
        pool.join()
    all_binary_metrics.extend(voxels_dict)

    # After all processing, write the json file and skip None value
    output_binary_dict = {}
    for binary_dict in all_binary_metrics:
        if binary_dict is not None:
            for measure_name in binary_dict.keys():
                if measure_name not in output_binary_dict:
                    output_binary_dict[measure_name] = []
                output_binary_dict[measure_name].append(
                    float(binary_dict[measure_name]))

    with open(args.out_json, 'w') as outfile:
        json.dump(output_binary_dict,
                  outfile,
                  indent=args.indent,
                  sort_keys=args.sort_keys)
Example No. 18
def main():
    parser = _build_args_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_bundles)
    output_streamlines_filename = '{}streamlines.trk'.format(
        args.output_prefix)
    output_voxels_filename = '{}voxels.nii.gz'.format(args.output_prefix)
    assert_outputs_exist(parser, args,
                         [output_voxels_filename, output_streamlines_filename])

    if not 0 <= args.ratio_voxels <= 1 or not 0 <= args.ratio_streamlines <= 1:
        parser.error('Ratios must be between 0 and 1.')

    fusion_streamlines = []
    for name in args.in_bundles:
        fusion_streamlines.extend(
            load_tractogram_with_reference(parser, args, name).streamlines)

    fusion_streamlines, _ = perform_streamlines_operation(
        union, [fusion_streamlines], 0)
    fusion_streamlines = ArraySequence(fusion_streamlines)
    if args.reference:
        reference_file = args.reference
    else:
        reference_file = args.in_bundles[0]

    transformation, dimensions, _, _ = get_reference_info(reference_file)
    volume = np.zeros(dimensions)
    streamlines_vote = dok_matrix(
        (len(fusion_streamlines), len(args.in_bundles)))

    for i, name in enumerate(args.in_bundles):
        if not is_header_compatible(reference_file, name):
            raise ValueError('Headers are not compatible.')
        sft = load_tractogram_with_reference(parser, args, name)
        bundle = sft.get_streamlines_copy()
        sft.to_vox()
        bundle_vox_space = sft.get_streamlines_copy()
        binary = compute_tract_counts_map(bundle_vox_space, dimensions)
        volume[binary > 0] += 1

        if args.same_tractogram:
            _, indices = perform_streamlines_operation(
                intersection, [fusion_streamlines, bundle], 0)
            streamlines_vote[list(indices), i] += 1

    if args.same_tractogram:
        ratio_value = int(args.ratio_streamlines * len(args.in_bundles))
        real_indices = []
        for i in range(len(fusion_streamlines)):
            if np.sum(streamlines_vote[i]) >= ratio_value:
                real_indices.append(i)

        new_streamlines = fusion_streamlines[real_indices]

        sft = StatefulTractogram(new_streamlines, reference_file, Space.RASMM)
        save_tractogram(sft, output_streamlines_filename)

    volume[volume < int(args.ratio_voxels * len(args.in_bundles))] = 0
    volume[volume > 0] = 1
    nib.save(nib.Nifti1Image(volume.astype(np.uint8), transformation),
             output_voxels_filename)
Example No. 19
def _processing_wrapper(args):
    bundles_dir = args[0]
    in_label, out_label = args[1]
    measures_to_compute = copy.copy(args[2])
    weighted = args[3]
    if args[4] is not None:
        similarity_directory = args[4][0]

    in_filename_1 = os.path.join(bundles_dir,
                                 '{}_{}.trk'.format(in_label, out_label))
    in_filename_2 = os.path.join(bundles_dir,
                                 '{}_{}.trk'.format(out_label, in_label))
    if os.path.isfile(in_filename_1):
        in_filename = in_filename_1
    elif os.path.isfile(in_filename_2):
        in_filename = in_filename_2
    else:
        return

    sft = load_tractogram(in_filename, 'same')
    affine, dimensions, voxel_sizes, _ = sft.space_attributes
    measures_to_return = {}

    # Precompute to save one transformation, insert later
    if 'length' in measures_to_compute:
        streamlines_copy = list(sft.get_streamlines_copy())
        mean_length = np.average(length(streamlines_copy))

    # If density is not required, do not compute it
    # Only required for volume, similarity and any metrics
    if not ((len(measures_to_compute) == 1 and
             ('length' in measures_to_compute
              or 'streamline_count' in measures_to_compute)) or
            (len(measures_to_compute) == 2 and
             ('length' in measures_to_compute
              and 'streamline_count' in measures_to_compute))):
        sft.to_vox()
        sft.to_corner()
        density = compute_tract_counts_map(sft.streamlines, dimensions)

    if 'volume' in measures_to_compute:
        measures_to_return['volume'] = np.count_nonzero(density) * \
            np.prod(voxel_sizes)
        measures_to_compute.remove('volume')
    if 'streamline_count' in measures_to_compute:
        measures_to_return['streamline_count'] = len(sft)
        measures_to_compute.remove('streamline_count')
    if 'length' in measures_to_compute:
        measures_to_return['length'] = mean_length
        measures_to_compute.remove('length')
    if 'similarity' in measures_to_compute and similarity_directory:
        density_sim = load_node_nifti(similarity_directory, in_label,
                                      out_label, in_filename)

        ba_vox = compute_bundle_adjacency_voxel(density, density_sim)

        measures_to_return['similarity'] = ba_vox
        measures_to_compute.remove('similarity')

    for measure in measures_to_compute:
        if os.path.isdir(measure):
            map_dirname = measure
            map_data = load_node_nifti(map_dirname, in_label, out_label,
                                       in_filename)
            measures_to_return[map_dirname] = np.average(
                map_data[map_data > 0])
        elif os.path.isfile(measure):
            metric_filename = measure
            if not is_header_compatible(metric_filename, sft):
                logging.error(
                    '{} and {} do not have a compatible header'.format(
                        in_filename, metric_filename))
                raise IOError

            metric_data = nib.load(metric_filename).get_fdata()
            if weighted:
                density = density / np.max(density)
                voxels_value = metric_data * density
                voxels_value = voxels_value[voxels_value > 0]
            else:
                voxels_value = metric_data[density > 0]

            measures_to_return[metric_filename] = np.average(voxels_value)

    return {(in_label, out_label): measures_to_return}
Example No. 20
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()
    set_sft_logger_level('ERROR')
    assert_inputs_exist(parser, [args.in_bundle, args.in_centroid],
                        optional=args.reference)
    assert_outputs_exist(parser,
                         args,
                         args.out_labels_map,
                         optional=[
                             args.out_labels_npz, args.out_distances_npz,
                             args.labels_color_dpp, args.distances_color_dpp
                         ])

    sft_bundle = load_tractogram_with_reference(parser, args, args.in_bundle)
    sft_centroid = load_tractogram_with_reference(parser, args,
                                                  args.in_centroid)

    if not len(sft_bundle.streamlines):
        logging.error('Empty bundle file {}. '
                      'Skipping'.format(args.in_bundle))
        raise ValueError

    if len(sft_centroid.streamlines) != 1:
        logging.error('Centroid file {} should contain one streamline. '
                      'Skipping'.format(args.in_centroid))
        raise ValueError

    if not is_header_compatible(sft_centroid, sft_bundle):
        raise IOError('{} and {} do not have a compatible header'.format(
            args.in_centroid, args.in_bundle))

    sft_bundle.to_vox()
    sft_bundle.to_corner()

    # Slightly cut the bundle at the edge to clean up single-streamline voxels
    # with no neighbor. Remove isolated voxels to keep a single 'blob'.
    binary_bundle = compute_tract_counts_map(
        sft_bundle.streamlines, sft_bundle.dimensions).astype(bool)

    structure = ndi.generate_binary_structure(3, 1)
    if np.count_nonzero(binary_bundle) > args.min_voxel_count \
            and len(sft_bundle) > args.min_streamline_count:
        binary_bundle = ndi.binary_dilation(binary_bundle,
                                            structure=np.ones((3, 3, 3)))
        binary_bundle = ndi.binary_erosion(binary_bundle,
                                           structure=structure,
                                           iterations=2)

        bundle_disjoint, _ = ndi.label(binary_bundle)
        unique, count = np.unique(bundle_disjoint, return_counts=True)
        val = unique[np.argmax(count[1:]) + 1]
        binary_bundle[bundle_disjoint != val] = 0

        # Chop off some streamlines
        cut_sft = cut_outside_of_mask_streamlines(sft_bundle, binary_bundle)
    else:
        cut_sft = sft_bundle

    if args.nb_pts is not None:
        sft_centroid = resample_streamlines_num_points(sft_centroid,
                                                       args.nb_pts)
    else:
        args.nb_pts = len(sft_centroid.streamlines[0])

    # Generate a centroids labels mask for the centroid alone
    sft_centroid.to_vox()
    sft_centroid.to_corner()
    sft_centroid = _affine_slr(sft_bundle, sft_centroid)

    # Map every streamlines points to the centroids
    binary_centroid = compute_tract_counts_map(
        sft_centroid.streamlines, sft_centroid.dimensions).astype(bool)
    # TODO N^2 growth in RAM, should split it if we want to do nb_pts = 100
    min_dist_label, min_dist = min_dist_to_centroid(
        cut_sft.streamlines._data, sft_centroid.streamlines._data)
    min_dist_label += 1  # 0 means no labels

    # Labels are not allowed to jump, for consistency:
    # streamlines should have continuous labels
    curr_ind = 0
    final_streamlines = []
    final_label = []
    final_dist = []
    for i, streamline in enumerate(cut_sft.streamlines):
        next_ind = curr_ind + len(streamline)
        curr_labels = min_dist_label[curr_ind:next_ind]
        curr_dist = min_dist[curr_ind:next_ind]
        curr_ind = next_ind

        # Flip streamlines so the labels increase (facilitate if/else)
        # Should always be ordered in nextflow pipeline
        gradient = np.gradient(curr_labels)
        if len(np.argwhere(gradient < 0)) > len(np.argwhere(gradient > 0)):
            streamline = streamline[::-1]
            curr_labels = curr_labels[::-1]
            curr_dist = curr_dist[::-1]

        # Find jumps, cut them and find the longest
        gradient = np.ediff1d(curr_labels)
        max_jump = max(args.nb_pts // 5, 1)
        if len(np.argwhere(np.abs(gradient) > max_jump)) > 0:
            pos_jump = np.where(np.abs(gradient) > max_jump)[0] + 1
            split_chunk = np.split(curr_labels, pos_jump)
            max_len = 0
            max_pos = 0
            for j, chunk in enumerate(split_chunk):
                if len(chunk) > max_len:
                    max_len = len(chunk)
                    max_pos = j

            curr_labels = split_chunk[max_pos]
            gradient_chunk = np.ediff1d(curr_labels)
            if len(np.unique(np.sign(gradient_chunk))) > 1:
                continue
            streamline = np.split(streamline, pos_jump)[max_pos]
            curr_dist = np.split(curr_dist, pos_jump)[max_pos]

        final_streamlines.append(streamline)
        final_label.append(curr_labels)
        final_dist.append(curr_dist)

    # Re-arrange the new cut streamlines and their metadata
    # Compute the voxels equivalent of the labels maps
    new_sft = StatefulTractogram.from_sft(final_streamlines, sft_bundle)

    tdi_mask_nzr = np.nonzero(binary_bundle)
    tdi_mask_nzr_ind = np.transpose(tdi_mask_nzr)
    min_dist_ind, _ = min_dist_to_centroid(tdi_mask_nzr_ind,
                                           sft_centroid.streamlines[0])
    img_labels = np.zeros(binary_centroid.shape, dtype=np.int16)
    img_labels[tdi_mask_nzr] = min_dist_ind + 1  # 0 is background value

    nib.save(nib.Nifti1Image(img_labels, sft_bundle.affine),
             args.out_labels_map)

    if args.labels_color_dpp or args.distances_color_dpp \
            or args.out_labels_npz or args.out_distances_npz:
        labels_array = ArraySequence(final_label)
        dist_array = ArraySequence(final_dist)
        # WARNING: WILL NOT WORK WITH THE INPUT TRK !
        # These will fit only with the TRK saved below.
        if args.out_labels_npz:
            np.savez_compressed(args.out_labels_npz, labels_array._data)
        if args.out_distances_npz:
            np.savez_compressed(args.out_distances_npz, dist_array._data)

        cmap = plt.get_cmap(args.colormap)
        new_sft.data_per_point['color'] = ArraySequence(new_sft.streamlines)

        # Nicer visualisation for MI-Brain
        if args.labels_color_dpp:
            new_sft.data_per_point['color']._data = cmap(
                labels_array._data / np.max(labels_array._data))[:, 0:3] * 255
            save_tractogram(new_sft, args.labels_color_dpp)

        if args.distances_color_dpp:
            new_sft.data_per_point['color']._data = cmap(
                dist_array._data / np.max(dist_array._data))[:, 0:3] * 255
            save_tractogram(new_sft, args.distances_color_dpp)
Example No. 21
def get_bundle_metrics_mean_std_per_point(streamlines,
                                          bundle_name,
                                          distances_to_centroid_streamline,
                                          metrics,
                                          labels,
                                          density_weighting=False,
                                          distance_weighting=False):
    """
    Compute the mean and std per point of the bundle for every given metric.

    Parameters
    ----------
    streamlines: list of numpy.ndarray
        Input streamlines under which to compute stats.
    bundle_name: str
        Name of the bundle. Will be used as a key in the dictionary.
    distances_to_centroid_streamline: np.ndarray
        List of distances obtained with scil_label_and_distance_maps.py
    metrics: sequence
        list of nibabel objects representing the metrics files
    labels: np.ndarray
        List of labels obtained with scil_label_and_distance_maps.py
    density_weighting: bool
        If true, weight statistics by the number of streamlines passing through
        each voxel. [False]
    distance_weighting: bool
        If true, weight statistics by the inverse of the distance between a
        streamline and the centroid.

    Returns
    -------
    stats
    """
    # Computing infos on bundle
    unique_labels = np.unique(labels)
    num_digits_labels = len(str(np.max(unique_labels)))
    if density_weighting:
        track_count = compute_tract_counts_map(
            streamlines, metrics[0].shape).astype(np.float64)
    else:
        track_count = np.ones(metrics[0].shape)

    # Bigger weight near the centroid streamline
    distances_to_centroid_streamline = 1.0 / distances_to_centroid_streamline

    # Keep data as int to get the underlying voxel
    bundle_data_int = streamlines.data.astype(int)

    # Get stats
    stats = {bundle_name: {}}
    for metric in metrics:
        metric_data = metric.get_fdata()
        current_metric_fname, _ = split_name_with_nii(
            os.path.basename(metric.get_filename()))
        stats[bundle_name][current_metric_fname] = {}

        for i in unique_labels:
            number_key = '{}'.format(i).zfill(num_digits_labels)
            label_stats = {}
            stats[bundle_name][current_metric_fname][number_key] = label_stats

            label_indices = bundle_data_int[labels == i]
            label_metric = metric_data[label_indices[:, 0],
                                       label_indices[:, 1],
                                       label_indices[:, 2]]
            track_weight = track_count[label_indices[:, 0],
                                       label_indices[:, 1],
                                       label_indices[:, 2]]
            label_weight = track_weight
            if distance_weighting:
                label_weight *= distances_to_centroid_streamline[labels == i]
            if np.sum(label_weight) == 0:
                logging.warning('Weights sum to zero, can\'t be normalized. '
                                'Disabling weighting')
                label_weight = None

            label_mean = np.average(label_metric, weights=label_weight)
            label_std = np.sqrt(
                np.average((label_metric - label_mean)**2,
                           weights=label_weight))
            label_stats['mean'] = float(label_mean)
            label_stats['std'] = float(label_std)
    return stats
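
The returned dictionary nests bundle name, then metric name, then zero-padded label; a hedged access sketch with hypothetical inputs (labels and distances as produced by scil_label_and_distance_maps.py, fa_img a loaded 'fa.nii.gz'):

stats = get_bundle_metrics_mean_std_per_point(
    streamlines, 'AF_left', distances, [fa_img], labels,
    density_weighting=True, distance_weighting=True)
print(stats['AF_left']['fa']['01']['mean'])
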
Example No. 22
def compute_masks(gt_files, parser, args):
    """
    Compute ground-truth masks. If the file is already a mask, load it.
    If it is a bundle, compute the mask.

    Parameters
    ----------
    gt_files: list
        List of either StatefulTractograms or niftis.
    parser: ArgumentParser
        Argument parser which handles the script's arguments.
    args: Namespace
        List of arguments passed to the script.

    Returns
    -------
    gt_bundle_masks: list of numpy.ndarray
        Binary masks (or None for missing files).
    gt_bundle_inv_masks: list of numpy.ndarray
        Inverted (background) masks (or None for missing files).
    affine: numpy.ndarray
        Affine of the last loaded file.
    dimensions: tuple
        Dimensions of the last loaded file.
    """
    save_ref = args.reference

    gt_bundle_masks = []
    gt_bundle_inv_masks = []

    affine = None
    dimensions = None
    for gt_bundle in gt_files:
        if gt_bundle is not None:
            # Support ground truth as streamlines or masks
            # Will be converted to binary masks immediately
            _, ext = split_name_with_nii(gt_bundle)
            if ext in ['.gz', '.nii.gz']:
                gt_img = nib.load(gt_bundle)
                gt_mask = get_data_as_mask(gt_img)

                if affine is not None:
                    # TODO: compare affines.
                    logging.debug('Previous affine discarded.')
                affine = gt_img.affine
                dimensions = gt_mask.shape
            else:
                # Ignore the reference for .trk files: passing one would
                # trigger many warnings if loading many trk with a reference
                # (the reference may have been added only for some files).
                if ext == '.trk':
                    args.reference = None
                else:
                    args.reference = save_ref
                gt_sft = load_tractogram_with_reference(parser,
                                                        args,
                                                        gt_bundle,
                                                        bbox_check=False)
                gt_sft.to_vox()
                gt_sft.to_corner()
                _affine, _dimensions, _, _ = gt_sft.space_attributes
                if affine is not None:
                    # TODO: compare affines.
                    logging.debug('Previous affine discarded.')
                affine = _affine
                dimensions = _dimensions
                gt_mask = compute_tract_counts_map(gt_sft.streamlines,
                                                   dimensions).astype(np.int16)
            gt_inv_mask = np.zeros(dimensions, dtype=np.int16)
            gt_inv_mask[gt_mask == 0] = 1
            gt_mask[gt_mask > 0] = 1
        else:
            gt_mask = None
            gt_inv_mask = None

        gt_bundle_masks.append(gt_mask)
        gt_bundle_inv_masks.append(gt_inv_mask)

    return gt_bundle_masks, gt_bundle_inv_masks, affine, dimensions
Example No. 23
def _processing_wrapper(args):
    hdf5_filename = args[0]
    labels_img = args[1]
    in_label, out_label = args[2]
    measures_to_compute = copy.copy(args[3])
    if args[4] is not None:
        similarity_directory = args[4][0]
    weighted = args[5]
    include_dps = args[6]

    hdf5_file = h5py.File(hdf5_filename, 'r')
    key = '{}_{}'.format(in_label, out_label)
    if key not in hdf5_file:
        return
    streamlines = reconstruct_streamlines_from_hdf5(hdf5_file, key)

    affine, dimensions, voxel_sizes, _ = get_reference_info(labels_img)
    measures_to_return = {}

    if not (np.allclose(hdf5_file.attrs['affine'], affine, atol=1e-03)
            and np.array_equal(hdf5_file.attrs['dimensions'], dimensions)):
        raise ValueError('Provided hdf5 file has an incompatible header.')

    # Precompute to save one transformation, insert later
    if 'length' in measures_to_compute:
        streamlines_copy = list(streamlines)
        # scil_decompose_connectivity.py requires isotropic voxels
        mean_length = np.average(length(streamlines_copy)) * voxel_sizes[0]

    # If density is not required, do not compute it
    # Only required for volume, similarity and any metrics
    if not ((len(measures_to_compute) == 1 and
             ('length' in measures_to_compute
              or 'streamline_count' in measures_to_compute)) or
            (len(measures_to_compute) == 2 and
             ('length' in measures_to_compute
              and 'streamline_count' in measures_to_compute))):

        density = compute_tract_counts_map(streamlines, dimensions)

    if 'volume' in measures_to_compute:
        measures_to_return['volume'] = np.count_nonzero(density) * \
            np.prod(voxel_sizes)
        measures_to_compute.remove('volume')
    if 'streamline_count' in measures_to_compute:
        measures_to_return['streamline_count'] = len(streamlines)
        measures_to_compute.remove('streamline_count')
    if 'length' in measures_to_compute:
        measures_to_return['length'] = mean_length
        measures_to_compute.remove('length')
    if 'similarity' in measures_to_compute and similarity_directory:
        density_sim = load_node_nifti(similarity_directory, in_label,
                                      out_label, labels_img)
        if density_sim is None:
            ba_vox = 0
        else:
            ba_vox = compute_bundle_adjacency_voxel(density, density_sim)

        measures_to_return['similarity'] = ba_vox
        measures_to_compute.remove('similarity')

    for measure in measures_to_compute:
        if isinstance(measure, str) and os.path.isdir(measure):
            map_dirname = measure
            map_data = load_node_nifti(map_dirname, in_label, out_label,
                                       labels_img)
            measures_to_return[map_dirname] = np.average(
                map_data[map_data > 0])
        elif isinstance(measure, tuple) and os.path.isfile(measure[0]):
            metric_filename = measure[0]
            metric_img = measure[1]
            if not is_header_compatible(metric_img, labels_img):
                raise IOError('{} does not have a header compatible with '
                              'the labels image.'.format(metric_filename))

            metric_data = metric_img.get_fdata(dtype=np.float64)
            if weighted:
                # Weight each voxel's metric value by the normalized
                # streamline density.
                density = density / np.max(density)
                voxels_value = metric_data * density
                voxels_value = voxels_value[voxels_value > 0]
            else:
                voxels_value = metric_data[density > 0]

            measures_to_return[metric_filename] = np.average(voxels_value)

    # Datasets other than data/offsets/lengths are data_per_streamline
    # (dps); average each one and key the result by its output path.
    if include_dps:
        for dps_key in hdf5_file[key].keys():
            if dps_key not in ['data', 'offsets', 'lengths']:
                out_file = os.path.join(include_dps, dps_key)
                measures_to_return[out_file] = np.average(
                    hdf5_file[key][dps_key])

    hdf5_file.close()
    return {(in_label, out_label): measures_to_return}
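
In the original script this wrapper is typically fanned out over all label
pairs with a process pool. A minimal sketch of such a driver, assuming the
arguments were prepared as in the example (the function and its parameters
here are hypothetical):

import itertools
import multiprocessing

def compute_all_connections(hdf5_filename, labels_img, all_labels,
                            measures, similarity, weighted, include_dps,
                            nbr_processes=4):
    # Hypothetical driver: one task per (in_label, out_label) pair, matching
    # the args tuple unpacked by _processing_wrapper. 'similarity' is either
    # None or a sequence whose first item is the similarity directory.
    pairs = list(itertools.combinations(all_labels, 2))
    with multiprocessing.Pool(nbr_processes) as pool:
        results = pool.map(
            _processing_wrapper,
            [(hdf5_filename, labels_img, pair, measures,
              similarity, weighted, include_dps) for pair in pairs])
    # The wrapper returns None for pairs without streamlines; drop those.
    return [r for r in results if r is not None]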
Example #24
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if (not args.bundle) and (not args.bundle_mask) \
            and (not args.bundle_labels_map):
        parser.error('One of the options --bundle, --bundle_mask or '
                     '--bundle_labels_map must be used.')

    assert_inputs_exist(parser, [args.in_lesion],
                        optional=[args.bundle, args.bundle_mask,
                                  args.bundle_labels_map])
    assert_outputs_exist(parser, args, args.out_json,
                         optional=[args.out_lesion_stats,
                                   args.out_streamlines_stats,
                                   args.out_lesion_atlas])

    lesion_img = nib.load(args.in_lesion)
    lesion_data = get_data_as_mask(lesion_img, dtype=bool)

    if args.bundle:
        bundle_name, _ = split_name_with_nii(os.path.basename(args.bundle))
        sft = load_tractogram_with_reference(parser, args, args.bundle)
        sft.to_vox()
        sft.to_corner()
        streamlines = sft.get_streamlines_copy()
        map_data = compute_tract_counts_map(streamlines,
                                            lesion_data.shape)
        map_data[map_data > 0] = 1
    elif args.bundle_mask:
        bundle_name, _ = split_name_with_nii(
            os.path.basename(args.bundle_mask))
        map_img = nib.load(args.bundle_mask)
        map_data = get_data_as_mask(map_img)
    else:
        bundle_name, _ = split_name_with_nii(os.path.basename(
            args.bundle_labels_map))
        map_img = nib.load(args.bundle_labels_map)
        map_data = get_data_as_label(map_img)

    is_single_label = args.bundle_labels_map is None
    voxel_sizes = lesion_img.header.get_zooms()[0:3]
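    # Split the lesion mask into connected components, one integer label
    # per lesion.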
    lesion_atlas, _ = ndi.label(lesion_data)

    lesion_load_dict = compute_lesion_stats(
        map_data, lesion_atlas, single_label=is_single_label,
        voxel_sizes=voxel_sizes, min_lesion_vol=args.min_lesion_vol)

    if args.out_lesion_atlas:
        lesion_atlas *= map_data.astype(bool)
        nib.save(nib.Nifti1Image(lesion_atlas, lesion_img.affine),
                 args.out_lesion_atlas)

    volume_dict = {bundle_name: lesion_load_dict}
    with open(args.out_json, 'w') as outfile:
        json.dump(volume_dict, outfile,
                  sort_keys=args.sort_keys, indent=args.indent)

    if args.out_streamlines_stats or args.out_lesion_stats:
        lesion_dict = {}
        for lesion in np.unique(lesion_atlas)[1:]:
            curr_vol = np.count_nonzero(lesion_atlas == lesion) \
                * np.prod(voxel_sizes)
            if curr_vol >= args.min_lesion_vol:
                key = str(lesion).zfill(4)
                lesion_dict[key] = {'volume': curr_vol}
                if args.bundle:
                    tmp = np.zeros(lesion_atlas.shape, dtype=np.uint8)
                    tmp[lesion_atlas == lesion] = 1
                    new_sft, _ = filter_grid_roi(sft, tmp, 'any', False)
                    lesion_dict[key]['strs_count'] = len(new_sft)

        lesion_vol_dict = {bundle_name: {}}
        streamlines_count_dict = {bundle_name: {'streamlines_count': {}}}
        for key in lesion_dict.keys():
            lesion_vol_dict[bundle_name][key] = lesion_dict[key]['volume']
            if args.bundle:
                streamlines_count_dict[bundle_name]['streamlines_count'][key] = \
                    lesion_dict[key]['strs_count']

        if args.out_lesion_stats:
            with open(args.out_lesion_stats, 'w') as outfile:
                json.dump(lesion_vol_dict, outfile,
                          sort_keys=args.sort_keys, indent=args.indent)
        if args.out_streamlines_stats:
            with open(args.out_streamlines_stats, 'w') as outfile:
                json.dump(streamlines_count_dict, outfile,
                          sort_keys=args.sort_keys, indent=args.indent)
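
For reference, the two optional JSON outputs follow the structures built
above; a hypothetical shape with illustrative values (the bundle name and
lesion ids depend on the inputs):

lesion_vol_dict = {'AF_L': {'0001': 42.0,
                            '0002': 13.5}}
streamlines_count_dict = {'AF_L': {'streamlines_count': {'0001': 7,
                                                         '0002': 2}}}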
Example #25
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser,
                        [args.in_bundle, args.label_map, args.distance_map] +
                        args.metrics)

    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    sft.to_vox()
    sft.to_corner()

    stats = {}
    bundle_name, _ = os.path.splitext(os.path.basename(args.in_bundle))
    if len(sft) == 0:
        stats[bundle_name] = None
        print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
        return

    assert_same_resolution(args.metrics)
    metrics = [nib.load(metric) for metric in args.metrics]

    # With density weighting, each streamline point is weighted by the
    # streamline density of the voxel it falls in.
    if args.density_weighting:
        track_count = compute_tract_counts_map(
            sft.streamlines, metrics[0].shape).astype(np.float64)
    else:
        track_count = np.ones(metrics[0].shape)

    # The label map assigns each streamline point to a section along the
    # bundle; the distance map gives its distance to the centroid streamline.
    label_file = np.load(args.label_map)
    labels = label_file['arr_0']

    unique_labels = np.unique(labels)
    num_digits_labels = len(str(np.max(unique_labels)))

    distance_file = np.load(args.distance_map)
    distances_to_centroid_streamline = distance_file['arr_0']
    # Bigger weight near the centroid streamline
    distances_to_centroid_streamline = 1.0 / distances_to_centroid_streamline

    if len(labels) != len(distances_to_centroid_streamline):
        raise ValueError('The label map does not contain the same number of '
                         'entries as the distance map: {} != {}'.format(
                             len(labels),
                             len(distances_to_centroid_streamline)))

    # np.int was removed from recent NumPy; use an explicit integer dtype.
    bundle_data_int = sft.streamlines.data.astype(np.int32)
    stats[bundle_name] = {}

    for metric in metrics:
        metric_data = metric.get_fdata()
        current_metric_fname, _ = split_name_with_nii(
            os.path.basename(metric.get_filename()))
        stats[bundle_name][current_metric_fname] = {}

        for i in unique_labels:
            number_key = '{}'.format(i).zfill(num_digits_labels)
            label_stats = {}
            stats[bundle_name][current_metric_fname][number_key] = label_stats

            label_indices = bundle_data_int[labels == i]
            label_metric = metric_data[label_indices[:, 0],
                                       label_indices[:, 1],
                                       label_indices[:, 2]]
            track_weight = track_count[label_indices[:, 0],
                                       label_indices[:, 1],
                                       label_indices[:, 2]]
            label_weight = track_weight
            if args.distance_weighting:
                label_weight *= distances_to_centroid_streamline[labels == i]
            if np.sum(label_weight) == 0:
                logging.warning('Weights sum to zero, can\'t be normalized. '
                                'Disabling weighting')
                label_weight = None

            label_mean = np.average(label_metric, weights=label_weight)
            label_std = np.sqrt(
                np.average((label_metric - label_mean)**2,
                           weights=label_weight))
            label_stats['mean'] = float(label_mean)
            label_stats['std'] = float(label_std)

    print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
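
The printed statistics are nested as bundle -> metric -> section label; a
hypothetical shape with illustrative values:

stats = {'AF_L': {'fa': {'01': {'mean': 0.52, 'std': 0.04},
                         '02': {'mean': 0.55, 'std': 0.05}}}}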