Code example #1
def compute_dice_streamlines(bundle_1, bundle_2):
    """
    Compute the overlap (dice coefficient) between two bundles.
    Both bundles need to come from the exact same tractogram.

    Parameters
    ----------
    bundle_1: list of ndarray
        First set of streamlines.
    bundle_2: list of ndarray
        Second set of streamlines.

    Returns
    -------
    A tuple containing:
        float: Value between 0 and 1 that represents the spatial agreement
            between both bundles.
        list of ndarray: Intersection of streamlines in both bundles.
        list of ndarray: Union of streamlines in both bundles.
    """
    streamlines_intersect, _ = perform_streamlines_operation(
        intersection, [bundle_1, bundle_2], precision=0)
    streamlines_union, _ = perform_streamlines_operation(union,
                                                         [bundle_1, bundle_2],
                                                         precision=0)

    numerator = 2 * len(streamlines_intersect)
    denominator = len(bundle_1) + len(bundle_2)
    if denominator > 0:
        dice = numerator / float(denominator)
    else:
        dice = np.nan

    return dice, streamlines_intersect, streamlines_union
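
A minimal usage sketch (hypothetical data): at precision=0 the underlying
operation matches streamlines on rounded point coordinates, so the two
bundles must share identical arrays for the intersection to be non-empty.

import numpy as np

# Illustrative streamlines: s1 appears in both bundles, so the
# intersection holds one streamline and the union holds three.
s1 = np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.]])
s2 = np.array([[0., 1., 0.], [1., 1., 0.], [2., 1., 0.]])
s3 = np.array([[0., 2., 0.], [1., 2., 0.], [2., 2., 0.]])

dice, in_both, in_either = compute_dice_streamlines([s1, s2], [s1, s3])
# dice == 2 * 1 / (2 + 2) == 0.5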
Code example #2
def compute_streamlines_measures(args):
    # Arguments arrive packed in a single sequence so the function can be
    # dispatched with multiprocessing.Pool.map (see code example #5).
    bundle_filename, bundle_reference = args[0]
    wb_streamlines = args[1]
    gs_streamlines_indices = args[2]

    if not os.path.isfile(bundle_filename):
        logging.info('{} does not exist'.format(bundle_filename))
        return None

    bundle_sft = load_tractogram(bundle_filename, bundle_reference)
    bundle_sft.to_vox()
    bundle_sft.to_corner()
    bundle_streamlines = bundle_sft.streamlines
    _, bundle_dimensions, _, _ = bundle_sft.space_attributes

    if not bundle_streamlines:
        logging.info('{} is empty'.format(bundle_filename))
        return None

    _, streamlines_indices = perform_streamlines_operation(
        intersection, [wb_streamlines, bundle_streamlines], precision=0)

    streamlines_binary = binary_classification(streamlines_indices,
                                               gs_streamlines_indices,
                                               len(wb_streamlines))

    return dict(
        zip([
            'sensitivity_streamlines', 'specificity_streamlines',
            'precision_streamlines', 'accuracy_streamlines',
            'dice_streamlines', 'kappa_streamlines', 'youden_streamlines'
        ], streamlines_binary))
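
A hypothetical single-process call (the filenames are placeholders;
wb_streamlines and gs_streamlines_indices are prepared as in code
example #5 below):

measures = compute_streamlines_measures(
    [('AF_left.trk', 'reference.nii.gz'),  # (bundle_filename, bundle_reference)
     wb_streamlines,
     gs_streamlines_indices])
# measures is None if the bundle file is missing or empty; otherwise a
# dict such as measures['dice_streamlines'] -> float.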
Code example #3
def compute_outliers(sft, new_sft):
    """
    Return a stateful tractogram whose streamlines are the difference of the
    two input stateful tractograms
    """
    streamlines_list = [sft.streamlines, new_sft.streamlines]
    _, indices = perform_streamlines_operation(difference,
                                               streamlines_list,
                                               precision=0)
    outliers_sft = sft[indices]
    return outliers_sft
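
A hypothetical usage, assuming new_sft was derived from sft (for example
by filtering), so that point coordinates match exactly at precision=0:

from dipy.io.streamline import load_tractogram

# 'bundle.trk' is a placeholder; 'same' lets the trk be its own reference.
sft = load_tractogram('bundle.trk', 'same')
new_sft = sft[list(range(0, len(sft), 2))]  # keep every second streamline
outliers = compute_outliers(sft, new_sft)   # the discarded (odd) streamlines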
Code example #4
def main():

    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, args.inputs)
    assert_outputs_exist(parser, args, args.output)

    # Load all input streamlines.
    data = [load_data(parser, args, f) for f in args.inputs]
    streamlines, data_per_streamline, data_per_point = zip(*data)
    nb_streamlines = [len(s) for s in streamlines]

    # Apply the requested operation to each input file.
    logging.info('Performing operation \'{}\'.'.format(args.operation))
    if args.operation == 'concatenate':
        new_streamlines = sum(streamlines, [])
        indices = range(len(new_streamlines))
    else:
        new_streamlines, indices = perform_streamlines_operation(
            OPERATIONS[args.operation], streamlines, args.precision)

    # Get the meta data of the streamlines.
    new_data_per_streamline = {}
    new_data_per_point = {}
    if not args.no_metadata:

        for key in data_per_streamline[0].keys():
            all_data = np.vstack([s[key] for s in data_per_streamline])
            new_data_per_streamline[key] = all_data[indices, :]

        # Add the indices to the metadata if requested.
        if args.save_metadata_indices:
            new_data_per_streamline['ids'] = indices

        for key in data_per_point[0].keys():
            all_data = list(chain(*[s[key] for s in data_per_point]))
            new_data_per_point[key] = [all_data[i] for i in indices]

    # Save the indices to a file if requested.
    if args.save_indices is not None:
        start = 0
        indices_dict = {'filenames': args.inputs}
        for name, nb in zip(args.inputs, nb_streamlines):
            end = start + nb
            file_indices = \
                [i - start for i in indices if start <= i < end]
            indices_dict[name] = file_indices
            start = end
        with open(args.save_indices, 'wt') as f:
            json.dump(indices_dict, f)

    # Save the new streamlines.
    logging.info('Saving streamlines to {0}.'.format(args.output))

    # If no reference was provided, all inputs are assumed to be trk files.
    if args.reference:
        reference_file = args.reference
    else:
        reference_file = args.inputs[0]

    sft = StatefulTractogram(new_streamlines,
                             reference_file,
                             Space.RASMM,
                             data_per_streamline=new_data_per_streamline,
                             data_per_point=new_data_per_point)
    save_tractogram(sft, args.output)
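
The save_indices block above maps global indices back to per-file indices
by walking the per-file streamline counts. A small worked example of that
arithmetic (names and values are illustrative):

nb_streamlines = [3, 4]  # file A holds 3 streamlines, file B holds 4
indices = [0, 2, 5]      # surviving global indices after the operation

start = 0
per_file = {}
for name, nb in zip(['A.trk', 'B.trk'], nb_streamlines):
    end = start + nb
    per_file[name] = [i - start for i in indices if start <= i < end]
    start = end
# per_file == {'A.trk': [0, 2], 'B.trk': [2]}  (global 5 -> local 5 - 3 = 2)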
Code example #5
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, args.in_bundles)
    assert_outputs_exist(parser, args, args.out_json)

    if (not args.streamlines_measures) and (not args.voxels_measures):
        parser.error('At least one of the two modes is needed')

    nbr_cpu = validate_nbr_processes(parser, args)

    all_binary_metrics = []
    bundles_references_tuple_extended = link_bundles_and_reference(
        parser, args, args.in_bundles)

    if args.streamlines_measures:
        # Gold standard related indices are computed once
        wb_sft = load_tractogram_with_reference(parser, args,
                                                args.streamlines_measures[1])
        wb_sft.to_vox()
        wb_sft.to_corner()
        wb_streamlines = wb_sft.streamlines

        gs_sft = load_tractogram_with_reference(parser, args,
                                                args.streamlines_measures[0])
        gs_sft.to_vox()
        gs_sft.to_corner()
        gs_streamlines = gs_sft.streamlines
        _, gs_dimensions, _, _ = gs_sft.space_attributes

        # Prepare the gold standard only once
        _, gs_streamlines_indices = perform_streamlines_operation(
            intersection, [wb_streamlines, gs_streamlines], precision=0)

        if nbr_cpu == 1:
            streamlines_dict = []
            for i in bundles_references_tuple_extended:
                streamlines_dict.append(
                    compute_streamlines_measures(
                        [i, wb_streamlines, gs_streamlines_indices]))
        else:
            pool = multiprocessing.Pool(nbr_cpu)
            streamlines_dict = pool.map(
                compute_streamlines_measures,
                zip(bundles_references_tuple_extended,
                    itertools.repeat(wb_streamlines),
                    itertools.repeat(gs_streamlines_indices)))
            pool.close()
            pool.join()
        all_binary_metrics.extend(streamlines_dict)

    # Without explicit masks, derive them from the tractograms loaded above
    if not args.voxels_measures:
        gs_binary_3d = compute_tract_counts_map(gs_streamlines, gs_dimensions)
        gs_binary_3d[gs_binary_3d > 0] = 1

        tracking_mask_data = compute_tract_counts_map(wb_streamlines,
                                                      gs_dimensions)
        tracking_mask_data[tracking_mask_data > 0] = 1
    else:
        gs_binary_3d = get_data_as_mask(nib.load(args.voxels_measures[0]))
        gs_binary_3d[gs_binary_3d > 0] = 1
        tracking_mask_data = get_data_as_mask(nib.load(
            args.voxels_measures[1]))
        tracking_mask_data[tracking_mask_data > 0] = 1

    if nbr_cpu == 1:
        voxels_dict = []
        for i in bundles_references_tuple_extended:
            voxels_dict.append(
                compute_voxel_measures([i, tracking_mask_data, gs_binary_3d]))
    else:
        # A fresh pool is needed here: the earlier pool only exists when
        # the streamlines measures were computed with multiple processes.
        pool = multiprocessing.Pool(nbr_cpu)
        voxels_dict = pool.map(
            compute_voxel_measures,
            zip(bundles_references_tuple_extended,
                itertools.repeat(tracking_mask_data),
                itertools.repeat(gs_binary_3d)))
        pool.close()
        pool.join()
    all_binary_metrics.extend(voxels_dict)

    # After all processing, write the json file, skipping None values
    output_binary_dict = {}
    for binary_dict in all_binary_metrics:
        if binary_dict is not None:
            for measure_name in binary_dict.keys():
                if measure_name not in output_binary_dict:
                    output_binary_dict[measure_name] = []
                output_binary_dict[measure_name].append(
                    float(binary_dict[measure_name]))

    with open(args.out_json, 'w') as outfile:
        json.dump(output_binary_dict,
                  outfile,
                  indent=args.indent,
                  sort_keys=args.sort_keys)
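
The final loop transposes a list of per-bundle dictionaries into one
dictionary of per-measure lists, skipping bundles that returned None.
A condensed equivalent with illustrative values:

all_binary_metrics = [{'dice_streamlines': 0.8}, None,
                      {'dice_streamlines': 0.6}]

output_binary_dict = {}
for binary_dict in all_binary_metrics:
    if binary_dict is not None:
        for measure_name in binary_dict:
            output_binary_dict.setdefault(measure_name, []).append(
                float(binary_dict[measure_name]))
# output_binary_dict == {'dice_streamlines': [0.8, 0.6]}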
Code example #6
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, args.in_tractograms)
    assert_outputs_exist(parser, args, args.out_tractogram,
                         optional=args.save_indices)

    if args.operation == 'lazy_concatenate':
        logging.info('Using lazy_concatenate, no spatial or metadata related '
                     'checks are performed.\nMetadata will be lost, only '
                     'trk/tck files are supported.')

        def list_generator_from_nib(filenames):
            for in_file in filenames:
                tractogram_file = nib.streamlines.load(in_file, lazy_load=True)
                for s in tractogram_file.streamlines:
                    yield s
        header = None
        for in_file in args.in_tractograms:
            _, ext = os.path.splitext(in_file)
            if ext == '.trk':
                if header is None:
                    header = nib.streamlines.load(
                        in_file, lazy_load=True).header
                elif not is_header_compatible(header, in_file):
                    logging.warning('Incompatible headers in the list.')

        generator = list_generator_from_nib(args.in_tractograms)
        # The generator can only be consumed once, which is enough for the
        # single pass that nib.streamlines.save performs below.
        out_tractogram = LazyTractogram(lambda: generator,
                                        affine_to_rasmm=np.eye(4))
        nib.streamlines.save(out_tractogram, args.out_tractogram,
                             header=header)
        return

    # Load all input streamlines.
    sft_list = []
    for f in args.in_tractograms:
        sft_list.append(load_tractogram_with_reference(
            parser, args, f, bbox_check=not args.ignore_invalid))

    # Apply the requested operation to each input file.
    logging.info('Performing operation \'{}\'.'.format(args.operation))
    new_sft = concatenate_sft(sft_list, args.no_metadata, args.fake_metadata)
    if args.operation == 'concatenate':
        indices = np.arange(len(new_sft), dtype=np.uint32)
    else:
        streamlines_list = [sft.streamlines for sft in sft_list]
        op_name = args.operation
        if args.robust:
            op_name += '_robust'
            _, indices = OPERATIONS[op_name](streamlines_list,
                                             precision=args.precision)
        else:
            _, indices = perform_streamlines_operation(
                OPERATIONS[op_name], streamlines_list,
                precision=args.precision)

    # Save the indices to a file if requested.
    if args.save_indices:
        start = 0
        out_dict = {}
        # Per-file streamline counts, used to split global indices per input
        streamlines_counts = [len(sft) for sft in sft_list]
        for name, nb in zip(args.in_tractograms, streamlines_counts):
            end = start + nb
            # Cast to plain int so the values are JSON serializable
            out_dict[name] = [int(i - start)
                              for i in indices if start <= i < end]
            start = end

        with open(args.save_indices, 'wt') as f:
            json.dump(out_dict, f,
                      indent=args.indent,
                      sort_keys=args.sort_keys)

    # Save the new streamlines (and metadata)
    logging.info('Saving {} streamlines to {}.'.format(len(indices),
                                                       args.out_tractogram))
    save_tractogram(new_sft[indices], args.out_tractogram,
                    bbox_valid_check=not args.ignore_invalid)
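
list_generator_from_nib above yields one streamline at a time, so peak
memory stays near a single streamline rather than a full tractogram. An
equivalent formulation using itertools.chain, purely for illustration:

import itertools

import nibabel as nib

def lazy_streamlines(filenames):
    # Chain the per-file lazy generators into one stream of ndarrays.
    return itertools.chain.from_iterable(
        nib.streamlines.load(f, lazy_load=True).streamlines
        for f in filenames)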
Code example #7
def main():
    parser = _build_args_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_bundles)
    output_streamlines_filename = '{}streamlines.trk'.format(
        args.output_prefix)
    output_voxels_filename = '{}voxels.nii.gz'.format(args.output_prefix)
    assert_outputs_exist(parser, args,
                         [output_voxels_filename, output_streamlines_filename])

    if not 0 <= args.ratio_voxels <= 1 or not 0 <= args.ratio_streamlines <= 1:
        parser.error('Ratios must be between 0 and 1.')

    fusion_streamlines = []
    for name in args.in_bundles:
        fusion_streamlines.extend(
            load_tractogram_with_reference(parser, args, name).streamlines)

    # Union over the single combined list removes duplicate streamlines.
    fusion_streamlines, _ = perform_streamlines_operation(
        union, [fusion_streamlines], 0)
    fusion_streamlines = ArraySequence(fusion_streamlines)
    if args.reference:
        reference_file = args.reference
    else:
        reference_file = args.in_bundles[0]

    transformation, dimensions, _, _ = get_reference_info(reference_file)
    volume = np.zeros(dimensions)
    streamlines_vote = dok_matrix(
        (len(fusion_streamlines), len(args.in_bundles)))

    for i, name in enumerate(args.in_bundles):
        if not is_header_compatible(reference_file, name):
            raise ValueError('Header of {} is not compatible with the '
                             'reference.'.format(name))
        sft = load_tractogram_with_reference(parser, args, name)
        bundle = sft.get_streamlines_copy()
        sft.to_vox()
        bundle_vox_space = sft.get_streamlines_copy()
        binary = compute_tract_counts_map(bundle_vox_space, dimensions)
        volume[binary > 0] += 1

        if args.same_tractogram:
            _, indices = perform_streamlines_operation(
                intersection, [fusion_streamlines, bundle], 0)
            streamlines_vote[list(indices), i] += 1

    if args.same_tractogram:
        # Keep streamlines voted for by at least
        # ratio_streamlines * number_of_bundles input bundles.
        ratio_value = int(args.ratio_streamlines * len(args.in_bundles))
        real_indices = []
        for i in range(len(fusion_streamlines)):
            if np.sum(streamlines_vote[i]) >= ratio_value:
                real_indices.append(i)

        new_streamlines = fusion_streamlines[real_indices]

        sft = StatefulTractogram(new_streamlines, reference_file, Space.RASMM)
        save_tractogram(sft, output_streamlines_filename)

    volume[volume < int(args.ratio_voxels * len(args.in_bundles))] = 0
    volume[volume > 0] = 1
    nib.save(nib.Nifti1Image(volume.astype(np.uint8), transformation),
             output_voxels_filename)
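
Both thresholds follow the same voting arithmetic: with four input
bundles and a ratio of 0.5, a streamline (or voxel) must appear in at
least int(0.5 * 4) = 2 bundles to survive. A toy check of the streamline
vote (values are illustrative):

import numpy as np
from scipy.sparse import dok_matrix

votes = dok_matrix((3, 4))     # 3 fusion streamlines, 4 input bundles
votes[0, 0] = votes[0, 1] = 1  # streamline 0 found in bundles 0 and 1
votes[1, 2] = 1                # streamline 1 found in bundle 2 only

ratio_value = int(0.5 * 4)
kept = [i for i in range(3) if np.sum(votes[i]) >= ratio_value]
# kept == [0]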
Code example #8
def compute_bundle_adjacency_streamlines(bundle_1,
                                         bundle_2,
                                         non_overlap=False,
                                         centroids_1=None,
                                         centroids_2=None):
    """
    Compute the distance in millimeters between two bundles. Uses centroids
    to limit computation time. Each centroid of the first bundle is matched
    to the nearest centroid of the second bundle and vice-versa.
    Distances between matched pairs are averaged for the final result.

    References
    ----------
    .. [Garyfallidis15] Garyfallidis et al. Robust and efficient linear
        registration of white-matter fascicles in the space of streamlines,
        Neuroimage, 2015.

    Parameters
    ----------
    bundle_1: list of ndarray
        First set of streamlines.
    bundle_2: list of ndarray
        Second set of streamlines.
    non_overlap: bool
        Exclude overlapping streamlines from the computation.
    centroids_1: list of ndarray
        Pre-computed centroids for the first bundle.
    centroids_2: list of ndarray
        Pre-computed centroids for the second bundle.

    Returns
    -------
    float: Distance in millimeters between both bundles, or -1 if either
        bundle is empty.
    """
    if not bundle_1 or not bundle_2:
        return -1
    thresholds = [32, 24, 12, 6]
    # Initialize the clusters
    if centroids_1 is None:
        centroids_1 = qbx_and_merge(bundle_1,
                                    thresholds,
                                    rng=RandomState(0),
                                    verbose=False).centroids
    if centroids_2 is None:
        centroids_2 = qbx_and_merge(bundle_2,
                                    thresholds,
                                    rng=RandomState(0),
                                    verbose=False).centroids
    if non_overlap:
        non_overlap_1, _ = perform_streamlines_operation(subtraction,
                                                         [bundle_1, bundle_2],
                                                         precision=0)
        non_overlap_2, _ = perform_streamlines_operation(subtraction,
                                                         [bundle_2, bundle_1],
                                                         precision=0)
        if non_overlap_1:
            non_overlap_centroids_1 = qbx_and_merge(non_overlap_1,
                                                    thresholds,
                                                    rng=RandomState(0),
                                                    verbose=False).centroids
            distance_matrix_1 = bundles_distances_mdf(non_overlap_centroids_1,
                                                      centroids_2)

            min_b1 = np.min(distance_matrix_1, axis=0)
            distance_b1 = np.average(min_b1)
        else:
            distance_b1 = 0

        if non_overlap_2:
            non_overlap_centroids_2 = qbx_and_merge(non_overlap_2,
                                                    thresholds,
                                                    rng=RandomState(0),
                                                    verbose=False).centroids
            distance_matrix_2 = bundles_distances_mdf(centroids_1,
                                                      non_overlap_centroids_2)
            min_b2 = np.min(distance_matrix_2, axis=1)
            distance_b2 = np.average(min_b2)
        else:
            distance_b2 = 0

    else:
        distance_matrix = bundles_distances_mdf(centroids_1, centroids_2)
        min_b1 = np.min(distance_matrix, axis=0)
        min_b2 = np.min(distance_matrix, axis=1)
        distance_b1 = np.average(min_b1)
        distance_b2 = np.average(min_b2)

    return (distance_b1 + distance_b2) / 2.0
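
A hypothetical call with synthetic bundles (qbx_and_merge and
bundles_distances_mdf come from DIPY; the coordinates below are
illustrative and assumed to be in millimeters):

import numpy as np

# Two straight 10-point streamlines, offset by 5 mm along y.
line = np.linspace([0., 0., 0.], [90., 0., 0.], 10)
bundle_1 = [line]
bundle_2 = [line + np.array([0., 5., 0.])]

distance = compute_bundle_adjacency_streamlines(bundle_1, bundle_2)
# Expected to be close to 5.0 (mm), the constant offset between bundles.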
Code example #9
File: scil_streamlines_math.py  Project: BIG-S2/PSC
def main():

    parser = build_args_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    if os.path.isfile(args.output):
        if args.force:
            logging.info('Overwriting {0}.'.format(args.output))
        else:
            parser.error('{0} already exists! Use -f to overwrite it.'.format(
                args.output))

    # Load all input streamlines.
    data = [load_data(f) for f in args.inputs]
    streamlines, data_per_streamline, data_per_point = zip(*data)
    nb_streamlines = [len(s) for s in streamlines]

    # Apply the requested operation to each input file.
    logging.info('Performing operation \'{}\'.'.format(args.operation))
    new_streamlines, indices = perform_streamlines_operation(
        OPERATIONS[args.operation], streamlines, args.precision)

    # Get the meta data of the streamlines.
    new_data_per_streamline = {}
    new_data_per_point = {}
    if not args.no_data:

        for key in data_per_streamline[0].keys():
            all_data = np.vstack([s[key] for s in data_per_streamline])
            new_data_per_streamline[key] = all_data[indices, :]

        # Add the indices to the metadata if requested.
        if args.save_meta_indices:
            new_data_per_streamline['ids'] = indices

        for key in data_per_point[0].keys():
            all_data = list(chain(*[s[key] for s in data_per_point]))
            new_data_per_point[key] = [all_data[i] for i in indices]

    # Save the indices to a file if requested.
    if args.save_indices is not None:
        start = 0
        indices_dict = {'filenames': args.inputs}
        for name, nb in zip(args.inputs, nb_streamlines):
            end = start + nb
            file_indices = \
                [i - start for i in indices if start <= i < end]
            indices_dict[name] = file_indices
            start = end
        with open(args.save_indices, 'wt') as f:
            json.dump(indices_dict, f)

    # Save the new streamlines.
    logging.info('Saving streamlines to {0}.'.format(args.output))
    # Lazily load the first input to reuse its header and affine.
    reference_file = load(args.inputs[0], True)
    new_tractogram = Tractogram(new_streamlines,
                                data_per_streamline=new_data_per_streamline,
                                data_per_point=new_data_per_point)

    # If the reference is a .tck, the affine will be None.
    affine = reference_file.tractogram.affine_to_rasmm
    if affine is None:
        affine = np.eye(4)
    new_tractogram.affine_to_rasmm = affine

    new_header = reference_file.header.copy()
    new_header['nb_streamlines'] = len(new_streamlines)
    save(new_tractogram, args.output, header=new_header)
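
For reference, the save path above relies on nibabel's Tractogram API; a
minimal standalone sketch of the same pattern (the filename is
hypothetical):

import numpy as np
import nibabel as nib
from nibabel.streamlines import Tractogram

streamlines = [np.array([[0., 0., 0.], [1., 1., 1.]])]
# affine_to_rasmm=np.eye(4) declares the points already in RASMM space,
# mirroring the .tck fallback above.
tractogram = Tractogram(streamlines, affine_to_rasmm=np.eye(4))
nib.streamlines.save(tractogram, 'out.tck')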