Example #1
def find_bad_nrigh(name, score):

    test_data = np.load(os.path.join('..', 'cci_clean_data', name))
    data_evl = test_data['arr_0']
    key = os.path.splitext(name)[0]
    cci_score = cci_dict[key]
    cci_score = np.array(cci_score)

    streamlines_evl = Streamlines()

    for i in range(np.shape(data_evl)[0]):
        tmp = data_evl[i]
        tmp = zero_remove(tmp)
        #tmp = tmp[~np.all(tmp == 0, axis=-1)]
        #tmp = np.around(tmp, decimals=0)
        streamlines_evl.append(tmp)

    lengths = np.array(list(length(streamlines_evl)))

    neighb = np.zeros((np.shape(data_evl)[0]))

    subsamp_sls = set_number_of_points(streamlines_evl, 64)
    for i in range(len(streamlines_evl)):

        mdf_mx = bundles_distances_mdf([subsamp_sls[i]], subsamp_sls)
        if score[i] == 0:  # bad fiber

            # Neighbourhood threshold: the 4th percentile of MDF distances
            # from streamline i to every other streamline.
            thre = np.percentile(mdf_mx, 4)
            len_bad = lengths[i]
            bound = len_bad * 0.1
            # Streamlines within +/-10% of the bad fiber's length
            # (computed but not used below).
            len_limt = 1 * ((lengths > len_bad - bound) &
                            (lengths < len_bad + bound))
            label = 1 * (mdf_mx < thre).flatten()  # bad neighbour
            # Only count neighbours whose CCI score is below 10.
            neighb = neighb + label * (cci_score < 10)
    return data_evl, neighb
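
The core of this example is thresholding an MDF distance matrix to flag neighbours of a known-bad fiber. A minimal, self-contained sketch of that step (the toy streamlines and the 5 mm threshold are made up for illustration):

import numpy as np
from dipy.tracking.streamline import set_number_of_points
from dipy.tracking.distances import bundles_distances_mdf

# Three toy streamlines: two follow the same path, one is far away.
sls = [np.array([[0., 0., 0.], [10., 0., 0.]], dtype=np.float32),
       np.array([[0., 1., 0.], [10., 1., 0.]], dtype=np.float32),
       np.array([[0., 50., 0.], [10., 50., 0.]], dtype=np.float32)]
subsamp = set_number_of_points(sls, 64)

# MDF distances from streamline 0 to all streamlines, shape (1, 3).
mdf = bundles_distances_mdf([subsamp[0]], subsamp)
neighbours = (mdf < 5.0).flatten()
print(neighbours)  # [ True  True False]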
Example #2
def _prune_segments(segments, min_length, max_length, vox_size):
    lengths = list(length(segments) * vox_size)
    valid = []
    invalid = []

    for s, l in zip(segments, lengths):
        if min_length <= l <= max_length:
            valid.append(s)
        else:
            invalid.append(s)
    return valid, invalid
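
A minimal usage sketch, assuming dipy's length is in scope as in the snippet; the streamlines, thresholds and the 2.0 mm voxel size are made up:

import numpy as np
from dipy.tracking.streamline import length

segments = [np.array([[0., 0., 0.], [2., 0., 0.]]),    # 2 voxels long
            np.array([[0., 0., 0.], [40., 0., 0.]])]   # 40 voxels long

valid, invalid = _prune_segments(segments, min_length=1.0,
                                 max_length=20.0, vox_size=2.0)
print(len(valid), len(invalid))  # 1 1: the 4 mm segment is kept, the 80 mm one pruned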
Example #3
def _prune_segments(segments, min_length, max_length, vox_size):
    lengths = list(length(segments) * vox_size)
    valid = []
    invalid = []

    for i, tuple_zip in enumerate(zip(segments, lengths)):
        _, le = tuple_zip
        if min_length <= le <= max_length:
            valid.append(i)
        else:
            invalid.append(i)
    return valid, invalid
Example #4
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()
    for param in ['theta', 'curvature']:
        # Default was removed for consistency.
        if param not in args:
            setattr(args, param, None)

    assert_inputs_exist(parser, [args.sh_file, args.seed_file, args.mask_file])
    assert_outputs_exists(parser, args, [args.output_file])

    np.random.seed(args.seed)

    mask_img = nib.load(args.mask_file)
    mask_data = mask_img.get_data()

    seeds = random_seeds_from_mask(
        nib.load(args.seed_file).get_data(),
        seeds_count=args.nts if 'nts' in args else args.npv,
        seed_count_per_voxel='nts' not in args)

    # Tracking is performed in voxel space
    streamlines = LocalTracking(_get_direction_getter(args, mask_data),
                                BinaryTissueClassifier(mask_data),
                                seeds,
                                np.eye(4),
                                step_size=args.step_size,
                                max_cross=1,
                                maxlen=int(args.max_len / args.step_size) + 1,
                                fixedstep=True,
                                return_all=True)

    filtered_streamlines = (s for s in streamlines
                            if args.min_len <= length(s) <= args.max_len)
    if args.compress_streamlines:
        filtered_streamlines = (compress_streamlines(s, args.tolerance_error)
                                for s in filtered_streamlines)

    tractogram = LazyTractogram(lambda: filtered_streamlines,
                                affine_to_rasmm=mask_img.affine)

    # Header with the affine/shape from mask image
    header = {
        Field.VOXEL_TO_RASMM: mask_img.affine.copy(),
        Field.VOXEL_SIZES: mask_img.header.get_zooms(),
        Field.DIMENSIONS: mask_img.shape,
        Field.VOXEL_ORDER: ''.join(aff2axcodes(mask_img.affine))
    }

    # Use generator to save the streamlines on-the-fly
    nib.streamlines.save(tractogram, args.output_file, header=header)
Example #5
def load_tractogram(T_filename, threshold_short_streamlines=10.0):
    """Load tractogram from TRK file and remove short streamlines with
    length below threshold.
    """
    print("Loading %s" % T_filename)
    T, hdr = trackvis.read(T_filename, as_generator=False)
    T = np.array([s[0] for s in T], dtype=object)
    print("%s: %s streamlines" % (T_filename, len(T)))

    # Removing short artifactual streamlines
    print("Removing (presumably artifactual) streamlines shorter than %s" % threshold_short_streamlines)
    T = np.array([s for s in T if length(s) >= threshold_short_streamlines], dtype=object)
    print("%s: %s streamlines" % (T_filename, len(T)))
    return T
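
A usage sketch; note that nibabel.trackvis is the legacy TRK API this snippet depends on (removed in recent nibabel releases), so the imports below describe the assumed module context and 'bundle.trk' is a placeholder path:

import numpy as np
from nibabel import trackvis  # legacy API required by load_tractogram above
from dipy.tracking.streamline import length

T = load_tractogram('bundle.trk', threshold_short_streamlines=10.0)
print('%d streamlines of at least 10 mm' % len(T))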
Example #6
File: tools.py Project: yuzheww/scilpy
def cut_outside_of_mask_streamlines(sft, binary_mask):
    """ Cut streamlines so their longest segment are within the bounding box
    or a binary mask.
    This function keeps the data_per_point and data_per_streamline.

    Parameters
    ----------
    sft: StatefulTractogram
        The sft to remove invalid points from.

    Returns
    -------
    new_sft : StatefulTractogram
        New object with the invalid points removed from each streamline.
    cutting_counter : int
        Number of streamlines that were cut.
    """
    new_streamlines = []
    length_list = length(sft.streamlines)
    min_len, max_len = min(length_list), max(length_list)
    for i, streamline in enumerate(sft.streamlines):
        # streamline = set_number_of_points(streamline, 100)
        entry_found = False
        last_success = 0
        curr_len = 0
        longest_seq = (0, 0)
        for ind, pos in enumerate(streamline):
            pos = tuple(pos.astype(np.int16))
            if binary_mask[pos]:
                if not entry_found:
                    entry_found = True
                    last_success = ind
                    curr_len = 0
                else:
                    curr_len += 1
                    if curr_len > longest_seq[1] - longest_seq[0]:
                        longest_seq = (last_success, ind)
            else:
                if entry_found:
                    entry_found = False
                    if curr_len > longest_seq[1] - longest_seq[0]:
                        longest_seq = (last_success, ind - 1)
                        curr_len = 0
        if longest_seq[1] != 0:
            new_streamlines.append(streamline[longest_seq[0]:longest_seq[1]])

    new_sft = StatefulTractogram.from_sft(new_streamlines, sft)
    return filter_streamlines_by_length(new_sft,
                                        min_length=min_len,
                                        max_length=max_len)
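
A sketch of how this function might be called. The mask is indexed with integer voxel coordinates, so the sft is assumed to be moved to voxel space first (the to_vox/to_corner calls mirror how scilpy scripts typically prepare an sft); 'tracto.trk' and the mask geometry are placeholders:

import numpy as np
from dipy.io.streamline import load_tractogram

sft = load_tractogram('tracto.trk', 'same')
sft.to_vox()
sft.to_corner()
binary_mask = np.zeros(sft.dimensions, dtype=bool)
binary_mask[20:60, 20:60, 20:60] = True  # keep only a central cube

new_sft = cut_outside_of_mask_streamlines(sft, binary_mask)
print(len(new_sft.streamlines))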
Example #7
File: tools.py Project: yuzheww/scilpy
def resample_streamlines_step_size(sft, step_size):
    """
    Resample streamlines using a fixed step size.

    Parameters
    ----------
    sft: StatefulTractogram
        SFT containing the streamlines to subsample.
    step_size: float
        Size of the new steps, in mm.

    Return
    ------
    resampled_sft: StatefulTractogram
        The resampled streamlines as a sft.
    """

    # Checks
    if step_size == 0:
        raise ValueError("Step size can't be 0!")
    elif step_size < 0.1:
        logging.debug("The value of your step size seems suspiciously low. "
                      "Please check.")
    elif step_size > np.max(sft.voxel_sizes):
        logging.debug("The value of your step size seems suspiciously high. "
                      "Please check.")

    # Make sure we are in world space
    orig_space = sft.space
    sft.to_rasmm()

    # Resampling
    lengths = length(sft.streamlines)
    nb_points = np.ceil(lengths / step_size).astype(int)
    if np.any(nb_points == 1):
        logging.warning("Some streamlines are shorter than the provided "
                        "step size...")
        nb_points[nb_points == 1] = 2
    resampled_streamlines = [
        set_number_of_points(s, n) for s, n in zip(sft.streamlines, nb_points)
    ]

    # Creating sft
    resampled_sft = _warn_and_save(resampled_streamlines, sft)

    # Return to original space
    resampled_sft.to_space(orig_space)

    return resampled_sft
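
A usage sketch with a toy sft. Note the function relies on the module-private _warn_and_save helper, so this only runs inside that module; the reference image and streamline are made up:

import numpy as np
import nibabel as nib
from dipy.io.stateful_tractogram import StatefulTractogram, Space

ref = nib.Nifti1Image(np.zeros((16, 16, 16), dtype=np.float32), np.eye(4))
streamlines = [np.array([[0., 0., 0.], [10., 0., 0.]], dtype=np.float32)]  # 10 mm
sft = StatefulTractogram(streamlines, ref, Space.RASMM)

resampled = resample_streamlines_step_size(sft, step_size=2.0)
print(len(resampled.streamlines[0]))  # 5 points: ceil(10 mm / 2 mm)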
Example #8
File: tools.py Project: CHrlS98/scilpy
def filter_streamlines_by_length(sft, min_length=0., max_length=np.inf):
    """
    Filter streamlines using minimum and max length.

    Parameters
    ----------
    sft: StatefulTractogram
        SFT containing the streamlines to filter.
    min_length: float
        Minimum length of streamlines, in mm.
    max_length: float
        Maximum length of streamlines, in mm.

    Return
    ------
    filtered_sft : StatefulTractogram
        A tractogram of the streamlines within the given length range.
    """

    # Make sure we are in world space
    orig_space = sft.space
    sft.to_rasmm()

    if sft.streamlines:
        # Compute streamlines lengths
        lengths = length(sft.streamlines)

        # Filter lengths
        filter_stream = np.logical_and(lengths >= min_length,
                                       lengths <= max_length)
    else:
        filter_stream = []

    filtered_streamlines = list(
        np.asarray(sft.streamlines, dtype=object)[filter_stream])
    filtered_data_per_point = sft.data_per_point[filter_stream]
    filtered_data_per_streamline = sft.data_per_streamline[filter_stream]

    # Create final sft
    filtered_sft = StatefulTractogram.from_sft(
        filtered_streamlines,
        sft,
        data_per_point=filtered_data_per_point,
        data_per_streamline=filtered_data_per_streamline)

    # Return to original space
    filtered_sft.to_space(orig_space)

    return filtered_sft
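
A usage sketch with made-up bounds; only the long streamline survives:

import numpy as np
import nibabel as nib
from dipy.io.stateful_tractogram import StatefulTractogram, Space

ref = nib.Nifti1Image(np.zeros((16, 16, 16), dtype=np.float32), np.eye(4))
streamlines = [np.array([[0., 0., 0.], [5., 0., 0.]], dtype=np.float32),
               np.array([[0., 0., 0.], [40., 0., 0.], [80., 0., 0.]],
                        dtype=np.float32)]
sft = StatefulTractogram(streamlines, ref, Space.RASMM)

filtered = filter_streamlines_by_length(sft, min_length=20., max_length=200.)
print(len(filtered.streamlines))  # 1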
Example #9
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.input])

    tractogram_file = nib.streamlines.load(args.input)
    streamlines = tractogram_file.streamlines

    lengths = list(length(streamlines))

    print(json.dumps({'min_length': float(np.min(lengths)),
                      'mean_length': float(np.mean(lengths)),
                      'max_length': float(np.max(lengths)),
                      'std_length': float(np.std(lengths))},
                     indent=args.indent, sort_keys=args.sort_keys))
Example #10
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_bundle)

    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    streamlines = sft.streamlines
    lengths = [0]
    if streamlines:
        lengths = list(length(streamlines))

    print(json.dumps({'min_length': float(np.min(lengths)),
                      'mean_length': float(np.mean(lengths)),
                      'max_length': float(np.max(lengths)),
                      'std_length': float(np.std(lengths))},
                     indent=args.indent, sort_keys=args.sort_keys))
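
Both mains reduce to the same few lines: compute per-streamline lengths and dump summary statistics as JSON. A standalone sketch with toy streamlines:

import json
import numpy as np
from dipy.tracking.streamline import length

streamlines = [np.array([[0., 0., 0.], [i * 10., 0., 0.]]) for i in range(1, 4)]
lengths = list(length(streamlines))

print(json.dumps({'min_length': float(np.min(lengths)),
                  'mean_length': float(np.mean(lengths)),
                  'max_length': float(np.max(lengths)),
                  'std_length': float(np.std(lengths))}, indent=4))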
Example #11
File: tools.py Project: yuzheww/scilpy
def smooth_line_gaussian(streamline, sigma):
    if sigma < 0.00001:
        raise ValueError("Can't have a 0 sigma with gaussian.")

    nb_points = int(length(streamline))
    if nb_points < 2:
        logging.debug('Streamline shorter than 1mm, corner cases possible.')
        nb_points = 2
    sampled_streamline = set_number_of_points(streamline, nb_points)

    x, y, z = sampled_streamline.T
    x3 = gaussian_filter1d(x, sigma)
    y3 = gaussian_filter1d(y, sigma)
    z3 = gaussian_filter1d(z, sigma)
    smoothed_streamline = np.asarray([x3, y3, z3]).T

    # Ensure first and last point remain the same
    smoothed_streamline[0] = streamline[0]
    smoothed_streamline[-1] = streamline[-1]

    return smoothed_streamline
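
A usage sketch on a noisy toy streamline, assuming the module imports this snippet relies on (dipy's length and set_number_of_points, scipy's gaussian_filter1d); the sigma is arbitrary:

import numpy as np

rng = np.random.default_rng(0)
line = np.linspace([0., 0., 0.], [50., 0., 0.], 51)
noisy = (line + rng.normal(scale=0.5, size=line.shape)).astype(np.float32)

smoothed = smooth_line_gaussian(noisy, sigma=5.0)
print(np.allclose(smoothed[0], noisy[0]),
      np.allclose(smoothed[-1], noisy[-1]))  # True True: endpoints preserved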
Example #12
File: tools.py Project: yuzheww/scilpy
def smooth_line_spline(streamline, sigma, nb_ctrl_points):
    if sigma < 0.00001:
        raise ValueError("Can't have a 0 sigma with spline.")

    nb_points = int(length(streamline))
    if nb_points < 2:
        logging.debug('Streamline shorter than 1mm, corner cases possible.')

    if nb_ctrl_points < 3:
        nb_ctrl_points = 3

    sampled_streamline = set_number_of_points(streamline, nb_ctrl_points)

    tck, u = splprep(sampled_streamline.T, s=sigma)
    smoothed_streamline = splev(np.linspace(0, 1, 99), tck)
    smoothed_streamline = np.squeeze(np.asarray([smoothed_streamline]).T)

    # Ensure first and last point remain the same
    smoothed_streamline[0] = streamline[0]
    smoothed_streamline[-1] = streamline[-1]

    return smoothed_streamline
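
The spline variant follows the same pattern; nb_ctrl_points trades smoothness against fidelity, and the output is always evaluated at 99 points. Same toy setup as above:

import numpy as np

rng = np.random.default_rng(0)
line = np.linspace([0., 0., 0.], [50., 0., 0.], 51)
noisy = (line + rng.normal(scale=0.5, size=line.shape)).astype(np.float32)

smoothed = smooth_line_spline(noisy, sigma=2.0, nb_ctrl_points=10)
print(smoothed.shape)  # (99, 3)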
Example #13
def cal_cov_sub(sub_name):

    fiber_sub = np.load(os.path.join('..', 'cci_clean_data', sub_name))
    fiber_sub = fiber_sub['arr_0']
    cci = cci_dict[os.path.splitext(sub_name)[0]]
    mask_score = mask_score_dict[os.path.splitext(sub_name)[0]]

    streamlines_evl = Streamlines()

    for i in range(np.shape(fiber_sub)[0]):
        tmp = fiber_sub[i]
        tmp = zero_remove(tmp)
        streamlines_evl.append(tmp)

    lengths = np.array(list(length(streamlines_evl)))

    #==============
    fiber_one = fiber_sub[0].transpose()
    fiber_one_std = preprocessing.scale(fiber_one)

    covariance = np.cov(fiber_one_std)
    result = np.array([
        mask_score[0], cci[0], lengths[0], covariance[0, 0], covariance[1, 1],
        covariance[2, 2], covariance[0, 1], covariance[0, 2], covariance[1, 2]
    ]).transpose()

    for i in range(1, np.shape(fiber_sub)[0]):
        fiber_one = fiber_sub[i].transpose()
        fiber_one_std = preprocessing.scale(fiber_one)

        covariance = np.cov(fiber_one_std)
        #tmp = np.array([covariance[0, 0], covariance[1, 1], covariance[2, 2]]).transpose()
        tmp = np.array([
            mask_score[i], cci[i], lengths[i], covariance[0, 0],
            covariance[1, 1], covariance[2, 2], covariance[0, 1],
            covariance[0, 2], covariance[1, 2]
        ]).transpose()
        result = np.vstack((result, tmp))
    return result
Example #14
File: tools.py Project: arnaudbore/scilpy
def filter_streamlines_by_length(sft, min_length=0., max_length=np.inf):
    """
    Filter streamlines using minimum and max length.

    Parameters
    ----------
    sft: StatefulTractogram
        SFT containing the streamlines to filter.
    min_length: float
        Minimum length of streamlines, in mm.
    max_length: float
        Maximum length of streamlines, in mm.

    Return
    ------
    filtered_sft : StatefulTractogram
        A tractogram of the streamlines within the given length range.
    """

    # Make sure we are in world space
    orig_space = sft.space
    sft.to_rasmm()

    if sft.streamlines:
        # Compute streamlines lengths
        lengths = length(sft.streamlines)

        # Filter lengths
        filter_stream = np.logical_and(lengths >= min_length,
                                       lengths <= max_length)
    else:
        filter_stream = []

    filtered_sft = sft[filter_stream]

    # Return to original space
    filtered_sft.to_space(orig_space)

    return filtered_sft
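
Unlike Example #8, this version delegates the filtering to StatefulTractogram's own indexing, which slices the streamlines and both data_per_* dictionaries in one step. A sketch of that idiom with toy data:

import numpy as np
import nibabel as nib
from dipy.io.stateful_tractogram import StatefulTractogram, Space

ref = nib.Nifti1Image(np.zeros((16, 16, 16), dtype=np.float32), np.eye(4))
streamlines = [np.array([[0., 0., 0.], [5., 0., 0.]], dtype=np.float32),
               np.array([[0., 0., 0.], [40., 0., 0.], [80., 0., 0.]],
                        dtype=np.float32)]
sft = StatefulTractogram(streamlines, ref, Space.RASMM)

keep = np.array([False, True])
print(len(sft[keep].streamlines))  # 1: boolean masks select whole streamlines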
Example #15
    del data, img, labels, labels_img, csd_peaks, csd_model
    gc.collect()
    print('data, img, labels, labels_img, csd_peaks, csd_model deleted to save memory')
    print('generating streamlines from 20000 seeds for now because of kill 9 memory error if we seed from all voxels')

    classifier = BinaryTissueClassifier(mask)
    streamline_generator = LocalTracking(prob_dg,
                                         classifier,
                                         seeds,
                                         affine,
                                         step_size=.5)
    affine = streamline_generator.affine

    streamlines = Streamlines(streamline_generator)
    et4 = time.time() - st4
    lengths = [int(length(sl)) for sl in streamlines]
    print('generating streamlines finished, the length is {}~{}, running time is {}'.format(
        np.min(lengths), np.max(lengths), et4))

    del bm, mask, fod_coeff, prob_dg, classifier, lengths
    gc.collect()
    print('bm, mask, fod_coeff, prob_dg, classifier, lengths deleted to save memory')

    # Cut short streamlines
    streamlines = [sl for sl in streamlines if int(length(sl)) > 3]
    print('we get {} streamlines'.format(len(streamlines)))
    print('cutting short streamlines finished')

    save_trk(outpath + 'connectivity_csv/' + runno + '_streamlines.trk',
             streamlines=streamlines,
             affine=np.eye(4))
Example #16
def cluster_confidence(streamlines, max_mdf=5, subsample=12, power=1,
                       override=False):
    """ Computes the cluster confidence index (cci), which is an
    estimation of the support a set of streamlines gives to
    a particular pathway.

    Ex: A single streamline with no others in the dataset
    following a similar pathway has a low cci. A streamline
    in a bundle of 100 streamlines that follow similar
    pathways has a high cci.

    See: Jordan et al. 2017
    (Based on streamline MDF distance from Garyfallidis et al. 2012)

    Parameters
    ----------
    streamlines : list of 2D (N, 3) arrays
        A sequence of streamlines of length N (# streamlines)
    max_mdf : int
        The maximum MDF distance (mm) that will be considered a
        "supporting" streamline and included in cci calculation
    subsample: int
        The number of points that are considered for each streamline
        in the calculation. To save on calculation time, each
        streamline is subsampled to subsampleN points.
    power: int
        The power to which the MDF distance for each streamline
        will be raised to determine how much it contributes to
        the cci. High values of power make the contribution value
        degrade much faster. Example: a streamline with 5mm MDF
        similarity contributes 1/5 to the cci if power is 1, but
        only contributes 1/5^2 = 1/25 if power is 2.
    override: bool, False by default
        override means that the cci calculation will still occur even
        though there are short streamlines in the dataset that may alter
        expected behaviour.

    Returns
    -------
    Returns an array of CCI scores

    References
    ----------
    [Jordan17] Jordan K. Et al., Cluster Confidence Index: A Streamline-Wise
    Pathway Reproducibility Metric for Diffusion-Weighted MRI Tractography,
    Journal of Neuroimaging, vol 28, no 1, 2017.

    [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
    tractography simplification, Frontiers in Neuroscience,
    vol 6, no 175, 2012.

    """

    # error if any streamlines are shorter than 20mm
    lengths = list(length(streamlines))
    if min(lengths) < 20 and not override:
        raise ValueError('Short streamlines found. We recommend removing them.'
                         ' To continue without removing short streamlines set'
                         ' override=True')

    # calculate the pairwise MDF distance between all streamlines in dataset
    subsamp_sls = set_number_of_points(streamlines, subsample)

    cci_score_mtrx = np.zeros([len(subsamp_sls)])

    for i, sl in enumerate(subsamp_sls):
        mdf_mx = bundles_distances_mdf([subsamp_sls[i]], subsamp_sls)
        if (mdf_mx == 0).sum() > 1:
            raise ValueError('Identical streamlines. CCI calculation invalid')
        mdf_mx_oi = (mdf_mx > 0) & (mdf_mx < max_mdf) & ~ np.isnan(mdf_mx)
        mdf_mx_oi_only = mdf_mx[mdf_mx_oi]
        cci_score = np.sum(np.divide(1, np.power(mdf_mx_oi_only, power)))
        cci_score_mtrx[i] = cci_score

    return cci_score_mtrx
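
A sketch of CCI on a toy bundle, assuming the imports this function uses (dipy's length, set_number_of_points and bundles_distances_mdf): twenty near-parallel 30 mm streamlines plus one distant outlier, which should receive the lowest score.

import numpy as np

bundle = [np.array([[0., y, 0.], [15., y, 0.], [30., y, 0.]],
                   dtype=np.float32) for y in np.linspace(0, 2, 20)]
outlier = [np.array([[0., 100., 0.], [15., 100., 0.], [30., 100., 0.]],
                    dtype=np.float32)]

cci = cluster_confidence(bundle + outlier)
print(cci[-1] < cci[:-1].min())  # True: no streamline supports the outlier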
Example #17
def _save_results_wrapper(args, tmp_dir, ext, hdf5_file, offsets_list, sub_dir,
                          is_commit_2):
    out_dir = os.path.join(args.out_dir, sub_dir)
    os.mkdir(out_dir)
    # Simplifying output for streamlines and cleaning output directory
    commit_results_dir = os.path.join(tmp_dir.name,
                                      'Results_StickZeppelinBall')
    streamline_weights = np.loadtxt(
        os.path.join(commit_results_dir, 'streamline_weights.txt'))

    sft = load_tractogram(args.in_tractogram, 'same')
    length_list = length(sft.streamlines)
    np.savetxt(os.path.join(commit_results_dir, 'streamlines_length.txt'),
               length_list)
    np.savetxt(
        os.path.join(commit_results_dir, 'streamline_weights_by_length.txt'),
        streamline_weights * length_list)

    if ext == '.h5':
        new_filename = os.path.join(commit_results_dir, 'decompose_commit.h5')
        with h5py.File(new_filename, 'w') as new_hdf5_file:
            new_hdf5_file.attrs['affine'] = sft.affine
            new_hdf5_file.attrs['dimensions'] = sft.dimensions
            new_hdf5_file.attrs['voxel_sizes'] = sft.voxel_sizes
            new_hdf5_file.attrs['voxel_order'] = sft.voxel_order
            # Assign the weights into the hdf5, while respecting the ordering of
            # connections/streamlines
            logging.debug('Adding commit weights to {}.'.format(new_filename))
            for i, key in enumerate(list(hdf5_file.keys())):
                new_group = new_hdf5_file.create_group(key)
                old_group = hdf5_file[key]
                tmp_streamline_weights = \
                    streamline_weights[offsets_list[i]:offsets_list[i+1]]

                essential_ind = np.where(tmp_streamline_weights > 0)[0]
                tmp_streamline_weights = tmp_streamline_weights[essential_ind]

                tmp_streamlines = reconstruct_streamlines(
                    old_group['data'],
                    old_group['offsets'],
                    old_group['lengths'],
                    indices=essential_ind)
                tmp_length_list = length(tmp_streamlines)
                # Replacing the data with the one above the threshold
                # Safe since this hdf5 was a copy in the first place
                new_group.create_dataset('data',
                                         data=tmp_streamlines.get_data(),
                                         dtype=np.float32)
                new_group.create_dataset('offsets',
                                         data=tmp_streamlines._offsets,
                                         dtype=np.int64)
                new_group.create_dataset('lengths',
                                         data=tmp_streamlines._lengths,
                                         dtype=np.int32)

                for dps_key in hdf5_file[key].keys():
                    if dps_key not in ['data', 'offsets', 'lengths']:
                        new_group.create_dataset(
                            dps_key,
                            data=hdf5_file[key][dps_key][essential_ind])

                dps_key = 'commit2_weights' if is_commit_2 else \
                    'commit1_weights'
                dps_key_tot = 'tot_commit2_weights' if is_commit_2 else \
                    'tot_commit1_weights'
                new_group.create_dataset(dps_key, data=tmp_streamline_weights)
                new_group.create_dataset(dps_key_tot,
                                         data=tmp_streamline_weights *
                                         tmp_length_list)

    files = os.listdir(commit_results_dir)
    for f in files:
        shutil.copy(os.path.join(commit_results_dir, f), out_dir)

    dps_key = 'commit2_weights' if is_commit_2 else \
        'commit1_weights'
    dps_key_tot = 'tot_commit2_weights' if is_commit_2 else \
        'tot_commit1_weights'
    # Reload is needed because of COMMIT handling its file by itself
    sft.data_per_streamline[dps_key] = streamline_weights
    sft.data_per_streamline[dps_key_tot] = streamline_weights * length_list

    essential_ind = np.where(streamline_weights > 0)[0]
    nonessential_ind = np.where(streamline_weights <= 0)[0]
    logging.debug('{} essential streamlines were kept'.format(
        len(essential_ind)))
    logging.debug('{} nonessential streamlines were kept'.format(
        len(nonessential_ind)))

    save_tractogram(sft[essential_ind],
                    os.path.join(out_dir, 'essential_tractogram.trk'))
    save_tractogram(sft[nonessential_ind],
                    os.path.join(out_dir, 'nonessential_tractogram.trk'))
    if args.keep_whole_tractogram:
        output_filename = os.path.join(out_dir, 'tractogram.trk')
        logging.debug(
            'Saving tractogram with weights as {}'.format(output_filename))
        save_tractogram(sft, output_filename)
Example #18
def run(context):

    ####################################################
    # Get the path to input files  and other parameter #
    ####################################################
    analysis_data = context.fetch_analysis_data()
    settings = analysis_data['settings']
    postprocessing = settings['postprocessing']
    dataset = settings['dataset']

    if dataset == "HCPL":
        dwi_file_handle = context.get_files('input', modality='HARDI')[0]
        dwi_file_path = dwi_file_handle.download('/root/')

        bvalues_file_handle = context.get_files(
            'input', reg_expression='.*prep.bvalues.hcpl.txt')[0]
        bvalues_file_path = bvalues_file_handle.download('/root/')
        bvecs_file_handle = context.get_files(
            'input', reg_expression='.*prep.gradients.hcpl.txt')[0]
        bvecs_file_path = bvecs_file_handle.download('/root/')
    elif dataset == "DSI":
        dwi_file_handle = context.get_files('input', modality='DSI')[0]
        dwi_file_path = dwi_file_handle.download('/root/')
        bvalues_file_handle = context.get_files(
            'input', reg_expression='.*prep.bvalues.txt')[0]
        bvalues_file_path = bvalues_file_handle.download('/root/')
        bvecs_file_handle = context.get_files(
            'input', reg_expression='.*prep.gradients.txt')[0]
        bvecs_file_path = bvecs_file_handle.download('/root/')
    else:
        context.set_progress(message='Wrong dataset parameter')

    inject_file_handle = context.get_files(
        'input', reg_expression='.*prep.inject.nii.gz')[0]
    inject_file_path = inject_file_handle.download('/root/')

    VUMC_ROIs_file_handle = context.get_files(
        'input', reg_expression='.*VUMC_ROIs.nii.gz')[0]
    VUMC_ROIs_file_path = VUMC_ROIs_file_handle.download('/root/')

    ###############################
    # _____ _____ _______     __  #
    # |  __ \_   _|  __ \ \   / / #
    # | |  | || | | |__) \ \_/ /  #
    # | |  | || | |  ___/ \   /   #
    # | |__| || |_| |      | |    #
    # |_____/_____|_|      |_|    #
    #                             #
    ###############################

    ########################################################################################
    #  _______             _          __  __   _______             _     __                #
    # |__   __|           | |        |  \/  | |__   __|           | |   / _|               #
    #    | |_ __ __ _  ___| | ___   _| \  / | ___| |_ __ __ _  ___| | _| |_ __ _  ___ ___  #
    #    | | '__/ _` |/ __| |/ / | | | |\/| |/ __| | '__/ _` |/ __| |/ /  _/ _` |/ __/ _ \ #
    #    | | | | (_| | (__|   <| |_| | |  | | (__| | | | (_| | (__|   <| || (_| | (_|  __/ #
    #    |_|_|  \__,_|\___|_|\_\\__, |_|  |_|\___|_|_|  \__,_|\___|_|\_\_| \__,_|\___\___| #
    #                            __/ |                                                     #
    #                           |___/                                                      #
    #                                                                                      #
    #                                                                                      #
    #                               IronTract Team                                         #
    ########################################################################################

    #################
    # Load the data #
    #################
    dwi_img = nib.load(dwi_file_path)
    bvals, bvecs = read_bvals_bvecs(bvalues_file_path,
                                    bvecs_file_path)
    gtab = gradient_table(bvals, bvecs)

    ############################################
    # Extract the brain mask from the b0 image #
    ############################################
    _, brain_mask = median_otsu(dwi_img.get_data()[:, :, :, 0],
                                median_radius=2, numpass=1)

    ##################################################################
    # Fit the tensor model and compute the fractional anisotropy map #
    ##################################################################
    context.set_progress(message='Processing voxel-wise DTI metrics.')
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(dwi_img.get_data(), mask=brain_mask)
    FA = fractional_anisotropy(tenfit.evals)
    stopping_criterion = ThresholdStoppingCriterion(FA, 0.2)

    sphere = get_sphere("repulsion724")
    seed_mask_img = nib.load(inject_file_path)
    affine = seed_mask_img.affine
    seeds = utils.random_seeds_from_mask(seed_mask_img.get_data(),
                                         affine,
                                         seed_count_per_voxel=True,
                                         seeds_count=5000)

    if dataset == "HCPL":
        ################################################
        # Compute Fiber Orientation Distribution (CSD) #
        ################################################
        context.set_progress(message='Processing voxel-wise FOD estimation.')

        response, _ = auto_response_ssst(gtab, dwi_img.get_data(),
                                         roi_radii=10, fa_thr=0.7)
        csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=8)
        csd_fit = csd_model.fit(dwi_img.get_data(), mask=brain_mask)
        shm = csd_fit.shm_coeff

        prob_dg = ProbabilisticDirectionGetter.from_shcoeff(shm,
                                                            max_angle=20.,
                                                            sphere=sphere,
                                                            pmf_threshold=0.1)
    elif dataset == "DSI":
        context.set_progress(message='Processing voxel-wise DSI estimation.')
        dsmodel = DiffusionSpectrumModel(gtab)
        dsfit = dsmodel.fit(dwi_img.get_data())
        ODFs = dsfit.odf(sphere)
        prob_dg = ProbabilisticDirectionGetter.from_pmf(ODFs,
                                                        max_angle=20.,
                                                        sphere=sphere,
                                                        pmf_threshold=0.01)

    ###########################################
    # Compute DIPY Probabilistic Tractography #
    ###########################################
    context.set_progress(message='Processing tractography.')
    streamline_generator = LocalTracking(prob_dg, stopping_criterion, seeds,
                                         affine, step_size=.2, max_cross=1)
    streamlines = Streamlines(streamline_generator)
    # sft = StatefulTractogram(streamlines, seed_mask_img, Space.RASMM)
    # streamlines_file_path = "/root/streamlines.trk"
    # save_trk(sft, streamlines_file_path)

    ###########################################################################
    # Compute 3D volumes for the IronTract Challenge. For 'EPFL', we only     #
    # keep streamlines with length > 1mm. We compute the visitation  count    #
    # image and apply a small gaussian smoothing. The gaussian smoothing      #
    # is especially usefull to increase voxel coverage of deterministic       #
    # algorithms. The log of the smoothed visitation count map is then        #
    # iteratively thresholded producing 200 volumes/operation points.         #
    # For VUMC, additional streamline filtering is done using anatomical      #
    # priors (keeping only streamlines that intersect with at least one ROI). #
    ###########################################################################
    if postprocessing in ["EPFL", "ALL"]:
        context.set_progress(message='Processing density map (EPFL)')
        volume_folder = "/root/vol_epfl"
        output_epfl_zip_file_path = "/root/TrackyMcTrackface_EPFL_example.zip"
        os.mkdir(volume_folder)
        lengths = length(streamlines)
        streamlines = streamlines[lengths > 1]
        density = utils.density_map(streamlines, affine, seed_mask_img.shape)
        density = scipy.ndimage.gaussian_filter(density.astype("float32"), 0.5)

        log_density = np.log10(density + 1)
        max_density = np.max(log_density)
        for i, t in enumerate(np.arange(0, max_density, max_density / 200)):
            nbr = str(i)
            nbr = nbr.zfill(3)
            mask = log_density >= t
            vol_filename = os.path.join(volume_folder,
                                        "vol" + nbr + "_t" + str(t) + ".nii.gz")
            nib.Nifti1Image(mask.astype("int32"), affine,
                            seed_mask_img.header).to_filename(vol_filename)
        shutil.make_archive(output_epfl_zip_file_path[:-4], 'zip', volume_folder)

    if postprocessing in ["VUMC", "ALL"]:
        context.set_progress(message='Processing density map (VUMC)')
        ROIs_img = nib.load(VUMC_ROIs_file_path)
        volume_folder = "/root/vol_vumc"
        output_vumc_zip_file_path = "/root/TrackyMcTrackface_VUMC_example.zip"
        os.mkdir(volume_folder)
        lengths = length(streamlines)
        streamlines = streamlines[lengths > 1]

        rois = ROIs_img.get_fdata().astype(int)
        _, grouping = utils.connectivity_matrix(streamlines, affine, rois,
                                                inclusive=True,
                                                return_mapping=True,
                                                mapping_as_streamlines=False)
        streamlines = streamlines[grouping[(0, 1)]]

        density = utils.density_map(streamlines, affine, seed_mask_img.shape)
        density = scipy.ndimage.gaussian_filter(density.astype("float32"), 0.5)

        log_density = np.log10(density + 1)
        max_density = np.max(log_density)
        for i, t in enumerate(np.arange(0, max_density, max_density / 200)):
            nbr = str(i)
            nbr = nbr.zfill(3)
            mask = log_density >= t
            vol_filename = os.path.join(volume_folder,
                                        "vol" + nbr + "_t" + str(t) + ".nii.gz")
            nib.Nifti1Image(mask.astype("int32"), affine,
                            seed_mask_img.header).to_filename(vol_filename)
        shutil.make_archive(output_vumc_zip_file_path[:-4], 'zip', volume_folder)

    ###################
    # Upload the data #
    ###################
    context.set_progress(message='Uploading results...')
    #context.upload_file(fa_file_path, 'fa.nii.gz')
    # context.upload_file(fod_file_path, 'fod.nii.gz')
    # context.upload_file(streamlines_file_path, 'streamlines.trk')
    if postprocessing in ["EPFL", "ALL"]:
        context.upload_file(output_epfl_zip_file_path,
                            'TrackyMcTrackface_' + dataset +'_EPFL.zip')
    if postprocessing in ["VUMC", "ALL"]:
        context.upload_file(output_vumc_zip_file_path,
                            'TrackyMcTrackface_' + dataset +'_VUMC.zip')
Example #19
def main():
    parser = _build_args_parser()
    args = parser.parse_args()

    if args.isVerbose:
        logging.basicConfig(level=logging.DEBUG)

    assert_inputs_exist(parser, [
        args.sh_file, args.seed_file, args.map_include_file,
        args.map_exclude_file
    ])
    assert_outputs_exist(parser, args, [args.output_file])

    if not nib.streamlines.is_supported(args.output_file):
        parser.error('Invalid output streamline file format (must be trk or ' +
                     'tck): {0}'.format(args.output_file))

    if not args.min_length > 0:
        parser.error('minL must be > 0, {}mm was provided.'.format(
            args.min_length))
    if args.max_length < args.min_length:
        parser.error(
            'maxL must be greater than minL (minL={}mm, maxL={}mm).'.format(
                args.min_length, args.max_length))

    if args.compress:
        if args.compress < 0.001 or args.compress > 1:
            logging.warning(
                'You are using an error rate of {}.\nWe recommend setting it '
                'between 0.001 and 1.\n0.001 will do almost nothing to the '
                'tracts while 1 will highly compress/linearize the tracts'.
                format(args.compress))

    if args.particles <= 0:
        parser.error('--particles must be >= 1.')

    if args.back_tracking <= 0:
        parser.error('PFT backtracking distance must be > 0.')

    if args.forward_tracking <= 0:
        parser.error('PFT forward tracking distance must be > 0.')

    if args.npv and args.npv <= 0:
        parser.error('Number of seeds per voxel must be > 0.')

    if args.nt and args.nt <= 0:
        parser.error('Total number of seeds must be > 0.')

    fodf_sh_img = nib.load(args.sh_file)
    if not np.allclose(np.mean(fodf_sh_img.header.get_zooms()[:3]),
                       fodf_sh_img.header.get_zooms()[0],
                       atol=1.e-3):
        parser.error(
            'SH file is not isotropic. Tracking cannot be run robustly.')

    tracking_sphere = HemiSphere.from_sphere(get_sphere('repulsion724'))

    # Check if sphere is unit, since we couldn't find such check in Dipy.
    if not np.allclose(np.linalg.norm(tracking_sphere.vertices, axis=1), 1.):
        raise RuntimeError('Tracking sphere should be unit normed.')

    sh_basis = args.sh_basis

    if args.algo == 'det':
        dgklass = DeterministicMaximumDirectionGetter
    else:
        dgklass = ProbabilisticDirectionGetter

    theta = get_theta(args.theta, args.algo)

    # Reminder for the future:
    # pmf_threshold == clip pmf under this
    # relative_peak_threshold is for initial directions filtering
    # min_separation_angle is the initial separation angle for peak extraction
    dg = dgklass.from_shcoeff(fodf_sh_img.get_data().astype(np.double),
                              max_angle=theta,
                              sphere=tracking_sphere,
                              basis_type=sh_basis,
                              pmf_threshold=args.sf_threshold,
                              relative_peak_threshold=args.sf_threshold_init)

    map_include_img = nib.load(args.map_include_file)
    map_exclude_img = nib.load(args.map_exclude_file)
    voxel_size = np.average(map_include_img.get_header()['pixdim'][1:4])

    tissue_classifier = None
    if not args.act:
        tissue_classifier = CmcTissueClassifier(map_include_img.get_data(),
                                                map_exclude_img.get_data(),
                                                step_size=args.step_size,
                                                average_voxel_size=voxel_size)
    else:
        tissue_classifier = ActTissueClassifier(map_include_img.get_data(),
                                                map_exclude_img.get_data())

    if args.npv:
        nb_seeds = args.npv
        seed_per_vox = True
    elif args.nt:
        nb_seeds = args.nt
        seed_per_vox = False
    else:
        nb_seeds = 1
        seed_per_vox = True

    voxel_size = fodf_sh_img.header.get_zooms()[0]
    vox_step_size = args.step_size / voxel_size
    seed_img = nib.load(args.seed_file)
    seeds = track_utils.random_seeds_from_mask(
        seed_img.get_data(),
        seeds_count=nb_seeds,
        seed_count_per_voxel=seed_per_vox,
        random_seed=args.seed)

    # Note that max steps is used once for the forward pass, and
    # once for the backwards. This doesn't, in fact, control the real
    # max length
    max_steps = int(args.max_length / args.step_size) + 1
    pft_streamlines = ParticleFilteringTracking(
        dg,
        tissue_classifier,
        seeds,
        np.eye(4),
        max_cross=1,
        step_size=vox_step_size,
        maxlen=max_steps,
        pft_back_tracking_dist=args.back_tracking,
        pft_front_tracking_dist=args.forward_tracking,
        particle_count=args.particles,
        return_all=args.keep_all,
        random_seed=args.seed)

    scaled_min_length = args.min_length / voxel_size
    scaled_max_length = args.max_length / voxel_size
    filtered_streamlines = (
        s for s in pft_streamlines
        if scaled_min_length <= length(s) <= scaled_max_length)
    if args.compress:
        filtered_streamlines = (compress_streamlines(s, args.compress)
                                for s in filtered_streamlines)

    tractogram = LazyTractogram(lambda: filtered_streamlines,
                                affine_to_rasmm=seed_img.affine)

    filetype = nib.streamlines.detect_format(args.output_file)
    header = create_header_from_anat(seed_img, base_filetype=filetype)

    # Use generator to save the streamlines on-the-fly
    nib.streamlines.save(tractogram, args.output_file, header=header)
Example #20
def run(context):

    ####################################################
    # Get the path to input files  and other parameter #
    ####################################################
    context.set_progress(message='Set path.')
    os.environ[
        'PATH'] = os.environ['PATH'] + ':/root/mrtrix3-3.0_RC3_latest/bin'
    os.environ['PATH'] = os.environ['PATH'] + ':/root/antsbin/bin'

    context.set_progress(message='Retrieving data.')
    analysis_data = context.fetch_analysis_data()
    settings = analysis_data['settings']
    postprocessing = settings['postprocessing']

    hcpl_dwi_file_handle = context.get_files('input', modality='HARDI')[0]
    hcpl_dwi_file_path = hcpl_dwi_file_handle.download('/root/')

    hcpl_bvalues_file_handle = context.get_files(
        'input', reg_expression='.*prep.bvalues.hcpl.txt')[0]
    hcpl_bvalues_file_path = hcpl_bvalues_file_handle.download('/root/')
    hcpl_bvecs_file_handle = context.get_files(
        'input', reg_expression='.*prep.gradients.hcpl.txt')[0]
    hcpl_bvecs_file_path = hcpl_bvecs_file_handle.download('/root/')

    inject_file_handle = context.get_files(
        'input', reg_expression='.*prep.inject.nii.gz')[0]
    inject_file_path = inject_file_handle.download('/root/')
    seed_mask_img = nib.load(inject_file_path)
    affine = seed_mask_img.affine

    VUMC_ROIs_file_handle = context.get_files(
        'input', reg_expression='.*VUMC_ROIs.nii.gz')[0]
    VUMC_ROIs_file_path = VUMC_ROIs_file_handle.download('/root/')

    #############################
    # Fitting NODDI using AMICO #
    #############################
    context.set_progress(message='Setting up AMICO.')
    amico.core.setup()

    ae = amico.Evaluation('/root/', '.')

    [_, bvecs] = read_bvals_bvecs(None, hcpl_bvecs_file_path)
    bvecs_norm = normalized_vector(bvecs)
    bvecs_norm[0] = [0, 0, 0]
    np.savetxt('/root/grad_norm.txt',
               np.matrix.transpose(bvecs_norm),
               fmt='%.3f')

    amico.util.fsl2scheme(hcpl_bvalues_file_path, '/root/grad_norm.txt',
                          '/root/grad.scheme')

    os.system('dwi2mask -fslgrad ' + hcpl_bvecs_file_path + ' ' +
              hcpl_bvalues_file_path + ' ' + hcpl_dwi_file_path +
              ' /root/mask.nii.gz')

    ae.load_data(dwi_filename='prep.dwi.hcpl.nii.gz',
                 scheme_filename='grad.scheme',
                 mask_filename='mask.nii.gz',
                 b0_thr=30)

    ae.set_model('NODDI')
    ae.generate_kernels()
    ae.load_kernels()
    context.set_progress(message='Fitting NODDI maps.')
    ae.fit()

    ae.save_results()

    ######################################################
    # Computing inclusion/exclusion maps from NODDI maps #
    ######################################################
    context.set_progress(message='Defining masks.')
    os.system(
        'mrcalc /root/AMICO/NODDI/FIT_OD.nii.gz 0.1 -gt ' +
        '/root/AMICO/NODDI/FIT_OD.nii.gz 0.7 -lt -mul /root/wm_mask.nii.gz')

    os.system(
        'mrcalc /root/AMICO/NODDI/FIT_ICVF.nii.gz 0.95 -lt /root/gm_mask.nii.gz'
    )

    os.system(
        'mrcalc /root/AMICO/NODDI/FIT_ISOVF.nii.gz 0 -gt /root/csf_mask.nii.gz'
    )

    ##################################################
    # Doing reconstruction&tracking using TRAMPOLINO #
    ##################################################
    context.set_progress(message='Starting TRAMPOLINO recon&track.')
    os.chdir('/root')
    os.system(
        'trampolino -r results -n mrtrix_workflow recon -i ' +
        hcpl_dwi_file_path + ' ' + '-v ' + hcpl_bvecs_file_path + ' -b ' +
        hcpl_bvalues_file_path + ' ' +
        '--opt bthres:0,mask:wm_mask.nii.gz mrtrix_msmt_csd track ' + '-s ' +
        inject_file_path +
        ' --opt nos:10000,include:gm_mask.nii.gz,exclude:csf_mask.nii.gz ' +
        '--min_length 10,50 --ensemble min_length mrtrix_tckgen ' +
        'convert -r wm_mask.nii.gz tck2trk')

    track = load_tractogram('results/track.trk', 'wm_mask.nii.gz')
    streamlines = track.streamlines

    ###########################################################################
    # Compute 3D volumes for the IronTract Challenge. For 'EPFL', we only     #
    # keep streamlines with length > 1mm. We compute the visitation  count    #
    # image and apply a small gaussian smoothing. The gaussian smoothing      #
    # is especially usefull to increase voxel coverage of deterministic       #
    # algorithms. The log of the smoothed visitation count map is then        #
    # iteratively thresholded producing 200 volumes/operation points.         #
    # For VUMC, additional streamline filtering is done using anatomical      #
    # priors (keeping only streamlines that intersect with at least one ROI). #
    ###########################################################################

    if postprocessing in ['EPFL', 'ALL']:
        context.set_progress(message='Processing density map (EPFL).')
        volume_folder = '/root/vol_epfl'
        output_epfl_zip_file_path = '/root/SpaghettiBeans_EPFL.zip'
        os.mkdir(volume_folder)
        lengths = length(streamlines)
        streamlines = streamlines[lengths > 1]
        density = utils.density_map(streamlines, affine, seed_mask_img.shape)
        density = scipy.ndimage.gaussian_filter(density.astype('float32'), 0.5)

        log_density = np.log10(density + 1)
        max_density = np.max(log_density)
        for i, t in enumerate(np.arange(0, max_density, max_density / 200)):
            nbr = str(i)
            nbr = nbr.zfill(3)
            mask = log_density >= t
            vol_filename = os.path.join(
                volume_folder, 'vol' + nbr + '_t' + str(t) + '.nii.gz')
            nib.Nifti1Image(mask.astype('int32'), affine,
                            seed_mask_img.header).to_filename(vol_filename)
        shutil.make_archive(output_epfl_zip_file_path[:-4], 'zip',
                            volume_folder)

    if postprocessing in ['VUMC', 'ALL']:
        context.set_progress(message='Processing density map (VUMC).')
        ROIs_img = nib.load(VUMC_ROIs_file_path)
        volume_folder = '/root/vol_vumc'
        output_vumc_zip_file_path = '/root/SpaghettiBeans_VUMC.zip'
        os.mkdir(volume_folder)
        lengths = length(streamlines)
        streamlines = streamlines[lengths > 1]

        rois = ROIs_img.get_fdata().astype(int)
        _, grouping = utils.connectivity_matrix(streamlines,
                                                affine,
                                                rois,
                                                inclusive=True,
                                                return_mapping=True,
                                                mapping_as_streamlines=False)
        streamlines = streamlines[grouping[(0, 1)]]

        density = utils.density_map(streamlines, affine, seed_mask_img.shape)
        density = scipy.ndimage.gaussian_filter(density.astype('float32'), 0.5)

        log_density = np.log10(density + 1)
        max_density = np.max(log_density)
        for i, t in enumerate(np.arange(0, max_density, max_density / 200)):
            nbr = str(i)
            nbr = nbr.zfill(3)
            mask = log_density >= t
            vol_filename = os.path.join(
                volume_folder, 'vol' + nbr + '_t' + str(t) + '.nii.gz')
            nib.Nifti1Image(mask.astype('int32'), affine,
                            seed_mask_img.header).to_filename(vol_filename)
        shutil.make_archive(output_vumc_zip_file_path[:-4], 'zip',
                            volume_folder)

    ###################
    # Upload the data #
    ###################

    context.set_progress(message='Uploading results...')
    if postprocessing in ['EPFL', 'ALL']:
        context.upload_file(output_epfl_zip_file_path,
                            'SpaghettiBeans_EPFL.zip')
    if postprocessing in ['VUMC', 'ALL']:
        context.upload_file(output_vumc_zip_file_path,
                            'SpaghettiBeans_VUMC.zip')
Example #21
#%%
thre = np.percentile(mse_idv, 85)
pred_1 = 1 * (mse_idv < thre)


bundle_str = Streamlines()

for i in range(np.shape(bundle)[0]):
    tmp = bundle[i]
    tmp = zero_remove(tmp)
    #tmp = tmp[~np.all(tmp == 0, axis=-1)]
    #tmp = np.around(tmp, decimals=0)
    bundle_str.append(tmp)
    
lengths = length(bundle_str)
len_thre = 40
pred_2 = 1 * (lengths > len_thre)

pred = 1 * ((pred_1 + pred_2) > 1)

data_new = np.delete(bundle, np.where(pred == 0), axis=0)

npz2ply_cleaned(data_new, name)
#%%

import sys
sys.path.append(r'toolkit')
from visualize_score import visualize_streamline, visualize_streamline_removed
Example #22
def run(context):

    ####################################################
    # Get the path to input files  and other parameter #
    ####################################################
    analysis_data = context.fetch_analysis_data()
    settings = analysis_data['settings']
    postprocessing = settings['postprocessing']
    numberOfStreamlines = settings['numberOfStreamlines']
    dataSupportExponent = settings['dataSupportExponent']
    dataset = settings['dataset']

    hcpl_dwi_file_handle = context.get_files('input', modality='HARDI')[0]
    hcpl_dwi_file_path = hcpl_dwi_file_handle.download('/root/')

    hcpl_bvalues_file_handle = context.get_files(
        'input', reg_expression='.*prep.bvalues.hcpl.txt')[0]
    hcpl_bvalues_file_path = hcpl_bvalues_file_handle.download('/root/')
    hcpl_bvecs_file_handle = context.get_files(
        'input', reg_expression='.*prep.gradients.hcpl.txt')[0]
    hcpl_bvecs_file_path = hcpl_bvecs_file_handle.download('/root/')

    dwi_file_handle = context.get_files('input', modality='DSI')[0]
    dwi_file_path = dwi_file_handle.download('/root/')
    bvalues_file_handle = context.get_files(
        'input', reg_expression='.*prep.bvalues.txt')[0]
    bvalues_file_path = bvalues_file_handle.download('/root/')
    bvecs_file_handle = context.get_files(
        'input', reg_expression='.*prep.gradients.txt')[0]
    bvecs_file_path = bvecs_file_handle.download('/root/')

    inject_file_handle = context.get_files(
        'input', reg_expression='.*prep.inject.nii.gz')[0]
    inject_file_path = inject_file_handle.download('/root/')

    VUMC_ROIs_file_handle = context.get_files(
        'input', reg_expression='.*VUMC_ROIs.nii.gz')[0]
    VUMC_ROIs_file_path = VUMC_ROIs_file_handle.download('/root/')

    if dataset == "HCPL":
        acqType = 'hcpl'
        dwi = hcpl_dwi_file_path
        input_bval_file_name = hcpl_bvalues_file_path
        input_bvec_file_name = hcpl_bvecs_file_path
    elif dataset == "DSI":
        acqType = 'dwi'
        dwi = dwi_file_path
        input_bval_file_name = bvalues_file_path
        input_bvec_file_name = bvecs_file_path
    else:
        context.set_progress(message='Wrong dataset parameter')

    #########################################
    # Convert bvals and bvecs to FSL format #
    #########################################

    # Use 1/4 of provided bvals
    bval = '/root/prep.' + acqType + '.bval'
    bvec = '/root/prep.' + acqType + '.bvec'

    tmp = np.loadtxt(input_bval_file_name)
    tmp = np.reshape(tmp, (-1, len(tmp))) / 4
    np.savetxt(bval, tmp, fmt='%.0f', delimiter=' ')

    tmp = np.loadtxt(input_bvec_file_name).transpose()
    np.savetxt(bvec, tmp, fmt='%.5f', delimiter=' ')

    ######################
    # Extract brain mask #
    ######################

    os.system("/miniconda/bin/dwiextract -bzero " + "-fslgrad " + bvec + " " +
              bval + " -force " + dwi + " /root/mask.nii.gz")

    os.system(
        "/miniconda/bin/mrfilter -force /root/mask.nii.gz median /root/mask.nii.gz"
    )
    os.system(
        "/miniconda/bin/mrfilter -force /root/mask.nii.gz smooth /root/mask.nii.gz"
    )
    os.system(
        "/miniconda/bin/mrthreshold -force /root/mask.nii.gz /root/mask.nii.gz"
    )

    #############################
    # Compute response function #
    #############################

    os.system("/miniconda/bin/dwi2response dhollander " + "-fslgrad " + bvec +
              " " + bval + " -force " + " -mask /root/mask.nii.gz " + dwi +
              " /root/wm_response.txt" + " /root/gm_response.txt" +
              " /root/csf_response.txt" + " -voxels /root/responSel.nii.gz")

    #####################
    # Compute FOD image #
    #####################

    os.system("/miniconda/bin/dwi2fod " + "-fslgrad " + bvec + " " + bval +
              " -force " + " -mask /root/mask.nii.gz msmt_csd " + dwi +
              " /root/wm_response.txt /root/wmfod.nii.gz " +
              " /root/gm_response.txt /root/gm.nii.gz " +
              " /root/csf_response.txt /root/csf.nii.gz")

    ################
    # Tractography #
    ################

    # Get tractogram using Trekker (.vtk output)
    os.system("./trekker_linux_x64_v0.7" + " -fod /root/wmfod.nii.gz" +
              " -seed_image /root/mask.nii.gz" +
              " -pathway=stop_at_exit /root/mask.nii.gz" +
              " -pathway=require_entry " + inject_file_path + " -seed_count " +
              numberOfStreamlines + " -dataSupportExponent " +
              dataSupportExponent + " -minFODamp 0.05" +
              " -minRadiusOfCurvature 0.1" + " -probeLength 0.025" +
              " -writeInterval 40" + " -verboseLevel 0" +
              " -output /root/tractogram.vtk")

    ################################
    # Tractogram format conversion #
    ################################

    # Convert .vtk to .trk (the long way, in order to have a smaller docker image)
    os.system(
        "/miniconda/bin/tckconvert -force /root/tractogram.vtk /root/tractogram.tck"
    )
    streamlines = nib.streamlines.load('/root/tractogram.tck').streamlines
    nii = nib.load('/root/mask.nii.gz')
    affine = nii.affine

    ##################
    # Postprocessing #
    ##################

    if postprocessing in ["EPFL", "ALL"]:
        context.set_progress(message='Processing density map (EPFL)')
        volume_folder = "/root/vol_epfl"
        output_epfl_zip_file_path = "/root/X-link_EPFL.zip"
        os.mkdir(volume_folder)
        lengths = length(streamlines)
        streamlines = streamlines[lengths > 1]
        density = utils.density_map(streamlines, affine, nii.shape)
        density = scipy.ndimage.gaussian_filter(density.astype("float32"), 0.5)

        log_density = np.log10(density + 1)
        max_density = np.max(log_density)
        for i, t in enumerate(np.arange(0, max_density, max_density / 200)):
            nbr = str(i)
            nbr = nbr.zfill(3)
            mask = log_density >= t
            vol_filename = os.path.join(
                volume_folder, "vol" + nbr + "_t" + str(t) + ".nii.gz")
            nib.Nifti1Image(mask.astype("int32"), affine,
                            nii.header).to_filename(vol_filename)
        shutil.make_archive(output_epfl_zip_file_path[:-4], 'zip',
                            volume_folder)

    if postprocessing in ["VUMC", "ALL"]:
        context.set_progress(message='Processing density map (VUMC)')
        ROIs_img = nib.load(VUMC_ROIs_file_path)
        volume_folder = "/root/vol_vumc"
        output_vumc_zip_file_path = "/root/X-link_VUMC.zip"
        os.mkdir(volume_folder)
        lengths = length(streamlines)
        streamlines = streamlines[lengths > 1]

        rois = ROIs_img.get_fdata().astype(int)
        _, grouping = utils.connectivity_matrix(streamlines,
                                                affine,
                                                rois,
                                                inclusive=True,
                                                return_mapping=True,
                                                mapping_as_streamlines=False)
        streamlines = streamlines[grouping[(0, 1)]]

        density = utils.density_map(streamlines, affine, nii.shape)
        density = scipy.ndimage.gaussian_filter(density.astype("float32"), 0.5)

        log_density = np.log10(density + 1)
        max_density = np.max(log_density)
        for i, t in enumerate(np.arange(0, max_density, max_density / 200)):
            nbr = str(i)
            nbr = nbr.zfill(3)
            mask = log_density >= t
            vol_filename = os.path.join(
                volume_folder, "vol" + nbr + "_t" + str(t) + ".nii.gz")
            nib.Nifti1Image(mask.astype("int32"), affine,
                            nii.header).to_filename(vol_filename)
        shutil.make_archive(output_vumc_zip_file_path[:-4], 'zip',
                            volume_folder)

    ###################
    # Upload the data #
    ###################
    context.set_progress(message='Uploading results...')
    #context.upload_file(fa_file_path, 'fa.nii.gz')
    # context.upload_file(fod_file_path, 'fod.nii.gz')
    # context.upload_file(streamlines_file_path, 'streamlines.trk')
    if postprocessing in ["EPFL", "ALL"]:
        context.upload_file(output_epfl_zip_file_path,
                            'X-link_' + dataset + '_EPFL.zip')
    if postprocessing in ["VUMC", "ALL"]:
        context.upload_file(output_vumc_zip_file_path,
                            'X-link_' + dataset + '_VUMC.zip')
def main():

    parser = buildArgsParser()
    args = parser.parse_args()

    if args.isVerbose:
        logging.basicConfig(level=logging.DEBUG)

    if not args.not_all:
        if not args.tdi_file:
            args.tdi_file = 'tdi.nii.gz'
        if not args.apm_file:
            args.apm_file = 'apm.nii.gz'
        if not args.cdec_file:
            args.cdec_file = 'cdec.nii.gz'

    arglist = [args.tdi_file, args.apm_file, args.cdec_file]
    if args.not_all and not any(arglist):
        parser.error('When using --not_all, you need to specify at least ' +
                     'one file to output.')
    for out in arglist:
        if out and os.path.isfile(out):
            if args.overwrite:
                logging.info('Overwriting "{0}".'.format(out))
            else:
                parser.error(
                    '"{0}" already exists! Use -f to overwrite it.'.format(
                        out))

    ref = nib.load(args.ref_file)
    ref_res = ref.header['pixdim'][1]
    up_factor = ref_res / args.res
    data_shape = np.array(ref.shape) * up_factor
    data_shape = list(data_shape.astype('int32'))

    logging.info("Reference resolution: " + str(ref_res))
    logging.info("Reference shape: " + str(ref.shape))
    logging.info("Target resolution: " + str(args.res))
    logging.info("Target shape: " + str(data_shape))

    cdec_map = np.zeros(data_shape + [3], dtype='float32')
    tdi_map = np.zeros(data_shape, dtype='float32')
    apm_map = np.zeros(data_shape, dtype='float32')

    tract_format = tc.detect_format(args.tract_file)
    tract = tract_format(args.tract_file)
    streamlines = [i for i in tract]
    streamlines_np = np.array(streamlines, dtype=object)

    for i, streamline in enumerate(streamlines_np):
        if not i % 10000:
            logging.info(str(i) + "/" + str(streamlines_np.shape[0]))

        streamline_length = length(streamline)
        dec_vec = np.array(streamline[0] - streamline[-1])
        dec_vec_norm = np.linalg.norm(dec_vec)
        if dec_vec_norm > 0:
            dec_vec = np.abs(dec_vec / dec_vec_norm)
        else:
            dec_vec[0] = dec_vec[1] = dec_vec[2] = 0

        for point in streamline:
            pos = point / args.res
            ind = tuple(pos.astype('int32'))
            if (ind[0] >= 0 and ind[0] < data_shape[0] and ind[1] >= 0
                    and ind[1] < data_shape[1] and ind[2] >= 0
                    and ind[2] < data_shape[2]):
                tdi_map[ind] += 1
                apm_map[ind] += streamline_length
                cdec_map[ind] += dec_vec

    # divide the sum of streamline lengths by the streamline density,
    # guarding against empty voxels to avoid 0/0 warnings
    apm_map = np.divide(apm_map, tdi_map,
                        out=np.zeros_like(apm_map), where=tdi_map > 0)

    # normalise the cdec map, guarding against zero-norm voxels
    cdec_norm = np.sqrt((cdec_map * cdec_map).sum(axis=3))
    cdec_map = np.divide(cdec_map, cdec_norm[..., np.newaxis],
                         out=np.zeros_like(cdec_map),
                         where=cdec_norm[..., np.newaxis] > 0) * 255

    affine = ref.affine.copy()
    affine[0][0] = affine[1][1] = affine[2][2] = args.res

    if args.tdi_file:
        tdi_img = nib.Nifti1Image(tdi_map, affine)
        tdi_img.header.set_zooms([args.res, args.res, args.res])
        tdi_img.header.set_qform(ref.header.get_qform())
        tdi_img.header.set_sform(ref.header.get_sform())
        tdi_img.to_filename(args.tdi_file)

    if args.apm_file:
        apm_img = nib.Nifti1Image(apm_map, affine)
        apm_img.header.set_zooms([args.res, args.res, args.res])
        apm_img.header.set_qform(ref.header.get_qform())
        apm_img.header.set_sform(ref.header.get_sform())
        apm_img.to_filename(args.apm_file)

    if args.cdec_file:
        cdec_img = nib.Nifti1Image(cdec_map.astype('uint8'), affine)
        cdec_img.header.set_zooms([args.res, args.res, args.res, 1])
        cdec_img.header.set_qform(ref.header.get_qform())
        cdec_img.header.set_sform(ref.header.get_sform())
        cdec_img.to_filename(args.cdec_file)
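The script writes three maps from the same pass over the streamlines: tdi_map counts streamline points per voxel, apm_map accumulates streamline lengths (later divided by the counts to give the average pathlength map), and cdec_map accumulates the absolute, normalised endpoint-to-endpoint vector as an RGB colour. A toy check of that direction encoding, assuming nothing beyond numpy:

import numpy as np

streamline = np.array([[0., 0., 0.], [5., 0., 0.], [10., 10., 0.]])
dec_vec = streamline[0] - streamline[-1]             # endpoint difference
dec_vec = np.abs(dec_vec / np.linalg.norm(dec_vec))  # unit, sign-free
print(dec_vec)  # ~[0.707, 0.707, 0.]: an oblique in-plane direction
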
Example #24
0
def _processing_wrapper(args):
    bundles_dir = args[0]
    in_label, out_label = args[1]
    measures_to_compute = copy.copy(args[2])
    weighted = args[3]
    similarity_directory = None
    if args[4] is not None:
        similarity_directory = args[4][0]

    in_filename_1 = os.path.join(bundles_dir,
                                 '{}_{}.trk'.format(in_label, out_label))
    in_filename_2 = os.path.join(bundles_dir,
                                 '{}_{}.trk'.format(out_label, in_label))
    if os.path.isfile(in_filename_1):
        in_filename = in_filename_1
    elif os.path.isfile(in_filename_2):
        in_filename = in_filename_2
    else:
        return

    sft = load_tractogram(in_filename, 'same')
    affine, dimensions, voxel_sizes, _ = sft.space_attributes
    measures_to_return = {}

    # Precompute to save one transformation, insert later
    if 'length' in measures_to_compute:
        streamlines_copy = list(sft.get_streamlines_copy())
        mean_length = np.average(length(streamlines_copy))

    # If density is not required, do not compute it
    # Only required for volume, similarity and any metrics
    if not ((len(measures_to_compute) == 1 and
             ('length' in measures_to_compute
              or 'streamline_count' in measures_to_compute)) or
            (len(measures_to_compute) == 2 and
             ('length' in measures_to_compute
              and 'streamline_count' in measures_to_compute))):
        sft.to_vox()
        sft.to_corner()
        density = compute_tract_counts_map(sft.streamlines, dimensions)

    if 'volume' in measures_to_compute:
        measures_to_return['volume'] = np.count_nonzero(density) * \
            np.prod(voxel_sizes)
        measures_to_compute.remove('volume')
    if 'streamline_count' in measures_to_compute:
        measures_to_return['streamline_count'] = len(sft)
        measures_to_compute.remove('streamline_count')
    if 'length' in measures_to_compute:
        measures_to_return['length'] = mean_length
        measures_to_compute.remove('length')
    if 'similarity' in measures_to_compute and similarity_directory:
        density_sim = load_node_nifti(similarity_directory, in_label,
                                      out_label, in_filename)

        ba_vox = compute_bundle_adjacency_voxel(density, density_sim)

        measures_to_return['similarity'] = ba_vox
        measures_to_compute.remove('similarity')

    for measure in measures_to_compute:
        if os.path.isdir(measure):
            map_dirname = measure
            map_data = load_node_nifti(map_dirname, in_label, out_label,
                                       in_filename)
            measures_to_return[map_dirname] = np.average(
                map_data[map_data > 0])
        elif os.path.isfile(measure):
            metric_filename = measure
            if not is_header_compatible(metric_filename, sft):
                logging.error(
                    '{} and {} do not have a compatible header'.format(
                        in_filename, metric_filename))
                raise IOError

            metric_data = nib.load(metric_filename).get_fdata()
            if weighted:
                density = density / np.max(density)
                voxels_value = metric_data * density
                voxels_value = voxels_value[voxels_value > 0]
            else:
                voxels_value = metric_data[density > 0]

            measures_to_return[metric_filename] = np.average(voxels_value)

    return {(in_label, out_label): measures_to_return}
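_processing_wrapper unpacks a positional argument tuple, which suggests it is meant to be fanned out over a process pool, one (in_label, out_label) pair per job. A hypothetical driver sketch; run_all_pairs and its parameters are illustrative, and the tuple layout simply mirrors the unpacking above:

import itertools
import multiprocessing

def run_all_pairs(bundles_dir, labels, measures, weighted, similarity):
    pairs = itertools.combinations(labels, 2)
    jobs = [(bundles_dir, pair, measures, weighted, similarity)
            for pair in pairs]
    with multiprocessing.Pool() as pool:
        results = pool.map(_processing_wrapper, jobs)
    # The wrapper returns None for missing bundle files; drop those.
    return {k: v for r in results if r for k, v in r.items()}
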
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    assert_inputs_exist(parser, [args.in_odf, args.in_seed, args.in_mask])
    assert_outputs_exist(parser, args, args.out_tractogram)

    if not nib.streamlines.is_supported(args.out_tractogram):
        parser.error('Invalid output streamline file format (must be trk or ' +
                     'tck): {0}'.format(args.out_tractogram))

    verify_streamline_length_options(parser, args)
    verify_compression_th(args.compress)
    verify_seed_options(parser, args)

    mask_img = nib.load(args.in_mask)
    mask_data = get_data_as_mask(mask_img, dtype=bool)

    # Make sure the data is isotropic. Else, the strategy used
    # when providing information to dipy (i.e. working as if in voxel space)
    # will not yield correct results.
    odf_sh_img = nib.load(args.in_odf)
    if not np.allclose(np.mean(odf_sh_img.header.get_zooms()[:3]),
                       odf_sh_img.header.get_zooms()[0], atol=1e-03):
        parser.error(
            'ODF SH file is not isotropic. Tracking cannot be run robustly.')

    if args.npv:
        nb_seeds = args.npv
        seed_per_vox = True
    elif args.nt:
        nb_seeds = args.nt
        seed_per_vox = False
    else:
        nb_seeds = 1
        seed_per_vox = True

    voxel_size = odf_sh_img.header.get_zooms()[0]
    vox_step_size = args.step_size / voxel_size
    seed_img = nib.load(args.in_seed)
    seeds = track_utils.random_seeds_from_mask(
        seed_img.get_fdata(dtype=np.float32),
        np.eye(4),
        seeds_count=nb_seeds,
        seed_count_per_voxel=seed_per_vox,
        random_seed=args.seed)

    # Tracking is performed in voxel space
    max_steps = int(args.max_length / args.step_size) + 1
    streamlines_generator = LocalTracking(
        _get_direction_getter(args),
        BinaryStoppingCriterion(mask_data),
        seeds, np.eye(4),
        step_size=vox_step_size, max_cross=1,
        maxlen=max_steps,
        fixedstep=True, return_all=True,
        random_seed=args.seed,
        save_seeds=args.save_seeds)

    scaled_min_length = args.min_length / voxel_size
    scaled_max_length = args.max_length / voxel_size

    if args.save_seeds:
        filtered_streamlines, seeds = \
            zip(*((s, p) for s, p in streamlines_generator
                  if scaled_min_length <= length(s) <= scaled_max_length))
        data_per_streamlines = {'seeds': lambda: seeds}
    else:
        filtered_streamlines = \
            (s for s in streamlines_generator
             if scaled_min_length <= length(s) <= scaled_max_length)
        data_per_streamlines = {}

    if args.compress:
        filtered_streamlines = (
            compress_streamlines(s, args.compress)
            for s in filtered_streamlines)

    tractogram = LazyTractogram(lambda: filtered_streamlines,
                                data_per_streamlines,
                                affine_to_rasmm=seed_img.affine)

    filetype = nib.streamlines.detect_format(args.out_tractogram)
    reference = get_reference_info(seed_img)
    header = create_tractogram_header(filetype, *reference)

    # Use generator to save the streamlines on-the-fly
    nib.streamlines.save(tractogram, args.out_tractogram, header=header)
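Because tracking runs in voxel space (seeds generated with a np.eye(4) affine), the millimetre length bounds are divided by the voxel size before being compared against length(s). A short numeric check of that scaling, plain Python only:

voxel_size = 2.0                         # 2 mm isotropic voxels
min_length_mm, max_length_mm = 20.0, 200.0
scaled_min = min_length_mm / voxel_size  # 10.0 voxel units
scaled_max = max_length_mm / voxel_size  # 100.0 voxel units
assert (scaled_min, scaled_max) == (10.0, 100.0)
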
Example #26
0
def cluster_confidence(streamlines,
                       max_mdf=5,
                       subsample=12,
                       power=1,
                       override=False):
    """ Computes the cluster confidence index (cci), which is an
    estimation of the support a set of streamlines gives to
    a particular pathway.

    Ex: A single streamline with no others in the dataset
    following a similar pathway has a low cci. A streamline
    in a bundle of 100 streamlines that follow similar
    pathways has a high cci.

    See: Jordan et al. 2017
    (Based on streamline MDF distance from Garyfallidis et al. 2012)

    Parameters
    ----------
    streamlines : list of 2D (N, 3) arrays
        A sequence of streamlines of length N (# streamlines)
    max_mdf : int
        The maximum MDF distance (mm) that will be considered a
        "supporting" streamline and included in cci calculation
    subsample : int
        The number of points considered for each streamline in the
        calculation. To save on computation time, each streamline is
        subsampled to `subsample` points.
    power : int
        The power to which the MDF distance for each streamline is
        raised to determine how much it contributes to the cci. Higher
        powers make the contribution decay faster. Example: a
        streamline at 5 mm MDF distance contributes 1/5 to the cci if
        power is 1, but only 1/5^2 = 1/25 if power is 2.
    override : bool, False by default
        If True, the cci calculation proceeds even when short
        streamlines are present in the dataset, which may alter the
        expected behaviour.

    Returns
    -------
    cci_score_mtrx : ndarray
        An array of CCI scores, one per streamline.

    References
    ----------
    [Jordan17] Jordan K. Et al., Cluster Confidence Index: A Streamline-Wise
    Pathway Reproducibility Metric for Diffusion-Weighted MRI Tractography,
    Journal of Neuroimaging, vol 28, no 1, 2017.

    [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
    tractography simplification, Frontiers in Neuroscience,
    vol 6, no 175, 2012.

    """

    # error if any streamlines are shorter than 20mm
    lengths = list(length(streamlines))
    if min(lengths) < 20 and not override:
        raise ValueError('Short streamlines found. We recommend removing them.'
                         ' To continue without removing short streamlines set'
                         ' override=True')

    # calculate the pairwise MDF distance between all streamlines in dataset
    subsamp_sls = set_number_of_points(streamlines, subsample)

    cci_score_mtrx = np.zeros([len(subsamp_sls)])

    for i, sl in enumerate(subsamp_sls):
        mdf_mx = bundles_distances_mdf([sl], subsamp_sls)
        if (mdf_mx == 0).sum() > 1:
            raise ValueError('Identical streamlines. CCI calculation invalid')
        mdf_mx_oi = (mdf_mx > 0) & (mdf_mx < max_mdf) & ~np.isnan(mdf_mx)
        mdf_mx_oi_only = mdf_mx[mdf_mx_oi]
        cci_score = np.sum(np.divide(1, np.power(mdf_mx_oi_only, power)))
        cci_score_mtrx[i] = cci_score

    return cci_score_mtrx
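A hedged usage sketch for cluster_confidence: it builds a synthetic bundle of 50 nearly parallel streamlines, each about 40 mm long so the 20 mm check passes. It assumes the dipy helpers the function relies on (length, set_number_of_points, bundles_distances_mdf) are already imported; the noise level of 0.5 mm is arbitrary:

import numpy as np

rng = np.random.default_rng(0)
base = np.linspace([0., 0., 0.], [40., 0., 0.], 30)  # 30 points, 40 mm
bundle = [base + rng.normal(0, 0.5, size=(1, 3)) for _ in range(50)]

cci = cluster_confidence(bundle, max_mdf=5, subsample=12)
print(cci.shape, cci.mean())  # 50 scores; a tight bundle scores high
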
Example #27
0
def _processing_wrapper(args):
    hdf5_filename = args[0]
    labels_img = args[1]
    in_label, out_label = args[2]
    measures_to_compute = copy.copy(args[3])
    similarity_directory = None
    if args[4] is not None:
        similarity_directory = args[4][0]
    weighted = args[5]
    include_dps = args[6]

    hdf5_file = h5py.File(hdf5_filename, 'r')
    key = '{}_{}'.format(in_label, out_label)
    if key not in hdf5_file:
        return
    streamlines = reconstruct_streamlines_from_hdf5(hdf5_file, key)

    affine, dimensions, voxel_sizes, _ = get_reference_info(labels_img)
    measures_to_return = {}

    if not (np.allclose(hdf5_file.attrs['affine'], affine, atol=1e-03)
            and np.array_equal(hdf5_file.attrs['dimensions'], dimensions)):
        raise ValueError('Provided hdf5 file has an incompatible header.')

    # Precompute to save one transformation, insert later
    if 'length' in measures_to_compute:
        streamlines_copy = list(streamlines)
        # scil_decompose_connectivity.py requires isotropic voxels
        mean_length = np.average(length(streamlines_copy)) * voxel_sizes[0]

    # If density is not required, do not compute it
    # Only required for volume, similarity and any metrics
    if not ((len(measures_to_compute) == 1 and
             ('length' in measures_to_compute
              or 'streamline_count' in measures_to_compute)) or
            (len(measures_to_compute) == 2 and
             ('length' in measures_to_compute
              and 'streamline_count' in measures_to_compute))):

        density = compute_tract_counts_map(streamlines, dimensions)

    if 'volume' in measures_to_compute:
        measures_to_return['volume'] = np.count_nonzero(density) * \
            np.prod(voxel_sizes)
        measures_to_compute.remove('volume')
    if 'streamline_count' in measures_to_compute:
        measures_to_return['streamline_count'] = len(streamlines)
        measures_to_compute.remove('streamline_count')
    if 'length' in measures_to_compute:
        measures_to_return['length'] = mean_length
        measures_to_compute.remove('length')
    if 'similarity' in measures_to_compute and similarity_directory:
        density_sim = load_node_nifti(similarity_directory, in_label,
                                      out_label, labels_img)
        if density_sim is None:
            ba_vox = 0
        else:
            ba_vox = compute_bundle_adjacency_voxel(density, density_sim)

        measures_to_return['similarity'] = ba_vox
        measures_to_compute.remove('similarity')

    for measure in measures_to_compute:
        if isinstance(measure, str) and os.path.isdir(measure):
            map_dirname = measure
            map_data = load_node_nifti(map_dirname, in_label, out_label,
                                       labels_img)
            measures_to_return[map_dirname] = np.average(
                map_data[map_data > 0])
        elif isinstance(measure, tuple) and os.path.isfile(measure[0]):
            metric_filename = measure[0]
            metric_img = measure[1]
            if not is_header_compatible(metric_img, labels_img):
                logging.error('{} does not have a compatible header'.format(
                    metric_filename))
                raise IOError

            metric_data = metric_img.get_fdata(dtype=np.float64)
            if weighted:
                density = density / np.max(density)
                voxels_value = metric_data * density
                voxels_value = voxels_value[voxels_value > 0]
            else:
                voxels_value = metric_data[density > 0]

            measures_to_return[metric_filename] = np.average(voxels_value)

    if include_dps:
        for dps_key in hdf5_file[key].keys():
            if dps_key not in ['data', 'offsets', 'lengths']:
                out_file = os.path.join(include_dps, dps_key)
                measures_to_return[out_file] = np.average(
                    hdf5_file[key][dps_key])

    return {(in_label, out_label): measures_to_return}
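reconstruct_streamlines_from_hdf5 is not shown here, but the dps loop above implies each connection group stores flat 'data', 'offsets' and 'lengths' datasets. A minimal reconstruction sketch under that assumed layout (the helper name is illustrative):

import numpy as np

def reconstruct_streamlines(hdf5_file, key):
    group = hdf5_file[key]
    data = np.asarray(group['data'])        # flat (N, 3) point array
    offsets = np.asarray(group['offsets'])  # first point of each streamline
    lengths = np.asarray(group['lengths'])  # points per streamline
    return [data[o:o + l] for o, l in zip(offsets, lengths)]
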
Example #28
0
def _processing_wrapper(args):
    hdf5_filename = args[0]
    labels_img = args[1]
    in_label, out_label = args[2]
    measures_to_compute = copy.copy(args[3])
    similarity_directory = None
    if args[4] is not None:
        similarity_directory = args[4][0]
    weighted = args[5]
    include_dps = args[6]
    min_lesion_vol = args[7]

    hdf5_file = h5py.File(hdf5_filename, 'r')
    key = '{}_{}'.format(in_label, out_label)
    if key not in hdf5_file:
        return
    streamlines = reconstruct_streamlines_from_hdf5(hdf5_file, key)
    if len(streamlines) == 0:
        return

    affine, dimensions, voxel_sizes, _ = get_reference_info(labels_img)
    measures_to_return = {}

    if not (np.allclose(hdf5_file.attrs['affine'], affine, atol=1e-03)
            and np.array_equal(hdf5_file.attrs['dimensions'], dimensions)):
        raise ValueError('Provided hdf5 file has an incompatible header.')

    # Precompute to save one transformation, insert later
    if 'length' in measures_to_compute:
        streamlines_copy = list(streamlines)
        # scil_decompose_connectivity.py requires isotropic voxels
        mean_length = np.average(length(streamlines_copy))*voxel_sizes[0]

    # If density is not required, do not compute it
    # Only required for volume, similarity and any metrics
    if not ((len(measures_to_compute) == 1 and
             ('length' in measures_to_compute or
              'streamline_count' in measures_to_compute)) or
            (len(measures_to_compute) == 2 and
             ('length' in measures_to_compute and
              'streamline_count' in measures_to_compute))):

        density = compute_tract_counts_map(streamlines,
                                           dimensions)

    if 'volume' in measures_to_compute:
        measures_to_return['volume'] = np.count_nonzero(density) * \
            np.prod(voxel_sizes)
        measures_to_compute.remove('volume')
    if 'streamline_count' in measures_to_compute:
        measures_to_return['streamline_count'] = len(streamlines)
        measures_to_compute.remove('streamline_count')
    if 'length' in measures_to_compute:
        measures_to_return['length'] = mean_length
        measures_to_compute.remove('length')
    if 'similarity' in measures_to_compute and similarity_directory:
        density_sim = load_node_nifti(similarity_directory,
                                      in_label, out_label,
                                      labels_img)
        if density_sim is None:
            ba_vox = 0
        else:
            ba_vox = compute_bundle_adjacency_voxel(density, density_sim)

        measures_to_return['similarity'] = ba_vox
        measures_to_compute.remove('similarity')

    for measure in measures_to_compute:
        # Maps
        if isinstance(measure, str) and os.path.isdir(measure):
            map_dirname = measure
            map_data = load_node_nifti(map_dirname,
                                       in_label, out_label,
                                       labels_img)
            measures_to_return[map_dirname] = np.average(
                map_data[map_data > 0])
        elif isinstance(measure, tuple):
            if not isinstance(measure[0], tuple) \
                    and os.path.isfile(measure[0]):
                metric_filename = measure[0]
                metric_img = measure[1]
                if not is_header_compatible(metric_img, labels_img):
                    logging.error('{} does not have a compatible header'.format(
                        metric_filename))
                    raise IOError

                metric_data = metric_img.get_fdata(dtype=np.float64)
                if weighted:
                    avg_value = np.average(metric_data, weights=density)
                else:
                    avg_value = np.average(metric_data[density > 0])
                measures_to_return[metric_filename] = avg_value
            # lesion
            else:
                lesion_filename = measure[0][0]
                computed_lesion_labels = measure[0][1]
                lesion_img = measure[1]
                if not is_header_compatible(lesion_img, labels_img):
                    logging.error('{} does not have a compatible header'.format(
                        lesion_filename))
                    raise IOError

                voxel_sizes = lesion_img.header.get_zooms()[0:3]
                lesion_img.set_filename('tmp.nii.gz')
                lesion_atlas = get_data_as_label(lesion_img)
                tmp_dict = compute_lesion_stats(
                    density.astype(bool), lesion_atlas,
                    voxel_sizes=voxel_sizes, single_label=True,
                    min_lesion_vol=min_lesion_vol,
                    precomputed_lesion_labels=computed_lesion_labels)

                tmp_ind = _streamlines_in_mask(list(streamlines),
                                               lesion_atlas.astype(np.uint8),
                                               np.eye(3), [0, 0, 0])
                streamlines_count = int(np.count_nonzero(tmp_ind == 1))

                if tmp_dict:
                    measures_to_return[lesion_filename+'vol'] = \
                        tmp_dict['lesion_total_volume']
                    measures_to_return[lesion_filename+'count'] = \
                        tmp_dict['lesion_count']
                    measures_to_return[lesion_filename+'sc'] = \
                        streamlines_count
                else:
                    measures_to_return[lesion_filename+'vol'] = 0
                    measures_to_return[lesion_filename+'count'] = 0
                    measures_to_return[lesion_filename+'sc'] = 0

    if include_dps:
        for dps_key in hdf5_file[key].keys():
            if dps_key not in ['data', 'offsets', 'lengths']:
                out_file = os.path.join(include_dps, dps_key)
                if 'commit' in dps_key:
                    measures_to_return[out_file] = np.sum(
                        hdf5_file[key][dps_key])
                else:
                    measures_to_return[out_file] = np.average(
                        hdf5_file[key][dps_key])

    return {(in_label, out_label): measures_to_return}
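Compared with Example #27, the weighted branch here uses np.average(metric_data, weights=density), which weights every voxel by its streamline count, rather than masking the metric to covered voxels and taking a simple mean. A toy comparison of the two estimators, numpy only:

import numpy as np

metric = np.array([1.0, 2.0, 4.0, 8.0])
density = np.array([0, 1, 1, 2])   # streamline count per voxel

masked = np.average(metric[density > 0])        # (2 + 4 + 8) / 3 ~= 4.67
weighted = np.average(metric, weights=density)  # (2 + 4 + 16) / 4 = 5.5
print(masked, weighted)
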
Example #29
0
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.isVerbose:
        logging.basicConfig(level=logging.DEBUG)

    assert_inputs_exist(parser, [args.sh_file, args.seed_file, args.mask_file])
    assert_outputs_exist(parser, args, [args.output_file])

    if not nib.streamlines.is_supported(args.output_file):
        parser.error('Invalid output streamline file format (must be trk or ' +
                     'tck): {0}'.format(args.output_file))

    if not args.min_length > 0:
        parser.error('minL must be > 0, {}mm was provided.'.format(
            args.min_length))
    if args.max_length < args.min_length:
        parser.error(
            'maxL must be greater than minL (minL={}mm, maxL={}mm).'.format(
                args.min_length, args.max_length))

    if args.compress:
        if args.compress < 0.001 or args.compress > 1:
            logging.warning(
                'You are using an error rate of {}.\nWe recommend setting it '
                'between 0.001 and 1.\n0.001 will do almost nothing to the '
                'tracts while 1 will highly compress/linearize the tracts.'.
                format(args.compress))

    if args.npv and args.npv <= 0:
        parser.error('Number of seeds per voxel must be > 0.')

    if args.nt and args.nt <= 0:
        parser.error('Total number of seeds must be > 0.')

    mask_img = nib.load(args.mask_file)
    mask_data = np.asanyarray(mask_img.dataobj)

    # Make sure the mask is isotropic. Else, the strategy used
    # when providing information to dipy (i.e. working as if in voxel space)
    # will not yield correct results.
    fodf_sh_img = nib.load(args.sh_file)
    if not np.allclose(np.mean(fodf_sh_img.header.get_zooms()[:3]),
                       fodf_sh_img.header.get_zooms()[0],
                       atol=1.e-3):
        parser.error(
            'SH file is not isotropic. Tracking cannot be run robustly.')

    if args.npv:
        nb_seeds = args.npv
        seed_per_vox = True
    elif args.nt:
        nb_seeds = args.nt
        seed_per_vox = False
    else:
        nb_seeds = 1
        seed_per_vox = True

    voxel_size = fodf_sh_img.header.get_zooms()[0]
    vox_step_size = args.step_size / voxel_size
    seed_img = nib.load(args.seed_file)
    seeds = track_utils.random_seeds_from_mask(
        np.asanyarray(seed_img.dataobj),
        seeds_count=nb_seeds,
        seed_count_per_voxel=seed_per_vox,
        random_seed=args.seed)

    # Tracking is performed in voxel space
    max_steps = int(args.max_length / args.step_size) + 1
    streamlines = LocalTracking(_get_direction_getter(args, mask_data),
                                BinaryTissueClassifier(mask_data),
                                seeds,
                                np.eye(4),
                                step_size=vox_step_size,
                                max_cross=1,
                                maxlen=max_steps,
                                fixedstep=True,
                                return_all=True,
                                random_seed=args.seed)

    scaled_min_length = args.min_length / voxel_size
    scaled_max_length = args.max_length / voxel_size

    filtered_streamlines = (
        s for s in streamlines
        if scaled_min_length <= length(s) <= scaled_max_length)
    if args.compress:
        filtered_streamlines = (compress_streamlines(s, args.compress)
                                for s in filtered_streamlines)

    tractogram = LazyTractogram(lambda: filtered_streamlines,
                                affine_to_rasmm=seed_img.affine)

    filetype = nib.streamlines.detect_format(args.output_file)
    header = create_header_from_anat(seed_img, base_filetype=filetype)

    # Use generator to save the streamlines on-the-fly
    nib.streamlines.save(tractogram, args.output_file, header=header)
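The filter-then-compress pipeline above is built from chained generators so that no streamline list is ever materialised; LazyTractogram receives a callable so nibabel can start iterating only at save time. A minimal sketch of that pattern as a reusable helper, assuming dipy's length and compress_streamlines are imported; the name lazy_filter is illustrative:

def lazy_filter(streamlines, min_len, max_len, compress_th=None):
    # Keep only streamlines inside the (voxel-space) length window,
    # optionally compressing each survivor; everything stays lazy.
    kept = (s for s in streamlines if min_len <= length(s) <= max_len)
    if compress_th:
        kept = (compress_streamlines(s, compress_th) for s in kept)
    return kept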