Example 1
def threshold_MT_maps(computed_map, in_mask, lower_threshold, upper_threshold):
    """
    Remove NaN/inf values and apply thresholds based on
       - minimum and maximum threshold values
       - T1 mask

    Parameters
    ----------
    computed_map        3D-Array Myelin map.
    in_mask             Path to binary T1 mask from T1 segmentation.
                        Must be the sum of GM+WM+CSF.
    lower_threshold     Value for the lower threshold <int>
    upper_threshold     Value for the upper threshold <int>

    Returns
    ----------
    Thresholded matrix as a 3D-array.
    """
    # Remove NaN/inf and apply thresholds based on lower and upper values
    computed_map[np.isnan(computed_map)] = 0
    computed_map[np.isinf(computed_map)] = 0
    computed_map[computed_map < lower_threshold] = 0
    computed_map[computed_map > upper_threshold] = 0

    # Load and apply sum of T1 probability maps on myelin maps
    mask_image = nib.load(in_mask)
    mask_data = get_data_as_mask(mask_image)
    computed_map[np.where(mask_data == 0)] = 0

    return computed_map
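
A minimal usage sketch of the thresholding step on synthetic data, assuming illustrative bounds of 0.1 and 1.0; the mask step is omitted here since it needs a mask file on disk.

import numpy as np

myelin = np.array([[np.nan, 0.2], [1.5, np.inf]], dtype=np.float32)
myelin[np.isnan(myelin)] = 0
myelin[np.isinf(myelin)] = 0
myelin[myelin < 0.1] = 0    # lower_threshold
myelin[myelin > 1.0] = 0    # upper_threshold
print(myelin)               # only 0.2 survives
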
Example 2
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(
        parser, [args.map_include, args.map_exclude, args.additional_mask])
    assert_outputs_exist(parser, args,
                         [args.map_include_corr, args.map_exclude_corr])

    map_inc = nib.load(args.map_include)
    map_inc_data = map_inc.get_fdata(dtype=np.float32)

    map_exc = nib.load(args.map_exclude)
    map_exc_data = map_exc.get_fdata(dtype=np.float32)

    additional_mask = nib.load(args.additional_mask)
    additional_mask_data = get_data_as_mask(additional_mask)

    map_inc_data[additional_mask_data > 0] = 0
    map_exc_data[additional_mask_data > 0] = 0

    # TODO Remove header or add optional argument name
    nib.save(
        nib.Nifti1Image(map_inc_data.astype('float32'), map_inc.affine,
                        map_inc.header), args.map_include_corr)
    nib.save(
        nib.Nifti1Image(map_exc_data.astype('float32'), map_exc.affine,
                        map_exc.header), args.map_exclude_corr)
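
Regarding the TODO above, a minimal sketch on synthetic data of the two save variants: passing the original header preserves its metadata, while omitting it lets nibabel rebuild a default header from the data and affine.

import numpy as np
import nibabel as nib

data = np.zeros((4, 4, 4), dtype=np.float32)
affine = np.eye(4)
template = nib.Nifti1Image(data, affine)
img_default_hdr = nib.Nifti1Image(data, affine)                # header rebuilt
img_kept_hdr = nib.Nifti1Image(data, affine, template.header)  # header reused
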
Example 3
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_sh, optional=args.mask)

    # Load data
    sh_img = nib.load(args.in_sh)
    sh = sh_img.get_fdata(dtype=np.float32)
    mask = None
    if args.mask:
        mask = get_data_as_mask(nib.load(args.mask), dtype=bool)

    # Precompute output filenames to check if they exist
    sh_order = order_from_ncoef(sh.shape[-1], full_basis=args.full_basis)
    _, order_ids = sph_harm_ind_list(sh_order, full_basis=args.full_basis)
    orders = sorted(np.unique(order_ids))
    output_fnames = ["{}{}.nii.gz".format(args.out_prefix, i) for i in orders]
    assert_outputs_exist(parser, args, output_fnames)

    # Compute RISH features
    rish, final_orders = compute_rish(sh, mask, full_basis=args.full_basis)

    # Make sure the precomputed orders match the orders returned
    assert np.all(orders == np.array(final_orders))

    # Save each RISH feature as a separate file
    for i, fname in enumerate(output_fnames):
        nib.save(nib.Nifti1Image(rish[..., i], sh_img.affine), fname)
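
A small sketch of the order bookkeeping above, assuming order_from_ncoef and sph_harm_ind_list come from dipy.reconst.shm as the names suggest: a symmetric basis of order 8 has (8 + 1)(8 + 2) / 2 = 45 coefficients, and each unique even order yields one RISH feature.

import numpy as np
from dipy.reconst.shm import order_from_ncoef, sph_harm_ind_list

sh_order = order_from_ncoef(45)             # -> 8
_, order_ids = sph_harm_ind_list(sh_order)  # order of each coefficient
print(sorted(np.unique(order_ids)))         # [0, 2, 4, 6, 8]
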
Example 4
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_dwi, args.in_bias_field], args.mask)
    assert_outputs_exist(parser, args, args.out_name)

    dwi_img = nib.load(args.in_dwi)
    dwi_data = dwi_img.get_fdata(dtype=np.float32)

    bias_field_img = nib.load(args.in_bias_field)
    bias_field_data = bias_field_img.get_fdata(dtype=np.float32)

    if args.mask:
        mask_img = nib.load(args.mask)
        nz_mask_data = np.nonzero(get_data_as_mask(mask_img))
    else:
        nz_mask_data = np.nonzero(np.average(dwi_data, axis=-1))

    nuc_dwi_data = np.divide(
        dwi_data[nz_mask_data], bias_field_data[nz_mask_data].reshape(
            (len(nz_mask_data[0]), 1)))

    rescaled_nuc_data = _rescale_dwi(dwi_data[nz_mask_data], nuc_dwi_data)

    dwi_data[nz_mask_data] = rescaled_nuc_data
    nib.save(nib.Nifti1Image(dwi_data, dwi_img.affine, dwi_img.header),
             args.out_name)
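
A minimal sketch of the masked division above on synthetic arrays: indexing a 4D volume with the np.nonzero tuple of a 3D mask yields a (n_voxels, n_volumes) array, so the bias field must be reshaped to (n_voxels, 1) to broadcast across volumes.

import numpy as np

dwi = np.random.rand(3, 3, 3, 5).astype(np.float32)
bias = 0.5 + np.random.rand(3, 3, 3).astype(np.float32)
nz = np.nonzero(np.average(dwi, axis=-1))    # voxels with signal
corrected = dwi[nz] / bias[nz].reshape((len(nz[0]), 1))
dwi[nz] = corrected                          # write back in place
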
Example 5
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, args.in_sh, args.mask)
    assert_outputs_exist(parser, args, args.out_bingham)

    sh_im = nib.load(args.in_sh)
    data = sh_im.get_fdata()
    mask = get_data_as_mask(nib.load(args.mask), dtype=bool)\
        if args.mask else None

    # validate number of processes
    nbr_processes = validate_nbr_processes(parser, args)
    logging.info('Number of processes: {}'.format(nbr_processes))

    t0 = time.perf_counter()
    logging.info('Fitting Bingham functions.')
    bingham = bingham_fit_sh(data, args.max_lobes,
                             abs_th=args.at, rel_th=args.rt,
                             min_sep_angle=args.min_sep_angle,
                             max_fit_angle=args.max_fit_angle,
                             mask=mask,
                             nbr_processes=nbr_processes)
    t1 = time.perf_counter()
    logging.info('Fitting done in (s): {0}'.format(t1 - t0))
    nib.save(nib.Nifti1Image(bingham, sh_im.affine), args.out_bingham)
Example 6
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_metric, args.in_mask])
    assert_outputs_exist(parser, args, args.out_png)

    # Load metric image
    metric_img = nib.load(args.in_metric)
    metric_img_data = metric_img.get_fdata(dtype=np.float32)

    # Load mask image
    mask_img = nib.load(args.in_mask)
    mask_img_data = get_data_as_mask(mask_img)

    # Select value from mask
    curr_data = metric_img_data[np.where(mask_img_data > 0)]

    # Display figure
    fig, ax = plt.subplots()
    n, bins, patches = ax.hist(curr_data, bins=args.n_bins,
                               color=args.colors, alpha=0.5, rwidth=0.85)
    plt.xlabel(args.x_label)
    plt.title(args.title)

    if args.show_only:
        plt.show()
    else:
        plt.savefig(args.out_png, dpi=300, bbox_inches='tight')
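
A self-contained sketch of the masked-histogram logic, assuming synthetic data and the non-interactive Agg backend.

import numpy as np
import matplotlib
matplotlib.use('Agg')           # render off-screen
import matplotlib.pyplot as plt

metric = np.random.rand(10, 10, 10).astype(np.float32)
mask = np.zeros((10, 10, 10), dtype=np.uint8)
mask[3:7, 3:7, 3:7] = 1
values = metric[mask > 0]       # 1D array of in-mask voxel values

fig, ax = plt.subplots()
ax.hist(values, bins=20, alpha=0.5, rwidth=0.85)
fig.savefig('histogram.png', dpi=300, bbox_inches='tight')
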
Example 7
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_dwi, args.in_bval, args.in_bvec])
    assert_outputs_exist(parser, args, args.out_sh)

    vol = nib.load(args.in_dwi)
    dwi = vol.get_fdata(dtype=np.float32)

    bvals, bvecs = read_bvals_bvecs(args.in_bval, args.in_bvec)
    gtab = gradient_table(args.in_bval, args.in_bvec, b0_threshold=bvals.min())

    mask = None
    if args.mask:
        mask = get_data_as_mask(nib.load(args.mask), dtype=bool)

    sh = compute_sh_coefficients(dwi,
                                 gtab,
                                 args.sh_order,
                                 args.sh_basis,
                                 args.smooth,
                                 use_attenuation=args.use_attenuation,
                                 mask=mask)

    nib.save(nib.Nifti1Image(sh.astype(np.float32), vol.affine), args.out_sh)
Example 8
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, [args.in_dwi, args.in_bval, args.in_bvec])
    assert_outputs_exist(parser, args, args.frf_file)

    if len(args.roi_radii) == 1:
        roi_radii = args.roi_radii[0]
    elif len(args.roi_radii) == 3:
        roi_radii = args.roi_radii
    else:
        parser.error('Wrong size for --roi_radii, can only be a scalar '
                     'or an array of size (3,).')

    vol = nib.load(args.in_dwi)
    data = vol.get_fdata(dtype=np.float32)

    bvals, bvecs = read_bvals_bvecs(args.in_bval, args.in_bvec)

    mask = None
    if args.mask:
        mask = get_data_as_mask(nib.load(args.mask), dtype=bool)

    mask_wm = None
    if args.mask_wm:
        mask_wm = get_data_as_mask(nib.load(args.mask_wm), dtype=bool)

    full_response = compute_ssst_frf(
        data,
        bvals,
        bvecs,
        mask=mask,
        mask_wm=mask_wm,
        fa_thresh=args.fa_thresh,
        min_fa_thresh=args.min_fa_thresh,
        min_nvox=args.min_nvox,
        roi_radii=roi_radii,
        roi_center=args.roi_center,
        force_b0_threshold=args.force_b0_threshold)

    np.savetxt(args.frf_file, full_response)
Example 9
def extract_false_connections(sft, mask_1_filename, mask_2_filename,
                              dilate_endpoints):
    """
    Extract false connections based on two regions from a tractogram.

    Parameters
    ----------
    sft: StatefulTractogram
        Tractogram containing the streamlines to be extracted.
    mask_1_filename: str
        Filename of the "head" of the bundle.
    mask_2_filename: str
        Filename of the "tail" of the bundle.
    dilate_endpoints: int or None
        If set, dilate the masks for n iterations.

    Returns
    -------
    fc_sft: StatefulTractogram
        SFT of false connections.
    sft: StatefulTractogram
        SFT of remaining streamlines.
    """

    mask_1_img = nib.load(mask_1_filename)
    mask_2_img = nib.load(mask_2_filename)
    mask_1 = get_data_as_mask(mask_1_img)
    mask_2 = get_data_as_mask(mask_2_img)

    if dilate_endpoints:
        mask_1 = binary_dilation(mask_1, iterations=dilate_endpoints)
        mask_2 = binary_dilation(mask_2, iterations=dilate_endpoints)

    if len(sft.streamlines) > 0:
        tmp_sft, sft = extract_streamlines(mask_1, mask_2, sft)

        streamlines = tmp_sft.streamlines
        fc_streamlines = streamlines

        fc_sft = StatefulTractogram.from_sft(fc_streamlines, sft)
        return fc_sft, sft
    else:
        return sft, sft
Example 10
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    inputs = [args.in_bvec, args.in_peaks, args.in_FA]
    optional = [args.mask, args.peaks_vals]

    assert_inputs_exist(parser, inputs, optional=optional)
    assert_outputs_exist(parser, args, args.out_bvec)

    _, bvecs = read_bvals_bvecs(None, args.in_bvec)
    fa = nib.load(args.in_FA).get_fdata()
    peaks = nib.load(args.in_peaks).get_fdata()

    # convert peaks to a volume of shape (H, W, D, N, 3)
    if args.column_wise:
        peaks = np.reshape(peaks, peaks.shape[:3] + (3, -1))
        peaks = np.transpose(peaks, axes=(0, 1, 2, 4, 3))
    else:
        peaks = np.reshape(peaks, peaks.shape[:3] + (-1, 3))

    N = peaks.shape[3]
    if N > 1:
        if not args.peaks_vals:
            parser.error('More than one principal direction per voxel. Specify'
                         ' peaks values with --peaks_vals to proceed.')
        peaks_vals = nib.load(args.peaks_vals).get_fdata()
        indices_max = np.argmax(peaks_vals, axis=-1)[..., None, None]
        peaks = np.take_along_axis(peaks, indices_max, axis=-2)

    peaks = np.squeeze(peaks)
    if args.mask:
        mask = get_data_as_mask(nib.load(args.mask))
        fa[np.logical_not(mask)] = 0
        peaks[np.logical_not(mask)] = 0

    peaks[fa < args.fa_th] = 0
    coherence, transform = compute_fiber_coherence_table(peaks, fa)

    best_t = transform[np.argmax(coherence)]
    if (best_t == np.eye(3)).all():
        logging.info('b-vectors are already correct.')
        correct_bvecs = bvecs
    else:
        logging.info('Applying correction to b-vectors. '
                     'Transform is: \n{0}.'.format(best_t))
        correct_bvecs = np.dot(bvecs, best_t)

    logging.info('Saving bvecs to file: {0}.'.format(args.out_bvec))

    # FSL format (3, N)
    np.savetxt(args.out_bvec, correct_bvecs.T, '%.8f')
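
A small sketch of the strongest-peak selection on synthetic peaks: argmax over the peak values gives one index per voxel, and np.take_along_axis broadcasts it along the last axis to pull out the matching direction.

import numpy as np

peaks = np.random.rand(2, 2, 2, 3, 3)       # (H, W, D, N=3, 3)
peaks_vals = np.random.rand(2, 2, 2, 3)     # one value per peak
idx = np.argmax(peaks_vals, axis=-1)[..., None, None]
strongest = np.take_along_axis(peaks, idx, axis=-2)
print(np.squeeze(strongest).shape)          # (2, 2, 2, 3)
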
Example 11
def compute_gt_masks(gt_bundles, parser, args):
    """
    Compute ground-truth masks. If the ground-truth is
    already a mask, load it. If the ground-truth is a
    bundle, compute the mask.

    Parameters
    ----------
    gt_bundles: list
        List of bundle filenames (either tractograms or nifti masks).
    parser: ArgumentParser
        Argument parser which handles the script's arguments.
    args: Namespace
        List of arguments passed to the script.

    Returns
    -------
    gt_bundle_masks: list of numpy.ndarray
        Binary ground-truth masks.
    gt_bundle_inv_masks: list of numpy.ndarray
        Inverse (background) masks of the ground-truth bundles.
    affine: numpy.ndarray
        Affine of the last loaded image.
    dimensions: tuple of int
        Dimensions of the last loaded image.
    """

    gt_bundle_masks = []
    gt_bundle_inv_masks = []

    for gt_bundle in gt_bundles:
        # Support ground truth as streamlines or masks
        # Will be converted to binary masks immediately
        _, ext = split_name_with_nii(gt_bundle)
        if ext in ['.gz', '.nii.gz']:
            gt_img = nib.load(gt_bundle)
            gt_mask = get_data_as_mask(gt_img)
            affine = gt_img.affine
            dimensions = gt_mask.shape
        else:
            gt_sft = load_tractogram_with_reference(parser,
                                                    args,
                                                    gt_bundle,
                                                    bbox_check=False)
            gt_sft.to_vox()
            gt_sft.to_corner()
            affine, dimensions, _, _ = gt_sft.space_attributes
            gt_mask = compute_tract_counts_map(gt_sft.streamlines,
                                               dimensions).astype(np.int16)
        gt_inv_mask = np.zeros(dimensions, dtype=np.int16)
        gt_inv_mask[gt_mask == 0] = 1
        gt_mask[gt_mask > 0] = 1
        gt_bundle_masks.append(gt_mask)
        gt_bundle_inv_masks.append(gt_inv_mask)

    return gt_bundle_masks, gt_bundle_inv_masks, affine, dimensions
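
A tiny sketch of the mask/inverse-mask construction on a stand-in count map.

import numpy as np

counts = np.array([[0, 3], [1, 0]], dtype=np.int16)  # stand-in tract count map
inv_mask = np.zeros_like(counts)
inv_mask[counts == 0] = 1      # background voxels
mask = counts.copy()
mask[mask > 0] = 1             # binarized bundle mask
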
Example 12
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, [args.input, args.bvals, args.bvecs])
    assert_outputs_exist(parser, args, args.frf_file)

    vol = nib.load(args.input)
    data = vol.get_fdata(dtype=np.float32)

    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    mask = None
    if args.mask:
        mask = get_data_as_mask(nib.load(args.mask), dtype=bool)

    mask_wm = None
    if args.mask_wm:
        mask_wm = get_data_as_mask(nib.load(args.mask_wm), dtype=bool)

    full_response = compute_ssst_frf(
        data,
        bvals,
        bvecs,
        mask=mask,
        mask_wm=mask_wm,
        fa_thresh=args.fa_thresh,
        min_fa_thresh=args.min_fa_thresh,
        min_nvox=args.min_nvox,
        roi_radius=args.roi_radius,
        roi_center=args.roi_center,
        force_b0_threshold=args.force_b0_threshold)

    np.savetxt(args.frf_file, full_response)
Example 13
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, [args.in_tractogram, args.in_mask])
    assert_outputs_exist(parser, args, args.out_tractogram)

    sft = load_tractogram_with_reference(parser, args, args.in_tractogram)
    if args.step_size is not None:
        sft = resample_streamlines_step_size(sft, args.step_size)

    mask_img = nib.load(args.in_mask)
    binary_mask = get_data_as_mask(mask_img)

    if not is_header_compatible(sft, mask_img):
        parser.error('Incompatible header between the tractogram and mask.')

    bundle_disjoint, _ = ndi.label(binary_mask)
    unique, count = np.unique(bundle_disjoint, return_counts=True)
    if args.biggest_blob:
        val = unique[np.argmax(count[1:]) + 1]
        binary_mask[bundle_disjoint != val] = 0
        unique = [0, val]
    if len(unique) == 2:
        logging.info('The provided mask has 1 entity: the '
                     'cut_outside_of_mask_streamlines function was selected.')
        new_sft = cut_outside_of_mask_streamlines(sft, binary_mask)
    elif len(unique) == 3:
        logging.info('The provided mask has 2 entities: the '
                     'cut_between_masks_streamlines function was selected.')
        new_sft = cut_between_masks_streamlines(sft, binary_mask)
    else:
        logging.error('The provided mask has more than 2 entities. Cannot cut '
                      'between >2.')
        return

    if len(new_sft) == 0:
        logging.warning('No streamline intersected the provided mask. '
                        'Saving empty tractogram.')
    elif args.error_rate is not None:
        compressed_strs = [
            compress_streamlines(s, args.error_rate)
            for s in new_sft.streamlines
        ]
        new_sft = StatefulTractogram.from_sft(compressed_strs, sft)

    save_tractogram(new_sft, args.out_tractogram)
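
A self-contained sketch of the biggest-blob selection used above; count[0] belongs to the background label, hence the argmax over count[1:] shifted by one.

import numpy as np
from scipy import ndimage as ndi

mask = np.zeros((6, 6), dtype=np.uint8)
mask[0:2, 0:2] = 1                 # small blob (4 voxels)
mask[3:6, 3:6] = 1                 # big blob (9 voxels)
labels, _ = ndi.label(mask)
unique, count = np.unique(labels, return_counts=True)
biggest = unique[np.argmax(count[1:]) + 1]
mask[labels != biggest] = 0        # keep only the biggest component
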
Example 14
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    if not args.not_all:
        args.out_fd = args.out_fd or 'fd.nii.gz'
        args.out_fs = args.out_fs or 'fs.nii.gz'
        args.out_ff = args.out_ff or 'ff.nii.gz'

    arglist = [args.out_fd, args.out_fs, args.out_ff]
    if args.not_all and not any(arglist):
        parser.error('At least one output file must be specified.')

    outputs = [args.out_fd, args.out_fs, args.out_ff]
    assert_inputs_exist(parser, args.in_bingham, args.mask)
    assert_outputs_exist(parser, args, outputs)

    bingham_im = nib.load(args.in_bingham)
    bingham = bingham_im.get_fdata()
    mask = get_data_as_mask(nib.load(args.mask), dtype=bool)\
        if args.mask else None

    nbr_processes = validate_nbr_processes(parser, args)

    t0 = time.perf_counter()
    logging.info('Computing fiber density.')
    fd = compute_fiber_density(bingham, m=args.nbr_integration_steps,
                               mask=mask, nbr_processes=nbr_processes)
    t1 = time.perf_counter()
    logging.info('FD computed in (s): {0}'.format(t1 - t0))
    if args.out_fd:
        nib.save(nib.Nifti1Image(fd, bingham_im.affine), args.out_fd)

    if args.out_fs:
        t0 = time.perf_counter()
        logging.info('Computing fiber spread.')
        fs = compute_fiber_spread(bingham, fd)
        t1 = time.perf_counter()
        logging.info('FS computed in (s): {0}'.format(t1 - t0))
        nib.save(nib.Nifti1Image(fs, bingham_im.affine), args.out_fs)

    if args.out_ff:
        t0 = time.perf_counter()
        logging.info('Computing fiber fraction.')
        ff = compute_fiber_fraction(fd)
        t1 = time.perf_counter()
        logging.info('FF computed in (s): {0}'.format(t1 - t0))
        nib.save(nib.Nifti1Image(ff, bingham_im.affine), args.out_ff)
Example 15
def extract_false_connections(sft, mask_1_filename, mask_2_filename,
                              dilate_endpoints):
    """
    Extract false connections based on two regions from a tractogram.

    Parameters
    ----------
    sft: StatefulTractogram
        Tractogram containing the streamlines to be extracted.
    mask_1_filename: str
        Filename of the "head" of the bundle.
    mask_2_filename: str
        Filename of the "tail" of the bundle.
    dilate_endpoints: int or None
        If set, dilate the masks for n iterations.

    Returns
    -------
    fc_sft: StatefulTractogram
        SFT of false connections.
    fc_ids: list of int
        Indices of the false-connection streamlines.
    """

    mask_1_img = nib.load(mask_1_filename)
    mask_2_img = nib.load(mask_2_filename)
    mask_1 = get_data_as_mask(mask_1_img)
    mask_2 = get_data_as_mask(mask_2_img)

    if dilate_endpoints:
        mask_1 = binary_dilation(mask_1, iterations=dilate_endpoints)
        mask_2 = binary_dilation(mask_2, iterations=dilate_endpoints)

    _, fc_ids = filter_grid_roi_both(sft, mask_1, mask_2)

    fc_sft = sft[fc_ids]
    return fc_sft, fc_ids
Example 16
def _get_data_from_inputs(args):
    """
    Load data given by args. Perform checks to ensure dimensions agree
    between the data for mask, background, peaks and fODF.
    """
    fodf = nib.nifti1.load(args.in_fodf).get_fdata(dtype=np.float32)
    data = {'fodf': _crop_along_axis(fodf, args.slice_index, args.axis_name)}
    if args.background:
        bg = nib.nifti1.load(args.background).get_fdata(dtype=np.float32)
        if bg.shape[:3] != fodf.shape[:-1]:
            raise ValueError('Background dimensions {0} do not agree with fODF'
                             ' dimensions {1}.'.format(bg.shape, fodf.shape))
        data['bg'] = _crop_along_axis(bg, args.slice_index, args.axis_name)
    if args.mask:
        mask = get_data_as_mask(nib.nifti1.load(args.mask), dtype=bool)
        if mask.shape != fodf.shape[:-1]:
            raise ValueError('Mask dimensions {0} do not agree with fODF '
                             'dimensions {1}.'.format(mask.shape, fodf.shape))
        data['mask'] = _crop_along_axis(mask, args.slice_index, args.axis_name)
    if args.peaks:
        peaks = nib.nifti1.load(args.peaks).get_fdata(dtype=np.float32)
        if peaks.shape[:3] != fodf.shape[:-1]:
            raise ValueError('Peaks volume dimensions {0} do not agree '
                             'with fODF dimensions {1}.'.format(
                                 peaks.shape, fodf.shape))
        if len(peaks.shape) == 4:
            last_dim = peaks.shape[-1]
            if last_dim % 3 == 0:
                npeaks = int(last_dim / 3)
                peaks = peaks.reshape((peaks.shape[:3] + (npeaks, 3)))
            else:
                raise ValueError('Peaks volume last dimension ({0}) cannot '
                                 'be reshaped as (npeaks, 3).'.format(
                                     peaks.shape[-1]))
        data['peaks'] = _crop_along_axis(peaks, args.slice_index,
                                         args.axis_name)
        if args.peaks_values:
            peak_vals =\
                nib.nifti1.load(args.peaks_values).get_fdata(dtype=np.float32)
            if peak_vals.shape[:3] != fodf.shape[:-1]:
                raise ValueError('Peaks volume dimensions {0} do not agree '
                                 'with fODF dimensions {1}.'.format(
                                     peak_vals.shape, fodf.shape))
            data['peaks_values'] =\
                _crop_along_axis(peak_vals, args.slice_index,
                                 args.axis_name)

    grid_shape = data['fodf'].shape[:3]
    return data, grid_shape
Example 17
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_image)
    assert_outputs_exist(parser, args, args.out_image, args.logfile)

    logging.basicConfig()
    log = logging.getLogger(__name__)
    if args.verbose:
        log.setLevel(level=logging.INFO)
    else:
        log.setLevel(level=logging.WARNING)

    if args.logfile is not None:
        log.addHandler(logging.FileHandler(args.logfile, mode='w'))

    vol = nib.load(args.in_image)
    data = vol.get_fdata(dtype=np.float32)
    if args.mask is None:
        mask = np.zeros(data.shape[0:3], dtype=bool)
        if data.ndim == 4:
            mask[np.sum(data, axis=-1) > 0] = 1
        else:
            mask[data > 0] = 1
    else:
        mask = get_data_as_mask(nib.load(args.mask), dtype=bool)

    sigma = args.sigma

    if sigma is not None:
        log.info('User supplied noise standard deviation is {}'.format(sigma))
        # Broadcast the single value to a whole 3D volume for nlmeans
        sigma = np.ones(data.shape[:3]) * sigma
    else:
        log.info('Estimating noise')
        sigma = _get_basic_sigma(data, log)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=DeprecationWarning)
        data_denoised = nlmeans(data,
                                sigma,
                                mask=mask,
                                rician=args.number_coils > 0,
                                num_threads=args.nbr_processes)

    nib.save(nib.Nifti1Image(data_denoised, vol.affine, header=vol.header),
             args.out_image)
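
A minimal sketch, on synthetic data, of the two fallback paths above: deriving a crude mask from non-zero signal and broadcasting a scalar sigma to a 3D volume as nlmeans expects.

import numpy as np

data = np.random.rand(4, 4, 4, 8).astype(np.float32)
mask = np.zeros(data.shape[:3], dtype=bool)
mask[np.sum(data, axis=-1) > 0] = 1      # any voxel with signal

sigma = np.ones(data.shape[:3]) * 0.01   # scalar sigma as a 3D volume
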
Example 18
def extract_tails_heads_from_endpoints(gt_endpoints, out_dir):
    """
    Extract two masks from a single mask containing two regions.

    Parameters
    ----------
    gt_endpoints: list of str
        List of ground-truth mask filenames.
    out_dir: str
        Output directory for the head and tail mask files.

    Returns
    -------
    tails: list
        List of tail filenames.
    heads: list
        List of head filenames.
    affine: numpy.ndarray
        Affine of mask image.
    dimensions: tuple of int
        Dimensions of the mask image.
    """

    tails = []
    heads = []
    for mask_filename in gt_endpoints:
        mask_img = nib.load(mask_filename)
        mask = get_data_as_mask(mask_img)
        affine = mask_img.affine
        dimensions = mask.shape

        head, tail = split_heads_tails_kmeans(mask)

        basename = os.path.basename(
            split_name_with_nii(mask_filename)[0])
        tail_filename = os.path.join(
            out_dir, '{}_tail.nii.gz'.format(basename))
        head_filename = os.path.join(
            out_dir, '{}_head.nii.gz'.format(basename))
        nib.save(nib.Nifti1Image(head.astype(
            mask.dtype), affine), head_filename)
        nib.save(nib.Nifti1Image(tail.astype(
            mask.dtype), affine), tail_filename)

        tails.append(tail_filename)
        heads.append(head_filename)

    return tails, heads, affine, dimensions
Example 19
def threshold_ihMT_maps(computed_map, contrasts_maps, in_mask, lower_threshold,
                        upper_threshold, idx_contrast_list):
    """
    Remove NaN/inf values and apply thresholds based on
       - minimum and maximum threshold values
       - T1 mask
       - combination of specific contrast maps

    Parameters
    ----------
    computed_map        3D-Array data.
                        Myelin map (ihMT or non-ihMT maps)
    contrasts_maps      List of 3D-arrays containing the six
                        contrast maps.
    in_mask             Path to binary T1 mask from T1 segmentation.
                        Must be the sum of GM+WM+CSF.
    lower_threshold     Value for the lower threshold <int>
    upper_threshold     Value for the upper threshold <int>
    idx_contrast_list   List of indices of the contrast maps to use,
                        matching the order of contrasts_maps, e.g. [0, 2, 5].
                        Altnp = 0; Atlpn = 1; Reference = 2; Negative = 3;
                        Positive = 4; T1weighted = 5

    Returns
    ----------
    Thresholded matrix as a 3D-array.
    """
    # Remove NaN/inf and apply thresholds based on lower and upper values
    computed_map[np.isnan(computed_map)] = 0
    computed_map[np.isinf(computed_map)] = 0
    computed_map[computed_map < lower_threshold] = 0
    computed_map[computed_map > upper_threshold] = 0

    # Load and apply sum of T1 probability maps on myelin maps
    mask_image = nib.load(in_mask)
    mask_data = get_data_as_mask(mask_image)
    computed_map[np.where(mask_data == 0)] = 0

    # Apply threshold based on combination of specific contrasts maps
    for idx in idx_contrast_list:
        computed_map[contrasts_maps[idx] == 0] = 0

    return computed_map
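
A small sketch of the contrast-combination step, assuming six synthetic contrast maps and the index convention from the docstring.

import numpy as np

computed_map = np.ones((2, 2, 2), dtype=np.float32)
contrasts_maps = [np.ones((2, 2, 2), dtype=np.float32) for _ in range(6)]
contrasts_maps[2][0, 0, 0] = 0       # a hole in the Reference map
for idx in [0, 2, 5]:                # e.g. Altnp, Reference, T1weighted
    computed_map[contrasts_maps[idx] == 0] = 0
print(computed_map[0, 0, 0])         # 0.0: zeroed wherever any chosen map is 0
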
Example 20
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_tractogram)
    assert_outputs_exist(parser, args, args.out_tractogram)
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    roi_opt_list, only_filtering_list = prepare_filtering_list(parser, args)
    o_dict = {}

    sft = load_tractogram_with_reference(parser, args, args.in_tractogram)

    # Streamline count before filtering
    o_dict['streamline_count_before_filtering'] = len(sft.streamlines)

    for i, roi_opt in enumerate(roi_opt_list):
        curr_dict = {}
        # Atlas needs an extra argument (value in the LUT)
        if roi_opt[0] == 'atlas_roi':
            filter_type, filter_arg, filter_arg_2, \
                filter_mode, filter_criteria = roi_opt
        else:
            filter_type, filter_arg, filter_mode, filter_criteria = roi_opt

        curr_dict['filename'] = os.path.abspath(filter_arg)
        curr_dict['type'] = filter_type
        curr_dict['mode'] = filter_mode
        curr_dict['criteria'] = filter_criteria

        is_exclude = filter_criteria != 'include'

        if filter_type in ('drawn_roi', 'atlas_roi'):
            img = nib.load(filter_arg)
            if not is_header_compatible(img, sft):
                parser.error('Headers from the tractogram and the mask are '
                             'not compatible.')
            if filter_type == 'drawn_roi':
                mask = get_data_as_mask(img)
            else:
                atlas = get_data_as_label(img)
                mask = np.zeros(atlas.shape, dtype=np.uint16)
                mask[atlas == int(filter_arg_2)] = 1
            filtered_sft, indexes = filter_grid_roi(sft, mask,
                                                    filter_mode, is_exclude)

        # For every case, the input number must be greater than or equal to 0
        # and below the dimension, since this is a voxel-space operation
        elif filter_type in ['x_plane', 'y_plane', 'z_plane']:
            filter_arg = int(filter_arg)
            _, dim, _, _ = sft.space_attributes
            mask = np.zeros(dim, dtype=np.int16)
            error_msg = None
            if filter_type == 'x_plane':
                if 0 <= filter_arg < dim[0]:
                    mask[filter_arg, :, :] = 1
                else:
                    error_msg = 'X plane ' + str(filter_arg)

            elif filter_type == 'y_plane':
                if 0 <= filter_arg < dim[1]:
                    mask[:, filter_arg, :] = 1
                else:
                    error_msg = 'Y plane ' + str(filter_arg)

            elif filter_type == 'z_plane':
                if 0 <= filter_arg < dim[2]:
                    mask[:, :, filter_arg] = 1
                else:
                    error_msg = 'Z plane ' + str(filter_arg)

            if error_msg:
                parser.error('{} is not valid according to the '
                             'tractogram header.'.format(error_msg))

            filtered_sft, indexes = filter_grid_roi(sft, mask,
                                                    filter_mode, is_exclude)

        elif filter_type == 'bdo':
            geometry, radius, center = read_info_from_mb_bdo(filter_arg)
            if geometry == 'Ellipsoid':
                filtered_sft, indexes = filter_ellipsoid(sft,
                                                         radius, center,
                                                         filter_mode, is_exclude)
            elif geometry == 'Cuboid':
                filtered_sft, indexes = filter_cuboid(sft,
                                                      radius, center,
                                                      filter_mode, is_exclude)

        logging.debug('The filtering options {0} resulted in '
                      '{1} streamlines'.format(roi_opt, len(filtered_sft)))

        sft = filtered_sft

        if only_filtering_list:
            filtering_name = 'Filter_' + str(i)
            curr_dict['streamline_count_after_filtering'] = len(sft.streamlines)
            o_dict[filtering_name] = curr_dict

    # Streamline count after filtering
    o_dict['streamline_count_final_filtering'] = len(sft.streamlines)
    if args.display_counts:
        print(json.dumps(o_dict, indent=args.indent))

    if not filtered_sft:
        if args.no_empty:
            logging.debug("The file {} won't be written (0 streamline)".format(
                args.out_tractogram))

            return

        logging.debug('The file {} contains 0 streamline'.format(
            args.out_tractogram))

    save_tractogram(sft, args.out_tractogram)
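
A short sketch of the single-plane mask built in the x/y/z_plane branches, assuming a toy volume shape; the bound check mirrors the voxel-space validation above.

import numpy as np

dim = (10, 12, 14)
filter_arg = 4
mask = np.zeros(dim, dtype=np.int16)
if 0 <= filter_arg < dim[0]:
    mask[filter_arg, :, :] = 1   # one-voxel-thick plane at x=4
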
Example 21
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_hdf5, args.in_labels],
                        args.force_labels_list)

    log_level = logging.WARNING
    if args.verbose:
        log_level = logging.INFO
    logging.basicConfig(level=log_level)
    coloredlogs.install(level=log_level)

    measures_to_compute = []
    measures_output_filename = []
    if args.volume:
        measures_to_compute.append('volume')
        measures_output_filename.append(args.volume)
    if args.streamline_count:
        measures_to_compute.append('streamline_count')
        measures_output_filename.append(args.streamline_count)
    if args.length:
        measures_to_compute.append('length')
        measures_output_filename.append(args.length)
    if args.similarity:
        measures_to_compute.append('similarity')
        measures_output_filename.append(args.similarity[1])

    dict_maps_out_name = {}
    if args.maps is not None:
        for in_folder, out_name in args.maps:
            measures_to_compute.append(in_folder)
            dict_maps_out_name[in_folder] = out_name
            measures_output_filename.append(out_name)

    dict_metrics_out_name = {}
    if args.metrics is not None:
        for in_name, out_name in args.metrics:
            # Verify that all metrics are compatible with each other
            if not is_header_compatible(args.metrics[0][0], in_name):
                raise IOError('Metrics {} and  {} do not share a compatible '
                              'header'.format(args.metrics[0][0], in_name))

            # This is necessary to support more than one map for weighting
            measures_to_compute.append((in_name, nib.load(in_name)))
            dict_metrics_out_name[in_name] = out_name
            measures_output_filename.append(out_name)

    dict_lesion_out_name = {}
    if args.lesion_load is not None:
        in_name = args.lesion_load[0]
        lesion_img = nib.load(in_name)
        lesion_data = get_data_as_mask(lesion_img, dtype=bool)
        lesion_atlas, _ = ndi.label(lesion_data)
        measures_to_compute.append(((in_name, np.unique(lesion_atlas)[1:]),
                                    nib.Nifti1Image(lesion_atlas,
                                                    lesion_img.affine)))

        out_name_1 = os.path.join(args.lesion_load[1], 'lesion_vol.npy')
        out_name_2 = os.path.join(args.lesion_load[1], 'lesion_count.npy')
        out_name_3 = os.path.join(args.lesion_load[1], 'lesion_sc.npy')

        dict_lesion_out_name[in_name+'vol'] = out_name_1
        dict_lesion_out_name[in_name+'count'] = out_name_2
        dict_lesion_out_name[in_name+'sc'] = out_name_3
        measures_output_filename.extend([out_name_1, out_name_2, out_name_3])

    assert_outputs_exist(parser, args, measures_output_filename)
    if not measures_to_compute:
        parser.error('No connectivity measures were selected, nothing '
                     'to compute.')

    logging.info('The following measures will be computed and saved: {}'.format(
        measures_output_filename))

    if args.include_dps:
        if not os.path.isdir(args.include_dps):
            os.makedirs(args.include_dps)
        logging.info('data_per_streamline weighting is activated.')

    img_labels = nib.load(args.in_labels)
    data_labels = get_data_as_label(img_labels)
    if not args.force_labels_list:
        labels_list = np.unique(data_labels)[1:].tolist()
    else:
        labels_list = np.loadtxt(
            args.force_labels_list, dtype=np.int16).tolist()

    comb_list = list(itertools.combinations(labels_list, r=2))
    if not args.no_self_connection:
        comb_list.extend(zip(labels_list, labels_list))

    nbr_cpu = validate_nbr_processes(parser, args)
    measures_dict_list = []
    if nbr_cpu == 1:
        for comb in comb_list:
            measures_dict_list.append(_processing_wrapper([args.in_hdf5,
                                                           img_labels, comb,
                                                           measures_to_compute,
                                                           args.similarity,
                                                           args.density_weighting,
                                                           args.include_dps,
                                                           args.min_lesion_vol]))
    else:
        pool = multiprocessing.Pool(nbr_cpu)
        measures_dict_list = pool.map(_processing_wrapper,
                                      zip(itertools.repeat(args.in_hdf5),
                                          itertools.repeat(img_labels),
                                          comb_list,
                                          itertools.repeat(
                                              measures_to_compute),
                                          itertools.repeat(args.similarity),
                                          itertools.repeat(
                                          args.density_weighting),
                                          itertools.repeat(args.include_dps),
                                          itertools.repeat(args.min_lesion_vol)))
        pool.close()
        pool.join()

    # Removing None entries (combinations that do not exist)
    # Fusing the multiprocessing output into a single dictionary
    measures_dict_list = [it for it in measures_dict_list if it is not None]
    if not measures_dict_list:
        raise ValueError('Empty matrix, no entries to save.')
    measures_dict = measures_dict_list[0]
    for dix in measures_dict_list[1:]:
        measures_dict.update(dix)

    if args.no_self_connection:
        total_elem = len(labels_list)**2 - len(labels_list)
        results_elem = len(measures_dict.keys())*2 - len(labels_list)
    else:
        total_elem = len(labels_list)**2
        results_elem = len(measures_dict.keys())*2

    logging.info('Out of {} possible nodes, {} contain values'.format(
        total_elem, results_elem))

    # Filling out all the matrices (symmetric) in the order of labels_list
    nbr_of_measures = len(list(measures_dict.values())[0])
    matrix = np.zeros((len(labels_list), len(labels_list), nbr_of_measures))

    for in_label, out_label in measures_dict:
        curr_node_dict = measures_dict[(in_label, out_label)]
        measures_ordering = list(curr_node_dict.keys())

        for i, measure in enumerate(curr_node_dict):
            in_pos = labels_list.index(in_label)
            out_pos = labels_list.index(out_label)
            matrix[in_pos, out_pos, i] = curr_node_dict[measure]
            matrix[out_pos, in_pos, i] = curr_node_dict[measure]

    # Saving the matrices separately with the specified name or dps
    for i, measure in enumerate(measures_ordering):
        if measure == 'volume':
            matrix_basename = args.volume
        elif measure == 'streamline_count':
            matrix_basename = args.streamline_count
        elif measure == 'length':
            matrix_basename = args.length
        elif measure == 'similarity':
            matrix_basename = args.similarity[1]
        elif measure in dict_metrics_out_name:
            matrix_basename = dict_metrics_out_name[measure]
        elif measure in dict_maps_out_name:
            matrix_basename = dict_maps_out_name[measure]
        elif measure in dict_lesion_out_name:
            matrix_basename = dict_lesion_out_name[measure]
        else:
            matrix_basename = measure

        np.save(matrix_basename, matrix[:, :, i])
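
A compact sketch of the symmetric matrix fill, assuming a toy label list and one scalar measure per node pair.

import numpy as np

labels_list = [2, 5, 9]
measures = {(2, 5): 0.7, (5, 9): 0.3, (2, 2): 1.0}
matrix = np.zeros((len(labels_list), len(labels_list)))
for (in_label, out_label), value in measures.items():
    in_pos = labels_list.index(in_label)
    out_pos = labels_list.index(out_label)
    matrix[in_pos, out_pos] = value
    matrix[out_pos, in_pos] = value     # mirror for symmetry
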
Example 22
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, args.in_tractogram,
                        [args.mask, args.reference])

    output_file_list = []
    if args.out_mask:
        output_file_list.append(args.out_mask)
    if args.out_tdi:
        output_file_list.append(args.out_tdi)
    if args.out_todi_sf:
        output_file_list.append(args.out_todi_sf)
    if args.out_todi_sh:
        output_file_list.append(args.out_todi_sh)

    if not output_file_list:
        parser.error('No output to be done')

    assert_outputs_exist(parser, args, output_file_list)

    sft = load_tractogram_with_reference(parser, args, args.in_tractogram)
    affine, data_shape, _, _ = sft.space_attributes
    sft.to_vox()

    logging.info('Computing length-weighted TODI ...')
    todi_obj = TrackOrientationDensityImaging(tuple(data_shape), args.sphere)
    todi_obj.compute_todi(sft.streamlines, length_weights=True)

    if args.smooth_todi:
        logging.info('Smoothing ...')
        todi_obj.smooth_todi_dir()
        todi_obj.smooth_todi_spatial()

    if args.mask:
        mask = get_data_as_mask(nib.load(args.mask))
        todi_obj.mask_todi(mask)

    logging.info('Saving Outputs ...')
    if args.out_mask:
        data = todi_obj.get_mask()
        img = todi_obj.reshape_to_3d(data)
        img = nib.Nifti1Image(img.astype(np.int16), affine)
        img.to_filename(args.out_mask)

    if args.out_todi_sh:
        if args.normalize_per_voxel:
            todi_obj.normalize_todi_per_voxel()
        img = todi_obj.get_sh(args.sh_basis, args.sh_order)
        img = todi_obj.reshape_to_3d(img)
        img = nib.Nifti1Image(img.astype(np.float32), affine)
        img.to_filename(args.out_todi_sh)

    if args.out_tdi:
        img = todi_obj.get_tdi()
        img = todi_obj.reshape_to_3d(img)
        img = nib.Nifti1Image(img.astype(np.float32), affine)
        img.to_filename(args.out_tdi)

    if args.out_todi_sf:
        img = todi_obj.get_todi()
        img = todi_obj.reshape_to_3d(img)
        img = nib.Nifti1Image(img.astype(np.float32), affine)
        img.to_filename(args.out_todi_sf)
Example 23
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if not args.not_all:
        args.fa = args.fa or 'fa.nii.gz'
        args.ga = args.ga or 'ga.nii.gz'
        args.rgb = args.rgb or 'rgb.nii.gz'
        args.md = args.md or 'md.nii.gz'
        args.ad = args.ad or 'ad.nii.gz'
        args.rd = args.rd or 'rd.nii.gz'
        args.mode = args.mode or 'mode.nii.gz'
        args.norm = args.norm or 'tensor_norm.nii.gz'
        args.tensor = args.tensor or 'tensor.nii.gz'
        args.evecs = args.evecs or 'tensor_evecs.nii.gz'
        args.evals = args.evals or 'tensor_evals.nii.gz'
        args.residual = args.residual or 'dti_residual.nii.gz'
        args.p_i_signal =\
            args.p_i_signal or 'physically_implausible_signals_mask.nii.gz'
        args.pulsation = args.pulsation or 'pulsation_and_misalignment.nii.gz'

    outputs = [
        args.fa, args.ga, args.rgb, args.md, args.ad, args.rd, args.mode,
        args.norm, args.tensor, args.evecs, args.evals, args.residual,
        args.p_i_signal, args.pulsation
    ]
    if args.not_all and not any(outputs):
        parser.error('When using --not_all, you need to specify at least ' +
                     'one metric to output.')

    assert_inputs_exist(parser, [args.input, args.bvals, args.bvecs],
                        args.mask)
    assert_outputs_exist(parser, args, outputs)

    img = nib.load(args.input)
    data = img.get_fdata(dtype=np.float32)
    affine = img.affine
    if args.mask is None:
        mask = None
    else:
        mask = get_data_as_mask(nib.load(args.mask), dtype=bool)

    # Validate bvals and bvecs
    logging.info('Tensor estimation with the {} method...'.format(args.method))
    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    if not is_normalized_bvecs(bvecs):
        logging.warning('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)

    check_b0_threshold(args.force_b0_threshold, bvals.min())
    gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())

    # Get tensors
    if args.method == 'restore':
        sigma = ne.estimate_sigma(data)
        tenmodel = TensorModel(gtab,
                               fit_method=args.method,
                               sigma=sigma,
                               min_signal=_get_min_nonzero_signal(data))
    else:
        tenmodel = TensorModel(gtab,
                               fit_method=args.method,
                               min_signal=_get_min_nonzero_signal(data))

    tenfit = tenmodel.fit(data, mask)

    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    FA = np.clip(FA, 0, 1)

    if args.tensor:
        # Get the Tensor values and format them for visualisation
        # in the Fibernavigator.
        tensor_vals = lower_triangular(tenfit.quadratic_form)
        correct_order = [0, 1, 3, 2, 4, 5]
        tensor_vals_reordered = tensor_vals[..., correct_order]
        fiber_tensors = nib.Nifti1Image(
            tensor_vals_reordered.astype(np.float32), affine)
        nib.save(fiber_tensors, args.tensor)

    if args.fa:
        fa_img = nib.Nifti1Image(FA.astype(np.float32), affine)
        nib.save(fa_img, args.fa)

    if args.ga:
        GA = geodesic_anisotropy(tenfit.evals)
        GA[np.isnan(GA)] = 0

        ga_img = nib.Nifti1Image(GA.astype(np.float32), affine)
        nib.save(ga_img, args.ga)

    if args.rgb:
        RGB = color_fa(FA, tenfit.evecs)
        rgb_img = nib.Nifti1Image(np.array(255 * RGB, 'uint8'), affine)
        nib.save(rgb_img, args.rgb)

    if args.md:
        MD = mean_diffusivity(tenfit.evals)
        md_img = nib.Nifti1Image(MD.astype(np.float32), affine)
        nib.save(md_img, args.md)

    if args.ad:
        AD = axial_diffusivity(tenfit.evals)
        ad_img = nib.Nifti1Image(AD.astype(np.float32), affine)
        nib.save(ad_img, args.ad)

    if args.rd:
        RD = radial_diffusivity(tenfit.evals)
        rd_img = nib.Nifti1Image(RD.astype(np.float32), affine)
        nib.save(rd_img, args.rd)

    if args.mode:
        # Compute tensor mode
        inter_mode = dipy_mode(tenfit.quadratic_form)

        # Since the mode computation can generate NANs when not masked,
        # we need to remove them.
        non_nan_indices = np.isfinite(inter_mode)
        mode = np.zeros(inter_mode.shape)
        mode[non_nan_indices] = inter_mode[non_nan_indices]

        mode_img = nib.Nifti1Image(mode.astype(np.float32), affine)
        nib.save(mode_img, args.mode)

    if args.norm:
        NORM = norm(tenfit.quadratic_form)
        norm_img = nib.Nifti1Image(NORM.astype(np.float32), affine)
        nib.save(norm_img, args.norm)

    if args.evecs:
        evecs = tenfit.evecs.astype(np.float32)
        evecs_img = nib.Nifti1Image(evecs, affine)
        nib.save(evecs_img, args.evecs)

        # save individual e-vectors also
        e1_img = nib.Nifti1Image(evecs[..., 0], affine)
        e2_img = nib.Nifti1Image(evecs[..., 1], affine)
        e3_img = nib.Nifti1Image(evecs[..., 2], affine)

        nib.save(e1_img, add_filename_suffix(args.evecs, '_v1'))
        nib.save(e2_img, add_filename_suffix(args.evecs, '_v2'))
        nib.save(e3_img, add_filename_suffix(args.evecs, '_v3'))

    if args.evals:
        evals = tenfit.evals.astype(np.float32)
        evals_img = nib.Nifti1Image(evals, affine)
        nib.save(evals_img, args.evals)

        # save individual e-values also
        e1_img = nib.Nifti1Image(evals[..., 0], affine)
        e2_img = nib.Nifti1Image(evals[..., 1], affine)
        e3_img = nib.Nifti1Image(evals[..., 2], affine)

        nib.save(e1_img, add_filename_suffix(args.evals, '_e1'))
        nib.save(e2_img, add_filename_suffix(args.evals, '_e2'))
        nib.save(e3_img, add_filename_suffix(args.evals, '_e3'))

    if args.p_i_signal:
        S0 = np.mean(data[..., gtab.b0s_mask], axis=-1, keepdims=True)
        DWI = data[..., ~gtab.b0s_mask]
        pis_mask = np.max(S0 < DWI, axis=-1)

        if args.mask is not None:
            pis_mask *= mask

        pis_img = nib.Nifti1Image(pis_mask.astype(np.int16), affine)
        nib.save(pis_img, args.p_i_signal)

    if args.pulsation:
        STD = np.std(data[..., ~gtab.b0s_mask], axis=-1)

        if args.mask is not None:
            STD *= mask

        std_img = nib.Nifti1Image(STD.astype(np.float32), affine)
        nib.save(std_img, add_filename_suffix(args.pulsation, '_std_dwi'))

        if np.sum(gtab.b0s_mask) <= 1:
            logger.info('Not enough b=0 images to output standard '
                        'deviation map')
        else:
            if np.sum(gtab.b0s_mask) == 2:
                logger.info('Only two b=0 images. Be careful with the '
                            'interpretation of this std map')

            STD = np.std(data[..., gtab.b0s_mask], axis=-1)

            if args.mask is not None:
                STD *= mask

            std_img = nib.Nifti1Image(STD.astype(np.float32), affine)
            nib.save(std_img, add_filename_suffix(args.pulsation, '_std_b0'))

    if args.residual:
        # Mean residual image
        S0 = np.mean(data[..., gtab.b0s_mask], axis=-1)
        data_p = tenfit.predict(gtab, S0)
        R = np.mean(np.abs(data_p[..., ~gtab.b0s_mask] -
                           data[..., ~gtab.b0s_mask]),
                    axis=-1)

        if args.mask is not None:
            R *= mask

        R_img = nib.Nifti1Image(R.astype(np.float32), affine)
        nib.save(R_img, args.residual)

        # Each volume's residual statistics
        if args.mask is None:
            logger.info("Outlier detection will not be performed, since no "
                        "mask was provided.")
        stats = [
            dict.fromkeys([
                'label', 'mean', 'iqr', 'cilo', 'cihi', 'whishi', 'whislo',
                'fliers', 'q1', 'med', 'q3'
            ], []) for i in range(data.shape[-1])
        ]  # stats with format for boxplots
        # Note that stats will be computed manually and plotted using bxp
        # but could be computed using stats = cbook.boxplot_stats
        # or pyplot.boxplot(x)
        R_k = np.zeros(data.shape[-1])  # mean residual per DWI
        std = np.zeros(data.shape[-1])  # std residual per DWI
        q1 = np.zeros(data.shape[-1])  # first quartile per DWI
        q3 = np.zeros(data.shape[-1])  # third quartile per DWI
        iqr = np.zeros(data.shape[-1])  # interquartile per DWI
        percent_outliers = np.zeros(data.shape[-1])
        nb_voxels = np.count_nonzero(mask)
        for k in range(data.shape[-1]):
            x = np.abs(data_p[..., k] - data[..., k])[mask]
            R_k[k] = np.mean(x)
            std[k] = np.std(x)
            q3[k], q1[k] = np.percentile(x, [75, 25])
            iqr[k] = q3[k] - q1[k]
            stats[k]['med'] = (q1[k] + q3[k]) / 2
            stats[k]['mean'] = R_k[k]
            stats[k]['q1'] = q1[k]
            stats[k]['q3'] = q3[k]
            stats[k]['whislo'] = q1[k] - 1.5 * iqr[k]
            stats[k]['whishi'] = q3[k] + 1.5 * iqr[k]
            stats[k]['label'] = k

            # Outliers are observations that fall below Q1 - 1.5(IQR) or
            # above Q3 + 1.5(IQR) We check if a voxel is an outlier only if
            # we have a mask, else we are biased.
            if args.mask is not None:
                outliers = (x < stats[k]['whislo']) | (x > stats[k]['whishi'])
                percent_outliers[k] = np.sum(outliers) / nb_voxels * 100
                # What would be our definition of too many outliers?
                # Maybe mean(all_means)+-3SD?
                # Or we let people choose based on the figure.
                # if percent_outliers[k] > ???? :
                #    logger.warning('   Careful! Diffusion-Weighted Image'
                #                   ' i=%s has %s %% outlier voxels',
                #                   k, percent_outliers[k])

        # Saving all statistics as npy values
        residual_basename, _ = split_name_with_nii(args.residual)
        res_stats_basename = residual_basename + ".npy"
        np.save(add_filename_suffix(res_stats_basename, "_mean_residuals"),
                R_k)
        np.save(add_filename_suffix(res_stats_basename, "_q1_residuals"), q1)
        np.save(add_filename_suffix(res_stats_basename, "_q3_residuals"), q3)
        np.save(add_filename_suffix(res_stats_basename, "_iqr_residuals"), iqr)
        np.save(add_filename_suffix(res_stats_basename, "_std_residuals"), std)

        # Showing results in graph
        if args.mask is None:
            fig, axe = plt.subplots(nrows=1, ncols=1, squeeze=False)
        else:
            fig, axe = plt.subplots(nrows=1,
                                    ncols=2,
                                    squeeze=False,
                                    figsize=[10, 4.8])
            # Default is [6.4, 4.8]. Increasing width to see better.

        medianprops = dict(linestyle='-', linewidth=2.5, color='firebrick')
        meanprops = dict(linestyle='-', linewidth=2.5, color='green')
        axe[0, 0].bxp(stats,
                      showmeans=True,
                      meanline=True,
                      showfliers=False,
                      medianprops=medianprops,
                      meanprops=meanprops)
        axe[0, 0].set_xlabel('DW image')
        axe[0, 0].set_ylabel('Residuals per DWI volume. Red is median,\n'
                             'green is mean. Whiskers are 1.5*interquartile')
        axe[0, 0].set_title('Residuals')
        axe[0, 0].set_xticks(range(0, q1.shape[0], 5))
        axe[0, 0].set_xticklabels(range(0, q1.shape[0], 5))

        if args.mask is not None:
            axe[0, 1].plot(range(data.shape[-1]), percent_outliers)
            axe[0, 1].set_xticks(range(0, q1.shape[0], 5))
            axe[0, 1].set_xticklabels(range(0, q1.shape[0], 5))
            axe[0, 1].set_xlabel('DW image')
            axe[0, 1].set_ylabel('Percentage of outlier voxels')
            axe[0, 1].set_title('Outliers')
        plt.savefig(residual_basename + '_residuals_stats.png')
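
A self-contained sketch of the Tukey-style outlier rule used above: observations below Q1 - 1.5 * IQR or above Q3 + 1.5 * IQR are flagged.

import numpy as np

x = np.abs(np.random.randn(1000))    # stand-in residuals
q3, q1 = np.percentile(x, [75, 25])
iqr = q3 - q1
whislo, whishi = q1 - 1.5 * iqr, q3 + 1.5 * iqr
outliers = (x < whislo) | (x > whishi)
percent_outliers = np.sum(outliers) / x.size * 100
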
Example 24
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, args.in_bundles)
    assert_outputs_exist(parser, args, args.out_json)

    if (not args.streamlines_measures) and (not args.voxels_measures):
        parser.error('At least one of the two modes is needed')

    nbr_cpu = validate_nbr_processes(parser, args)

    all_binary_metrics = []
    bundles_references_tuple_extended = link_bundles_and_reference(
        parser, args, args.in_bundles)

    if args.streamlines_measures:
        # Gold standard related indices are computed once
        wb_sft = load_tractogram_with_reference(parser, args,
                                                args.streamlines_measures[1])
        wb_sft.to_vox()
        wb_sft.to_corner()
        wb_streamlines = wb_sft.streamlines

        gs_sft = load_tractogram_with_reference(parser, args,
                                                args.streamlines_measures[0])
        gs_sft.to_vox()
        gs_sft.to_corner()
        gs_streamlines = gs_sft.streamlines
        _, gs_dimensions, _, _ = gs_sft.space_attributes

        # Prepare the gold standard only once
        _, gs_streamlines_indices = perform_streamlines_operation(
            intersection, [wb_streamlines, gs_streamlines], precision=0)

        if nbr_cpu == 1:
            streamlines_dict = []
            for i in bundles_references_tuple_extended:
                streamlines_dict.append(
                    compute_streamlines_measures(
                        [i, wb_streamlines, gs_streamlines_indices]))
        else:
            pool = multiprocessing.Pool(nbr_cpu)
            streamlines_dict = pool.map(
                compute_streamlines_measures,
                zip(bundles_references_tuple_extended,
                    itertools.repeat(wb_streamlines),
                    itertools.repeat(gs_streamlines_indices)))
            pool.close()
            pool.join()
        all_binary_metrics.extend(streamlines_dict)

    if not args.voxels_measures:
        gs_binary_3d = compute_tract_counts_map(gs_streamlines, gs_dimensions)
        gs_binary_3d[gs_binary_3d > 0] = 1

        tracking_mask_data = compute_tract_counts_map(wb_streamlines,
                                                      gs_dimensions)
        tracking_mask_data[tracking_mask_data > 0] = 1
    else:
        gs_binary_3d = get_data_as_mask(nib.load(args.voxels_measures[0]))
        gs_binary_3d[gs_binary_3d > 0] = 1
        tracking_mask_data = get_data_as_mask(nib.load(
            args.voxels_measures[1]))
        tracking_mask_data[tracking_mask_data > 0] = 1

    if nbr_cpu == 1:
        voxels_dict = []
        for i in bundles_references_tuple_extended:
            voxels_dict.append(
                compute_voxel_measures([i, tracking_mask_data, gs_binary_3d]))
    else:
        # A fresh pool is needed here: the one used for the streamlines
        # measures (if any) has already been closed.
        pool = multiprocessing.Pool(nbr_cpu)
        voxels_dict = pool.map(
            compute_voxel_measures,
            zip(bundles_references_tuple_extended,
                itertools.repeat(tracking_mask_data),
                itertools.repeat(gs_binary_3d)))
        pool.close()
        pool.join()
    all_binary_metrics.extend(voxels_dict)

    # After all processing, write the json file, skipping None values
    output_binary_dict = {}
    for binary_dict in all_binary_metrics:
        if binary_dict is not None:
            for measure_name in binary_dict.keys():
                if measure_name not in output_binary_dict:
                    output_binary_dict[measure_name] = []
                output_binary_dict[measure_name].append(
                    float(binary_dict[measure_name]))

    with open(args.out_json, 'w') as outfile:
        json.dump(output_binary_dict,
                  outfile,
                  indent=args.indent,
                  sort_keys=args.sort_keys)
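
# Illustrative sketch only (compute_voxel_measures itself is defined
# elsewhere): the voxel-wise binary measures aggregated above reduce to
# comparisons of two binary volumes, e.g. the Dice coefficient.
import numpy as np

def dice_coefficient(mask_a, mask_b):
    # Dice = 2 * |A inter B| / (|A| + |B|); defined as 0 when both are empty.
    intersection = np.count_nonzero(np.logical_and(mask_a, mask_b))
    total = np.count_nonzero(mask_a) + np.count_nonzero(mask_b)
    return 2.0 * intersection / total if total > 0 else 0.0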
Example no. 25
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)

    if not args.not_all:
        args.wm_out_fODF = args.wm_out_fODF or 'wm_fodf.nii.gz'
        args.gm_out_fODF = args.gm_out_fODF or 'gm_fodf.nii.gz'
        args.csf_out_fODF = args.csf_out_fODF or 'csf_fodf.nii.gz'
        args.vf = args.vf or 'vf.nii.gz'
        args.vf_rgb = args.vf_rgb or 'vf_rgb.nii.gz'

    arglist = [args.wm_out_fODF, args.gm_out_fODF, args.csf_out_fODF,
               args.vf, args.vf_rgb]
    if args.not_all and not any(arglist):
        parser.error('When using --not_all, you need to specify at least ' +
                     'one file to output.')

    assert_inputs_exist(parser, [args.in_dwi, args.in_bval, args.in_bvec,
                                 args.in_wm_frf, args.in_gm_frf,
                                 args.in_csf_frf])
    assert_outputs_exist(parser, args, arglist)

    # Loading data
    wm_frf = np.loadtxt(args.in_wm_frf)
    gm_frf = np.loadtxt(args.in_gm_frf)
    csf_frf = np.loadtxt(args.in_csf_frf)
    vol = nib.load(args.in_dwi)
    data = vol.get_fdata(dtype=np.float32)
    bvals, bvecs = read_bvals_bvecs(args.in_bval, args.in_bvec)

    # Checking mask
    if args.mask is None:
        mask = None
    else:
        mask = get_data_as_mask(nib.load(args.mask), dtype=bool)
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")

    sh_order = args.sh_order

    # Checking data and sh_order
    b0_thr = check_b0_threshold(
        args.force_b0_threshold, bvals.min(), bvals.min())
    if data.shape[-1] < (sh_order + 1) * (sh_order + 2) / 2:
        logging.warning(
            'We recommend having at least {} unique DWI volumes, but you '
            'currently have {} volumes. Try lowering the --sh_order parameter '
            'in case of non-convergence.'.format(
                (sh_order + 1) * (sh_order + 2) / 2, data.shape[-1]))

    # Checking bvals, bvecs values and loading gtab
    if not is_normalized_bvecs(bvecs):
        logging.warning('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)
    gtab = gradient_table(bvals, bvecs, b0_threshold=b0_thr)

    # Checking response functions and computing msmt response function
    if wm_frf.shape[1] != 4:
        raise ValueError('WM frf file did not contain 4 elements. '
                         'Invalid or deprecated FRF format.')
    if gm_frf.shape[1] != 4:
        raise ValueError('GM frf file did not contain 4 elements. '
                         'Invalid or deprecated FRF format.')
    if csf_frf.shape[1] != 4:
        raise ValueError('CSF frf file did not contain 4 elements. '
                         'Invalid or deprecated FRF format.')
    ubvals = unique_bvals_tolerance(bvals, tol=20)
    msmt_response = multi_shell_fiber_response(sh_order, ubvals,
                                               wm_frf, gm_frf, csf_frf)

    # Loading spheres
    reg_sphere = get_sphere('symmetric362')

    # Computing msmt-CSD
    msmt_model = MultiShellDeconvModel(gtab, msmt_response,
                                       reg_sphere=reg_sphere,
                                       sh_order=sh_order)

    # Computing msmt-CSD fit
    msmt_fit = fit_from_model(msmt_model, data,
                              mask=mask, nbr_processes=args.nbr_processes)

    shm_coeff = msmt_fit.all_shm_coeff

    nan_count = len(np.argwhere(np.isnan(shm_coeff[..., 0])))
    voxel_count = np.prod(shm_coeff.shape[:-1])

    if nan_count / voxel_count >= 0.05:
        msg = """There are {} voxels out of {} that could not be solved by
        the solver, reaching a critical amount of voxels. Make sure to tune the
        response functions properly, as the solving process is very sensitive
        to it. Proceeding to fill the problematic voxels by 0.
        """
        logging.warning(msg.format(nan_count, voxel_count))
    elif nan_count > 0:
        msg = """There are {} voxels out of {} that could not be solved by
        the solver. Make sure to tune the response functions properly, as the
        solving process is very sensitive to it. Proceeding to fill the
        problematic voxels by 0.
        """
        logging.warning(msg.format(nan_count, voxel_count))

    shm_coeff = np.where(np.isnan(shm_coeff), 0, shm_coeff)

    # Saving results
    if args.wm_out_fODF:
        wm_coeff = shm_coeff[..., 2:]
        if args.sh_basis == 'tournier07':
            wm_coeff = convert_sh_basis(wm_coeff, reg_sphere, mask=mask,
                                        nbr_processes=args.nbr_processes)
        nib.save(nib.Nifti1Image(wm_coeff.astype(np.float32),
                                 vol.affine), args.wm_out_fODF)

    if args.gm_out_fODF:
        gm_coeff = shm_coeff[..., 1]
        if args.sh_basis == 'tournier07':
            gm_coeff = gm_coeff.reshape(gm_coeff.shape + (1,))
            gm_coeff = convert_sh_basis(gm_coeff, reg_sphere, mask=mask,
                                        nbr_processes=args.nbr_processes)
        nib.save(nib.Nifti1Image(gm_coeff.astype(np.float32),
                                 vol.affine), args.gm_out_fODF)

    if args.csf_out_fODF:
        csf_coeff = shm_coeff[..., 0]
        if args.sh_basis == 'tournier07':
            csf_coeff = csf_coeff.reshape(csf_coeff.shape + (1,))
            csf_coeff = convert_sh_basis(csf_coeff, reg_sphere, mask=mask,
                                         nbr_processes=args.nbr_processes)
        nib.save(nib.Nifti1Image(csf_coeff.astype(np.float32),
                                 vol.affine), args.csf_out_fODF)

    if args.vf:
        nib.save(nib.Nifti1Image(msmt_fit.volume_fractions.astype(np.float32),
                                 vol.affine), args.vf)

    if args.vf_rgb:
        vf = msmt_fit.volume_fractions
        vf_rgb = vf / np.max(vf) * 255
        vf_rgb = np.clip(vf_rgb, 0, 255)
        nib.save(nib.Nifti1Image(vf_rgb.astype(np.uint8),
                                 vol.affine), args.vf_rgb)
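
# Sanity-check sketch for the --sh_order warning above (n_sh_coeff is a
# hypothetical helper, not part of the script): a symmetric SH basis of
# order L has (L + 1) * (L + 2) / 2 coefficients, which is the minimum
# number of unique DWI volumes the warning recommends.
def n_sh_coeff(sh_order):
    return (sh_order + 1) * (sh_order + 2) // 2

assert n_sh_coeff(8) == 45  # e.g. sh_order 8 needs at least 45 volumes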
Example no. 26
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_volume)
    assert_outputs_exist(parser, args, args.out_image)

    output_names = [
        'axial_superior', 'axial_inferior', 'coronal_posterior',
        'coronal_anterior', 'sagittal_left', 'sagittal_right'
    ]

    for filename in args.in_bundles:
        _, ext = os.path.splitext(filename)
        if ext == '.tck':
            tractogram = load_tractogram_with_reference(parser, args, filename)
        else:
            tractogram = filename
        if not is_header_compatible(args.in_volume, tractogram):
            parser.error('{} does not have a compatible header with {}'.format(
                filename, args.in_volume))
        else:
            # Delete the temporary tractogram
            del tractogram

    output_dir = os.path.dirname(args.out_image)
    if output_dir:
        assert_output_dirs_exist_and_empty(parser,
                                           args,
                                           output_dir,
                                           create_dir=True)

    _, extension = os.path.splitext(args.out_image)

    # ----------------------------------------------------------------------- #
    # Mosaic, column 0: orientation names and data description
    # ----------------------------------------------------------------------- #
    width = args.resolution_of_thumbnails
    height = args.resolution_of_thumbnails
    rows = 6
    cols = len(args.in_bundles)
    text_pos_x = 50
    text_pos_y = 50

    # Creates a new empty image, RGB mode
    mosaic = Image.new('RGB', ((cols + 1) * width, (rows + 1) * height))

    # Prepare draw and font objects to render text
    draw = ImageDraw.Draw(mosaic)
    font = get_font(args)

    # Data of the volume used as background
    ref_img = nib.load(args.in_volume)
    data = ref_img.get_fdata(dtype=np.float32)
    affine = ref_img.affine
    mean, std = data[data > 0].mean(), data[data > 0].std()
    value_range = (mean - 0.5 * std, mean + 1.5 * std)

    # First column with rows description
    draw_column_with_names(draw, output_names, text_pos_x, text_pos_y, height,
                           font)

    # ----------------------------------------------------------------------- #
    # Columns with bundles
    # ----------------------------------------------------------------------- #
    random.seed(args.random_coloring)
    for idx_bundle, bundle_file in enumerate(args.in_bundles):

        bundle_file_name = os.path.basename(bundle_file)
        bundle_name, bundle_ext = split_name_with_nii(bundle_file_name)

        i = (idx_bundle + 1) * width

        if not os.path.isfile(bundle_file):
            print('\nInput file {} doesn\'t exist.'.format(bundle_file))

            number_streamlines = 0

            view_number = 6
            j = height * view_number

            draw_bundle_information(draw, bundle_file_name, number_streamlines,
                                    i + text_pos_x, j + text_pos_y, font)

        else:
            if args.uniform_coloring:
                colors = args.uniform_coloring
            elif args.random_coloring is not None:
                colors = random_rgb()
            # Select the streamlines to plot
            if bundle_ext in ['.tck', '.trk']:
                if (args.random_coloring is None
                        and args.uniform_coloring is None):
                    colors = None
                bundle_tractogram_file = nib.streamlines.load(bundle_file)
                streamlines = bundle_tractogram_file.streamlines
                bundle_actor = actor.line(streamlines, colors)
                nbr_of_elem = len(streamlines)
            # Select the volume to plot
            elif bundle_ext in ['.nii.gz', '.nii']:
                if not args.random_coloring and not args.uniform_coloring:
                    colors = [1.0, 1.0, 1.0]
                bundle_img_file = nib.load(bundle_file)
                roi = get_data_as_mask(bundle_img_file)
                bundle_actor = actor.contour_from_roi(roi,
                                                      bundle_img_file.affine,
                                                      colors)
                nbr_of_elem = np.count_nonzero(roi)

            # Render
            ren = window.Scene()
            zoom = args.zoom
            opacity = args.opacity_background

            # Structural data
            slice_actor = actor.slicer(data, affine, value_range)
            slice_actor.opacity(opacity)
            ren.add(slice_actor)

            # Streamlines
            ren.add(bundle_actor)
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 0
            set_img_in_cell(mosaic, ren, view_number, width, height, i)

            ren.pitch(180)
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 1
            set_img_in_cell(mosaic, ren, view_number, width, height, i)

            ren.rm(slice_actor)
            slice_actor2 = slice_actor.copy()
            slice_actor2.display(None, slice_actor2.shape[1] // 2, None)
            slice_actor2.opacity(opacity)
            ren.add(slice_actor2)

            ren.pitch(90)
            ren.set_camera(view_up=(0, 0, 1))
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 2
            set_img_in_cell(mosaic, ren, view_number, width, height, i)

            ren.pitch(180)
            ren.set_camera(view_up=(0, 0, 1))
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 3
            set_img_in_cell(mosaic, ren, view_number, width, height, i)

            ren.rm(slice_actor2)
            slice_actor3 = slice_actor.copy()
            slice_actor3.display(slice_actor3.shape[0] // 2, None, None)
            slice_actor3.opacity(opacity)
            ren.add(slice_actor3)

            ren.yaw(90)
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 4
            set_img_in_cell(mosaic, ren, view_number, width, height, i)

            ren.yaw(180)
            ren.reset_camera()
            ren.zoom(zoom)
            view_number = 5
            set_img_in_cell(mosaic, ren, view_number, width, height, i)

            view_number = 6
            j = height * view_number
            draw_bundle_information(draw, bundle_file_name, nbr_of_elem,
                                    i + text_pos_x, j + text_pos_y, font)

    # Save image to file
    mosaic.save(args.out_image)
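
# Illustrative sketch of the mosaic cell arithmetic used above: column 0 is
# reserved for the row labels (hence the +1 offset on the bundle index), and
# rows 0-5 hold the six views, with row 6 used for the text annotations.
def cell_origin(idx_bundle, view_number, width, height):
    # Top-left pixel of the cell for a given bundle column and view row.
    return (idx_bundle + 1) * width, view_number * height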
Example no. 27
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if not args.not_all:
        args.afd_max = args.afd_max or 'afd_max.nii.gz'
        args.afd_total = args.afd_total or 'afd_total_sh0.nii.gz'
        args.afd_sum = args.afd_sum or 'afd_sum.nii.gz'
        args.nufo = args.nufo or 'nufo.nii.gz'
        args.rgb = args.rgb or 'rgb.nii.gz'
        args.peaks = args.peaks or 'peaks.nii.gz'
        args.peak_values = args.peak_values or 'peak_values.nii.gz'
        args.peak_indices = args.peak_indices or 'peak_indices.nii.gz'

    arglist = [args.afd_max, args.afd_total, args.afd_sum, args.nufo,
               args.rgb, args.peaks, args.peak_values,
               args.peak_indices]
    if args.not_all and not any(arglist):
        parser.error('When using --not_all, you need to specify at least '
                     'one file to output.')

    assert_inputs_exist(parser, args.in_fODF)
    assert_outputs_exist(parser, args, arglist)

    vol = nib.load(args.in_fODF)
    data = vol.get_fdata(dtype=np.float32)
    affine = vol.affine

    if args.mask is None:
        mask = None
    else:
        mask = get_data_as_mask(nib.load(args.mask), dtype=bool)
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")

    sphere = get_sphere(args.sphere)

    # Computing peaks
    peak_dirs, peak_values, \
        peak_indices = peaks_from_sh(data,
                                     sphere,
                                     mask=mask,
                                     relative_peak_threshold=args.r_threshold,
                                     absolute_threshold=args.a_threshold,
                                     min_separation_angle=25,
                                     normalize_peaks=False,
                                     sh_basis_type=args.sh_basis,
                                     nbr_processes=args.nbr_processes)

    # Computing maps
    nufo_map, afd_max, afd_sum, rgb_map, \
        _, _ = maps_from_sh(data, peak_dirs, peak_values, peak_indices,
                            sphere, nbr_processes=args.nbr_processes)

    # Save result
    if args.nufo:
        nib.save(nib.Nifti1Image(nufo_map.astype(np.float32),
                                 affine), args.nufo)

    if args.afd_max:
        nib.save(nib.Nifti1Image(afd_max.astype(np.float32),
                                 affine), args.afd_max)

    if args.afd_total:
        # this is the analytical afd total
        afd_tot = data[:, :, :, 0]
        nib.save(nib.Nifti1Image(afd_tot.astype(np.float32),
                                 affine), args.afd_total)

    if args.afd_sum:
        nib.save(nib.Nifti1Image(afd_sum.astype(np.float32),
                                 affine), args.afd_sum)

    if args.rgb:
        nib.save(nib.Nifti1Image(rgb_map.astype('uint8'), affine), args.rgb)

    if args.peaks or args.peak_values:
        peak_values = np.divide(peak_values, peak_values[..., 0, None],
                                out=np.zeros_like(peak_values),
                                where=peak_values[..., 0, None] != 0)
        peak_dirs[...] *= peak_values[..., :, None]
        if args.peaks:
            nib.save(nib.Nifti1Image(reshape_peaks_for_visualization(peak_dirs),
                                     affine), args.peaks)
        if args.peak_values:
            nib.save(nib.Nifti1Image(peak_values, vol.affine), args.peak_values)

    if args.peak_indices:
        nib.save(nib.Nifti1Image(peak_indices, vol.affine), args.peak_indices)
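
# Standalone equivalent of the peak rescaling above, assuming peak values are
# sorted in decreasing order: each voxel's peaks are normalized by its
# largest (first) peak, guarding against all-zero voxels.
import numpy as np

def normalize_peaks(peak_dirs, peak_values):
    first = peak_values[..., 0, None]
    scaled = np.divide(peak_values, first,
                       out=np.zeros_like(peak_values), where=first != 0)
    # Broadcasting scales each (x, y, z) direction by its normalized value.
    return peak_dirs * scaled[..., None], scaled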
Example no. 28
def extract_true_connections(
    sft, mask_1_filename, mask_2_filename, gt_config, length_dict,
    gt_bundle, gt_bundle_inv_mask, dilate_endpoints, wrong_path_as_separate
):
    """
    Extract true connections based on two regions from a tractogram.
    May extract false and no connections if the config is passed.

    Parameters
    ----------
    sft: StatefulTractogram
        Tractogram containing the streamlines to be extracted.
    mask_1_filename: str
        Filename of the "head" of the bundle.
    mask_2_filename: str
        Filename of the "tail" of the bundle.
    gt_config: dict or None
        Dictionary containing the bundle's parameters.
    length_dict: dict or None
        Dictionary containing the bundle's length parameters.
    gt_bundle: str
        Bundle's name.
    gt_bundle_inv_mask: np.ndarray
        Inverse mask of the bundle.
    dilate_endpoints: int or None
        If set, dilate the masks for n iterations.
    wrong_path_as_separate: bool
        If true, save the WPCs as separate from TCs.

    Returns
    -------
    tc_sft: StatefulTractogram
        SFT of true connections.
    wpc_sft: StatefulTractogram
        SFT of wrong-path-connections.
    fc_sft: StatefulTractogram
        SFT of false connections (streamlines whose length falls outside the
        configured range).
    nc_streamlines: ArraySequence
        Streamlines classified as no connections (loops and sharp turns).
    sft: StatefulTractogram
        SFT of remaining streamlines.
    """

    mask_1_img = nib.load(mask_1_filename)
    mask_2_img = nib.load(mask_2_filename)
    mask_1 = get_data_as_mask(mask_1_img)
    mask_2 = get_data_as_mask(mask_2_img)

    if dilate_endpoints:
        mask_1 = binary_dilation(mask_1, iterations=dilate_endpoints)
        mask_2 = binary_dilation(mask_2, iterations=dilate_endpoints)

    # TODO: Handle streamline IDs instead of streamlines
    tmp_sft, sft = extract_streamlines(mask_1, mask_2, sft)

    streamlines = tmp_sft.streamlines
    tc_streamlines = streamlines
    wpc_streamlines = []
    fc_streamlines = []
    nc_streamlines = []

    # Config file for each 'bundle'
    # Loops => no connection (nc) # TODO Is this legit ?
    # Length => false connection (fc) # TODO Is this legit ?
    if gt_config:
        min_len, max_len = \
            length_dict[gt_bundle]['length']

        # Bring streamlines to world coordinates so proper length
        # is calculated
        tmp_sft.to_rasmm()
        streamlines = tmp_sft.streamlines
        lengths = np.array(list(length(streamlines)))
        tmp_sft.to_vox()
        streamlines = tmp_sft.streamlines

        valid_min_length_mask = lengths > min_len
        valid_max_length_mask = lengths < max_len
        valid_length_mask = np.logical_and(valid_min_length_mask,
                                           valid_max_length_mask)
        streamlines = ArraySequence(streamlines)

        val_len_streamlines = streamlines[valid_length_mask]
        fc_streamlines = streamlines[~valid_length_mask]

        angle = length_dict[gt_bundle]['angle']
        tc_streamlines_ids = remove_loops_and_sharp_turns(
            val_len_streamlines, angle)

        loop_ids = np.setdiff1d(
            range(len(val_len_streamlines)), tc_streamlines_ids)

        loops = val_len_streamlines[list(loop_ids)]
        tc_streamlines = val_len_streamlines[list(tc_streamlines_ids)]

        if loops:
            nc_streamlines = loops

    # Streamlines getting out of the bundle mask can be considered
    # separately as wrong path connection (wpc)
    # TODO: Maybe only consider if they cross another GT bundle ?
    if wrong_path_as_separate:
        tmp_sft = StatefulTractogram.from_sft(tc_streamlines, sft)
        _, wp_ids = filter_grid_roi(
            tmp_sft, gt_bundle_inv_mask, 'any', False)
        wpc_streamlines = tmp_sft.streamlines[list(wp_ids)]
        tc_ids = np.setdiff1d(range(len(tmp_sft)), wp_ids)
        tc_streamlines = tmp_sft.streamlines[list(tc_ids)]

    tc_sft = StatefulTractogram.from_sft(tc_streamlines, sft)
    wpc_sft = StatefulTractogram.from_sft([], sft)
    fc_sft = StatefulTractogram.from_sft(fc_streamlines, sft)
    if wrong_path_as_separate and len(wpc_streamlines):
        wpc_sft = StatefulTractogram.from_sft(wpc_streamlines, sft)

    return tc_sft, wpc_sft, fc_sft, nc_streamlines, sft
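
# Minimal sketch of the endpoint dilation used above: scipy's binary_dilation
# with its default structuring element (face connectivity) grows a 3D mask by
# one voxel along each axis per iteration.
import numpy as np
from scipy.ndimage import binary_dilation

seed = np.zeros((5, 5, 5), dtype=bool)
seed[2, 2, 2] = True
assert np.count_nonzero(binary_dilation(seed, iterations=1)) == 7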
Example no. 29
def compute_masks(gt_files, parser, args):
    """
    Compute ground-truth masks. If the file is already a mask, load it.
    If it is a bundle, compute the mask.

    Parameters
    ----------
    gt_files: list
        List of paths to ground-truth files (tractograms or nifti masks).
    parser: ArgumentParser
        Argument parser which handles the script's arguments.
    args: Namespace
        Arguments passed to the script.

    Returns
    -------
    gt_bundle_masks: list of numpy.ndarray
        Binary ground-truth masks, one per input file.
    gt_bundle_inv_masks: list of numpy.ndarray
        Inverse (complement) of each ground-truth mask.
    affine: numpy.ndarray
        Affine of the last loaded file.
    dimensions: tuple
        Dimensions of the last loaded file.
    """
    save_ref = args.reference

    gt_bundle_masks = []
    gt_bundle_inv_masks = []

    affine = None
    dimensions = None
    for gt_bundle in gt_files:
        if gt_bundle is not None:
            # Support ground truth as streamlines or masks
            # Will be converted to binary masks immediately
            _, ext = split_name_with_nii(gt_bundle)
            if ext in ['.gz', '.nii.gz']:
                gt_img = nib.load(gt_bundle)
                gt_mask = get_data_as_mask(gt_img)

                if affine is not None:
                    # TODO: compare affines between files.
                    logging.debug('Previous affine discarded. (TODO)')
                affine = gt_img.affine
                dimensions = gt_mask.shape
            else:
                # Cheating ref because it may send a lot of warning if loading
                # many trk with ref (reference was maybe added only for some
                # of these files)
                if ext == '.trk':
                    args.reference = None
                else:
                    args.reference = save_ref
                gt_sft = load_tractogram_with_reference(parser,
                                                        args,
                                                        gt_bundle,
                                                        bbox_check=False)
                gt_sft.to_vox()
                gt_sft.to_corner()
                _affine, _dimensions, _, _ = gt_sft.space_attributes
                if affine is not None:
                    # TODO: compare affines between files.
                    logging.debug('Previous affine discarded. (TODO)')
                affine = _affine
                dimensions = _dimensions
                gt_mask = compute_tract_counts_map(gt_sft.streamlines,
                                                   dimensions).astype(np.int16)
            gt_inv_mask = np.zeros(dimensions, dtype=np.int16)
            gt_inv_mask[gt_mask == 0] = 1
            gt_mask[gt_mask > 0] = 1
        else:
            gt_mask = None
            gt_inv_mask = None

        gt_bundle_masks.append(gt_mask)
        gt_bundle_inv_masks.append(gt_inv_mask)

    return gt_bundle_masks, gt_bundle_inv_masks, affine, dimensions
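
# Toy illustration of the binarization above: compute_tract_counts_map yields
# a streamline-count volume, which is turned into a binary mask and its
# complement.
import numpy as np

counts = np.array([[0, 3], [1, 0]], dtype=np.int16)  # toy count map
gt_mask = (counts > 0).astype(np.int16)
gt_inv_mask = (counts == 0).astype(np.int16)
assert np.all(gt_mask + gt_inv_mask == 1)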
Example no. 30
def extract_vb_vs(sft, head_filename, tail_filename, limits_length, angle,
                  orientation_length, abs_orientation_length,
                  inclusion_inv_mask, dilate_endpoints):
    """
    Extract the valid bundle (and valid streamline ids) from a tractogram,
    based on two regions of interest for the endpoints, one region of
    interest for the inclusion of streamlines, and limits on length, angle,
    and length per orientation.

    Parameters
    ----------
    sft: StatefulTractogram
        Tractogram containing the streamlines to be extracted.
    head_filename: str
        Filename of the "head" of the bundle.
    tail_filename: str
        Filename of the "tail" of the bundle.
    limits_length: list or None
        Bundle's length parameters: [min, max].
    angle: int or None
        Bundle's max angle.
    orientation_length: list or None
        Bundle's length parameters in each direction:
        [[min_x, max_x], [min_y, max_y], [min_z, max_z]]
    abs_orientation_length: list or None
        Same as orientation_length, but lengths are computed on absolute
        values.
    inclusion_inv_mask: np.ndarray or None
        Inverse mask of the bundle.
    dilate_endpoints: int or None
        If set, dilate the masks for n iterations.

    Returns
    -------
    vs_ids: list
        Ids of the valid streamlines.
    wpc_ids: list
        Ids of the wrong-path-connection streamlines.
    bundle_stats: dict
        Counts of streamlines kept or rejected at each filtering step.
    """
    mask_1_img = nib.load(head_filename)
    mask_2_img = nib.load(tail_filename)
    mask_1 = get_data_as_mask(mask_1_img)
    mask_2 = get_data_as_mask(mask_2_img)

    if dilate_endpoints:
        mask_1 = binary_dilation(mask_1, iterations=dilate_endpoints)
        mask_2 = binary_dilation(mask_2, iterations=dilate_endpoints)

    _, vs_ids = filter_grid_roi_both(sft, mask_1, mask_2)

    wpc_ids = []
    bundle_stats = {"Initial count head to tail": len(vs_ids)}

    # Remove out of inclusion mask (limits_mask)
    if len(vs_ids) > 0 and inclusion_inv_mask is not None:
        tmp_sft = StatefulTractogram.from_sft(sft.streamlines[vs_ids], sft)
        _, out_of_mask_ids_from_vs = filter_grid_roi(tmp_sft,
                                                     inclusion_inv_mask, 'any',
                                                     False)
        out_of_mask_ids = vs_ids[out_of_mask_ids_from_vs]

        bundle_stats.update({"WPC_out_of_mask": len(out_of_mask_ids)})

        # Update ids
        wpc_ids.extend(out_of_mask_ids)
        vs_ids = np.setdiff1d(vs_ids, wpc_ids)

    # Remove invalid lengths
    if len(vs_ids) > 0 and limits_length is not None:
        min_len, max_len = limits_length

        # Bring streamlines to world coordinates so proper length
        # is calculated
        sft.to_rasmm()
        lengths = np.array(list(length(sft.streamlines[vs_ids])))
        sft.to_vox()

        # Compute valid lengths
        valid_length_ids_mask_from_vs = np.logical_and(lengths > min_len,
                                                       lengths < max_len)

        bundle_stats.update(
            {"WPC_invalid_length": int(sum(~valid_length_ids_mask_from_vs))})

        # Update ids
        wpc_ids.extend(vs_ids[~valid_length_ids_mask_from_vs])
        vs_ids = vs_ids[valid_length_ids_mask_from_vs]

    # Remove invalid lengths per orientation
    if len(vs_ids) > 0 and orientation_length is not None:
        # Compute valid lengths
        limits_x, limits_y, limits_z = orientation_length

        _, valid_orientation_ids_from_vs, _ = \
            filter_streamlines_by_total_length_per_dim(
                sft[vs_ids], limits_x, limits_y, limits_z,
                use_abs=False, save_rejected=False)

        # Update ids
        valid_orientation_ids = vs_ids[valid_orientation_ids_from_vs]
        invalid_orientation_ids = np.setdiff1d(vs_ids, valid_orientation_ids)

        bundle_stats.update(
            {"WPC_invalid_orientation": len(invalid_orientation_ids)})

        wpc_ids.extend(invalid_orientation_ids)
        vs_ids = valid_orientation_ids

    # Idem in abs
    if len(vs_ids) > 0 and abs_orientation_length is not None:
        # Compute valid lengths
        limits_x, limits_y, limits_z = abs_orientation_length

        _, valid_orientation_ids_from_vs, _ = \
            filter_streamlines_by_total_length_per_dim(
                sft[vs_ids], limits_x, limits_y,
                limits_z,
                use_abs=True, save_rejected=False)

        # Update ids
        valid_orientation_ids = vs_ids[valid_orientation_ids_from_vs]
        invalid_orientation_ids = np.setdiff1d(vs_ids, valid_orientation_ids)

        bundle_stats.update(
            {"WPC_invalid_orientation_abs": len(invalid_orientation_ids)})

        wpc_ids.extend(invalid_orientation_ids)
        vs_ids = valid_orientation_ids

    # Remove loops and sharp turns from the valid streamlines
    if len(vs_ids) > 0 and angle is not None:
        # Compute valid angles
        valid_angle_ids_from_vs = remove_loops_and_sharp_turns(
            sft.streamlines[vs_ids], angle)

        # Update ids
        valid_angle_ids = vs_ids[valid_angle_ids_from_vs]
        invalid_angle_ids = np.setdiff1d(vs_ids, valid_angle_ids)

        bundle_stats.update({"WPC_invalid_length": len(invalid_angle_ids)})

        wpc_ids.extend(invalid_angle_ids)
        vs_ids = valid_angle_ids

    bundle_stats.update({"VS": len(vs_ids)})

    return list(vs_ids), list(wpc_ids), bundle_stats
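
# Toy illustration of the id bookkeeping used throughout extract_vb_vs:
# indices valid *within* the current candidate subset are mapped back to
# global ids by fancy indexing, and the rejected ids are recovered with
# np.setdiff1d.
import numpy as np

vs_ids = np.array([4, 7, 9, 12])               # global candidate ids
valid_from_vs = np.array([0, 2])               # valid indices within vs_ids
valid_ids = vs_ids[valid_from_vs]              # back to global ids: [4, 9]
invalid_ids = np.setdiff1d(vs_ids, valid_ids)  # rejected ids: [7, 12]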