Example #1
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_metric, args.in_mask])
    assert_outputs_exist(parser, args, args.out_png)

    # Load metric image
    metric_img = nib.load(args.in_metric)
    metric_img_data = metric_img.get_fdata(dtype=np.float32)

    # Load mask image
    mask_img = nib.load(args.in_mask)
    mask_img_data = get_data_as_mask(mask_img)

    assert_same_resolution((metric_img, mask_img))

    # Select value from mask
    curr_data = metric_img_data[np.where(mask_img_data > 0)]

    # Display figure
    fig, ax = plt.subplots()
    n, bins, patches = ax.hist(curr_data,
                               bins=args.n_bins,
                               color=args.colors,
                               alpha=0.5,
                               rwidth=0.85)
    plt.xlabel(args.x_label)
    plt.title(args.title)

    if args.show_only:
        plt.show()
    else:
        plt.savefig(args.out_png, dpi=300, bbox_inches='tight')
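None of these examples include the _build_arg_parser() they call. Below is a minimal sketch of the parser Example #1 appears to assume, reconstructed only from the arguments the function reads (in_metric, in_mask, out_png, n_bins, colors, x_label, title, show_only); the real script's flags, defaults and help strings may differ.

import argparse


def _build_arg_parser():
    # Hypothetical parser, inferred from the arguments used in Example #1.
    p = argparse.ArgumentParser(
        description='Plot a histogram of a metric inside a binary mask.')
    p.add_argument('in_metric', help='Input metric image (e.g. an FA map).')
    p.add_argument('in_mask', help='Binary mask image.')
    p.add_argument('out_png', help='Output histogram figure.')
    p.add_argument('--n_bins', type=int, default=20,
                   help='Number of histogram bins.')
    p.add_argument('--colors', default='#0504aa',
                   help='Histogram bar color.')
    p.add_argument('--x_label', default='x', help='X-axis label.')
    p.add_argument('--title', default='Histogram', help='Figure title.')
    p.add_argument('--show_only', action='store_true',
                   help='Show the figure instead of saving it.')
    return p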
Example #2
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_bundle] + args.metrics,
                        optional=args.reference)

    assert_same_resolution(args.metrics)
    metrics = [nib.load(metric) for metric in args.metrics]

    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    sft.to_vox()
    sft.to_corner()

    bundle_stats = get_bundle_metrics_mean_std(sft.streamlines,
                                               metrics,
                                               args.density_weighting)

    bundle_name, _ = os.path.splitext(os.path.basename(args.in_bundle))

    stats = {bundle_name: {}}
    for metric, (mean, std) in zip(metrics, bundle_stats):
        metric_name = split_name_with_nii(
            os.path.basename(metric.get_filename()))[0]
        stats[bundle_name][metric_name] = {
            'mean': mean,
            'std': std
        }

    print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
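get_bundle_metrics_mean_std is not shown in this example. Assuming the tractogram is already in voxel space with corner origin (which is what sft.to_vox() and sft.to_corner() establish), here is a simplified sketch of the underlying idea: sample each metric at every streamline point and, when density weighting is requested, weight each sample by the number of points in its voxel.

import numpy as np


def bundle_mean_std(streamlines, metric_data, density_weighting=False):
    # Flatten all streamline points and floor them to voxel indices;
    # valid because coordinates are in voxel space with corner origin.
    idx = np.vstack(streamlines).astype(int)
    values = metric_data[idx[:, 0], idx[:, 1], idx[:, 2]]

    weights = None
    if density_weighting:
        # Weight each sample by the streamline-point density of its voxel.
        density = np.zeros(metric_data.shape)
        np.add.at(density, (idx[:, 0], idx[:, 1], idx[:, 2]), 1)
        weights = density[idx[:, 0], idx[:, 1], idx[:, 2]]

    mean = np.average(values, weights=weights)
    std = np.sqrt(np.average((values - mean) ** 2, weights=weights))
    return mean, std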
Example #3
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.bundle] + args.metrics)
    if args.num_points <= 1:
        parser.error('--num_points {} needs to be greater than '
                     '1'.format(args.num_points))

    metrics = [nib.load(m) for m in args.metrics]
    assert_same_resolution(*metrics)

    bundle_tractogram_file = nib.streamlines.load(args.bundle)

    bundle_name, _ = os.path.splitext(os.path.basename(args.bundle))
    stats = {}
    if len(bundle_tractogram_file.streamlines) == 0:
        stats[bundle_name] = None
        print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
        return

    bundle_streamlines_vox = load_in_voxel_space(bundle_tractogram_file,
                                                 metrics[0])
    bundle_subsampled = subsample_streamlines(bundle_streamlines_vox,
                                              num_points=args.num_points,
                                              arc_length=True)

    # Make sure all streamlines go in the same direction. We want to
    # make sure point #i of streamline A is matched with point #i of
    # streamline B, and so on.
    num_streamlines = len(bundle_subsampled)
    reference = bundle_subsampled[0]
    for s in np.arange(num_streamlines):
        streamline = bundle_subsampled[s]
        direct = average_euclidean(reference, streamline)
        flipped = average_euclidean(reference, streamline[::-1])

        if flipped < direct:
            bundle_subsampled[s] = streamline[::-1]

    profiles = get_metrics_profile_over_streamlines(bundle_subsampled, metrics)
    t_profiles = np.expand_dims(profiles, axis=1)
    t_profiles = np.rollaxis(t_profiles, 3, 2)

    stats[bundle_name] = {}
    for metric, profile, t_profile in zip(metrics, profiles, t_profiles):
        metric_name, _ = split_name_with_nii(
            os.path.basename(metric.get_filename()))
        stats[bundle_name][metric_name] = {
            'mean': np.mean(profile, axis=0).tolist(),
            'std': np.std(profile, axis=0).tolist(),
            'tractprofile': t_profile.tolist()
        }

    print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
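average_euclidean decides whether a streamline gets flipped. Assuming it computes what its name and call sites suggest, a one-line sketch is enough; both arguments must already be resampled to the same number of points.

import numpy as np


def average_euclidean(s1, s2):
    # Mean point-to-point Euclidean distance between two streamlines
    # resampled to the same number of points.
    return np.mean(np.linalg.norm(s1 - s2, axis=1))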
Example #4
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_bundle] + args.in_metrics)
    assert_output_dirs_exist_and_empty(parser, args,
                                       args.out_folder,
                                       create_dir=True)

    assert_same_resolution(args.in_metrics)

    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    sft.to_vox()
    sft.to_corner()

    if len(sft.streamlines) == 0:
        logging.warning(
            'Empty bundle file {}. Skipping'.format(args.in_bundle))
        return

    mins, maxs, indices = _process_streamlines(sft.streamlines)

    metrics = [nib.load(metric) for metric in args.in_metrics]
    for metric in metrics:
        data = metric.get_fdata(dtype=np.float32)
        endpoint_metric_map = np.zeros(metric.shape)
        count = np.zeros(metric.shape)
        for cur_min, cur_max, cur_ind, orig_s in zip(mins, maxs,
                                                     indices,
                                                     sft.streamlines):
            streamline_mean = _compute_streamline_mean(cur_ind,
                                                       cur_min,
                                                       cur_max,
                                                       data)

            xyz = orig_s[0, :].astype(int)
            endpoint_metric_map[xyz[0], xyz[1], xyz[2]] += streamline_mean
            count[xyz[0], xyz[1], xyz[2]] += 1

            xyz = orig_s[-1, :].astype(int)
            endpoint_metric_map[xyz[0], xyz[1], xyz[2]] += streamline_mean
            count[xyz[0], xyz[1], xyz[2]] += 1

        endpoint_metric_map[count != 0] /= count[count != 0]
        metric_fname, ext = split_name_with_nii(
            os.path.basename(metric.get_filename()))
        nib.save(nib.Nifti1Image(endpoint_metric_map, metric.affine,
                                 metric.header),
                 os.path.join(args.out_folder,
                              '{}_endpoints_metric{}'.format(metric_fname,
                                                             ext)))
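_process_streamlines and _compute_streamline_mean are private helpers that this example does not show. One plausible reading of the latter, assuming cur_ind holds a streamline's voxel indices and cur_min/cur_max the corners of its bounding box:

import numpy as np


def _compute_streamline_mean(cur_ind, cur_min, cur_max, data):
    # Crop the metric volume to the streamline's bounding box, then average
    # the metric over the voxels the streamline traverses.
    roi = data[cur_min[0]:cur_max[0] + 1,
               cur_min[1]:cur_max[1] + 1,
               cur_min[2]:cur_max[2] + 1]
    local = cur_ind - cur_min
    return np.mean(roi[local[:, 0], local[:, 1], local[:, 2]])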
Example #5
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser,
                        [args.in_bundle, args.label_map, args.distance_map] +
                        args.metrics)
    assert_outputs_exist(parser, args, '', args.out_json)

    # Load everything
    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    sft.to_vox()
    sft.to_corner()

    bundle_name, _ = os.path.splitext(os.path.basename(args.in_bundle))
    if len(sft) == 0:
        stats = {bundle_name: None}
        print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
        return

    assert_same_resolution(args.metrics)
    metrics = [nib.load(metric) for metric in args.metrics]

    label_file = np.load(args.label_map)
    labels = label_file['arr_0']

    distance_file = np.load(args.distance_map)
    distances_to_centroid_streamline = distance_file['arr_0']

    if len(labels) != len(distances_to_centroid_streamline):
        raise Exception(
            "Label map doesn't contain the same number of entries as the "
            "distance map. {} != {}".format(
                len(labels), len(distances_to_centroid_streamline)))

    # Compute stats
    stats = get_bundle_metrics_mean_std_per_point(
        sft.streamlines, bundle_name, distances_to_centroid_streamline,
        metrics, labels, args.density_weighting, args.distance_weighting)

    if args.out_json:
        with open(args.out_json, 'w') as outfile:
            json.dump(stats,
                      outfile,
                      indent=args.indent,
                      sort_keys=args.sort_keys)
    else:
        print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
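The label and distance maps are read with np.load(...)['arr_0'], 'arr_0' being the default key np.savez assigns to unnamed arrays. A toy illustration of files this script would accept (the real maps are presumably produced by a companion script and hold one entry per streamline point):

import numpy as np

# One label and one distance per streamline point.
labels = np.array([1, 1, 2, 2, 3], dtype=np.uint16)
distances = np.array([0.0, 0.5, 0.2, 0.7, 0.1])

np.savez('label_map.npz', labels)        # stored under the key 'arr_0'
np.savez('distance_map.npz', distances)

assert np.array_equal(np.load('label_map.npz')['arr_0'], labels)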
Example #6
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.sh_files)
    assert_outputs_exist(parser, args, args.out_sh)
    assert_same_resolution(args.sh_files)

    first_im = nb.load(args.sh_files[0])
    # get_data() was removed in nibabel 5; get_fdata() is the replacement
    out_coeffs = first_im.get_fdata(dtype=np.float32)

    for sh_file in args.sh_files[1:]:
        im = nb.load(sh_file)
        im_dat = im.get_fdata(dtype=np.float32)

        out_coeffs = np.where(
            np.abs(im_dat) > np.abs(out_coeffs), im_dat, out_coeffs)

    nb.save(nb.Nifti1Image(out_coeffs, first_im.affine, first_im.header),
            args.out_sh)
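The np.where merge keeps, per voxel and per coefficient, whichever input has the larger absolute value; on ties the earlier file wins, matching the loop order. A tiny worked example:

import numpy as np

a = np.array([0.2, -0.8, 0.1])
b = np.array([-0.5, 0.3, -0.1])

# Keep whichever coefficient has the larger magnitude; ties keep a.
print(np.where(np.abs(b) > np.abs(a), b, a))  # [-0.5 -0.8  0.1]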
Example #7
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.bundle] + args.metrics)

    metrics = [nib.load(metric) for metric in args.metrics]
    assert_same_resolution(*metrics)
    streamlines_vox = load_in_voxel_space(args.bundle, metrics[0])
    bundle_stats = get_metrics_stats_over_streamlines_robust(
        streamlines_vox, metrics, args.density_weighting)

    bundle_name, _ = os.path.splitext(os.path.basename(args.bundle))

    stats = {bundle_name: {}}
    for metric, (mean, std) in zip(metrics, bundle_stats):
        metric_name = split_name_with_nii(
            os.path.basename(metric.get_filename()))[0]
        stats[bundle_name][metric_name] = {'mean': mean, 'std': std}

    print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
Example #8
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.bundle] + args.metrics)
    assert_outputs_dir_exists_and_empty(parser, args, args.output_folder)

    metrics = [nib.load(metric) for metric in args.metrics]
    assert_same_resolution(*metrics)

    bundle_tractogram_file = nib.streamlines.load(args.bundle)
    if int(bundle_tractogram_file.header['nb_streamlines']) == 0:
        logging.warning('Empty bundle file {}. Skipping'.format(args.bundle))
        return
    bundle_streamlines_vox = load_in_voxel_space(bundle_tractogram_file,
                                                 metrics[0])

    for metric in metrics:
        data = metric.get_fdata(dtype=np.float32)
        endpoint_metric_map = np.zeros(metric.shape)
        count = np.zeros(metric.shape)
        for streamline in bundle_streamlines_vox:
            streamline_mean = _compute_streamline_mean(streamline, data)

            xyz = streamline[0, :].astype(int)
            endpoint_metric_map[xyz[0], xyz[1], xyz[2]] += streamline_mean
            count[xyz[0], xyz[1], xyz[2]] += 1

            xyz = streamline[-1, :].astype(int)
            endpoint_metric_map[xyz[0], xyz[1], xyz[2]] += streamline_mean
            count[xyz[0], xyz[1], xyz[2]] += 1

        endpoint_metric_map[count != 0] /= count[count != 0]
        metric_fname, ext = split_name_with_nii(
            os.path.basename(metric.get_filename()))
        nib.save(
            nib.Nifti1Image(endpoint_metric_map, metric.affine, metric.header),
            os.path.join(args.output_folder,
                         '{}_endpoints_metric{}'.format(metric_fname, ext)))
Example #9
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser,
                        [args.in_bundle, args.label_map, args.distance_map] +
                        args.metrics)

    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    sft.to_vox()
    sft.to_corner()

    stats = {}
    bundle_name, _ = os.path.splitext(os.path.basename(args.in_bundle))
    if len(sft) == 0:
        stats[bundle_name] = None
        print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
        return

    assert_same_resolution(args.metrics)
    metrics = [nib.load(metric) for metric in args.metrics]

    if args.density_weighting:
        track_count = compute_tract_counts_map(
            sft.streamlines, metrics[0].shape).astype(np.float64)
    else:
        track_count = np.ones(metrics[0].shape)

    label_file = np.load(args.label_map)
    labels = label_file['arr_0']

    unique_labels = np.unique(labels)
    num_digits_labels = len(str(np.max(unique_labels)))

    distance_file = np.load(args.distance_map)
    distances_to_centroid_streamline = distance_file['arr_0']
    # Bigger weight near the centroid streamline
    distances_to_centroid_streamline = 1.0 / distances_to_centroid_streamline

    if len(labels) != len(distances_to_centroid_streamline):
        raise Exception('Label map doesn\'t contain the same number of '
                        'entries as the distance map. {} != {}'.format(
                            len(labels),
                            len(distances_to_centroid_streamline)))

    # np.int was removed in NumPy 1.24; the built-in int is equivalent here
    bundle_data_int = sft.streamlines.data.astype(int)
    stats[bundle_name] = {}

    for metric in metrics:
        metric_data = metric.get_fdata()
        current_metric_fname, _ = split_name_with_nii(
            os.path.basename(metric.get_filename()))
        stats[bundle_name][current_metric_fname] = {}

        for i in unique_labels:
            number_key = '{}'.format(i).zfill(num_digits_labels)
            label_stats = {}
            stats[bundle_name][current_metric_fname][number_key] = label_stats

            label_indices = bundle_data_int[labels == i]
            label_metric = metric_data[label_indices[:, 0],
                                       label_indices[:, 1],
                                       label_indices[:, 2]]
            track_weight = track_count[label_indices[:, 0],
                                       label_indices[:, 1],
                                       label_indices[:, 2]]
            label_weight = track_weight
            if args.distance_weighting:
                label_weight *= distances_to_centroid_streamline[labels == i]
            if np.sum(label_weight) == 0:
                logging.warning('Weights sum to zero, can\'t be normalized. '
                                'Disabling weighting')
                label_weight = None

            label_mean = np.average(label_metric, weights=label_weight)
            label_std = np.sqrt(
                np.average((label_metric - label_mean)**2,
                           weights=label_weight))
            label_stats['mean'] = float(label_mean)
            label_stats['std'] = float(label_std)

    print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
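The per-label statistics rely on np.average with optional weights; the weighted standard deviation is the square root of the weighted mean of squared deviations, exactly as coded above. A small worked check:

import numpy as np

x = np.array([1.0, 2.0, 3.0])
w = np.array([1.0, 1.0, 2.0])

mean = np.average(x, weights=w)                        # 2.25
std = np.sqrt(np.average((x - mean) ** 2, weights=w))  # ~0.829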
Example #10
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_bundle] + args.in_metrics,
                        optional=args.in_centroid)

    if args.nb_pts_per_streamline <= 1:
        parser.error('--nb_pts_per_streamline {} needs to be greater than '
                     '1'.format(args.nb_pts_per_streamline))

    assert_same_resolution(args.in_metrics + [args.in_bundle])
    sft = load_tractogram_with_reference(parser, args, args.in_bundle)

    metrics = [nib.load(m) for m in args.in_metrics]

    bundle_name, _ = os.path.splitext(os.path.basename(args.in_bundle))
    stats = {}
    if len(sft) == 0:
        stats[bundle_name] = None
        print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
        return

    # Centroid - will be used as a reference to reorient each streamline
    if args.in_centroid:
        is_header_compatible(args.in_bundle, args.in_centroid)
        sft_centroid = load_tractogram_with_reference(parser, args,
                                                      args.in_centroid)
        centroid_streamlines = sft_centroid.streamlines[0]
        nb_pts_per_streamline = len(centroid_streamlines)
    else:
        centroid_streamlines = get_streamlines_centroid(
            sft.streamlines, args.nb_pts_per_streamline)
        nb_pts_per_streamline = args.nb_pts_per_streamline

    resampled_sft = resample_streamlines_num_points(sft, nb_pts_per_streamline)

    # Make sure all streamlines go in the same direction. We want to
    # make sure point #i of streamline A is matched with point #i of
    # streamline B, and so on.
    num_streamlines = len(resampled_sft)

    for s in np.arange(num_streamlines):
        streamline = resampled_sft.streamlines[s]
        direct = average_euclidean(centroid_streamlines, streamline)
        flipped = average_euclidean(centroid_streamlines, streamline[::-1])

        if flipped < direct:
            resampled_sft.streamlines[s] = streamline[::-1]

    profiles = get_bundle_metrics_profiles(resampled_sft, metrics)
    t_profiles = np.expand_dims(profiles, axis=1)
    t_profiles = np.rollaxis(t_profiles, 3, 2)

    stats[bundle_name] = {}
    for metric, profile, t_profile in zip(metrics, profiles, t_profiles):
        metric_name, _ = split_name_with_nii(
            os.path.basename(metric.get_filename()))
        stats[bundle_name][metric_name] = {
            'mean': np.mean(profile, axis=0).tolist(),
            'std': np.std(profile, axis=0).tolist(),
            'bundleprofile': t_profile.tolist()
        }

    print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
Example #11
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_AD, args.in_FA, args.in_MD])
    assert_outputs_exist(parser, args, [],
                         [args.out_mask_1fiber,
                          args.out_mask_ventricles,
                          args.out_txt_ventricles,
                          args.out_txt_1fiber])

    assert_same_resolution([args.in_AD, args.in_FA, args.in_MD])

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    fa_img = nib.load(args.in_FA)
    fa_data = fa_img.get_fdata(dtype=np.float32)
    affine = fa_img.affine

    md_data = nib.load(args.in_MD).get_fdata(dtype=np.float32)
    ad_data = nib.load(args.in_AD).get_fdata(dtype=np.float32)

    mask_cc = np.zeros(fa_data.shape, dtype=np.uint8)
    mask_vent = np.zeros(fa_data.shape, dtype=np.uint8)

    # center
    if args.roi_center is None:
        ci, cj, ck = np.array(fa_data.shape[:3]) // 2
    else:
        if len(args.roi_center) != 3:
            parser.error("roi_center needs to receive 3 values")
        elif not np.all(np.asarray(args.roi_center) > 0):
            parser.error("roi_center needs to be positive")
        else:
            ci, cj, ck = args.roi_center

    w = args.roi_radius
    fa_shape = fa_data.shape
    roi_ad = ad_data[max(int(ci - w), 0): min(int(ci + w), fa_shape[0]),
                     max(int(cj - w), 0): min(int(cj + w), fa_shape[1]),
                     max(int(ck - w), 0): min(int(ck + w), fa_shape[2])]
    roi_md = md_data[max(int(ci - w), 0): min(int(ci + w), fa_shape[0]),
                     max(int(cj - w), 0): min(int(cj + w), fa_shape[1]),
                     max(int(ck - w), 0): min(int(ck + w), fa_shape[2])]
    roi_fa = fa_data[max(int(ci - w), 0): min(int(ci + w), fa_shape[0]),
                     max(int(cj - w), 0): min(int(cj + w), fa_shape[1]),
                     max(int(ck - w), 0): min(int(ck + w), fa_shape[2])]

    logging.debug('fa_min, fa_max, md_min: {}, {}, {}'.format(
        args.fa_min, args.fa_max, args.md_min))

    indices = np.where((roi_fa > args.fa_min) & (roi_fa < 0.95))
    N = roi_ad[indices].shape[0]

    logging.debug('Number of voxels found in single fiber area: {}'.format(N))

    cc_avg = np.mean(roi_ad[indices])
    cc_std = np.std(roi_ad[indices])

    # Shift ROI-relative indices back to full-volume coordinates, using
    # the same clamped lower bound as the slicing above
    indices[0][:] += max(int(ci - w), 0)
    indices[1][:] += max(int(cj - w), 0)
    indices[2][:] += max(int(ck - w), 0)
    mask_cc[indices] = 1

    indices = np.where((roi_md > args.md_min) & (roi_fa < args.fa_max))
    N = roi_md[indices].shape[0]

    logging.debug('Number of voxels found in ventricles: {}'.format(N))

    vent_avg = np.mean(roi_md[indices])
    vent_std = np.std(roi_md[indices])

    indices[0][:] += max(int(ci - w), 0)
    indices[1][:] += max(int(cj - w), 0)
    indices[2][:] += max(int(ck - w), 0)
    mask_vent[indices] = 1

    if args.out_mask_1fiber:
        nib.save(nib.Nifti1Image(mask_cc, affine), args.out_mask_1fiber)

    if args.out_mask_ventricles:
        nib.save(nib.Nifti1Image(mask_vent, affine), args.out_mask_ventricles)

    if args.out_txt_1fiber:
        np.savetxt(args.out_txt_1fiber, [cc_avg], fmt='%f')

    if args.out_txt_ventricles:
        np.savetxt(args.out_txt_ventricles, [vent_avg], fmt='%f')

    logging.info("Average AD in single fiber areas: {} +- {}".format(cc_avg,
                                                                     cc_std))
    logging.info("Average MD in ventricles: {} +- {}".format(vent_avg,
                                                             vent_std))
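The three clamped slicings of ad_data, md_data and fa_data are identical and could be factored into a small helper; a refactoring sketch, not part of the original script:

def crop_roi(data, center, radius):
    # Cube [center - radius, center + radius) clamped to the volume bounds,
    # mirroring the inline slicing in Example #11.
    ci, cj, ck = center
    w = radius
    return data[max(int(ci - w), 0):min(int(ci + w), data.shape[0]),
                max(int(cj - w), 0):min(int(cj + w), data.shape[1]),
                max(int(ck - w), 0):min(int(ck + w), data.shape[2])]

# roi_ad = crop_roi(ad_data, (ci, cj, ck), args.roi_radius), and likewise
# for roi_md and roi_fa.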
Example #12
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()
    inputs = [args.in_dwi, args.in_bval]
    if args.mask:
        inputs.append(args.mask)

    assert_inputs_exist(parser, inputs)
    assert_outputs_exist(parser, args, args.out_avg)

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    img = nib.load(args.in_dwi)
    data = img.get_fdata(dtype=np.float32)
    affine = img.affine
    if args.mask is None:
        mask = None
    else:
        mask_img = nib.load(args.mask)
        assert_same_resolution((img, mask_img))
        mask = get_data_as_mask(mask_img, dtype='uint8')

    # Read bvals (bvecs not needed at this point)
    logging.info('Performing powder average')
    bvals, _ = read_bvals_bvecs(args.in_bval, None)

    # Select diffusion volumes to average
    if not args.shells:
        # If no shells are given, average all diffusion-weighted volumes
        pwd_avg_idx = np.squeeze(np.where(bvals > args.b0_thr))
        logging.debug('Calculating powder average from all diffusion'
                      '-weighted volumes, {} volumes '
                      'included.'.format(len(pwd_avg_idx)))
    else:
        pwd_avg_idx = []
        logging.debug('Calculating powder average from {} '
                      'shells {}'.format(len(args.shells), args.shells))

        for shell in args.shells:
            pwd_avg_idx = np.int64(
                np.concatenate((pwd_avg_idx,
                                get_shell_indices(bvals,
                                                  shell,
                                                  tol=args.shell_thr))))
            logging.debug('{} b{} volumes detected and included'.format(
                len(pwd_avg_idx), shell))

        # remove b0 indices
        b0_idx = get_shell_indices(bvals, 0, args.b0_thr)
        logging.debug('{} b0 volumes detected and not included'.format(
            len(b0_idx)))
        for val in b0_idx:
            pwd_avg_idx = pwd_avg_idx[pwd_avg_idx != val]

    if len(pwd_avg_idx) == 0:
        raise ValueError('No shells selected for powder average, ensure '
                         'shell, shell_thr and b0_thr are set '
                         'appropriately')

    powder_avg = np.squeeze(np.mean(data[:, :, :, pwd_avg_idx], axis=3))

    if args.mask:
        powder_avg = powder_avg * mask

    powder_avg_img = nib.Nifti1Image(powder_avg.astype(np.float32), affine)
    nib.save(powder_avg_img, args.out_avg)
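get_shell_indices is not shown here. From its two call sites, get_shell_indices(bvals, shell, tol=args.shell_thr) and get_shell_indices(bvals, 0, args.b0_thr), a plausible sketch is:

import numpy as np


def get_shell_indices(bvals, shell, tol=10):
    # Indices of volumes whose b-value lies within tol of the target shell.
    return np.where(np.abs(np.asarray(bvals) - shell) <= tol)[0]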