Example 1
def _parse_args(parser):
    args = parser.parse_args()
    inputs = []
    output = []
    inputs.append(args.in_bingham)
    if args.output:
        output.append(args.output)
    else:
        if args.silent:
            parser.error('Silent mode is enabled but no output is specified. '
                         'Specify an output with --output to use silent mode.')

    assert_inputs_exist(parser, inputs)
    assert_outputs_exist(parser, args, output)

    return args
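
Example 1's _parse_args assumes a parser built elsewhere. A minimal sketch of
what that _build_arg_parser might look like, with argument names inferred from
the attributes used above (an assumption, not the script's actual parser):

import argparse


def _build_arg_parser():
    # Hypothetical parser covering only the arguments _parse_args touches.
    p = argparse.ArgumentParser(description='Process a Bingham volume.')
    p.add_argument('in_bingham', help='Input Bingham coefficients image.')
    p.add_argument('--output', help='Optional output filename.')
    p.add_argument('--silent', action='store_true',
                   help='Suppress console output (requires --output).')
    return p
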
Example 2
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.tracts, args.ref_anat])
    assert_outputs_exists(parser, args, [args.out])
    check_tracts_support(parser, args.tracts, False)
    check_tracts_same_format(parser, args.tracts, args.out)

    # Deactivated for now.
    # Tested implicitly with the 2 previous tracts checks.
    # if not tc.is_supported(args.out):
    #     parser.error('Format of "{0}" not supported.'.format(args.out))

    filter_points(args.tracts, args.ref_anat, args.out,
                  args.nifti_compliant_gen, args.for_nifti_compliant)
Example 3
def main():
    parser = _build_args_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.tracts, args.ref_anat])
    assert_outputs_exists(parser, args, [args.out])
    check_tracts_support(parser, args.tracts, False)

    if not tc.is_supported(args.out):
        parser.error('Format of "{0}" not supported.'.format(args.out))

    if not args.x and not args.y and not args.z:
        parser.error('No flipping axis specified.')

    flip_streamlines(args.tracts, args.ref_anat, args.out, args.x, args.y,
                     args.z, args.mode)
Example 4
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.tractogram_filename])
    assert_outputs_exist(parser, args, [args.seed_density_filename])

    tracts_format = detect_format(args.tractogram_filename)
    if tracts_format is not TrkFile:
        raise ValueError("Invalid input streamline file format " +
                         "(must be trk): {0}".format(args.tractogram_filename))

    max_ = np.iinfo(np.int16).max
    if args.binary is not None and (args.binary <= 0 or args.binary > max_):
        parser.error(
            'The value of --binary ({}) must be greater than 0 and '
            'less than or equal to {}'.format(args.binary, max_))

    # Load files and data. TRKs can have 'same' as reference
    # Can handle streamlines outside of bbox
    sft = load_tractogram(args.tractogram_filename,
                          'same',
                          bbox_valid_check=False)
    # Streamlines are saved in RASMM but seeds are saved in VOX
    # This might produce weird behavior with non-iso
    sft.to_vox()
    sft.to_corner()
    if 'seeds' in sft.data_per_streamline:
        seeds = sft.data_per_streamline['seeds']
    else:
        parser.error('Tractogram does not contain seeds')

    # Create seed density map
    _, shape, _, _ = sft.space_attributes
    seed_density = np.zeros(shape, dtype=np.int32)
    for seed in seeds:
        # Set value at mask, either binary or increment
        seed_voxel = np.round(seed).astype(int)
        if args.binary is not None:
            seed_density[tuple(seed_voxel)] = args.binary
        else:
            seed_density[tuple(seed_voxel)] += 1

    # Save seed density map
    dm_img = Nifti1Image(seed_density.astype(np.int32), sft.affine)
    dm_img.to_filename(args.seed_density_filename)
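
The per-seed loop above is correct but can be vectorized. A sketch using
numpy's unbuffered np.add.at, which accumulates repeated voxel indices just
like the loop does (same seeds, shape and args as above):

seed_voxels = np.round(seeds).astype(int)        # (N, 3) voxel coordinates
seed_density = np.zeros(shape, dtype=np.int32)
# np.add.at handles duplicate indices, unlike plain fancy-index assignment.
np.add.at(seed_density, tuple(seed_voxels.T), 1)
if args.binary is not None:
    seed_density[seed_density > 0] = args.binary  # collapse counts to a flag
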
Example 5
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_bundle] + args.metrics)
    assert_output_dirs_exist_and_empty(parser,
                                       args,
                                       args.output_folder,
                                       create_dir=True)

    assert_same_resolution(args.metrics)

    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    sft.to_vox()
    sft.to_corner()

    if len(sft.streamlines) == 0:
        logging.warning(
            'Empty bundle file {}. Skipping'.format(args.in_bundle))
        return

    mins, maxs, indices = _process_streamlines(sft.streamlines)

    metrics = [nib.load(metric) for metric in args.metrics]
    for metric in metrics:
        data = metric.get_data()
        endpoint_metric_map = np.zeros(metric.shape)
        count = np.zeros(metric.shape)
        for cur_min, cur_max, cur_ind, orig_s in zip(mins, maxs, indices,
                                                     sft.streamlines):
            streamline_mean = _compute_streamline_mean(cur_ind, cur_min,
                                                       cur_max, data)

            xyz = orig_s[0, :].astype(int)
            endpoint_metric_map[xyz[0], xyz[1], xyz[2]] += streamline_mean
            count[xyz[0], xyz[1], xyz[2]] += 1

            xyz = orig_s[-1, :].astype(int)
            endpoint_metric_map[xyz[0], xyz[1], xyz[2]] += streamline_mean
            count[xyz[0], xyz[1], xyz[2]] += 1

        endpoint_metric_map[count != 0] /= count[count != 0]
        metric_fname, ext = split_name_with_nii(
            os.path.basename(metric.get_filename()))
        nib.save(
            nib.Nifti1Image(endpoint_metric_map, metric.affine, metric.header),
            os.path.join(args.output_folder,
                         '{}_endpoints_metric{}'.format(metric_fname, ext)))
Example 6
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, [args.in_dwi, args.in_bval, args.in_bvec])
    assert_outputs_exist(parser, args, args.frf_file)

    if len(args.roi_radii) == 1:
        roi_radii = args.roi_radii[0]
    elif len(args.roi_radii) == 3:
        roi_radii = args.roi_radii
    else:
        parser.error('Wrong size for --roi_radii, can only be a scalar '
                     'or an array of size (3,)')

    vol = nib.load(args.in_dwi)
    data = vol.get_fdata(dtype=np.float32)

    bvals, bvecs = read_bvals_bvecs(args.in_bval, args.in_bvec)

    mask = None
    if args.mask:
        mask = get_data_as_mask(nib.load(args.mask), dtype=bool)

    mask_wm = None
    if args.mask_wm:
        mask_wm = get_data_as_mask(nib.load(args.mask_wm), dtype=bool)

    full_response = compute_ssst_frf(
        data,
        bvals,
        bvecs,
        mask=mask,
        mask_wm=mask_wm,
        fa_thresh=args.fa_thresh,
        min_fa_thresh=args.min_fa_thresh,
        min_nvox=args.min_nvox,
        roi_radii=roi_radii,
        roi_center=args.roi_center,
        force_b0_threshold=args.force_b0_threshold)

    np.savetxt(args.frf_file, full_response)
Example 7
def main():

    parser = _build_args_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_tractogram)
    assert_outputs_exist(parser, args, args.out_tractogram)

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    sft = load_tractogram_with_reference(parser, args, args.in_tractogram)

    new_streamlines, \
        new_per_point, \
        new_per_streamline = filter_streamlines_by_length(sft,
                                                          args.minL,
                                                          args.maxL)

    new_sft = StatefulTractogram.from_sft(
        new_streamlines,
        sft,
        data_per_streamline=new_per_streamline,
        data_per_point=new_per_point)

    if not new_streamlines:
        if args.no_empty:
            logging.debug("The file {} won't be written "
                          "(0 streamline).".format(args.out_tractogram))

            return

        logging.debug('The file {} contains 0 streamline'.format(
            args.out_tractogram))

    save_tractogram(new_sft, args.out_tractogram)

    if args.display_counts:
        tc_bf = len(sft.streamlines)
        tc_af = len(new_streamlines)
        print(
            json.dumps(
                {
                    'tract_count_before_filtering': int(tc_bf),
                    'tract_count_after_filtering': int(tc_af)
                },
                indent=args.indent))
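
With --display_counts, the script prints a small JSON document built from the
two keys above; for a tractogram filtered from 1000 down to 850 streamlines
(illustrative numbers), the output would look like:

{
    "tract_count_before_filtering": 1000,
    "tract_count_after_filtering": 850
}
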
Example 8
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_matrix,
                        [args.labels_list, args.in_ordering])
    assert_output_dirs_exist_and_empty(parser, args, [], args.out_dir)
    if args.out_dir is None:
        args.out_dir = './'
    out_filenames = []
    for filename in args.in_matrix:
        basename, _ = os.path.splitext(filename)
        basename = os.path.basename(basename)
        out_filenames.append('{}/{}{}.npy'.format(args.out_dir,
                                                  args.out_prefix, basename))

    assert_outputs_exist(parser, args, out_filenames)
    with open(args.in_ordering, 'r') as my_file:
        lines = my_file.readlines()
        ordering = [[int(val) for val in lines[0].split()],
                    [int(val) for val in lines[1].split()]]

    for filename in args.in_matrix:
        basename, _ = os.path.splitext(filename)
        basename = os.path.basename(basename)
        matrix = load_matrix_in_any_format(filename)

        if args.labels_list:
            labels_list = np.loadtxt(args.labels_list, dtype=np.int16).tolist()
            indices_1, indices_2 = [], []
            for j in ordering[0]:
                indices_1.append(labels_list.index(j))
            for j in ordering[1]:
                indices_2.append(labels_list.index(j))
        else:
            indices_1 = ordering[0]
            indices_2 = ordering[1]

        if (np.array(indices_1) >= matrix.shape[0]).any() \
                or (np.array(indices_2) >= matrix.shape[1]).any():
            raise ValueError('Indices from config higher than matrix size, '
                             'maybe you need a labels list?')
        tmp_matrix = matrix[tuple(indices_1), :]
        tmp_matrix = tmp_matrix[:, tuple(indices_2)]
        save_matrix_in_any_format(
            '{}/{}{}.npy'.format(args.out_dir, args.out_prefix, basename),
            tmp_matrix)
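
The two-step row/column selection at the end of the loop is equivalent to a
single advanced-indexing call with np.ix_; a sketch using the same names:

# Equivalent to the two-step tmp_matrix selection above:
tmp_matrix = matrix[np.ix_(indices_1, indices_2)]
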
Example 9
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.input)
    assert_outputs_exist(parser, args, args.output, args.logfile)

    logging.basicConfig()
    log = logging.getLogger(__name__)
    if args.verbose:
        log.setLevel(level=logging.INFO)
    else:
        log.setLevel(level=logging.WARNING)

    if args.logfile is not None:
        log.addHandler(logging.FileHandler(args.logfile, mode='w'))

    vol = nb.load(args.input)
    data = vol.get_fdata(dtype=np.float32)
    if args.mask is None:
        mask = np.zeros(data.shape[:3], dtype=bool)
        if data.ndim == 4:
            mask[np.sum(data, axis=-1) > 0] = 1
        else:
            mask[data > 0] = 1
    else:
        mask = get_data_as_mask(nb.load(args.mask), dtype=bool)

    sigma = args.sigma

    if sigma is not None:
        log.info('User supplied noise standard deviation is {}'.format(sigma))
        # Broadcast the single value to a whole 3D volume for nlmeans
        sigma = np.ones(data.shape[:3]) * sigma
    else:
        log.info('Estimating noise')
        sigma = _get_basic_sigma(vol.get_fdata(dtype=np.float32), log)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=DeprecationWarning)
        data_denoised = nlmeans(data,
                                sigma,
                                mask=mask,
                                rician=args.N > 0,
                                num_threads=args.nbr_processes)

    nb.save(nb.Nifti1Image(data_denoised, vol.affine, vol.header), args.output)
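
The helper _get_basic_sigma is not shown here. As a point of comparison only
(not necessarily the author's estimator), dipy ships a noise estimate that
could fill the same role; a hedged sketch, reusing data and args from above:

from dipy.denoise.noise_estimate import estimate_sigma

# One sigma per 3D volume of the series; broadcast the mean so nlmeans
# receives a full 3D sigma map, as the script does for user-supplied values.
sigma_per_vol = estimate_sigma(data, N=args.N)
sigma = np.ones(data.shape[:3]) * np.mean(sigma_per_vol)
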
Example 10
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_bundle, args.in_label_map,
                                 args.in_distance_map] + args.in_metrics)
    assert_outputs_exist(parser, args, '', args.out_json)

    # Load everything
    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    sft.to_vox()
    sft.to_corner()

    bundle_name, _ = os.path.splitext(os.path.basename(args.in_bundle))
    if len(sft) == 0:
        stats = {bundle_name: None}
        print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
        return

    assert_same_resolution(args.in_metrics)
    metrics = [nib.load(metric) for metric in args.in_metrics]

    label_file = np.load(args.in_label_map)
    labels = label_file['arr_0']

    distance_file = np.load(args.in_distance_map)
    distances_to_centroid_streamline = distance_file['arr_0']

    if len(labels) != len(distances_to_centroid_streamline):
        raise ValueError(
            "Label map doesn't contain the same number of entries as the "
            "distance map. {} != {}".format(
                len(labels), len(distances_to_centroid_streamline)))

    # Compute stats
    stats = get_bundle_metrics_mean_std_per_point(sft.streamlines, bundle_name,
                                                  distances_to_centroid_streamline,
                                                  metrics, labels,
                                                  args.density_weighting,
                                                  args.distance_weighting)

    if args.out_json:
        with open(args.out_json, 'w') as outfile:
            json.dump(stats, outfile, indent=args.indent,
                      sort_keys=args.sort_keys)
    else:
        print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
Example 11
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    required = args.in_label
    assert_inputs_exist(parser, required)

    label_img = nib.load(args.in_label)
    label_img_data = get_data_as_label(label_img)

    if args.scilpy_lut:
        with open(os.path.join(get_lut_dir(), args.scilpy_lut + '.json')) as f:
            label_dict = json.load(f)
        (label_indices, label_names) = zip(*label_dict.items())
    else:
        with open(args.custom_lut) as f:
            label_dict = json.load(f)
        (label_indices, label_names) = zip(*label_dict.items())

    output_filenames = []
    for label, name in zip(label_indices, label_names):
        if int(label) != 0:
            if args.out_prefix:
                output_filenames.append(
                    os.path.join(
                        args.out_dir,
                        '{0}_{1}.nii.gz'.format(args.out_prefix, name)))
            else:
                output_filenames.append(
                    os.path.join(args.out_dir, '{0}.nii.gz'.format(name)))

    assert_output_dirs_exist_and_empty(parser, args, [], optional=args.out_dir)
    assert_outputs_exist(parser, args, output_filenames)

    # Extract the voxels that match the label and save them to a file.
    cnt_filename = 0
    for label in label_indices:
        if int(label) != 0:
            split_label = np.zeros(label_img.shape, dtype=np.uint16)
            split_label[np.where(label_img_data == int(label))] = int(label)

            split_image = nib.Nifti1Image(split_label,
                                          label_img.affine,
                                          header=label_img.header)
            nib.save(split_image, output_filenames[cnt_filename])
            cnt_filename += 1
Example 12
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_bundle)

    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    streamlines = sft.streamlines
    lengths = [0]
    if streamlines:
        lengths = list(length(streamlines))

    print(json.dumps({'min_length': float(np.min(lengths)),
                      'mean_length': float(np.mean(lengths)),
                      'max_length': float(np.max(lengths)),
                      'std_length': float(np.std(lengths))},
                     indent=args.indent, sort_keys=args.sort_keys))
Example 13
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_hdf5, args.in_fodf])
    assert_outputs_exist(parser, args, [args.out_hdf5])

    # HDF5 will not overwrite the file
    if os.path.isfile(args.out_hdf5):
        os.remove(args.out_hdf5)

    fodf_img = nib.load(args.in_fodf)

    nbr_cpu = validate_nbr_processes(parser, args, args.nbr_processes)
    in_hdf5_file = h5py.File(args.in_hdf5, 'r')
    keys = list(in_hdf5_file.keys())
    in_hdf5_file.close()
    if nbr_cpu == 1:
        results_list = []
        for key in keys:
            results_list.append(_afd_rd_wrapper([args.in_hdf5, key, fodf_img,
                                                 args.sh_basis,
                                                 args.length_weighting]))

    else:
        pool = multiprocessing.Pool(nbr_cpu)
        results_list = pool.map(_afd_rd_wrapper,
                                zip(itertools.repeat(args.in_hdf5),
                                    keys,
                                    itertools.repeat(fodf_img),
                                    itertools.repeat(args.sh_basis),
                                    itertools.repeat(args.length_weighting)))
        pool.close()
        pool.join()

    shutil.copy(args.in_hdf5, args.out_hdf5)
    out_hdf5_file = h5py.File(args.out_hdf5, 'a')
    for key, afd_fixel, rd_fixel in results_list:
        group = out_hdf5_file[key]
        if 'afd_fixel' in group:
            del group['afd_fixel']
        group.create_dataset('afd_fixel', data=afd_fixel)
        if 'rd_fixel' in group:
            del group['rd_fixel']
        group.create_dataset('rd_fixel', data=rd_fixel)
    out_hdf5_file.close()
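
The read-modify block above can also be written with a context manager so the
file handle is closed even if a dataset write fails; a sketch of the same
replacement logic with the same names:

with h5py.File(args.out_hdf5, 'a') as out_f:
    for key, afd_fixel, rd_fixel in results_list:
        group = out_f[key]
        for name, arr in (('afd_fixel', afd_fixel), ('rd_fixel', rd_fixel)):
            if name in group:
                del group[name]  # h5py will not overwrite an existing dataset
            group.create_dataset(name, data=arr)
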
Example 14
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.bundle])
    assert_outputs_exists(parser, args, [args.filtered_bundle, args.outliers])
    if args.alpha <= 0 or args.alpha > 1:
        parser.error('--alpha must be in the interval ]0, 1].')

    tractogram = nib.streamlines.load(args.bundle)

    if int(tractogram.header['nb_streamlines']) == 0:
        logging.warning("Bundle file contains no streamline")
        return

    streamlines = tractogram.streamlines

    summary = outliers_removal_using_hierarchical_quickbundles(streamlines)
    outliers, outliers_removed = prune(streamlines, args.alpha, summary)

    outliers_cluster = Cluster(indices=outliers, refdata=streamlines)
    outliers_removed_cluster = Cluster(indices=outliers_removed,
                                       refdata=streamlines)

    if len(outliers_removed_cluster) == 0:
        print("All streamlines are considered outliers. Please lower the "
              "--alpha parameter")
    else:
        outlier_removed_tractogram = LazyTractogram(
            lambda: outliers_removed_cluster,
            affine_to_rasmm=np.eye(4))
        nib.streamlines.save(
            outlier_removed_tractogram,
            args.filtered_bundle,
            header=tractogram.header)

    if len(outliers_cluster) == 0:
        print("No outlier found. Please raise the --alpha parameter")
    else:
        outlier_tractogram = LazyTractogram(
            lambda: outliers_cluster,
            affine_to_rasmm=np.eye(4))
        nib.streamlines.save(
            outlier_tractogram,
            args.outliers,
            header=tractogram.header)
Example 15
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.wm, args.gm, args.csf, args.sc])
    assert_outputs_exists(parser, args,
                          [args.include, args.exclude, args.interface])

    # Load volume
    wm_img = nib.load(args.wm)
    img_affine = wm_img.affine
    img_shape = wm_img.shape

    wm_pve = wm_img.get_data()
    gm_pve = nib.load(args.gm).get_data()
    csf_pve = nib.load(args.csf).get_data()
    sc_pve = nib.load(args.sc).get_data()

    # Distribute sub-cortical to white/gray matter
    gm_ratio = args.sc_include_val
    wm_pve += (1.0 - gm_ratio) * sc_pve
    gm_pve += gm_ratio * sc_pve

    # Background
    background = np.ones(img_shape)
    background[gm_pve > 0.0] = 0.0
    background[wm_pve > 0.0] = 0.0
    background[csf_pve > 0.0] = 0.0

    # Interface
    interface = np.zeros(img_shape)
    interface[gm_pve >= args.threshold] = 1.0
    interface[wm_pve < args.threshold] = 0.0

    # Include Exclude maps
    include_map = gm_pve
    include_map[background > 0.0] = 1.0

    exclude_map = csf_pve

    nib.Nifti1Image(include_map.astype('float32'),
                    img_affine).to_filename(args.include)
    nib.Nifti1Image(exclude_map.astype('float32'),
                    img_affine).to_filename(args.exclude)
    nib.Nifti1Image(interface.astype('float32'),
                    img_affine).to_filename(args.interface)
Example 16
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, [args.dwi, args.bvals, args.bvecs])

    # We don't assert the existence of any output here because there
    # are many possible inputs/outputs.

    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)
    bvals_min = bvals.min()

    # TODO refactor those checks
    # Should be min bval, then b0.
    if bvals_min < 0 or bvals_min > 20:
        raise ValueError(
            'The minimal b-value is less than 0 or greater than 20. This '
            'is highly suspicious. Please check your data to ensure '
            'everything is correct. Value found: {}'.format(bvals_min))

    b0_threshold = args.b0_thr
    if b0_threshold < 0 or b0_threshold > 20:
        raise ValueError('Invalid --b0_thr value (<0 or >20). This is '
                         'highly suspicious. Value found: {}'
                         .format(b0_threshold))

    if not np.isclose(bvals_min, 0.0):
        b0_threshold = max(b0_threshold, bvals_min)
        logging.warning('No b=0 image. Setting b0_threshold to %s',
                        b0_threshold)

    gtab = gradient_table(bvals, bvecs, b0_threshold=b0_threshold)
    b0_idx = np.where(gtab.b0s_mask)[0]

    logger.info('Number of b0 images in the data: %s', len(b0_idx))

    if args.mean:
        logger.info('Using mean of indices %s for b0', b0_idx)
        _mean_in_time(args.dwi, b0_idx, args.output)
    else:
        if not args.all:
            b0_idx = [b0_idx[0]]
        logger.info("Keeping %s for b0", b0_idx)
        _keep_time_step(args.dwi, b0_idx, args.output)
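
_mean_in_time and _keep_time_step are not shown. A plausible sketch of the
averaging variant, assuming only the usual nibabel/numpy imports (nib, np);
this is hypothetical, not the script's actual helper:

def _mean_in_time(dwi_path, b0_idx, out_path):
    img = nib.load(dwi_path)
    data = img.get_fdata(dtype=np.float32)
    # Average the selected b0 volumes along the last (time) axis.
    mean_b0 = np.mean(data[..., b0_idx], axis=-1)
    nib.save(nib.Nifti1Image(mean_b0, img.affine, img.header), out_path)
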
Example 17
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_seed_map, [args.tractogram])

    # Seed map information
    seed_map_img = nib.load(args.in_seed_map)
    seed_map_data = seed_map_img.get_fdata().astype(np.uint8)
    seed_map_affine = seed_map_img.affine

    # Load seed density as labels
    values = np.delete(np.unique(seed_map_data), 0)
    # Create colormap based on labels
    cmap = actor.create_colormap(values, name=args.colormap, auto=False)
    # Append opacity to colormap
    cmap = np.concatenate((cmap, np.full(
        (cmap.shape[0], 1), args.seed_opacity)),
                          axis=-1)

    scene = window.Scene()
    scene.background(tuple(map(int, args.background)))

    seedroi_actor = actor.contour_from_label(seed_map_data,
                                             seed_map_affine,
                                             color=cmap)
    scene.add(seedroi_actor)

    # Load tractogram as tubes or lines, with color if specified
    if args.tractogram:
        tractogram = nib.streamlines.load(args.tractogram).tractogram
        color = None
        if args.tractogram_color:
            color = tuple(map(int, args.tractogram_color))

        line_actor = streamline_actor[args.tractogram_shape](
            tractogram.streamlines,
            opacity=args.tractogram_opacity,
            colors=color,
            linewidth=args.tractogram_width)
        scene.add(line_actor)

    # Showtime!
    showm = window.ShowManager(scene, reset_camera=True)
    showm.initialize()
    showm.start()
Example 18
def main():
    parser = _build_args_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.tracts])
    assert_outputs_exists(parser, args, [args.out])
    check_tracts_support(parser, args.tracts, False)
    check_tracts_same_format(parser, args.tracts, args.out)

    if args.errorRate < 0.001 or args.errorRate > 1:
        logging.warning(
            'You are using an error rate of {}.\nWe recommend setting it '
            'between 0.001 and 1.\n0.001 will do almost nothing to the tracts '
            'while 1 will highly compress/linearize the tracts'.format(
                args.errorRate))

    compression_wrapper(args.tracts, args.out, args.errorRate)
Example 19
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    in_files = [args.json_stats]
    if args.ignore_bundles:
        in_files.append(args.ignore_bundles)

    assert_inputs_exist(parser, *in_files)
    assert_outputs_exists(parser, args, args.xlsx_stats)

    _create_xlsx_from_json(args.json_stats,
                           args.xlsx_stats,
                           sort_subs=args.no_sort_subs,
                           sort_bundles=args.no_sort_bundles,
                           ignored_bundles_fpath=args.ignore_bundles,
                           stats_over_population=args.stats_over_population)
Example 20
def main():

    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_tractogram)
    assert_outputs_exist(parser, args, args.out_tractogram)

    sft = load_tractogram_with_reference(parser, args, args.in_tractogram)

    if args.nb_pts_per_streamline:
        new_sft = resample_streamlines_num_points(sft,
                                                  args.nb_pts_per_streamline)
    else:
        new_sft = resample_streamlines_step_size(sft, args.step_size)

    save_tractogram(new_sft, args.out_tractogram)
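
The if/else above implies the two resampling options are alternatives. A
sketch of how the (unshown) _build_arg_parser might enforce that with
argparse; this is an assumption about the parser, not its actual code:

group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--nb_pts_per_streamline', type=int,
                   help='Resample each streamline to this many points.')
group.add_argument('--step_size', type=float,
                   help='Resample each streamline to this step size (mm).')
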
Example 21
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    assert_inputs_exist(parser, [args.in_moving_tractogram,
                                 args.in_target_file,
                                 args.in_transfo], args.in_deformation)
    assert_outputs_exist(parser, args, args.out_tractogram)

    moving_sft = load_tractogram_with_reference(parser, args,
                                                args.in_moving_tractogram,
                                                bbox_check=False)

    transfo = load_matrix_in_any_format(args.in_transfo)
    deformation_data = None
    if args.in_deformation is not None:
        deformation_data = np.squeeze(nib.load(
            args.in_deformation).get_fdata(dtype=np.float32))

    new_sft = transform_warp_sft(moving_sft, transfo,
                                 args.in_target_file,
                                 inverse=args.inverse,
                                 reverse_op=args.reverse_operation,
                                 deformation_data=deformation_data,
                                 remove_invalid=args.remove_invalid,
                                 cut_invalid=args.cut_invalid)

    if len(new_sft.streamlines) == 0:
        if args.no_empty:
            logging.debug("The file {} won't be written "
                          "(0 streamline).".format(args.out_tractogram))
            return

    if args.keep_invalid:
        if not new_sft.is_bbox_in_vox_valid():
            logging.warning('Saving tractogram with invalid streamlines.')
        save_tractogram(new_sft, args.out_tractogram, bbox_valid_check=False)
    else:
        if not new_sft.is_bbox_in_vox_valid():
            logging.warning('Removing invalid streamlines before '
                            'saving tractogram.')
            new_sft.remove_invalid_streamlines()
        save_tractogram(new_sft, args.out_tractogram)
Example 22
def main():
    parser = _build_args_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_files)

    all_valid = True
    for filepath in args.in_files:
        _, in_extension = split_name_with_nii(filepath)
        if in_extension not in ['.trk', '.nii', '.nii.gz']:
            parser.error(
                '{} does not have a supported extension'.format(filepath))
        if not is_header_compatible(args.in_files[0], filepath):
            print('{} and {} do not have compatible headers.'.format(
                args.in_files[0], filepath))
            all_valid = False
    if all_valid:
        print('All input files have compatible headers.')
Example 23
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_sh)
    assert_outputs_exist(parser, args, args.out_sh)

    sphere = get_sphere('repulsion724').subdivide(1)
    img = nib.load(args.in_sh)
    data = img.get_fdata(dtype=np.float32)

    new_data = convert_sh_basis(data,
                                sphere,
                                input_basis=args.sh_basis,
                                nbr_processes=args.nbr_processes)

    nib.save(nib.Nifti1Image(new_data, img.affine, header=img.header),
             args.out_sh)
Example 24
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_bundle, args.in_fodf])
    assert_outputs_exist(parser, args, [args.afd_mean_map, args.rd_mean_map])

    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    fodf_img = nib.load(args.in_fodf)

    afd_mean_map, rd_mean_map = afd_map_along_streamlines(
        sft, fodf_img, args.sh_basis, args.length_weighting)

    nib.Nifti1Image(afd_mean_map.astype(np.float32),
                    fodf_img.affine).to_filename(args.afd_mean_map)

    nib.Nifti1Image(rd_mean_map.astype(np.float32),
                    fodf_img.affine).to_filename(args.rd_mean_map)
Example 25
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.bundle])
    assert_outputs_exists(parser, args, [args.centroid_streamline])
    if args.distance_thres < 0.0:
        parser.error('--distance_thres {} should be '
                     'positive'.format(args.distance_thres))
    if args.nb_points < 2 or args.nb_points > 99:
        parser.error('--nb_points {} should be [2, 99]'.format(args.nb_points))

    tractogram = nib.streamlines.load(args.bundle)
    centroid_streamline = get_centroid_streamline(tractogram, args.nb_points,
                                                  args.distance_thres)
    nib.streamlines.save(centroid_streamline,
                         args.centroid_streamline,
                         header=tractogram.header)
Example 26
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_bundle)
    assert_outputs_exist(parser, args, args.out_centroid)

    if args.nb_points < 2:
        parser.error('--nb_points {} should be >= 2'.format(args.nb_points))

    sft = load_tractogram_with_reference(parser, args, args.in_bundle)

    centroid_streamlines = get_streamlines_centroid(sft.streamlines,
                                                    args.nb_points)

    sft = StatefulTractogram.from_sft(centroid_streamlines, sft)

    save_tractogram(sft, args.out_centroid)
Example 27
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.voxel_label_map])

    voxel_label_map_img = nib.load(args.voxel_label_map)
    voxel_label_map_data = voxel_label_map_img.get_data()
    spacing = voxel_label_map_img.header['pixdim'][1:4]

    labels = np.unique(voxel_label_map_data.astype(np.uint8))[1:]
    voxel_volume = np.prod(spacing)
    stats = {args.bundle_name: {'volume': {}}}
    for i in labels:
        stats[args.bundle_name]['volume']['{:02}'.format(i)] =\
            len(voxel_label_map_data[voxel_label_map_data == i]) * voxel_volume

    print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
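
The per-label volume above counts voxels with len() on a masked copy. An
equivalent that avoids materializing the intermediate array, with the same
names as in the loop:

# Same count without building the boolean-selected array:
voxel_count = np.count_nonzero(voxel_label_map_data == i)
stats[args.bundle_name]['volume']['{:02}'.format(i)] = \
    voxel_count * voxel_volume
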
Example 28
def main():
    parser = _build_args_parser()
    args = parser.parse_args()

    assert_inputs_exist(
        parser, [args.in_tractogram, args.config_file, args.transformation])

    for directory in args.models_directories:
        if not os.path.isdir(directory):
            parser.error('Input folder {0} does not exist'.format(directory))

    assert_output_dirs_exist_and_empty(parser, args, args.output)

    logging.basicConfig(
        filename=os.path.join(args.output, 'logfile.txt'),
        filemode='w',
        format='%(asctime)s, %(name)s %(levelname)s %(message)s',
        datefmt='%H:%M:%S',
        level=args.log_level)

    coloredlogs.install(level=args.log_level)

    transfo = np.loadtxt(args.transformation)
    if args.inverse:
        transfo = np.linalg.inv(transfo)

    with open(args.config_file) as json_data:
        config = json.load(json_data)

    voting = VotingScheme(
        config,
        args.models_directories,
        transfo,
        args.output,
        tractogram_clustering_thr=args.tractogram_clustering_thr,
        minimal_vote_ratio=args.minimal_vote_ratio,
        multi_parameters=args.multi_parameters)

    if args.seeds is None:
        seeds = [random.randint(1, 1000)]
    else:
        seeds = args.seeds

    voting(args.in_tractogram, nbr_processes=args.processes, seeds=seeds)
Example 29
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.input])
    assert_outputs_exists(parser, args, [args.output])

    if args.min_fit < 2:
        parser.error('--min_fit should be at least 2. Current value: {}'
                     .format(args.min_fit))
    if args.max_iter < 1:
        parser.error('--max_iter should be at least 1. Current value: {}'
                     .format(args.max_iter))
    if args.fit_thr <= 0:
        parser.error('--fit_thr should be greater than 0. Current value: {}'
                     .format(args.fit_thr))

    logging.basicConfig(level=getattr(logging, args.log))

    in_img = nib.load(args.input)
    in_data = in_img.get_data()

    in_data_flat = in_data.flatten()
    in_nzr_ind = np.nonzero(in_data_flat)
    in_nzr_val = np.array(in_data_flat[in_nzr_ind])

    X = in_nzr_ind[0][:, np.newaxis]
    model_ransac = linear_model.RANSACRegressor(
        base_estimator=linear_model.LinearRegression(),
        min_samples=args.min_fit,
        residual_threshold=args.fit_thr,
        max_trials=args.max_iter)
    model_ransac.fit(X, in_nzr_val)

    outlier_mask = np.logical_not(model_ransac.inlier_mask_)
    outliers = X[outlier_mask]

    logging.info('# outliers: %s', len(outliers))

    in_data_flat[outliers] = 0

    out_data = np.reshape(in_data_flat, in_img.shape)
    nib.save(nib.Nifti1Image(out_data, in_img.affine, in_img.header),
             args.output)
Example 30
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    # Checking args
    outputs = [args.out_sh]
    if args.out_sym:
        outputs.append(args.out_sym)
    assert_outputs_exist(parser, args, outputs)
    assert_inputs_exist(parser, args.in_sh)

    nbr_processes = validate_nbr_processes(parser, args)

    # Prepare data
    sh_img = nib.load(args.in_sh)
    data = sh_img.get_fdata(dtype=np.float32)

    sh_order, full_basis = get_sh_order_and_fullness(data.shape[-1])

    t0 = time.perf_counter()
    logging.info('Executing angle-aware bilateral filtering.')
    asym_sh = angle_aware_bilateral_filtering(
        data, sh_order=sh_order,
        sh_basis=args.sh_basis,
        in_full_basis=full_basis,
        sphere_str=args.sphere,
        sigma_spatial=args.sigma_spatial,
        sigma_angular=args.sigma_angular,
        sigma_range=args.sigma_range,
        use_gpu=args.use_gpu,
        nbr_processes=nbr_processes)
    t1 = time.perf_counter()
    logging.info('Elapsed time (s): {0}'.format(t1 - t0))

    logging.info('Saving filtered SH to file {0}.'.format(args.out_sh))
    nib.save(nib.Nifti1Image(asym_sh, sh_img.affine), args.out_sh)

    if args.out_sym:
        _, orders = sph_harm_ind_list(sh_order, full_basis=True)
        logging.info('Saving symmetric SH to file {0}.'.format(args.out_sym))
        nib.save(nib.Nifti1Image(asym_sh[..., orders % 2 == 0], sh_img.affine),
                 args.out_sym)
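
For reference, the orders % 2 == 0 selection above keeps the even-degree
terms, which form the symmetric SH basis; a quick size check with dipy
(the counts follow from the basis definitions):

from dipy.reconst.shm import sph_harm_ind_list

m, orders = sph_harm_ind_list(4, full_basis=True)  # order-4 full basis
print(len(orders))              # 25 coefficients: (4 + 1) ** 2
print((orders % 2 == 0).sum())  # 15 even-degree terms: (4 + 1) * (4 + 2) // 2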