def main():
    """Apply a linear transformation to an anatomical image (legacy CLI)."""
    parser = _buildArgsParser()
    args = parser.parse_args()

    # Every input path must point to an existing file.
    for path in (args.transformation, args.ref_file, args.in_file):
        if not os.path.isfile(path):
            parser.error('"{0}" must be a file!'.format(path))

    if os.path.isfile(args.out_name) and not args.force_overwrite:
        parser.error('"{0}" already exists! Use -f to overwrite it.'.format(
            args.out_name))

    transfo = np.loadtxt(args.transformation)
    if args.inverse:
        transfo = np.linalg.inv(transfo)

    ref_name, ref_extension = split_name_with_nii(args.ref_file)
    in_name, in_extension = split_name_with_nii(args.in_file)

    supported = ['.nii', '.nii.gz']
    if ref_extension not in supported:
        parser.error('"{0}" is in an unsupported format.'.format(
            args.ref_file))
    if in_extension not in supported:
        parser.error('"{0}" is in an unsupported format.'.format(args.in_file))

    transform_anatomy(transfo, args.ref_file, args.in_file, args.out_name)
def main():
    """Compute mean/std of metrics inside a (possibly weighted) ROI mask
    and print them as JSON."""
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.metrics_dir and os.path.exists(args.metrics_dir):
        list_metrics_files = glob.glob(
            os.path.join(args.metrics_dir, '*nii.gz'))
        assert_inputs_exist(parser, [args.in_mask] + list_metrics_files)
    elif args.metrics_file_list:
        assert_inputs_exist(parser, [args.in_mask] + args.metrics_file_list)

    # Load mask and validate content depending on flags
    mask_img = nib.load(args.in_mask)
    if len(mask_img.shape) > 3:
        logging.error('Mask should be a 3D image.')

    # Can be a weighted image
    mask_data = mask_img.get_fdata(dtype=np.float32)
    if np.min(mask_data) < 0:
        logging.error('Mask should not contain negative values.')

    # Discussion about the way the normalization is done.
    # https://github.com/scilus/scilpy/pull/202#discussion_r411355609
    if args.normalize_weights:
        mask_data /= np.max(mask_data)
    if np.min(mask_data) < 0.0 or np.max(mask_data) > 1.0:
        parser.error('Mask data should only contain values between 0 and 1. '
                     'Try --normalize_weights.')
    if args.bin:
        mask_data[np.where(mask_data > 0.0)] = 1.0

    # Load all metrics files.
    if args.metrics_dir:
        # BUGFIX: build paths with os.path.join instead of raw string
        # concatenation, which broke when metrics_dir had no trailing slash.
        metrics_files = [nib.load(os.path.join(args.metrics_dir, f))
                         for f in sorted(os.listdir(args.metrics_dir))]
    elif args.metrics_file_list:
        metrics_files = [nib.load(f) for f in args.metrics_file_list]

    # Compute the mean values and standard deviations
    stats = get_roi_metrics_mean_std(mask_data, metrics_files)
    roi_name = split_name_with_nii(os.path.basename(args.in_mask))[0]
    json_stats = {roi_name: {}}
    for metric_file, (mean, std) in zip(metrics_files, stats):
        metric_name = split_name_with_nii(
            os.path.basename(metric_file.get_filename()))[0]
        json_stats[roi_name][metric_name] = {
            'mean': mean.item(),
            'std': std.item()
        }

    print(json.dumps(json_stats, indent=args.indent,
                     sort_keys=args.sort_keys))
def transform_dataset(in_filename, ref_filename, def_filename,
                      filename_to_save, field_source):
    """Warp a TRK tractogram with a deformation field and save the result,
    using *ref_filename* (TRK or NIfTI) to build the output header."""
    in_tractogram = nib.streamlines.load(in_filename)

    _, ref_extension = split_name_with_nii(ref_filename)
    if ref_extension == '.trk':
        # A TRK reference already carries a usable header.
        ref_header = nib.streamlines.load(ref_filename, lazy_load=True).header
    else:
        ref_header = modify_tractogram_header_using_anat_header(
            in_tractogram.header, nib.load(ref_filename))

    deformation = nib.load(def_filename)
    deformation_data = np.squeeze(deformation.get_data())

    # The deformation field must live in the same space as the tractogram.
    if not np.allclose(deformation.affine,
                       in_tractogram.header["voxel_to_rasmm"]):
        raise ValueError('Both affines are not equal')
    if not np.array_equal(deformation_data.shape[0:3],
                          in_tractogram.header["dimensions"]):
        raise ValueError('Both dimensions are not equal')

    transfo = in_tractogram.header["voxel_to_rasmm"]
    # Streamlines are warped in place.
    warp_tractogram(in_tractogram.streamlines, transfo, deformation_data,
                    field_source)

    new_tractogram = nib.streamlines.Tractogram(in_tractogram.streamlines,
                                                affine_to_rasmm=np.eye(4))
    trkfile = nib.streamlines.TrkFile(new_tractogram, header=ref_header)
    nib.streamlines.save(trkfile, filename_to_save)
def main():
    """Reorder a DWI and its bval/bvec files to match a gradient table."""
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    required_args = [args.dwi, args.bvec, args.bval, args.table]

    # FIX: only the extension is needed here; the previous unused local
    # 'baseName' shadowed args.baseName and invited confusion.
    _, extension = split_name_with_nii(args.dwi)
    output_filenames = [args.baseName + extension,
                        args.baseName + '.bval',
                        args.baseName + '.bvec']

    assert_inputs_exist(parser, required_args)
    assert_outputs_exist(parser, args, output_filenames)

    oTable = np.loadtxt(args.table, skiprows=1)
    bvals, bvecs = read_bvals_bvecs(args.bval, args.bvec)
    dwis = nb.load(args.dwi)

    # Indices that map the acquisition order onto the table order.
    newIndex = valideInputs(oTable, dwis, bvals, bvecs)

    bvecs = bvecs[newIndex]
    bvals = bvals[newIndex]

    data = dwis.get_data()
    data = data[:, :, :, newIndex]

    nb.save(nb.Nifti1Image(data.astype(dwis.get_data_dtype()),
                           dwis.affine, header=dwis.header),
            output_filenames[0])
    np.savetxt(args.baseName + '.bval', bvals.reshape(1, len(bvals)), '%d')
    np.savetxt(args.baseName + '.bvec', bvecs.T, '%0.15f')
def fsl2mrtrix(fsl_bval_filename, fsl_bvec_filename, mrtrix_filename):
    """
    Convert fsl dir_grad.bvec/.bval files to a mrtrix encoding.b file.

    Parameters
    ----------
    fsl_bval_filename: str
        path to input fsl bval file.
    fsl_bvec_filename: str
        path to input fsl bvec file.
    mrtrix_filename : str
        path to output mrtrix encoding.b file (the '.b' extension is
        appended by save_scheme_mrtrix).
    """

    shells = np.loadtxt(fsl_bval_filename)
    points = np.loadtxt(fsl_bvec_filename)
    bvals = np.unique(shells).tolist()

    # FSL bvecs are expected as 3 rows (x, y, z); transpose if needed.
    if not points.shape[0] == 3:
        points = points.transpose()
        logging.warning('WARNING: Your bvecs seem transposed. ' +
                        'Transposing them.')

    shell_idx = [int(np.where(bval == bvals)[0]) for bval in shells]
    basefilename, ext = split_name_with_nii(mrtrix_filename)

    # BUGFIX: save_scheme_mrtrix() takes no 'verbose' keyword; passing one
    # raised TypeError at call time.
    save_scheme_mrtrix(points, shell_idx, bvals, basefilename)
def save_scheme_bvecs_bvals(points, shell_idx, bvals,
                            filename=None, filename_bval=None,
                            filename_bvec=None):
    """
    Save gradient table (FSL format).

    Parameters
    ----------
    points: numpy.array
        bvecs normalized to 1.
    shell_idx: numpy.array
        Shell index for bvecs in points.
    bvals: numpy.array
        b-value of each shell.
    filename: str, optional
        Output base name; '.bval' / '.bvec' extensions are appended.
        If not given, filename_bval and filename_bvec must both be provided.
    filename_bval: str, optional
        Explicit output path for the bval file.
    filename_bvec: str, optional
        Explicit output path for the bvec file.
    """
    if filename:
        fullfilename, ext = split_name_with_nii(filename)
        filename_bval = fullfilename + '.bval'
        filename_bvec = fullfilename + '.bvec'
    else:
        # BUGFIX: the log line below referenced 'fullfilename', which was
        # undefined when only explicit bval/bvec names were given, raising
        # NameError. Derive a display base name from the bvec path instead.
        fullfilename = os.path.splitext(filename_bvec)[0]

    np.savetxt(filename_bvec, points.T, fmt='%.8f')
    np.savetxt(filename_bval,
               np.array([bvals[idx] for idx in shell_idx])[None, :],
               fmt='%.3f')

    logging.info('Scheme saved in FSL format as {}'.format(
        fullfilename + '{.bvec/.bval}'))
def main():
    """Warp a TRK tractogram with a deformation field (legacy CLI)."""
    parser = _buildArgsParser()
    args = parser.parse_args()

    # Every input path must point to an existing file.
    for required in (args.in_file, args.ref_file, args.deformation):
        if not os.path.isfile(required):
            parser.error('"{0}" must be a file!'.format(required))

    if os.path.isfile(args.out_name) and not args.force_overwrite:
        parser.error('"{0}" already exists! Use -f to overwrite it.'.format(
            args.out_name))

    if not nib.streamlines.TrkFile.is_correct_format(args.in_file):
        parser.error('The input file needs to be a TRK file')

    _, ref_extension = split_name_with_nii(args.ref_file)
    if ref_extension not in ['.trk', '.nii', '.nii.gz']:
        raise ValueError('"{0}" is in an unsupported format.'.format(
            args.ref_file))

    transform_dataset(args.in_file, args.ref_file, args.deformation,
                      args.out_name, args.field_source)
def save_scheme_caru(points, shell_idx, filename):
    """
    Save gradient table (Caruyer format).

    Parameters
    ----------
    points: numpy.array
        bvecs normalized to 1.
    shell_idx: numpy.array
        Shell index for bvecs in points.
    filename: str
        Output file name; a '.caru' extension is appended.
    """
    fullfilename, ext = split_name_with_nii(filename)
    fullfilename = fullfilename + '.caru'
    # BUGFIX: open the file for writing; the default 'r' mode made every
    # f.write() fail (and required the file to already exist).
    with open(fullfilename, 'w') as f:
        f.write('# Caruyer format sampling scheme\n')
        f.write('# X Y Z shell_idx\n')
        for idx in range(points.shape[0]):
            f.write('{:.8f} {:.8f} {:.8f} {:.0f}\n'.format(
                points[idx, 0], points[idx, 1], points[idx, 2],
                shell_idx[idx]))

    logging.info('Scheme saved in Caruyer format as {}'.format(fullfilename))
def main():
    """Print mean/std of each metric over a bundle as JSON."""
    parser = _build_arg_parser()
    args = parser.parse_args()
    assert_inputs_exist(parser, [args.in_bundle] + args.metrics,
                        optional=args.reference)
    assert_same_resolution(args.metrics)

    metric_imgs = [nib.load(path) for path in args.metrics]
    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    # Work in voxel space, corner-aligned, to match the metric grids.
    sft.to_vox()
    sft.to_corner()

    bundle_stats = get_bundle_metrics_mean_std(sft.streamlines,
                                               metric_imgs,
                                               args.density_weighting)

    bundle_name, _ = os.path.splitext(os.path.basename(args.in_bundle))

    stats = {bundle_name: {}}
    for img, (mean, std) in zip(metric_imgs, bundle_stats):
        name = split_name_with_nii(os.path.basename(img.get_filename()))[0]
        stats[bundle_name][name] = {'mean': mean, 'std': std}

    print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
def main():
    """Apply a linear transformation to a TRK tractogram (legacy CLI)."""
    parser = _buildArgsParser()
    args = parser.parse_args()

    # Validate every input file, then the output overwrite policy.
    for path in (args.in_file, args.ref_file, args.transformation):
        if not os.path.isfile(path):
            parser.error('"{0}" must be a file!'.format(path))

    if os.path.isfile(args.out_name) and not args.force_overwrite:
        parser.error('"{0}" already exists! Use -f to overwrite it.'.format(
            args.out_name))

    if not nib.streamlines.TrkFile.is_correct_format(args.in_file):
        parser.error('The input file needs to be a TRK file')

    _, ref_extension = split_name_with_nii(args.ref_file)
    if ref_extension == '.trk' and \
            not nib.streamlines.TrkFile.is_correct_format(args.ref_file):
        parser.error('"{0}" is not a valid TRK file.'.format(args.ref_file))
    if ref_extension not in ['.trk', '.nii', '.nii.gz']:
        parser.error('"{0}" is in an unsupported format.'.format(
            args.ref_file))

    transfo = np.loadtxt(args.transformation)
    if args.inverse:
        transfo = np.linalg.inv(transfo)

    transform_tractogram(args.in_file, args.ref_file, transfo, args.out_name)
def save_scheme_mrtrix(points, shell_idx, bvals, filename):
    """
    Save gradient table (MRtrix format).

    Parameters
    ----------
    points: numpy.array
        bvecs normalized to 1.
    shell_idx: numpy.array
        Shell index for bvecs in points.
    bvals: numpy.array
        b-value of each shell.
    filename: str
        Output file name; a '.b' extension is appended.
    """
    fullfilename, ext = split_name_with_nii(filename)
    fullfilename = fullfilename + '.b'

    # BUGFIX: open for writing; the default read mode made f.write() fail
    # (and required the file to already exist).
    with open(fullfilename, 'w') as f:
        for idx in range(points.shape[0]):
            f.write('{:.8f} {:.8f} {:.8f} {:.2f}\n'.format(
                points[idx, 0], points[idx, 1], points[idx, 2],
                bvals[shell_idx[idx]]))

    logging.info('Scheme saved in MRtrix format as {}'.format(fullfilename))
def main():
    """Compute mean/std of metric values inside a weighting mask and print
    them as JSON (legacy script)."""
    parser = buildArgsParser()
    args = parser.parse_args()

    if not os.path.isfile(args.mask):
        parser.error('"{0}" must be a file!'.format(args.mask))

    # Load mask and validate content depending on flags
    img = nb.load(args.mask)
    if not issubclass(img.get_data_dtype().type, np.floating) and \
            not args.normalize_weights:
        parser.error('The mask file must contain floating point numbers.')

    weighting_data = img.get_data().astype(np.float64)
    if args.normalize_weights:
        weighting_data /= np.sum(weighting_data)
    if np.min(weighting_data) < 0.0 or np.max(weighting_data) > 1.0:
        parser.error('Mask data should only contain values between 0 and 1. '
                     'Try --normalize_weights.')
    if args.bin:
        weighting_data[np.where(weighting_data > 0.0)] = 1.0

    # Load all metrics files, and keep some header information.
    if args.metrics_dir:
        # BUGFIX: join directory and file name with os.path.join instead of
        # raw concatenation, which broke without a trailing slash.
        metrics_files = [nb.load(os.path.join(args.metrics_dir, f))
                         for f in sorted(os.listdir(args.metrics_dir))]
    elif args.metrics_file_list:
        metrics_files = [nb.load(f) for f in args.metrics_file_list]

    # Compute the mean values and standard deviations
    stats = get_metrics_stats_over_volume(weighting_data, metrics_files)
    roi_name = split_name_with_nii(os.path.basename(args.mask))[0]
    json_stats = {roi_name: {}}
    for metric_file, (mean, std) in zip(metrics_files, stats):
        metric_name = split_name_with_nii(
            os.path.basename(metric_file.get_filename()))[0]
        json_stats[roi_name][metric_name] = {'mean': mean, 'std': std}

    print(json.dumps(json_stats, indent=args.indent,
                     sort_keys=args.sort_keys))
def _split_time_steps(b0, affine, header, output):
    """Save each last-axis volume of *b0* as a NIfTI file. A single volume
    keeps *output* as its name; several volumes get an _<index> suffix."""
    fname, fext = split_name_with_nii(os.path.basename(output))
    out_dir = os.path.dirname(os.path.abspath(output))

    multiple_b0 = b0.shape[-1] > 1
    for t in range(b0.shape[-1]):
        if multiple_b0:
            out_name = os.path.join(out_dir, '{}_{}{}'.format(fname, t, fext))
        else:
            out_name = output
        nib.save(nib.Nifti1Image(b0[..., t], affine, header), out_name)
def main():
    """Compute per-point mean/std metric profiles along a bundle and print
    them as JSON (including the raw tract profile)."""
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.bundle] + args.metrics)

    # At least 2 points are needed to define a profile.
    if args.num_points <= 1:
        parser.error('--num_points {} needs to be greater than '
                     '1'.format(args.num_points))

    metrics = [nib.load(m) for m in args.metrics]
    assert_same_resolution(*metrics)

    bundle_tractogram_file = nib.streamlines.load(args.bundle)

    bundle_name, _ = os.path.splitext(os.path.basename(args.bundle))
    stats = {}
    if len(bundle_tractogram_file.streamlines) == 0:
        # Empty bundle: emit a null entry instead of failing.
        stats[bundle_name] = None
        print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
        return

    bundle_streamlines_vox = load_in_voxel_space(bundle_tractogram_file,
                                                 metrics[0])
    # Resample each streamline to num_points equidistant (arc-length) points.
    bundle_subsampled = subsample_streamlines(bundle_streamlines_vox,
                                              num_points=args.num_points,
                                              arc_length=True)

    # Make sure all streamlines go in the same direction. We want to make
    # sure point #1 / 20 of streamline A is matched with point #1 / 20 of
    # streamline B and so on
    num_streamlines = len(bundle_subsampled)
    reference = bundle_subsampled[0]
    for s in np.arange(num_streamlines):
        streamline = bundle_subsampled[s]
        direct = average_euclidean(reference, streamline)
        flipped = average_euclidean(reference, streamline[::-1])

        if flipped < direct:
            # Reversed orientation fits the reference better; flip in place.
            bundle_subsampled[s] = streamline[::-1]

    profiles = get_metrics_profile_over_streamlines(bundle_subsampled,
                                                    metrics)
    # Reshape the per-streamline profiles for the 'tractprofile' output.
    t_profiles = np.expand_dims(profiles, axis=1)
    t_profiles = np.rollaxis(t_profiles, 3, 2)

    stats[bundle_name] = {}
    for metric, profile, t_profile in zip(metrics, profiles, t_profiles):
        metric_name, _ = split_name_with_nii(
            os.path.basename(metric.get_filename()))
        stats[bundle_name][metric_name] = {
            'mean': np.mean(profile, axis=0).tolist(),
            'std': np.std(profile, axis=0).tolist(),
            'tractprofile': t_profile.tolist()
        }

    print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
def main():
    """Apply a linear transformation to an anatomical image."""
    parser = _build_args_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_file, args.ref_file,
                                 args.transformation])
    assert_outputs_exists(parser, args, [args.out_name])

    transfo = np.loadtxt(args.transformation)
    if args.inverse:
        transfo = np.linalg.inv(transfo)

    _, ref_extension = split_name_with_nii(args.ref_file)
    _, in_extension = split_name_with_nii(args.in_file)
    # BUGFIX: the two error messages below previously named the wrong file
    # (the ref check reported in_file and vice versa).
    if ref_extension not in ['.nii', '.nii.gz']:
        parser.error('{} is an unsupported format.'.format(args.ref_file))
    if in_extension not in ['.nii', '.nii.gz']:
        parser.error('{} is an unsupported format.'.format(args.in_file))

    transform_anatomy(transfo, args.ref_file, args.in_file, args.out_name)
def main():
    """Apply a linear transformation to an anatomical image."""
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_file, args.in_target_file,
                                 args.in_transfo])
    assert_outputs_exist(parser, args, args.out_name)

    transfo = load_matrix_in_any_format(args.in_transfo)
    if args.inverse:
        transfo = np.linalg.inv(transfo)

    # Both images must be NIfTI.
    nifti_exts = ['.nii', '.nii.gz']
    target_ext = split_name_with_nii(args.in_target_file)[1]
    moving_ext = split_name_with_nii(args.in_file)[1]
    if target_ext not in nifti_exts:
        parser.error('{} is an unsupported format.'.format(
            args.in_target_file))
    if moving_ext not in nifti_exts:
        parser.error('{} is an unsupported format.'.format(args.in_file))

    transform_anatomy(transfo, args.in_target_file, args.in_file,
                      args.out_name, keep_dtype=args.keep_dtype)
def transform_tractogram(in_filename, ref_filename, transfo,
                         filename_to_save):
    """
    Apply an affine to a tractogram and save it as TRK or TCK, building the
    TRK header from *ref_filename* (TRK or NIfTI).
    """
    tractogram = nib.streamlines.load(in_filename)

    _, out_extension = split_name_with_nii(filename_to_save)
    if out_extension == '.trk':
        # Only TRK/NII can be a reference, because they have an affine
        _, ref_extension = split_name_with_nii(ref_filename)
        if ref_extension == '.trk':
            ref_tractogram = nib.streamlines.load(ref_filename,
                                                  lazy_load=True)
            ref_header = ref_tractogram.header
        else:
            ref_img = nib.load(ref_filename)
            ref_header = create_header_from_anat(ref_img)
    elif out_extension == '.tck':
        ref_header = nib.streamlines.TckFile.create_empty_header()
    else:
        # BUGFIX: any other extension previously left ref_header unbound and
        # crashed later with UnboundLocalError; fail early with a clear
        # message instead.
        raise ValueError('Unsupported output extension: {}'.format(
            out_extension))

    tractogram.tractogram.apply_affine(transfo)

    new_tractogram = nib.streamlines.Tractogram(tractogram.streamlines,
                                                affine_to_rasmm=np.eye(4))
    nib.streamlines.save(new_tractogram, filename_to_save,
                         header=ref_header)
def compute_gt_masks(gt_bundles, parser, args):
    """
    Compute ground-truth masks. If the ground-truth is
    already a mask, load it. If the ground-truth is a
    bundle, compute the mask.

    Parameters
    ----------
    gt_bundles: list
        List of either StatefulTractograms or niftis.
    parser: ArgumentParser
        Argument parser which handles the script's arguments.
    args: Namespace
        List of arguments passed to the script.

    Returns
    -------
    gt_bundle_masks: list of numpy.ndarray
        Binary masks of the ground-truth bundles.
    gt_bundle_inv_masks: list of numpy.ndarray
        Inverse (background) masks of the ground-truth bundles.
    affine: numpy.ndarray
        Affine of the last loaded image/tractogram.
    dimensions: tuple
        Dimensions of the last loaded image/tractogram.
    """
    gt_bundle_masks = []
    gt_bundle_inv_masks = []

    # BUGFIX: iterate over the gt_bundles parameter instead of reaching into
    # args.gt_bundles, which silently ignored the argument.
    for gt_bundle in gt_bundles:
        # Support ground truth as streamlines or masks
        # Will be converted to binary masks immediately
        _, ext = split_name_with_nii(gt_bundle)
        if ext in ['.gz', '.nii.gz']:
            gt_img = nib.load(gt_bundle)
            gt_mask = get_data_as_mask(gt_img)
            affine = gt_img.affine
            dimensions = gt_mask.shape
        else:
            gt_sft = load_tractogram_with_reference(
                parser, args, gt_bundle, bbox_check=False)
            gt_sft.to_vox()
            gt_sft.to_corner()
            affine, dimensions, _, _ = gt_sft.space_attributes
            gt_mask = compute_tract_counts_map(gt_sft.streamlines,
                                               dimensions).astype(np.int16)
        gt_inv_mask = np.zeros(dimensions, dtype=np.int16)
        gt_inv_mask[gt_mask == 0] = 1
        gt_mask[gt_mask > 0] = 1
        gt_bundle_masks.append(gt_mask)
        gt_bundle_inv_masks.append(gt_inv_mask)

    return gt_bundle_masks, gt_bundle_inv_masks, affine, dimensions
def main():
    """Label connected components of an image and optionally split each
    label into its own binary volume."""
    parser = _build_arg_parser()
    args = parser.parse_args()

    logging.basicConfig(level=getattr(logging, args.log))

    assert_inputs_exist(parser, [args.input])
    assert_outputs_exists(parser, args, [args.output])

    # The --split option writes <fname>_<label><ext> siblings; refuse to
    # clobber existing ones unless -f was given.
    fname, fext = split_name_with_nii(args.output)
    output_folder_content = glob.glob(
        os.path.join(os.path.dirname(args.output), "{}_*".format(fname)))
    if output_folder_content and not args.overwrite:
        parser.error(
            'Output folder contains file(s) that might be '
            'overwritten. Either remove files {}_* or use -f'.format(fname))

    img = nib.load(args.input)
    number_of_dimensions = len(img.shape)
    data = img.get_data()

    if args.structure_connectivity < 1 or \
            args.structure_connectivity > number_of_dimensions:
        raise ValueError('--structure_connectivity should be greater than 0 '
                         'and less or equal to the number of dimension of the '
                         'input data. Value found: {}'.format(
                             args.structure_connectivity))

    s = generate_binary_structure(len(img.shape),
                                  args.structure_connectivity)
    labeled_data, num_labels = label(data, structure=s)
    logging.info('Found %s labels', num_labels)

    img.header.set_data_dtype(labeled_data.dtype)
    nib.save(nib.Nifti1Image(labeled_data, img.affine, img.header),
             args.output)

    if args.split:
        img.header.set_data_dtype(np.uint8)
        num_digits_labels = len(str(num_labels + 1))
        # BUGFIX: xrange does not exist in Python 3; use range.
        for i in range(1, num_labels + 1):
            current_label_data = np.zeros_like(data, dtype=np.uint8)
            current_label_data[labeled_data == i] = 1
            out_name = os.path.join(
                os.path.dirname(os.path.abspath(args.output)),
                '{}_{}{}'.format(fname,
                                 str(i).zfill(num_digits_labels),
                                 fext))
            nib.save(
                nib.Nifti1Image(current_label_data, img.affine, img.header),
                out_name)
def main():
    """Project per-streamline mean metric values onto the bundle's endpoint
    voxels and save one endpoint map per metric."""
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.in_bundle] + args.in_metrics)
    assert_output_dirs_exist_and_empty(parser, args, args.out_folder,
                                       create_dir=True)

    assert_same_resolution(args.in_metrics)

    sft = load_tractogram_with_reference(parser, args, args.in_bundle)
    sft.to_vox()
    sft.to_corner()

    if len(sft.streamlines) == 0:
        # BUGFIX: the parsed argument is 'in_bundle'; 'args.bundle' raised
        # AttributeError whenever the bundle was empty.
        logging.warning('Empty bundle file {}. Skipping'.format(
            args.in_bundle))
        return

    mins, maxs, indices = _process_streamlines(sft.streamlines)

    metrics = [nib.load(metric) for metric in args.in_metrics]
    for metric in metrics:
        data = metric.get_fdata(dtype=np.float32)
        endpoint_metric_map = np.zeros(metric.shape)
        count = np.zeros(metric.shape)
        for cur_min, cur_max, cur_ind, orig_s in zip(mins, maxs,
                                                     indices,
                                                     sft.streamlines):
            streamline_mean = _compute_streamline_mean(cur_ind,
                                                       cur_min,
                                                       cur_max,
                                                       data)
            # Accumulate the streamline mean at both endpoints.
            xyz = orig_s[0, :].astype(int)
            endpoint_metric_map[xyz[0], xyz[1], xyz[2]] += streamline_mean
            count[xyz[0], xyz[1], xyz[2]] += 1

            xyz = orig_s[-1, :].astype(int)
            endpoint_metric_map[xyz[0], xyz[1], xyz[2]] += streamline_mean
            count[xyz[0], xyz[1], xyz[2]] += 1

        # Average only where at least one endpoint landed.
        endpoint_metric_map[count != 0] /= count[count != 0]
        metric_fname, ext = split_name_with_nii(
            os.path.basename(metric.get_filename()))
        nib.save(nib.Nifti1Image(endpoint_metric_map, metric.affine,
                                 metric.header),
                 os.path.join(args.out_folder,
                              '{}_endpoints_metric{}'.format(metric_fname,
                                                             ext)))
def _keep_time_step(dwi, time, output):
    """Extract the requested time indices from a 4D DWI and save each one.
    A single index keeps *output* as its name; several get an _<t> suffix."""
    image = nib.load(dwi)
    data = image.get_fdata(dtype=np.float32)

    fname, fext = split_name_with_nii(os.path.basename(output))
    out_dir = os.path.dirname(os.path.abspath(output))
    multi_b0 = len(time) > 1

    for t in time:
        volume = data[..., t]
        if multi_b0:
            out_name = os.path.join(out_dir,
                                    '{}_{}{}'.format(fname, t, fext))
        else:
            out_name = output
        nib.save(nib.Nifti1Image(volume, image.affine, image.header),
                 out_name)
def transform_tractogram(in_filename, ref_filename, def_filename,
                         filename_to_save, field_source):
    """
    Warp a tractogram with a deformation field and save it as TRK or TCK,
    building the TRK header from *ref_filename* (TRK or NIfTI).
    """
    in_tractogram = nib.streamlines.load(in_filename)

    _, out_extension = split_name_with_nii(filename_to_save)
    if out_extension == '.trk':
        # Only TRK/NII can be a reference, because they have an affine
        _, ref_extension = split_name_with_nii(ref_filename)
        if ref_extension == '.trk':
            ref_tractogram = nib.streamlines.load(ref_filename,
                                                  lazy_load=True)
            ref_header = ref_tractogram.header
        else:
            ref_img = nib.load(ref_filename)
            ref_header = create_header_from_anat(ref_img)
    elif out_extension == '.tck':
        ref_header = nib.streamlines.TckFile.create_empty_header()
    else:
        # BUGFIX: other extensions previously left ref_header unbound and
        # crashed later with UnboundLocalError; fail early and clearly.
        raise ValueError('Unsupported output extension: {}'.format(
            out_extension))

    deformation = nib.load(def_filename)
    deformation_data = np.squeeze(deformation.get_data())

    if not np.allclose(deformation.affine,
                       in_tractogram.header["voxel_to_rasmm"]):
        raise ValueError('Both affines are not equal')

    if not np.array_equal(deformation_data.shape[0:3],
                          in_tractogram.header["dimensions"]):
        raise ValueError('Both dimensions are not equal')

    transfo = in_tractogram.header["voxel_to_rasmm"]
    # Warning: Apply warp in-place
    warp_tractogram(in_tractogram.streamlines, transfo, deformation_data,
                    field_source)

    new_tractogram = nib.streamlines.Tractogram(in_tractogram.streamlines,
                                                affine_to_rasmm=np.eye(4))
    nib.streamlines.save(new_tractogram, filename_to_save,
                         header=ref_header)
def extract_tails_heads_from_endpoints(gt_endpoints, out_dir):
    """
    Extract two masks from a single mask containing two regions.

    Parameters
    ----------
    gt_endpoints: list of str
        List of ground-truth mask filenames.
    out_dir: str
        Directory in which the generated _head/_tail mask files are saved.

    Returns
    -------
    tails: list
        List of tail filenames.
    heads: list
        List of head filenames.
    affine: numpy.ndarray
        Affine of mask image.
    dimensions: tuple of int
        Dimensions of the mask image.
    """
    tails = []
    heads = []
    for mask_filename in gt_endpoints:
        mask_img = nib.load(mask_filename)
        mask = get_data_as_mask(mask_img)
        affine = mask_img.affine
        dimensions = mask.shape

        # Separate the two regions with k-means clustering.
        head, tail = split_heads_tails_kmeans(mask)

        basename = os.path.basename(
            split_name_with_nii(mask_filename)[0])
        tail_filename = os.path.join(
            out_dir, '{}_tail.nii.gz'.format(basename))
        head_filename = os.path.join(
            out_dir, '{}_head.nii.gz'.format(basename))
        nib.save(nib.Nifti1Image(head.astype(
            mask.dtype), affine), head_filename)
        nib.save(nib.Nifti1Image(tail.astype(
            mask.dtype), affine), tail_filename)

        tails.append(tail_filename)
        heads.append(head_filename)

    # NOTE: affine/dimensions come from the last mask processed.
    return tails, heads, affine, dimensions
def _test(self, bundle_path, gt):
    """Run the centroids/label/distance/mean-std/plot pipeline on
    *bundle_path* and compare the generated plots to ground truth *gt*."""
    centroids_path = os.path.join(self._tmp_dir, 'centroids.trk')
    label_map = os.path.join(self._tmp_dir, 'label.npz')
    distance_map = os.path.join(self._tmp_dir, 'distance.npz')
    metrics = generate_metrics(self._tmp_dir)
    meanstdperpoint_path = os.path.join(
        self._tmp_dir, 'meanstdperpoint.json')

    # We need to create the centroids, label and distance maps, then
    # the mean/std per point in order to test the plot.
    self.call(main_centroids,
              '-f', bundle_path, centroids_path, nb_points=5)
    self.call(main_label_and_distance_maps,
              '-f', bundle_path, centroids_path,
              label_map, distance_map)
    with RedirectStdOut() as output:
        self.call(main_meanstdperpoint,
                  bundle_path, label_map, distance_map, *metrics)

    with open(meanstdperpoint_path, 'w') as meanstdperpoint_file:
        meanstdperpoint_file.writelines(output)

    bundle_name, _ = os.path.splitext(os.path.basename(bundle_path))
    save_plots_to = os.path.join(self._tmp_dir, bundle_name)
    os.mkdir(save_plots_to)
    self.call(main_plot, meanstdperpoint_path, save_plots_to)

    # Regenerate the expected plots from the ground-truth values.
    save_plots_gt_to = os.path.join(self._tmp_dir, bundle_name + '_gt')
    os.mkdir(save_plots_gt_to)
    for metric_path in metrics:
        metric, _ = split_name_with_nii(os.path.basename(metric_path))
        fig = plot_metrics_stats(
            np.array(gt[metric]['means']), np.array(gt[metric]['stds']),
            title=bundle_name,
            xlabel='Location along the streamline',
            ylabel=metric)
        fig.savefig(
            os.path.join(save_plots_gt_to,
                         '{}_{}.png'.format(bundle_name, metric)),
            bbox_inches='tight')

    dcmp = dircmp(save_plots_to, save_plots_gt_to)
    if dcmp.diff_files:
        # BUGFIX: the exception was instantiated but never raised, so a plot
        # mismatch could never actually fail this test.
        raise self.failureException(
            'Plots differ from ground truth: {}'.format(dcmp.diff_files))
def main():
    """Check that all given TRK/NIfTI files share a compatible header."""
    parser = _build_args_parser()
    args = parser.parse_args()
    assert_inputs_exist(parser, args.in_files)

    all_valid = True
    reference = args.in_files[0]
    for filepath in args.in_files:
        # Only TRK and NIfTI carry headers we can compare.
        ext = split_name_with_nii(filepath)[1]
        if ext not in ['.trk', '.nii', '.nii.gz']:
            parser.error(
                '{} does not have a supported extension'.format(filepath))

        if not is_header_compatible(reference, filepath):
            print('{} and {} do not have compatible header.'.format(
                reference, filepath))
            all_valid = False

    if all_valid:
        print('All input files have compatible headers.')
def save_scheme_siemens(points, shell_idx, bvals, filename):
    """
    Save gradient table (Siemens format).

    Parameters
    ----------
    points: numpy.array
        bvecs normalized to 1.
    shell_idx: numpy.array
        Shell index for bvecs in points.
    bvals: numpy.array
        b-value of each shell.
    filename: str
        Output file name; a '.dvs' extension is appended.
    """
    str_save = []
    str_save.append('[Directions={}]'.format(points.shape[0]))
    str_save.append('CoordinateSystem = XYZ')
    str_save.append('Normalisation = None')

    # Scale bvecs with q-value
    bvals = np.array([bvals[idx] for idx in shell_idx])
    bmax = np.array(bvals).max()
    bvecs_norm = (bvals / float(bmax))**(0.5)

    # ugly work around for the division by b0 / replacing NaNs with 0.0
    old_settings = np.seterr(divide='ignore', invalid='ignore')
    points = points / bvecs_norm[:, None]
    np.seterr(**old_settings)
    points[np.isnan(points)] = 0.0
    points[np.isinf(points)] = 0.0

    for idx in range(points.shape[0]):
        str_save.append('vector[{}] = ( {}, {}, {} )'.format(
            idx, points[idx, 0], points[idx, 1], points[idx, 2]))

    fullfilename, ext = split_name_with_nii(filename)
    fullfilename = fullfilename + '.dvs'
    # BUGFIX: open for writing; the default read mode made f.write() fail
    # (and required the file to already exist).
    with open(fullfilename, 'w') as f:
        for idx in range(len(str_save)):
            f.write(str_save[idx] + '\n')

    logging.info('Scheme saved in Siemens format as {}'.format(fullfilename))
def main():
    """Warp a TRK tractogram with a deformation field."""
    parser = _build_args_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser,
                        [args.in_file, args.ref_file, args.deformation])
    assert_outputs_exist(parser, args, args.out_name)

    if not nib.streamlines.TrkFile.is_correct_format(args.in_file):
        parser.error('The input file needs to be a TRK file')

    # The reference must be a valid TRK, or a NIfTI image.
    ref_ext = split_name_with_nii(args.ref_file)[1]
    if ref_ext == '.trk':
        if not nib.streamlines.TrkFile.is_correct_format(args.ref_file):
            parser.error('{} is not a valid TRK file.'.format(args.ref_file))
    elif ref_ext not in ['.nii', '.nii.gz']:
        parser.error('{} is an unsupported format.'.format(args.ref_file))

    transform_tractogram(args.in_file, args.ref_file, args.deformation,
                         args.out_name, args.field_source)
def main():
    """Apply a linear transformation to a tractogram."""
    parser = _build_args_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser,
                        [args.in_file, args.ref_file, args.transformation])
    assert_outputs_exist(parser, args, [args.out_name])

    # The reference must be a valid TRK, or a NIfTI image.
    ref_ext = split_name_with_nii(args.ref_file)[1]
    if ref_ext == '.trk':
        if not nib.streamlines.TrkFile.is_correct_format(args.ref_file):
            parser.error('{} is not a valid TRK file.'.format(args.ref_file))
    elif ref_ext not in ['.nii', '.nii.gz']:
        parser.error('{} is an unsupported format.'.format(args.ref_file))

    # Load the affine, inverting it on request.
    transfo = np.loadtxt(args.transformation)
    if args.inverse:
        transfo = np.linalg.inv(transfo)

    transform_tractogram(args.in_file, args.ref_file, transfo,
                         args.out_name)
def main():
    """Print robust mean/std of metrics along a bundle as JSON."""
    parser = _build_arg_parser()
    args = parser.parse_args()
    assert_inputs_exist(parser, [args.bundle] + args.metrics)

    metric_imgs = [nib.load(m) for m in args.metrics]
    assert_same_resolution(*metric_imgs)
    streamlines_vox = load_in_voxel_space(args.bundle, metric_imgs[0])
    bundle_stats = get_metrics_stats_over_streamlines_robust(
        streamlines_vox, metric_imgs, args.density_weighting)

    bundle_name, _ = os.path.splitext(os.path.basename(args.bundle))

    stats = {bundle_name: {}}
    for img, (mean, std) in zip(metric_imgs, bundle_stats):
        name = split_name_with_nii(
            os.path.basename(img.get_filename()))[0]
        stats[bundle_name][name] = {'mean': mean, 'std': std}

    print(json.dumps(stats, indent=args.indent, sort_keys=args.sort_keys))
def main():
    """Project per-streamline mean metric values onto the bundle's endpoint
    voxels and save one endpoint map per metric (legacy version)."""
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.bundle] + args.metrics)
    assert_outputs_dir_exists_and_empty(parser, args, args.output_folder)

    metrics = [nib.load(metric) for metric in args.metrics]
    assert_same_resolution(*metrics)

    bundle_tractogram_file = nib.streamlines.load(args.bundle)
    if int(bundle_tractogram_file.header['nb_streamlines']) == 0:
        # Nothing to project; skip quietly.
        logging.warning('Empty bundle file {}. Skipping'.format(args.bundle))
        return

    bundle_streamlines_vox = load_in_voxel_space(bundle_tractogram_file,
                                                 metrics[0])

    for metric in metrics:
        data = metric.get_data()
        endpoint_metric_map = np.zeros(metric.shape)
        count = np.zeros(metric.shape)
        for streamline in bundle_streamlines_vox:
            streamline_mean = _compute_streamline_mean(streamline, data)

            # Accumulate the streamline mean at both endpoints.
            xyz = streamline[0, :].astype(int)
            endpoint_metric_map[xyz[0], xyz[1], xyz[2]] += streamline_mean
            count[xyz[0], xyz[1], xyz[2]] += 1

            xyz = streamline[-1, :].astype(int)
            endpoint_metric_map[xyz[0], xyz[1], xyz[2]] += streamline_mean
            count[xyz[0], xyz[1], xyz[2]] += 1

        # Average only where at least one endpoint landed.
        endpoint_metric_map[count != 0] /= count[count != 0]
        metric_fname, ext = split_name_with_nii(
            os.path.basename(metric.get_filename()))
        nib.save(
            nib.Nifti1Image(endpoint_metric_map, metric.affine,
                            metric.header),
            os.path.join(args.output_folder,
                         '{}_endpoints_metric{}'.format(metric_fname, ext)))