def _crop_to_param(aff0, aff, shape):
    dim = aff0.shape[-1] - 1
    shape = shape[:dim]
    layout0 = spatial.affine_to_layout(aff0)
    layout = spatial.affine_to_layout(aff)
    if (layout0 != layout).any():
        raise ValueError('Input and Ref do not have the same layout: '
                         f'{spatial.volume_layout_to_name(layout0)} vs '
                         f'{spatial.volume_layout_to_name(layout)}.')
    size = shape
    layout = None
    unit = 'vox'
    center = torch.as_tensor(shape, dtype=torch.float).sub_(1).mul_(0.5)
    like_aff = spatial.affine_lmdiv(aff0, aff)
    center = spatial.affine_matvec(like_aff, center)
    return size, center, unit, layout


def get_readout(readout, affine, shape):
    """Convert the provided readout direction from semantic (R/A/S) to an
    index (0/1/2), or guess the readout direction as the one with the
    largest number of voxels."""
    dim = len(shape)
    layout = spatial.affine_to_layout(affine)
    layout = spatial.volume_layout_to_name(layout).lower()
    if readout is None:
        readout = py.argmax(shape)
    else:
        readout = readout.lower()
        for i, l in enumerate(layout):
            if l in readout:
                readout = i
                break
    print(f'Layout: {layout.upper()} | readout direction: {layout[readout].upper()}')
    if readout > 0:
        readout = readout - dim
    return readout


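# Illustrative usage sketch (not part of the original module): calling
# `get_readout` with an identity (RAS) affine and a hypothetical shape,
# either with an explicit semantic direction or letting it be guessed.
def _example_get_readout():
    affine = torch.eye(4)      # identity affine -> RAS layout
    shape = (192, 256, 64)     # hypothetical acquisition matrix
    # explicit semantic direction ('lr', 'ap', 'is', ...)
    idx_ap = get_readout('ap', affine, shape)
    # guess the readout as the dimension with the most voxels
    idx_guess = get_readout(None, affine, shape)
    return idx_ap, idx_guess

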
def crop(inp, size=None, center=None, space='vx', like=None, bbox=False,
         output=None, transform=None):
    """Crop an ND volume, while preserving the orientation matrices.

    Parameters
    ----------
    inp : str or (tensor, tensor)
        Either a path to a volume file or a tuple `(dat, affine)`, where
        the first element contains the volume data and the second contains
        the orientation matrix.
    size : [sequence of] int, optional
        Size of the patch to extract.
        Its unit is defined by `space`.
    center : [sequence of] int, optional
        Coordinate of the center of the patch.
        Its unit is defined by `space`.
        By default, the center of the FOV is used.
    space : [sequence of] {'vox', 'ras'}, default='vox'
        The space in which the `size` and `center` parameters are expressed.
    bbox : bool or float, default=False
        Crop at the bounding box of `inp > threshold`.
        If `bbox` is a float, it is the threshold to use.
        If `bbox` is `True`, the threshold is 0.
    like : str or (tensor, tensor), optional
        Reference patch.
        Either a path to a volume file or a tuple `(dat, affine)`, where
        the first element contains the volume data and the second contains
        the orientation matrix.
    output : str, optional
        Output filename.
        If the input is not a path, the cropped data is not written
        on disk by default.
        If the input is a path, the default output filename is
        '{dir}/{base}.crop{ext}', where `dir`, `base` and `ext` are the
        directory, base name and extension of the input file.
    transform : str, optional
        Input or output filename of the corresponding transform.
        Not written by default.
        If a transform is provided and all other parameters
        (i.e., `size` and `like`) are None, the transform is considered
        as an input transform to apply.

    Returns
    -------
    output : str or (tensor, tensor)
        If the input is a path, the output path is returned.
        Else, the cropped data and orientation matrix are returned.
    """
    dir = ''
    base = ''
    ext = ''
    fname = None
    transform_in = False
    use_bbox = bool(bbox or isinstance(bbox, float))

    # --- Open input ---
    is_file = isinstance(inp, str)
    if is_file:
        fname = inp
        f = io.volumes.map(inp)
        inp = (f.data(numpy=True) if use_bbox else f, f.affine)
        if output is None:
            output = '{dir}{sep}{base}.crop{ext}'
        dir, base, ext = py.fileparts(fname)
    dat, aff0 = inp
    dim = aff0.shape[-1] - 1
    shape0 = dat.shape[:dim]
    layout0 = spatial.affine_to_layout(aff0)
    # save input space in case we reorient later
    aff00 = aff0
    shape00 = shape0

    if bool(size) + bool(like) + bool(bbox or isinstance(bbox, float)) > 1:
        raise ValueError('Can only use one of `size`, `like` and `bbox`.')

    # --- Open reference and compute size/center ---
    if like:
        like_is_file = isinstance(like, str)
        if like_is_file:
            f = io.volumes.map(like)
            like = (f.shape, f.affine)
        like_shape, like_aff = like
        like_layout = spatial.affine_to_layout(like_aff)
        if (layout0 != like_layout).any():
            aff0, dat = spatial.affine_reorient(aff0, dat, like_layout)
            shape0 = dat.shape[:dim]
        if torch.is_tensor(like_shape):
            like_shape = like_shape.shape
        size, center, unit, layout = _crop_to_param(aff0, like_aff, like_shape)
        space = 'vox'

    # --- Compute size/center from the bounding box of `dat > bbox` ---
    elif bbox or isinstance(bbox, float):
        if bbox is True:
            bbox = 0.
        mask = torch.as_tensor(dat > bbox)
        while mask.dim() > 3:
            mask = mask.any(dim=-1)
        mins = []
        maxs = []
        for d in range(dim):
            n = mask.shape[d]
            idx = utils.movedim(mask, d, 0).reshape([n, -1]).any(-1).nonzero(as_tuple=False)
            mins.append(idx.min())
            maxs.append(idx.max())
        mins = utils.as_tensor(mins)
        maxs = utils.as_tensor(maxs)
        size = maxs + 1 - mins
        center = (maxs + 1 + mins).float() / 2
        space = 'vox'
        del mask

    # --- Open transformation file and compute size/center ---
    elif not size:
        if not transform:
            raise ValueError('At least one of size/like/transform must '
                             'be provided')
        transform_in = True
        t = io.transforms.map(transform)
        if not isinstance(t, io.transforms.LinearTransformArray):
            raise TypeError('Expected an LTA file')
        like_aff, like_shape = t.destination_space()
        size, center, unit, layout = _crop_to_param(aff0, like_aff, like_shape)

    # --- use center of the FOV ---
    if not torch.is_tensor(center) and not center:
        center = torch.as_tensor(shape0[:dim], dtype=torch.float)
        center = center.sub_(1).mul_(0.5)

    # --- convert size/center to voxels ---
    size = utils.make_vector(size, dim, dtype=torch.long)
    center = utils.make_vector(center, dim, dtype=torch.float)
    space_size, space_center = py.make_list(space, 2)
    if space_center.lower() == 'ras':
        center = spatial.affine_matvec(spatial.affine_inv(aff0), center)
    if space_size.lower() == 'ras':
        perm = spatial.affine_to_layout(aff0)[:, 0]
        size = size[perm.long()]
        size = size / spatial.voxel_size(aff0)

    # --- compute first/last ---
    center = center.float()
    size = (size.ceil() if size.dtype.is_floating_point else size).long()
    first = center - size.float().sub_(1).mul_(0.5)
    first = first.round().long()
    last = (first + size).tolist()
    first = [max(f, 0) for f in first.tolist()]
    last = [min(l, s) for l, s in zip(last, shape0[:dim])]
    verb = 'Cropping patch ['
    verb += ', '.join([f'{f}:{l}' for f, l in zip(first, last)])
    verb += f'] from volume with shape {shape0[:dim]}'
    print(verb)
    slicer = tuple(slice(f, l) for f, l in zip(first, last))

    # --- do crop ---
    if use_bbox and torch.is_tensor(dat):
        # the bounding box was computed on an in-memory tensor
        dat = dat.numpy()
    dat = dat[slicer]
    if not torch.is_tensor(dat) and hasattr(dat, 'fdata'):
        # still a memory-mapped volume: load only the cropped block
        dat = dat.data(numpy=True)
    aff, _ = spatial.affine_sub(aff0, shape0[:dim], slicer)
    shape = dat.shape[:dim]

    if output:
        if is_file:
            output = output.format(dir=dir or '.', base=base, ext=ext,
                                   sep=os.path.sep)
            io.volumes.save(dat, output, like=fname, affine=aff)
        else:
            output = output.format(sep=os.path.sep)
            io.volumes.save(dat, output, affine=aff)

    if transform and not transform_in:
        if is_file:
            transform = transform.format(dir=dir or '.', base=base, ext=ext,
                                         sep=os.path.sep)
        else:
            transform = transform.format(sep=os.path.sep)
        trf = io.transforms.LinearTransformArray(transform, 'w')
        trf.set_source_space(aff00, shape00)
        trf.set_destination_space(aff, shape)
        trf.set_metadata({'src': {'filename': fname},
                          'dst': {'filename': output},
                          'type': 1})  # RAS_TO_RAS
        trf.set_fdata(torch.eye(4))
        trf.save()

    if is_file:
        return output
    else:
        return dat, aff


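# Illustrative usage sketch (not part of the original module): cropping a
# 64-voxel cube around the centre of an in-memory volume. The tensor and
# the commented file name below are hypothetical.
def _example_crop():
    dat = torch.randn(128, 128, 96)
    affine = torch.eye(4)
    # in-memory input -> returns (cropped data, updated affine)
    dat_c, aff_c = crop((dat, affine), size=64)
    # file input with the bbox-enabled variant above -> writes
    # '<base>.crop<ext>' next to the input and returns the output path:
    # crop('sub-01_T1w.nii.gz', bbox=True)
    return dat_c, aff_c

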
def crop(inp, size=None, center=None, space='vx', like=None,
         output=None, transform=None):
    """Crop an ND volume, while preserving the orientation matrices.

    Parameters
    ----------
    inp : str or (tensor, tensor)
        Either a path to a volume file or a tuple `(dat, affine)`, where
        the first element contains the volume data and the second contains
        the orientation matrix.
    size : [sequence of] int, optional
        Size of the patch to extract.
        Its unit is defined by `space`.
    center : [sequence of] int, optional
        Coordinate of the center of the patch.
        Its unit is defined by `space`.
        By default, the center of the FOV is used.
    space : [sequence of] {'vox', 'ras'}, default='vox'
        The space in which the `size` and `center` parameters are expressed.
    like : str or (tensor, tensor), optional
        Reference patch.
        Either a path to a volume file or a tuple `(dat, affine)`, where
        the first element contains the volume data and the second contains
        the orientation matrix.
    output : str, optional
        Output filename.
        If the input is not a path, the cropped data is not written
        on disk by default.
        If the input is a path, the default output filename is
        '{dir}/{base}.crop{ext}', where `dir`, `base` and `ext` are the
        directory, base name and extension of the input file.
    transform : str, optional
        Input or output filename of the corresponding transform.
        Not written by default.
        If a transform is provided and all other parameters
        (i.e., `size` and `like`) are None, the transform is considered
        as an input transform to apply.

    Returns
    -------
    output : str or (tensor, tensor)
        If the input is a path, the output path is returned.
        Else, the cropped data and orientation matrix are returned.
    """
    dir = ''
    base = ''
    ext = ''
    fname = None
    transform_in = False

    # --- Open input ---
    is_file = isinstance(inp, str)
    if is_file:
        fname = inp
        f = io.volumes.map(inp)
        inp = (f.data(numpy=True), f.affine)
        if output is None:
            output = '{dir}{sep}{base}.crop{ext}'
        dir, base, ext = py.fileparts(fname)
    dat, aff0 = inp
    dim = aff0.shape[-1] - 1
    shape0 = dat.shape[:dim]

    if size and like:
        raise ValueError('Cannot use both `size` and `like`.')

    # --- Open reference and compute size/center ---
    if like:
        like_is_file = isinstance(like, str)
        if like_is_file:
            f = io.volumes.map(like)
            like = (f.shape, f.affine)
        like_shape, like_aff = like
        if torch.is_tensor(like_shape):
            like_shape = like_shape.shape
        size, center, unit, layout = _crop_to_param(aff0, like_aff, like_shape)

    # --- Open transformation file and compute size/center ---
    elif not size:
        if not transform:
            raise ValueError('At least one of size/like/transform must '
                             'be provided')
        transform_in = True
        t = io.transforms.map(transform)
        if not isinstance(t, io.transforms.LinearTransformArray):
            raise TypeError('Expected an LTA file')
        like_aff, like_shape = t.destination_space()
        size, center, unit, layout = _crop_to_param(aff0, like_aff, like_shape)

    # --- use center of the FOV ---
    if not torch.is_tensor(center) and not center:
        center = torch.as_tensor(shape0[:dim], dtype=torch.float) * 0.5

    # --- convert size/center to voxels ---
    size = utils.make_vector(size, dim, dtype=torch.long)
    center = utils.make_vector(center, dim, dtype=torch.float)
    space_size, space_center = py.make_list(space, 2)
    if space_center.lower() == 'ras':
        center = spatial.affine_matvec(spatial.affine_inv(aff0), center)
    if space_size.lower() == 'ras':
        perm = spatial.affine_to_layout(aff0)[:, 0]
        size = size[perm.long()]
        size = size / spatial.voxel_size(aff0)

    # --- compute first/last ---
    center = center.float()
    size = (size.ceil() if size.dtype.is_floating_point else size).long()
    first = (center - size.float() / 2).round().long()
    last = (first + size).tolist()
    first = [max(f, 0) for f in first.tolist()]
    last = [min(l, s) for l, s in zip(last, shape0[:dim])]
    verb = 'Cropping patch ['
    verb += ', '.join([f'{f}:{l}' for f, l in zip(first, last)])
    verb += f'] from volume with shape {shape0[:dim]}'
    print(verb)
    slicer = tuple(slice(f, l) for f, l in zip(first, last))

    # --- do crop ---
    dat = dat[slicer]
    aff, _ = spatial.affine_sub(aff0, shape0[:dim], slicer)
    shape = dat.shape[:dim]

    if output:
        if is_file:
            output = output.format(dir=dir or '.', base=base, ext=ext,
                                   sep=os.path.sep)
            io.volumes.save(dat, output, like=fname, affine=aff)
        else:
            output = output.format(sep=os.path.sep)
            io.volumes.save(dat, output, affine=aff)

    if transform and not transform_in:
        if is_file:
            transform = transform.format(dir=dir or '.', base=base, ext=ext,
                                         sep=os.path.sep)
        else:
            transform = transform.format(sep=os.path.sep)
        trf = io.transforms.LinearTransformArray(transform, 'w')
        trf.set_source_space(aff0, shape0)
        trf.set_destination_space(aff, shape)
        trf.set_metadata({'src': {'filename': fname},
                          'dst': {'filename': output},
                          'type': 1})  # RAS_TO_RAS
        trf.set_fdata(torch.eye(4))
        trf.save()

    if is_file:
        return output
    else:
        return dat, aff


def orient(inp, affine=None, layout=None, voxel_size=None, center=None,
           like=None, output=None, output_transform=None):
    """Overwrite the orientation matrix.

    Parameters
    ----------
    inp : str or (tuple, tensor)
        Either a path to a volume file or a tuple `(shape, affine)`, where
        the first element contains the volume shape and the second contains
        the orientation matrix.
    affine : {'self', 'like'} or (4, 4) tensor_like, default='like'
        Target affine matrix.
    layout : {'self', 'like'} or layout-like, default='like'
        Target orientation.
    voxel_size : {'self', 'like'} or [sequence of] float, default='like'
        Target voxel size.
    center : {'self', 'like'} or [sequence of] float, default='like'
        World coordinate of the center of the field of view.
    like : str or (tuple, tensor)
        Either a path to a volume file or a tuple `(shape, affine)`, where
        the first element contains the volume shape and the second contains
        the orientation matrix.
    output : str, optional
        Output filename.
        If the input is not a path, the reoriented data is not written
        on disk by default.
        If the input is a path, the default output filename is
        '{dir}/{base}.{layout}{ext}', where `dir`, `base` and `ext` are the
        directory, base name and extension of the input file.
    output_transform : str, optional
        Filename of the output transform.
        If the input is not a path, the transform is not written
        on disk by default.
        If the input is a path, the default output filename is
        '{dir}/{base}_to_{layout}.lta', where `dir` and `base` are the
        directory and base name of the input file.

    Returns
    -------
    output : str or (tuple, tensor)
        If the input is a path, the output path is returned.
        Else, the new shape and orientation matrix are returned.
    """
    dir = ''
    base = ''
    ext = ''
    fname = ''

    is_file = isinstance(inp, str)
    if is_file:
        fname = inp
        f = io.volumes.map(inp)
        dim = f.affine.shape[-1] - 1
        inp = (f.shape[:dim], f.affine)
        if output is None:
            output = '{dir}{sep}{base}.{layout}{ext}'
        if output_transform is None:
            output_transform = '{dir}{sep}{base}_to_{layout}.lta'
        dir, base, ext = py.fileparts(fname)

    like_is_file = isinstance(like, str) and like
    if like_is_file:
        f = io.volumes.map(like)
        dim = f.affine.shape[-1] - 1
        like = (f.shape[:dim], f.affine)

    shape, aff0 = inp
    dim = aff0.shape[-1] - 1
    if like:
        shape_like, aff_like = like
    else:
        shape_like, aff_like = (shape, aff0)

    if voxel_size in (None, 'like') or len(voxel_size) == 0:
        voxel_size = spatial.voxel_size(aff_like)
    elif voxel_size == 'self':
        voxel_size = spatial.voxel_size(aff0)
    elif voxel_size == 'standard':
        voxel_size = 1.
    voxel_size = utils.make_vector(voxel_size, dim)

    if not layout or layout == 'like':
        layout = spatial.affine_to_layout(aff_like)
    elif layout == 'self':
        layout = spatial.affine_to_layout(aff0)
    elif layout == 'standard':
        layout = 'RAS'
    layout = spatial.volume_layout(layout)

    if center in (None, 'like') or len(center) == 0:
        center = (torch.as_tensor(shape_like, dtype=torch.float) - 1) * 0.5
        center = spatial.affine_matvec(aff_like, center)
    elif center == 'self':
        center = (torch.as_tensor(shape, dtype=torch.float) - 1) * 0.5
        center = spatial.affine_matvec(aff0, center)
    elif center == 'standard':
        center = 0.
    center = utils.make_vector(center, dim)

    if affine in (None, 'like') or len(affine) == 0:
        affine = aff_like
    elif affine == 'self':
        affine = aff0
    elif affine == 'standard':
        affine = torch.eye(dim+1, dim+1)
    affine = torch.as_tensor(affine, dtype=torch.float)
    if affine.numel() == dim*(dim+1):
        affine = spatial.affine_make_rect(affine.reshape(dim, dim+1))
    elif affine.numel() == (dim+1)**2:
        affine = affine.reshape(dim+1, dim+1)
    else:
        raise ValueError(f'Input affine should have {dim*(dim+1)} or '
                         f'{(dim+1)**2} elements but got {affine.numel()}.')

    affine = spatial.affine_modify(affine, shape, voxel_size=voxel_size,
                                   layout=layout, center=center)
    affine = affine.double()

    if output:
        dat = io.volumes.load(fname, numpy=True)
        layout = spatial.volume_layout_to_name(layout)
        if is_file:
            output = output.format(dir=dir or '.', base=base, ext=ext,
                                   sep=os.path.sep, layout=layout)
            io.volumes.save(dat, output, like=fname, affine=affine)
        else:
            output = output.format(sep=os.path.sep, layout=layout)
            io.volumes.save(dat, output, affine=affine)

    if output_transform:
        transform = spatial.affine_rmdiv(affine, aff0)
        output_transform = output_transform.format(
            dir=dir or '.', base=base, sep=os.path.sep, layout=layout)
        io.transforms.savef(transform.cpu(), output_transform, type=2)

    if is_file:
        return output
    else:
        return shape, affine


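# Illustrative usage sketch (not part of the original module): rewriting an
# in-memory header to a 1 mm RAS space while keeping the current centre of
# the field of view. The shape and voxel sizes are hypothetical.
def _example_orient():
    shape = (128, 128, 96)
    affine = spatial.affine_default(shape, voxel_size=[1.2, 1.2, 1.5])
    new_shape, new_affine = orient((shape, affine),
                                   layout='RAS',
                                   voxel_size=[1., 1., 1.],
                                   center='self')
    return new_shape, new_affine

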
def _main(options):
    if isinstance(options.gpu, str):
        device = torch.device(options.gpu)
    else:
        assert isinstance(options.gpu, int)
        device = torch.device(f'cuda:{options.gpu}')
    if not torch.cuda.is_available():
        device = 'cpu'

    # prepare options
    estatics_opt = ESTATICSOptions()
    estatics_opt.likelihood = options.likelihood
    estatics_opt.verbose = options.verbose >= 1
    estatics_opt.plot = options.verbose >= 2
    estatics_opt.recon.space = options.space
    if isinstance(options.space, str) and options.space != 'mean':
        for c, contrast in enumerate(options.contrast):
            if contrast.name == options.space:
                estatics_opt.recon.space = c
                break
    estatics_opt.backend.device = device
    estatics_opt.optim.nb_levels = options.levels
    estatics_opt.optim.max_iter_rls = options.iter
    estatics_opt.optim.tolerance = options.tol
    estatics_opt.regularization.norm = options.regularization
    estatics_opt.regularization.factor = [*options.lam_intercept, options.lam_decay]
    estatics_opt.distortion.enable = options.meetup
    estatics_opt.distortion.bending = options.lam_meetup
    estatics_opt.preproc.register = options.register

    # prepare files
    contrasts = []
    distortion = []
    for i, c in enumerate(options.contrast):
        # read meta-parameters
        meta = {}
        if c.te:
            te, unit = c.te, ''
            if isinstance(te[-1], str):
                *te, unit = te
            if unit:
                if unit == 'ms':
                    te = [t * 1e-3 for t in te]
                elif unit not in ('s', 'sec'):
                    raise ValueError(f'TE unit: {unit}')
            if c.echo_spacing:
                delta, *unit = c.echo_spacing
                unit = unit[0] if unit else ''
                if unit == 'ms':
                    delta = delta * 1e-3
                elif unit not in ('s', 'sec'):
                    raise ValueError(f'echo spacing unit: {unit}')
                ne = sum(io.map(f).unsqueeze(-1).shape[3] for f in c.echoes)
                te = [te[0] + e*delta for e in range(ne)]
            meta['te'] = te

        # map volumes
        contrasts.append(qio.GradientEchoMulti.from_fname(c.echoes, **meta))
        if c.readout:
            layout = spatial.affine_to_layout(contrasts[-1].affine)
            layout = spatial.volume_layout_to_name(layout)
            readout = None
            for j, l in enumerate(layout):
                if l.lower() in c.readout.lower():
                    readout = j - 3
            contrasts[-1].readout = readout

        if c.b0:
            bw = c.bandwidth
            b0, *unit = c.b0
            unit = unit[-1] if unit else 'vx'
            fb0 = io.map(b0)
            b0 = fb0.fdata(device=device)
            b0 = spatial.reslice(b0, fb0.affine, contrasts[-1][0].affine,
                                 contrasts[-1][0].shape)
            if unit.lower() == 'hz':
                if not bw:
                    raise ValueError('Bandwidth required to convert fieldmap '
                                     'from Hz to voxel')
                b0 /= bw
            b0 = DenseDistortion(b0)
            distortion.append(b0)
        else:
            distortion.append(None)

    # run algorithm
    [te0, r2s, *b0] = estatics(contrasts, distortion, opt=estatics_opt)

    # write results
    # --- intercepts ---
    odir0 = options.odir
    for i, te1 in enumerate(te0):
        ifname = contrasts[i].echo(0).volume.fname
        odir, obase, oext = py.fileparts(ifname)
        odir = odir0 or odir
        obase = obase + '_TE0'
        ofname = os.path.join(odir, obase + oext)
        io.savef(te1.volume, ofname, affine=te1.affine, like=ifname,
                 te=0, dtype='float32')

    # --- decay ---
    ifname = contrasts[0].echo(0).volume.fname
    odir, obase, oext = py.fileparts(ifname)
    odir = odir0 or odir
    io.savef(r2s.volume, os.path.join(odir, 'R2star' + oext),
             affine=r2s.affine, dtype='float32')

    # --- fieldmap + undistorted ---
    if b0:
        b0 = b0[0]
        for i, b01 in enumerate(b0):
            ifname = contrasts[i].echo(0).volume.fname
            odir, obase, oext = py.fileparts(ifname)
            odir = odir0 or odir
            obase = obase + '_B0'
            ofname = os.path.join(odir, obase + oext)
            io.savef(b01.volume, ofname, affine=b01.affine, like=ifname,
                     te=0, dtype='float32')

        for i, (c, b) in enumerate(zip(contrasts, b0)):
            readout = c.readout
            grid_up, grid_down, jac_up, jac_down = b.exp2(
                add_identity=True, jacobian=True)
            for j, e in enumerate(c):
                blip = e.blip or (2*(j % 2) - 1)
                # inverse of forward model
                grid_blip = grid_down if blip > 0 else grid_up
                jac_blip = jac_down if blip > 0 else jac_up
                ifname = e.volume.fname
                odir, obase, oext = py.fileparts(ifname)
                odir = odir0 or odir
                obase = obase + '_unwrapped'
                ofname = os.path.join(odir, obase + oext)
                d = e.fdata(device=device)
                d, _ = pull1d(d, grid_blip, readout)
                d *= jac_blip
                io.savef(d, ofname, affine=e.affine, like=ifname)
                del d
            del grid_up, grid_down, jac_up, jac_down

    if options.register:
        for i, c in enumerate(contrasts):
            for j, e in enumerate(c):
                ifname = e.volume.fname
                odir, obase, oext = py.fileparts(ifname)
                odir = odir0 or odir
                obase = obase + '_registered'
                ofname = os.path.join(odir, obase + oext)
                io.save(e.volume, ofname, affine=e.affine)


def _ras_to_layout(x, affine):
    """Permute a vector of per-axis values, given in RAS order, into the
    voxel layout of `affine`."""
    layout = spatial.affine_to_layout(affine)
    ras_to_layout = layout[..., 0]
    return [x[i] for i in ras_to_layout]


def info(inp, meta=None, stat=False):
    """Print information on a volume.

    Parameters
    ----------
    inp : str or (tensor, tensor)
        Either a path to a volume file or a tuple `(dat, affine)`, where
        the first element contains the volume data and the second contains
        the orientation matrix.
    meta : sequence of str
        List of fields to print.
        By default, a list of common fields is used.
    stat : bool, default=False
        Compute intensity statistics.
    """
    meta = meta or []
    metadata = {}
    is_file = isinstance(inp, str)
    if is_file:
        fname = inp
        f = io.volumes.map(inp)
        if stat:
            inp = (f.fdata(), f.affine)
        else:
            inp = (f.shape, f.affine)
        metadata = f.metadata(meta)
        metadata['dtype'] = f.dtype
    dat, aff = inp
    if not is_file:
        metadata['dtype'] = dat.dtype
    if torch.is_tensor(dat):
        shape = dat.shape
    else:
        shape = dat

    pad = max([0] + [len(m) for m in metadata.keys()])
    if not meta:
        more_fields = ['shape', 'layout', 'filename']
        pad = max(pad, max(len(f) for f in more_fields))
    title = lambda tag: ('{tag:' + str(pad) + 's}').format(tag=tag)

    if not meta:
        if is_file:
            print(f'{title("filename")} : {fname}')
        print(f'{title("shape")} : {tuple(shape)}')
        layout = spatial.affine_to_layout(aff)
        layout = spatial.volume_layout_to_name(layout)
        print(f'{title("layout")} : {layout}')
        center = torch.as_tensor(shape[:3], dtype=torch.float) / 2
        center = spatial.affine_matvec(aff, center)
        print(f'{title("center")} : {tuple(center.tolist())} mm (RAS)')
        if stat and torch.is_tensor(dat):
            chandim = list(range(3, dat.ndim))
            if not chandim:
                vmin = dat.min().tolist()
                vmax = dat.max().tolist()
                vmean = dat.mean().tolist()
            else:
                # flatten the three spatial dimensions, keep channel dimensions
                dat1 = dat.reshape([-1, *dat.shape[3:]])
                vmin = dat1.min(dim=0).values.tolist()
                vmax = dat1.max(dim=0).values.tolist()
                vmean = dat1.mean(dim=0).tolist()
            print(f'{title("min")} : {vmin}')
            print(f'{title("max")} : {vmax}')
            print(f'{title("mean")} : {vmean}')

    for key, value in metadata.items():
        if value is None and not meta:
            continue
        if torch.is_tensor(value):
            value = str(value.numpy())
        value = str(value).split('\n')
        value = ('\n' + ' ' * (pad + 3)).join(value)
        print(f'{title(key)} : {value}')


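# Illustrative usage sketch (not part of the original module): printing
# header information and intensity statistics for a hypothetical in-memory
# volume.
def _example_info():
    dat = torch.rand(64, 64, 32)
    affine = torch.eye(4)
    info((dat, affine), stat=True)

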
def _prepare(data, dist, opt):

    # --- options ------------------------------------------------------
    # we deepcopy all options so that we can overwrite/simplify them in place
    opt = ESTATICSOptions().update(opt).cleanup_()
    backend = dict(dtype=opt.backend.dtype, device=opt.backend.device)

    # --- be polite ----------------------------------------------------
    if len(data) > 1:
        pstr = f'Fitting a (shared) exponential decay model with {len(data)} contrasts.'
    else:
        pstr = 'Fitting an exponential decay model.'
    print(pstr)
    print('Echo times:')
    for i, contrast in enumerate(data):
        print(f' - contrast {i:2d}: ['
              + ', '.join([f'{te*1e3:.1f}' for te in contrast.te]) + '] ms')

    # --- estimate noise / register / initialize maps ------------------
    data, maps, dist = preproc(data, dist, opt)
    nb_contrasts = len(maps) - 1

    if opt.distortion.enable:
        print('Readout directions:')
        for i, contrast in enumerate(data):
            layout = spatial.affine_to_layout(contrast.affine)
            layout = spatial.volume_layout_to_name(layout)
            readout = layout[contrast.readout]
            readout = ('left-right' if 'L' in readout or 'R' in readout else
                       'infra-supra' if 'I' in readout or 'S' in readout else
                       'antero-posterior' if 'A' in readout or 'P' in readout else
                       'unknown')
            print(f' - contrast {i:2d}: {readout}')

    # --- prepare regularization factor --------------------------------
    # 1. Parameter maps regularization
    #    -> we want lam = [*lam_intercepts, lam_decay]
    *lam, lam_decay = opt.regularization.factor
    lam = core.py.make_list(lam, nb_contrasts)
    lam.append(lam_decay)
    if not any(lam):
        opt.regularization.norm = ''
    opt.regularization.factor = lam
    # 2. Distortion fields regularization
    lam_dist = dict(factor=opt.distortion.factor,
                    absolute=opt.distortion.absolute,
                    membrane=opt.distortion.membrane,
                    bending=opt.distortion.bending)
    opt.distortion.factor = lam_dist

    # --- initialize weights (RLS) -------------------------------------
    mean_shape = maps.decay.volume.shape
    rls = None
    if opt.regularization.norm.endswith('tv'):
        rls_shape = mean_shape
        if opt.regularization.norm == 'tv':
            rls_shape = (len(maps), *rls_shape)
        rls = ParameterMap(rls_shape, fill=1, **backend).volume

    if opt.regularization.norm:
        print('Regularization:')
        print(f' - type: {opt.regularization.norm.upper()}')
        print(f' - log intercepts: ['
              + ', '.join([f'{i:.3g}' for i in lam[:-1]]) + ']')
        print(f' - decay: {lam[-1]:.3g}')
    else:
        print('Without regularization')

    if opt.distortion.enable:
        print('Distortion correction:')
        print(f' - model: {opt.distortion.model.lower()}')
        print(f' - absolute: {opt.distortion.absolute * opt.distortion.factor["factor"]}')
        print(f' - membrane: {opt.distortion.membrane * opt.distortion.factor["factor"]}')
        print(f' - bending: {opt.distortion.bending * opt.distortion.factor["factor"]}')
    else:
        print('Without distortion correction')

    # --- initialize nb of iterations ----------------------------------
    if not opt.regularization.norm.endswith('tv'):
        # no reweighting -> do more gauss-newton updates instead
        opt.optim.max_iter_prm *= opt.optim.max_iter_rls
        opt.optim.max_iter_rls = 1
    print('Optimization:')
    print(f' - Tolerance: {opt.optim.tolerance}')
    if opt.regularization.norm.endswith('tv'):
        print(f' - IRLS iterations: {opt.optim.max_iter_rls}')
    print(f' - Param iterations: {opt.optim.max_iter_prm}')
    if opt.distortion.enable:
        print(f' - Dist iterations: {opt.optim.max_iter_dist}')
    print(f' - FMG cycles: 2')
    print(f' - CG iterations: {opt.optim.max_iter_cg}'
          f' (tolerance: {opt.optim.tolerance_cg})')
    if opt.optim.nb_levels > 1:
        print(f' - Levels: {opt.optim.nb_levels}')

    # ------------------------------------------------------------------
    #                       MAIN OPTIMIZATION LOOP
    # ------------------------------------------------------------------
    if opt.verbose:
        pstr = f'{"rls":^3s} | {"gn":^3s} | {"step":^4s} | '
        pstr += f'{"fit":^12s} + {"reg":^12s} + {"rls":^12s} '
        if opt.distortion.enable:
            pstr += f'+ {"dist":^12s} '
        pstr += f'= {"crit":^12s}'
        if opt.optim.nb_levels > 1:
            pstr = f'{"lvl":3s} | ' + pstr
        print('\n' + pstr)
        print('-' * len(pstr))

    return data, maps, dist, opt, rls


def orient(inp, layout=None, voxel_size=None, center=None, like=None, output=None):
    """Overwrite the orientation matrix.

    Parameters
    ----------
    inp : str or (tuple, tensor)
        Either a path to a volume file or a tuple `(shape, affine)`, where
        the first element contains the volume shape and the second contains
        the orientation matrix.
    layout : str or layout-like, default=None (= preserve)
        Target orientation.
    voxel_size : [sequence of] float, default=None (= preserve)
        Target voxel size.
    center : [sequence of] float, default=None (= preserve)
        World coordinate of the center of the field of view.
    like : str or (tuple, tensor)
        Either a path to a volume file or a tuple `(shape, affine)`, where
        the first element contains the volume shape and the second contains
        the orientation matrix.
    output : str, optional
        Output filename.
        If the input is not a path, the reoriented data is not written
        on disk by default.
        If the input is a path, the default output filename is
        '{dir}/{base}.{layout}{ext}', where `dir`, `base` and `ext` are the
        directory, base name and extension of the input file.

    Returns
    -------
    output : str or (tuple, tensor)
        If the input is a path, the output path is returned.
        Else, the new shape and orientation matrix are returned.
    """
    dir = ''
    base = ''
    ext = ''
    fname = ''

    is_file = isinstance(inp, str)
    if is_file:
        fname = inp
        f = io.volumes.map(inp)
        dim = f.affine.shape[-1] - 1
        inp = (f.shape[:dim], f.affine)
        if output is None:
            output = '{dir}{sep}{base}.{layout}{ext}'
        dir, base, ext = py.fileparts(fname)

    like_is_file = isinstance(like, str) and like
    if like_is_file:
        f = io.volumes.map(like)
        dim = f.affine.shape[-1] - 1
        like = (f.shape[:dim], f.affine)

    shape, aff0 = inp
    dim = aff0.shape[-1] - 1
    if like:
        shape_like, aff_like = like
    else:
        shape_like, aff_like = (shape, aff0)

    if voxel_size in (None, 'like') or len(voxel_size) == 0:
        voxel_size = spatial.voxel_size(aff_like)
    elif voxel_size == 'self':
        voxel_size = spatial.voxel_size(aff0)
    voxel_size = utils.make_vector(voxel_size, dim)

    if not layout or layout == 'like':
        layout = spatial.affine_to_layout(aff_like)
    elif layout == 'self':
        layout = spatial.affine_to_layout(aff0)
    layout = spatial.volume_layout(layout)

    if center in (None, 'like') or len(center) == 0:
        center = torch.as_tensor(shape_like, dtype=torch.float) * 0.5
        center = spatial.affine_matvec(aff_like, center)
    elif center == 'self':
        center = torch.as_tensor(shape, dtype=torch.float) * 0.5
        center = spatial.affine_matvec(aff0, center)
    center = utils.make_vector(center, dim)

    aff = spatial.affine_default(shape, voxel_size=voxel_size, layout=layout,
                                 center=center, dtype=torch.double)

    if output:
        dat = io.volumes.load(fname, numpy=True)
        layout = spatial.volume_layout_to_name(layout)
        if is_file:
            output = output.format(dir=dir or '.', base=base, ext=ext,
                                   sep=os.path.sep, layout=layout)
            io.volumes.save(dat, output, like=fname, affine=aff)
        else:
            output = output.format(sep=os.path.sep, layout=layout)
            io.volumes.save(dat, output, affine=aff)

    if is_file:
        return output
    else:
        return shape, aff


def ras_to_layout(x, affine):
    """Permute a vector of per-axis values, given in RAS order, into the
    voxel layout of `affine`."""
    layout = spatial.affine_to_layout(affine)
    ras_to_layout = layout[..., 0]
    return [x[i] for i in ras_to_layout]


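# Illustrative usage sketch (not part of the original module): permuting a
# vector of per-axis values given in RAS order (here, hypothetical voxel
# sizes) into the voxel layout of an LIA-oriented affine.
def _example_ras_to_layout():
    layout = spatial.volume_layout('LIA')
    affine = spatial.affine_default((128, 128, 128), voxel_size=[1., 1., 1.],
                                    layout=layout)
    vx_ras = [1.0, 1.2, 1.5]                 # ordered along the R, A, S axes
    vx_vox = ras_to_layout(vx_ras, affine)   # reordered to match voxel axes
    return vx_vox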