def test_get_array_module_special_cases():
    # fallback to user-provided xp
    xp, on_gpu = utils.get_array_module(None, np)
    assert xp is np
    # non-array type should be np
    xp_detected, on_gpu = utils.get_array_module([5, 2, 1])
    assert xp_detected is np
    # use `not`, not `~`: `~False == -1` is truthy, so `assert ~on_gpu`
    # could never fail
    assert not on_gpu

def forward(self, x):
    """ image gradient """
    xp, on_gpu = get_array_module(x)
    nreps = int(x.size / prod(self.arr_shape))
    if nreps == 1:
        x = x.reshape(self.arr_shape, order=self.order)
        g = self.grad_func(
            self._prior_subtract(x),
            deltas=self.grid_size,
            direction="forward",
            grad_axis=self.grad_axis,
        )
    else:
        if self.order == "C":
            g = xp.zeros((nreps,) + self.grad_shape, dtype=x.dtype)
            for r in range(nreps):
                xr = x[r, ...].reshape(self.arr_shape, order=self.order)
                g[r, ...] = self.grad_func(
                    self._prior_subtract(xr),
                    deltas=self.grid_size,
                    direction="forward",
                    grad_axis=self.grad_axis,
                )
        else:
            g = xp.zeros(self.grad_shape + (nreps,), dtype=x.dtype)
            for r in range(nreps):
                xr = x[..., r].reshape(self.arr_shape, order=self.order)
                g[..., r] = self.grad_func(
                    self._prior_subtract(xr),
                    deltas=self.grid_size,
                    direction="forward",
                    grad_axis=self.grad_axis,
                )
    return g

def divergence_ravel_offsets(
    g,
    direction="forward",
    deltas=None,
    grad_axis="last",
    offsets=None,
    use_corners=False,
):
    xp, on_gpu = get_array_module(g)
    g = xp.asanyarray(g)
    # Note: direction here is opposite of the corresponding gradient_periodic
    if direction.lower() == "forward":
        n_roll = 1
    elif direction.lower() == "backward":
        n_roll = -1
    else:
        raise ValueError("direction must be 'forward' or 'backward'")
    otype = g.dtype.char
    if otype not in ["f", "d", "F", "D", "m", "M"]:
        otype = "d"
    if grad_axis in [0, "first"]:
        grad_axis = 0
        fshape = g.shape[1:]
    elif grad_axis in [-1, "last"]:
        grad_axis = -1
        fshape = g.shape[:-1]
    else:
        raise ValueError("Unsupported grad_axis: {}".format(grad_axis)
                         + "... must be first or last axis")
    if offsets is None:
        offsets = compute_offsets(fshape, use_corners)
    n = len(offsets)
    if grad_axis == 0:
        g = g.reshape((n, prod(fshape)), order="F")
    else:
        g = g.reshape((prod(fshape), n), order="F")
    f = xp.empty(g.shape, dtype=otype)
    if deltas is not None:
        deltas = np.asanyarray(deltas)
        if len(deltas) != n:
            raise ValueError("deltas array length must match the number of "
                             "offsets")
    # i indexes the offsets (do not reuse n, which holds len(offsets))
    for i, off in enumerate(offsets):
        if grad_axis == 0:
            f[i, ...] = xp.roll(g[i, ...], n_roll * off, axis=0) - g[i, ...]
            if deltas is not None:
                f[i, ...] /= deltas[i]
        else:
            f[..., i] = xp.roll(g[..., i], n_roll * off, axis=0) - g[..., i]
            if deltas is not None:
                f[..., i] /= deltas[i]
    div = -f.sum(axis=grad_axis)
    return div.reshape(fshape, order="F")

def _check_gpu(self, xp):
    """Initialize the ``on_gpu`` class property.

    Parameters
    ----------
    xp : module, numpy.ndarray, cupy.ndarray or str
        ``xp`` should be the numpy or cupy module. It can also be a numpy
        or cupy ndarray. Finally, one can specify the string "numpy" or
        "cupy" as well.
    """
    self._on_gpu = False
    if isinstance(xp, ModuleType):
        if xp != np:
            self._on_gpu = True
    elif hasattr(xp, "__array_interface__") or hasattr(
            xp, "__cuda_array_interface__"):
        xp, self._on_gpu = get_array_module(xp)
    elif xp == "numpy":
        pass
    elif xp == "cupy":
        if not config.have_cupy:
            raise ValueError("Cannot select cupy: CuPy is unavailable.")
        self._on_gpu = True
    else:
        raise ValueError("xp must be a module, 'numpy', or 'cupy'")

def opnorm(self, x, aniso=False, alpha=0):
    """ L1 TV norm """
    xp, on_gpu = get_array_module(x)
    if aniso:
        # TODO: check this
        nrm = self.weight * xp.linalg.norm(self.forward(x), ord=1, axis=-1)
    else:
        nrm = self.weight * TVnorm(
            self.forward(x), alpha=alpha, grad_axis=self.grad_axis)
    return nrm

def TVnorm(grad, grad_axis=-1, alpha=0):
    """ TVnorm. This is the isotropic TV norm (L2 norm). """
    xp, on_gpu = get_array_module(grad)
    if xp.iscomplexobj(grad):
        gradsq = xp.real(grad * xp.conj(grad))
    else:
        gradsq = grad * grad
    if alpha == 0:
        return xp.sqrt(gradsq.sum(axis=grad_axis))
    else:
        return xp.sqrt(gradsq.sum(axis=grad_axis) + alpha)

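# Minimal usage sketch for TVnorm (illustrative only; the random image and
# the pairing with gradient_periodic below are assumptions, not library
# tests).
def _demo_tvnorm():
    import numpy as np

    f = np.random.randn(32, 32)
    # stack the per-axis forward differences on the last axis
    g = gradient_periodic(f, direction="forward", grad_axis="last")
    # isotropic TV magnitude at each pixel, then the summed TV seminorm
    tv_map = TVnorm(g, grad_axis=-1)
    return tv_map.sum()
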
def calculate_prewhitening(
    noise, coil_axis=-1, scale_factor=1.0, return_full=False, xp=None
):
    """Calculates the noise prewhitening matrix.

    Parameters
    ----------
    noise : ndarray
        Input noise data (array or matrix).
    coil_axis : int
        Must correspond to the axis in ``noise`` that corresponds to coils.
    scale_factor : float
        Applied on the noise covariance matrix. Used to adjust for effective
        noise bandwidth and difference in sampling rate between noise
        calibration and actual measurement:
        scale_factor = (T_acq_dwell/T_noise_dwell)*NoiseReceiverBandwidthRatio
    return_full : bool
        If True, also return the noise correlation matrix.

    Returns
    -------
    W : ndarray
        Prewhitening matrix (upper triangular) of shape (coil, coil).
        ``data_pw = numpy.dot(data, W)`` where ``data`` is an
        (nsamples, ncoils) array gives prewhitened data.
    R : ndarray
        Noise correlation matrix, shape = (coil, coil).

    References
    ----------
    .. [1] Pruessman KP, Weiger M, Bornert P and Boesiger P. Advances in
       Sensitivity Encoding with Arbitrary k-Space Trajectories.
       Magn. Reson. Med. 46:638-651 (2001).
    """
    xp, on_gpu = get_array_module(noise, xp)
    noise = xp.asarray(noise)
    coil_axis = coil_axis % noise.ndim
    ncoils = noise.shape[coil_axis]
    if coil_axis != noise.ndim - 1:
        # coil axis must come last
        noise = xp.swapaxes(noise, -1, coil_axis)
    noise = noise.reshape((noise.size // ncoils, ncoils), order="F")
    M = float(noise.shape[0])
    R = (1 / (M - 1)) * xp.dot(noise.T, xp.conj(noise))
    W = xp.linalg.inv(xp.linalg.cholesky(R))
    W = W.T * xp.sqrt(2) * xp.sqrt(scale_factor)
    if return_full:
        return W, R
    else:
        return W

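# Minimal check sketch for calculate_prewhitening (illustrative; the
# synthetic correlated noise is an assumption). With the sqrt(2) scaling
# used above, prewhitened complex noise has covariance ~ 2 * identity.
def _demo_calculate_prewhitening():
    import numpy as np

    rng = np.random.default_rng(0)
    ncoils, nsamp = 4, 100000
    mix = rng.standard_normal((ncoils, ncoils))  # couples the channels
    noise = (rng.standard_normal((nsamp, ncoils))
             + 1j * rng.standard_normal((nsamp, ncoils))) @ mix
    W = calculate_prewhitening(noise, coil_axis=-1)
    noise_pw = noise @ W
    R_pw = noise_pw.T @ noise_pw.conj() / (nsamp - 1)
    assert np.allclose(R_pw, 2 * np.eye(ncoils))
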
def mri_exp_mult(A, u, v, xp=None, debug=False):
    """ Y = A.H * exp(-u * v.T)    [L x M] """
    xp, on_gpu = get_array_module(A, xp)
    if debug:
        print("A.shape = {}".format(A.shape))
        print("u.shape = {}".format(u.shape))
        print("v.shape = {}".format(v.shape))
    if A.ndim == 1:
        A = A[:, np.newaxis]
    elif A.ndim != 2:
        raise ValueError("A must be 2d")
    n, segs = A.shape
    if u.ndim > 1 or v.ndim > 1:
        raise ValueError("u, v must be 1d")
    u = u[:, np.newaxis]
    v = v[np.newaxis, :]
    m = v.size
    if n != u.size:
        raise ValueError(
            "Inconsistent dimensions: A.shape[0]={}, u.size={}".format(
                n, u.size))
    if debug:
        print("mri_exp_mult: n={}, m={}, segs={}".format(n, m, segs))
    if v.size < 4e6:
        tmp = -xp.dot(u, v)
        xp.exp(tmp, out=tmp)
        return xp.dot(xp.conj(A.T), tmp)
    else:
        # break into chunks to reduce the memory required
        nchunks = int(np.ceil(v.size / 1e6))
        nper = int(np.ceil(v.size / nchunks))
        arrays = []
        for ci in range(nchunks):
            print("computing chunk {} of {}".format(ci + 1, nchunks))
            if ci == nchunks - 1:
                sl = (slice(None), slice(ci * nper, v.size))
            else:
                sl = (slice(None), slice(ci * nper, (ci + 1) * nper))
            tmp = -xp.dot(u, v[sl])
            tmp = xp.exp(tmp, out=tmp)
            arrays.append(xp.dot(xp.conj(A.T), tmp))
        return xp.concatenate(arrays, axis=1)

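# Minimal check sketch for mri_exp_mult (illustrative; the random arrays
# are assumptions). Verifies the dense one-liner it implements.
def _demo_mri_exp_mult():
    import numpy as np

    rng = np.random.default_rng(0)
    A = rng.standard_normal((5, 3)) + 1j * rng.standard_normal((5, 3))
    u = rng.standard_normal(5)
    v = rng.standard_normal(7)
    Y = mri_exp_mult(A, u, v)
    ref = A.conj().T @ np.exp(-np.outer(u, v))
    assert np.allclose(Y, ref)
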
def gradient(self, x):
    """ gradient of the TV norm """
    xp, on_gpu = get_array_module(x)
    Tf = self * x  # equivalent to self.forward(x)
    term1 = self.norm_p * Tf
    if self.norm_p == 1:
        term2 = 1 / xp.sqrt(Tf * xp.conj(Tf) + self._limit)
    elif self.norm_p == 2:
        term2 = 1
    else:
        p = self.norm_p / 2 - 1
        term2 = (Tf * xp.conj(Tf) + self._limit)**p
    return self.weight * (self.H * (term1 * term2))

def backward_diff(f, axis, mode="periodic", xp=None):
    # (periodic) backward difference of f along the specified axis
    if xp is None:
        xp, on_gpu = get_array_module(f)
    if mode == "periodic":
        return f - xp.roll(f, 1, axis=axis)
    elif mode == "edge":
        tmp = f - xp.roll(f, 1, axis=axis)
        sl = [slice(None)] * tmp.ndim
        sl[axis] = slice(0, 1)
        tmp[tuple(sl)] = 0
        return tmp
    else:
        raise NotImplementedError("mode must be 'periodic' or 'edge'.")

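# Minimal sketch of the backward difference modes (illustrative only; the
# sample array is an assumption).
def _demo_backward_diff():
    import numpy as np

    f = np.array([1.0, 2.0, 4.0, 7.0])
    # periodic: the first element wraps around to f[0] - f[-1] = 1 - 7 = -6
    d = backward_diff(f, axis=0, mode="periodic")
    assert np.allclose(d, [-6.0, 1.0, 2.0, 3.0])
    # edge: the wrapped first element is zeroed instead
    d_edge = backward_diff(f, axis=0, mode="edge")
    assert np.allclose(d_edge, [0.0, 1.0, 2.0, 3.0])
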
def _mri_smap1(x, y, z, a, xp=None):
    """Circular coil in "x-y plane" of radius a.

    Note that the coil x-y plane is not the same as the object x-y plane!
    """
    xp, on_gpu = get_array_module(x)
    x = x / a
    y = y / a
    z = z / a
    r = xp.sqrt(x * x + y * y)
    r[r == 0] = 1e-7  # avoid divide by zero
    zsq = z * z
    rp1sq = (r + 1)**2
    M = 4 * r / (rp1sq + zsq)
    # elliptic integrals of the first and second kind
    if xp != np:
        # CuPy doesn't currently have ellipk or ellipe, so transfer to the
        # CPU to evaluate the elliptic integrals.
        M = M.get()
    (K, E) = ellipk(M), ellipe(M)
    if xp != np:
        K, E = map(xp.asarray, (K, E))
    # B_z in eqn (18) in grivich:00:tmf
    tmp = (rp1sq + zsq)**(-0.5)
    tmp2 = (1 - r)**2 + zsq
    rsq = r * r
    smap_z = 2 * tmp * (K + (1 - rsq - zsq) / tmp2 * E)
    smap_z /= a
    # B_r in eqn (17) in grivich:00:tmf
    smap_r = 2 * z / r * tmp * ((1 + rsq + zsq) / tmp2 * E - K)
    bad = xp.abs(r) < 1e-6
    smap_r[bad] = 3 * xp.pi * z[bad] / ((1 + z[bad]**2)**2.5) * r[bad]
    smap_r /= a
    if xp.any(xp.isnan(smap_r)) or xp.any(xp.isnan(smap_z)):
        raise Exception("NaN found in smap")
    phi = xp.arctan2(y, x)
    smap_x = smap_r * xp.cos(phi)
    smap_y = smap_r * xp.sin(phi)
    return (smap_x, smap_y, smap_z)

def prewhiten(data, noise_cal_data, coil_axis=-1, coil_axis_noi=None, xp=None):
    """Noise prewhitening of multichannel MRI data.

    Parameters
    ----------
    data : ndarray
        The data to prewhiten.
    noise_cal_data : ndarray
        Noise calibration data.
    coil_axis : int, optional
        The axis in ``data`` corresponding to coils. By default, the last
        axis is assumed.
    coil_axis_noi : int, optional
        The axis in ``noise_cal_data`` corresponding to coils. By default,
        the smallest axis of ``noise_cal_data`` is assumed.
    xp : {np, cupy}
        The array module to use.

    Returns
    -------
    data_prewhite : xp.ndarray
        The prewhitened data.
    W : xp.ndarray
        The noise prewhitening matrix.
    R : xp.ndarray
        The noise correlation matrix corresponding to ``noise_cal_data``.
    """
    xp, on_gpu = get_array_module(data, xp)
    if coil_axis_noi is None:
        coil_axis_noi = np.argmin(noise_cal_data.shape)
    if not xp.iscomplexobj(data):
        raise ValueError("data must have a complex dtype")
    if data.shape[coil_axis] != noise_cal_data.shape[coil_axis_noi]:
        raise ValueError(
            "data and noise calibration data must have the "
            "same number of channels."
        )
    W, R = calculate_prewhitening(
        noise_cal_data, coil_axis=coil_axis_noi, return_full=True
    )
    data_prewhite = apply_prewhitening(data, W, coil_axis=coil_axis)
    return data_prewhite, W, R

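# Minimal usage sketch for prewhiten (illustrative; the synthetic data and
# shapes are assumptions).
def _demo_prewhiten():
    import numpy as np

    ncoils = 8
    rng = np.random.default_rng(0)
    noise = (rng.standard_normal((1024, ncoils))
             + 1j * rng.standard_normal((1024, ncoils)))
    data = (rng.standard_normal((64, 64, ncoils))
            + 1j * rng.standard_normal((64, 64, ncoils)))
    data_pw, W, R = prewhiten(data, noise, coil_axis=-1, coil_axis_noi=-1)
    return data_pw, W, R
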
def compute_bias_field(recon, affine=None, down=None, basename="xtrue",
                       xp=None):
    # TODO: call N4BiasFieldCorrection via SimpleITK or the ITK Python
    #       wrappers instead of via the shell
    import nibabel as nib
    import subprocess
    xp, on_gpu = get_array_module(recon, xp)
    if affine is None:
        affine = np.eye(4)
    ndim = recon.ndim
    recon_abs = xp.abs(recon)
    recon_abs = (32767 * recon_abs / recon_abs.max()).astype(np.uint16)
    if xp != np:
        recon_abs = recon_abs.get()
    recon_nii = nib.Nifti1Image(recon_abs, affine=affine)
    recon_nii.to_filename("{0}.nii".format(basename))
    # cstr must be defined even when the caller provides `down`
    if ndim == 3:
        cstr = "[200x200x200x200]"
    else:
        cstr = "[400x200x200]"
    if down is None:
        down = 3 if ndim == 3 else 1
    cmd = ("N4BiasFieldCorrection"
           " -d {0}"
           " -s {1} -c " + cstr +
           " -i {2}.nii"
           " -o [{2}_biascor.nii, {2}_biasfield.nii]")
    cmd = cmd.format(ndim, down, basename)
    try:
        # check_output raises CalledProcessError (not RuntimeError) on a
        # nonzero exit status
        subprocess.check_output(cmd, shell=True)
    except subprocess.CalledProcessError:
        print("Subprocess failed: is ANTS N4BiasFieldCorrection "
              "on the system path?")
        raise
    bias = nib.load("{0}_biasfield.nii".format(basename)).get_fdata()
    np.save("{}_biasfield".format(basename),
            np.asarray(bias, dtype=np.float32))
    return xp.asarray(bias)

def _nufft_coef(om, j, k, kernel, xp=None):
    """Make NUFFT interpolation coefficient vector given kernel function.

    Parameters
    ----------
    om : array_like
        [M,1] digital frequency omega in radians
    j : int
        # of neighbors used per frequency location
    k : int
        FFT size (should be >= N, the signal length)
    kernel : function
        kernel function

    Returns
    -------
    coef : array_like
        [j,M] coef vector for each frequency
    arg : array_like
        [j,M] kernel argument
    """
    xp, on_gpu = get_array_module(om, xp)
    om = xp.atleast_1d(xp.squeeze(om))
    if om.ndim > 1:
        raise ValueError("omega array must be 1D")
    gam = 2 * np.pi / k
    dk = om / gam - _nufft_offset(om, j, k, xp=xp)  # [M,1]
    # outer sum via broadcasting
    arg = -xp.arange(1, j + 1)[:, None] + dk[None, :]  # [j,M]
    try:
        # try calling kernel without j in case it is baked into the kernel
        coef = kernel(arg)
    except TypeError:
        # otherwise, provide j to the kernel
        coef = kernel(arg, j=j)
    return (coef, arg)

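# Minimal usage sketch for _nufft_coef (illustrative; the triangular kernel
# used here is an assumption, standing in for a real NUFFT kernel).
def _demo_nufft_coef():
    import numpy as np

    def kernel(arg, j=4):
        # triangular stand-in kernel supported on [-j/2, j/2]
        return np.maximum(1 - np.abs(arg / (j / 2)), 0)

    om = np.array([0.0, 0.5, 1.0])
    coef, arg = _nufft_coef(om, j=4, k=16, kernel=kernel)
    assert coef.shape == (4, 3) and arg.shape == (4, 3)
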
def apply_prewhitening(data, W, order="F", coil_axis=-1, xp=None):
    """Apply the noise prewhitening matrix.

    Parameters
    ----------
    data : ndarray
        The data to prewhiten.
    W : ndarray
        Input noise prewhitening matrix. This can be computed from
        noise-only data via ``calculate_prewhitening``.
    order : {"F", "C"}
        The memory layout used when reshaping ``data``.
    coil_axis : int
        The axis in ``data`` containing coils.

    Returns
    -------
    w_data : ndarray
        Prewhitened data.

    References
    ----------
    .. [1] Pruessman KP, Weiger M, Bornert P and Boesiger P. Advances in
       Sensitivity Encoding with Arbitrary k-Space Trajectories.
       Magn. Reson. Med. 46:638-651 (2001).
    """
    xp, on_gpu = get_array_module(data, xp)
    data = xp.asanyarray(data)
    W = xp.asanyarray(W)
    coil_axis = coil_axis % data.ndim
    ncoils = data.shape[coil_axis]
    if coil_axis != data.ndim - 1:
        # coil axis must come last
        data = xp.swapaxes(data, -1, coil_axis)
    s = data.shape
    data = data.reshape((-1, ncoils), order=order)
    data = xp.dot(data, W).reshape(s, order=order)
    if coil_axis != data.ndim - 1:
        # restore the coil axis to its original position
        data = xp.swapaxes(data, -1, coil_axis)
    return data

def _as_1d_ints(arr, n=None, xp=None):
    """Make sure arr is a 1D array of integers.

    Raises an error if the elements of ``arr`` aren't an integer type or if
    ``arr`` has more than one non-singleton dimension.

    Parameters
    ----------
    arr : array-like
        The array to check. If it is a scalar and ``n`` is specified, it
        will be broadcast to length ``n``.
    n : int, optional
        If specified, an error is raised if the array doesn't contain ``n``
        elements.
    xp : {numpy, cupy}
        The array module.

    Returns
    -------
    iarr : xp.ndarray
        ``arr`` cast to np.intp dtype.
    """
    if xp is None:
        xp, on_gpu = get_array_module(arr)
    arr = xp.atleast_1d(xp.squeeze(arr))
    if arr.ndim > 1:
        raise ValueError("arr must be scalar or 1d")
    if not issubclass(arr.dtype.type, np.integer):
        # float only OK if the values are integers
        if not xp.all(xp.mod(arr, 1) == 0):
            raise ValueError("arr contains non-integer values")
    if n is not None:
        if arr.size != n:
            if arr.size == 1:
                arr = xp.asarray([arr[0]] * n)
            else:
                raise ValueError(
                    "array did not have the expected size of {}".format(n)
                )
    return arr.astype(np.intp)  # cast to ints

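# Minimal usage sketch for _as_1d_ints (illustrative only).
def _demo_as_1d_ints():
    import numpy as np

    out = _as_1d_ints(3, n=2, xp=np)      # scalar broadcast to length 2
    assert out.tolist() == [3, 3]
    out = _as_1d_ints([1.0, 2.0], xp=np)  # integer-valued floats are OK
    assert out.dtype == np.intp
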
def _nufft_offset(om, j, k, xp=None):
    """Offset for NUFFT.

    Parameters
    ----------
    om : array_like
        omega in [-pi, pi) (not essential!)
    j : int
        # of neighbors used
    k : int
        FFT size

    Returns
    -------
    k0 : array_like
        offset for NUFFT
    """
    if xp is None:
        xp, on_gpu = get_array_module(om)
    om = xp.asanyarray(om)
    gam = 2 * np.pi / k
    k0 = xp.floor(om / gam - j / 2.0)
    return k0

def adjoint(self, g):
    """ image divergence """
    xp, on_gpu = get_array_module(g)
    nreps = int(g.size / prod(self.grad_shape))
    # TODO: fix gradient_adjoint for N-dimensional case
    # TODO: prior case -> add prior back?
    #       return gradient_adjoint(g, grad_axis='last')
    if nreps == 1:
        g = g.reshape(self.grad_shape, order=self.order)
        d = self.div_func(
            g,
            deltas=self.grid_size,
            direction="forward",
            grad_axis=self.grad_axis,
        )
    else:
        if self.order == "C":
            d = xp.zeros((nreps,) + self.arr_shape, dtype=g.dtype)
            for r in range(nreps):
                d[r, ...] = self.div_func(
                    g[r, ...],
                    deltas=self.grid_size,
                    direction="forward",
                    grad_axis=self.grad_axis,
                )
        else:
            d = xp.zeros(self.arr_shape + (nreps,), dtype=g.dtype)
            for r in range(nreps):
                d[..., r] = self.div_func(
                    g[..., r],
                    deltas=self.grid_size,
                    direction="forward",
                    grad_axis=self.grad_axis,
                )
    # d = self._prior_add(d)  # don't think prior should be added back here
    return -d  # adjoint of grad is -div

def gradient(f, dx=None, order=1, grad_axis="last"):
    """Modified version of numpy's gradient function.

    This differs in two ways:
    1.) returns an ndim+1 dimensional array instead of a list. The extra
        dimension can be appended at either the start or the end.
    2.) if order=1, first-order differences are performed

    Return the gradient of an N-dimensional array.

    The gradient is computed using second order accurate central
    differences in the interior and second order accurate one-sided
    (forward or backward) differences at the boundaries. The returned
    gradient hence has the same shape as the input array.

    Parameters
    ----------
    f : array_like
        An N-dimensional array containing samples of a scalar function.

    Returns
    -------
    gradient : ndarray
        N arrays of the same shape as `f` giving the derivative of `f` with
        respect to each dimension.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=float)
    >>> np.gradient(x)
    array([ 1. ,  1.5,  2.5,  3.5,  4.5,  5. ])
    >>> np.gradient(x, 2)
    array([ 0.5 ,  0.75,  1.25,  1.75,  2.25,  2.5 ])

    >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float))
    [array([[ 2.,  2., -1.],
            [ 2.,  2., -1.]]), array([[ 1. ,  2.5,  4. ],
            [ 1. ,  1. ,  1. ]])]

    >>> x = np.array([0, 1, 2, 3, 4])
    >>> dx = gradient(x)
    >>> y = x**2
    >>> gradient(y, dx)
    array([ 0.,  2.,  4.,  6.,  8.])
    """
    xp, on_gpu = get_array_module(f)
    f = xp.asanyarray(f)
    n = len(f.shape)  # number of dimensions
    if dx is not None:
        if np.isscalar(dx):
            dx = [dx] * n
        else:
            dx = list(dx)
    if order < 1 or order > 2:
        raise ValueError("Only first or second order differences supported")

    # use central differences on interior and one-sided differences on the
    # endpoints. This preserves second order-accuracy over the full domain.

    # create slice objects --- initially all are [:, :, ..., :]
    # the output has an additional dimension for the differences
    slice1 = [slice(None)] * (n + 1)
    slice2 = [slice(None)] * (n + 1)
    slice3 = [slice(None)] * (n + 1)
    slice4 = [slice(None)] * (n + 1)

    otype = f.dtype.char
    if otype not in ["f", "d", "F", "D", "m", "M"]:
        otype = "d"

    # Difference of datetime64 elements results in timedelta64
    if otype == "M":
        # Need to use the full dtype name because it contains unit
        # information
        otype = f.dtype.name.replace("datetime", "timedelta")
    elif otype == "m":
        # Needs to keep the specific units, can't be a general unit
        otype = f.dtype

    # Convert datetime64 data into ints. Make dummy variable `y`
    # that is a view of ints if the data is datetime64, otherwise
    # just set y equal to the array `f`.
    if f.dtype.char in ["M", "m"]:
        y = f.view("int64")
    else:
        y = f

    shape_orig = y.shape
    if grad_axis in [0, "first"]:
        grad_axis = 0
        out = xp.empty((n,) + y.shape, dtype=otype)
        y = y[np.newaxis, ...]
    elif grad_axis in [-1, "last"]:
        grad_axis = -1
        out = xp.empty(y.shape + (n,), dtype=otype)
        y = y[..., np.newaxis]
    else:
        raise ValueError("Unsupported grad_axis: {}".format(grad_axis))

    slice2[grad_axis] = 0
    slice3[grad_axis] = 0
    slice4[grad_axis] = 0

    # index with tuples throughout: indexing ndarrays with lists of slices
    # is no longer supported by numpy
    for axis in range(n):
        slice1[grad_axis] = axis
        if grad_axis == 0:
            out_axis = axis + 1
        else:
            out_axis = axis
        if y.shape[out_axis] < 2:
            raise ValueError(
                "Shape of array too small to calculate a numerical "
                "gradient, at least two elements are required.")
        if order == 1:
            slice1[out_axis] = slice(0, -1)
            slice2[out_axis] = slice(1, None)
            slice3[out_axis] = slice(None, -1)
            # 1D equivalent -- out[0:-1] = y[1:] - y[:-1]
            out[tuple(slice1)] = y[tuple(slice2)] - y[tuple(slice3)]

            slice1[out_axis] = -1
            slice2[out_axis] = -1
            slice3[out_axis] = -2
            # 1D equivalent -- out[-1] = (y[-1] - y[-2])
            out[tuple(slice1)] = y[tuple(slice2)] - y[tuple(slice3)]
        elif order == 2:  # numpy.gradient's usual case
            # Numerical differentiation: 1st order edges, 2nd order interior
            if y.shape[out_axis] == 2:
                slice1[out_axis] = slice(1, -1)
                slice2[out_axis] = slice(2, None)
                slice3[out_axis] = slice(None, -2)
                # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
                out[tuple(slice1)] = (y[tuple(slice2)]
                                      - y[tuple(slice3)]) / 2.0

                slice1[out_axis] = 0
                slice2[out_axis] = 1
                slice3[out_axis] = 0
                # 1D equivalent -- out[0] = (y[1] - y[0])
                out[tuple(slice1)] = y[tuple(slice2)] - y[tuple(slice3)]

                slice1[out_axis] = -1
                slice2[out_axis] = -1
                slice3[out_axis] = -2
                # 1D equivalent -- out[-1] = (y[-1] - y[-2])
                out[tuple(slice1)] = y[tuple(slice2)] - y[tuple(slice3)]
            # Numerical differentiation: 2nd order edges, 2nd order interior
            else:
                slice1[out_axis] = slice(1, -1)
                slice2[out_axis] = slice(2, None)
                slice3[out_axis] = slice(None, -2)
                # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
                out[tuple(slice1)] = (y[tuple(slice2)]
                                      - y[tuple(slice3)]) / 2.0

                slice1[out_axis] = 0
                slice2[out_axis] = 0
                slice3[out_axis] = 1
                slice4[out_axis] = 2
                # 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
                out[tuple(slice1)] = (
                    -(3.0 * y[tuple(slice2)] - 4.0 * y[tuple(slice3)]
                      + y[tuple(slice4)]) / 2.0)

                slice1[out_axis] = -1
                slice2[out_axis] = -1
                slice3[out_axis] = -2
                slice4[out_axis] = -3
                # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])/2.0
                out[tuple(slice1)] = (
                    (3.0 * y[tuple(slice2)] - 4.0 * y[tuple(slice3)]
                     + y[tuple(slice4)]) / 2.0)
        else:
            raise ValueError("Unsupported Order: {}".format(order))

        # reset the slice objects in this dimension to ":"
        slice1[out_axis] = slice(None)
        slice2[out_axis] = slice(None)
        slice3[out_axis] = slice(None)
        slice4[out_axis] = slice(None)

        # divide by the step size
        if dx is not None:
            slice1[grad_axis] = slice(axis, axis + 1)
            out[tuple(slice1)] = out[tuple(slice1)] / dx[axis]
            slice1[grad_axis] = slice(None)

    y.shape = shape_orig  # restore original shape
    return out

def magnitude(self, x, aniso=False, alpha=0):
    """ summed TV norm """
    xp, on_gpu = get_array_module(x)
    return self.weight * xp.sum(self.opnorm(x, aniso=aniso, alpha=alpha))

def cycle_spin(
    x,
    func,
    max_shifts,
    shift_steps=1,
    num_workers=None,
    multichannel=False,
    func_kw={},
    xp=None,
):
    """Cycle spinning (repeatedly apply func to shifted versions of x).

    Parameters
    ----------
    x : array-like
        Data for input to ``func``.
    func : function
        A function to apply to circularly shifted versions of ``x``. Should
        take ``x`` as its first argument. Any additional arguments can be
        supplied via ``func_kw``.
    max_shifts : int or tuple
        If an integer, shifts in ``range(0, max_shifts+1)`` will be used
        along each axis of ``x``. If a tuple, ``range(0, max_shifts[i]+1)``
        will be used along axis i.
    shift_steps : int or tuple, optional
        The step sizes for the shifts applied along axis i are
        ``range(0, max_shifts[i]+1, shift_steps[i])``. If an integer is
        provided, the same step size is used for all axes.
    num_workers : int or None, optional
        The number of parallel threads to use during cycle spinning. If set
        to ``None``, the full set of available cores is used.
    multichannel : bool, optional
        Whether to treat the final axis as channels (no cycle shifts are
        performed over the channels axis).
    func_kw : dict, optional
        Additional keyword arguments to supply to ``func``.
    xp : {numpy, cupy} or None
        The array module to use. If None, it is inferred from ``x``.

    Returns
    -------
    avg_y : np.ndarray
        The output of ``func(x, **func_kw)`` averaged over all combinations
        of the specified axis shifts.

    Notes
    -----
    Cycle spinning was proposed as a way to approach shift-invariance via
    performing several circular shifts of a shift-variant transform [1]_.

    For an n-level discrete wavelet transform, one may wish to perform all
    shifts up to ``max_shifts = 2**n - 1``. In practice, much of the
    benefit can often be realized with only a small number of shifts per
    axis.

    For transforms such as the blockwise discrete cosine transform, one may
    wish to evaluate shifts up to the block size used by the transform.

    References
    ----------
    .. [1] R.R. Coifman and D.L. Donoho. "Translation-Invariant
       De-Noising". Wavelets and Statistics, Lecture Notes in Statistics,
       vol.103. Springer, New York, 1995, pp.125-150.
       :DOI:10.1007/978-1-4612-2544-7_9

    Examples
    --------
    >>> import skimage.data
    >>> from skimage import img_as_float
    >>> from skimage.restoration import denoise_wavelet, cycle_spin
    >>> img = img_as_float(skimage.data.camera())
    >>> sigma = 0.1
    >>> img = img + sigma * np.random.standard_normal(img.shape)
    >>> denoised = cycle_spin(img, func=denoise_wavelet, max_shifts=3)
    """
    xp, on_gpu = get_array_module(x, xp)
    x = xp.asanyarray(x)
    all_shifts = _generate_shifts(x.ndim, multichannel, max_shifts,
                                  shift_steps)
    all_shifts = list(all_shifts)
    roll_axes = tuple(range(x.ndim))

    def _run_one_shift(shift):
        # shift, apply function, inverse shift
        xs = xp.roll(x, shift, axis=roll_axes)
        tmp = func(xs, **func_kw)
        return xp.roll(tmp, tuple(-s for s in shift), axis=roll_axes)

    # compute a running average across the cycle shifts
    if num_workers == 1 or not have_dask:
        if num_workers != 1:
            warnings.warn("dask not found: using only one worker")
        # serial processing
        mean = _run_one_shift(all_shifts[0])
        for shift in all_shifts[1:]:
            mean += _run_one_shift(shift)
        mean /= len(all_shifts)
    else:
        # multithreaded via dask
        futures = [dask.delayed(_run_one_shift)(s) for s in all_shifts]
        mean = sum(futures) / len(futures)
        mean = mean.compute(num_workers=num_workers)
    return mean

def kaiser_bessel(x=None, J=6, alpha=None, m=0, K_N=None):
    """Generalized Kaiser-Bessel function for x in support [-J/2, J/2].

    Parameters
    ----------
    x : array_like or str
        arguments [M,1]
    J : int, optional
        kernel size in each dimension
    alpha : float, optional
        shape parameter (default 2.34 * J)
    m : float, optional
        order parameter
    K_N :
        grid oversampling factor (typically 1.25 < K_N <= 2)

    Returns
    -------
    kb : array_like or str or function
        [M,1] KB function values, if x is an array of numbers,
        or string for kernel(k,J), if x is 'string',
        or inline function, if x is 'inline'

    Notes
    -----
    See (A1) in lewitt:90:mdi, JOSA-A, Oct. 1990.

    Adapted from the Matlab version:
    Copyright 2001-3-30, Jeff Fessler, The University of Michigan

    Modification 2002-10-29 by Samuel Matej:

    - for Negative & NonInteger m the besseli() function has singular
      behavior at the boundaries - KB values shooting up/down (worse for
      small alpha) leading to unacceptable interpolators
    - for real arguments and higher/reasonable values of alpha the
      besseli() gives similar values for positive and negative m except
      close to the boundaries
    - tested for m=-2.35:0.05:2.35 (besseli() gives exactly the same values
      for integer +- m)
      => besseli(m, ...) approximated by besseli(abs(m), ...), which
      behaves well at the boundaries

    WARNING: it is not clear how correct the FT formula (JOSA) is for this
    approximation (for NonInteger Negative m).

    NOTE: Even for the original KB formula, the JOSA FT formula is derived
    only for m > -1!
    """
    xp, on_gpu = get_array_module(x)
    if alpha is None:
        alpha = 2.34 * J
    # warn about the use of the modified formula for negative m
    if (m < 0) and ((abs(round(m) - m)) > np.finfo(float).eps):
        wstr = "Negative NonInt m=%g\n" % (m)
        wstr += "\t- using modified definition of KB function\n"
        warnings.warn(wstr)
    m_bi = abs(m)  # modified "m" as described above
    ii = (2 * xp.abs(x) < J).nonzero()
    tmp = 2 * x[ii] / J
    tmp *= tmp
    f = xp.sqrt(1 - tmp)
    if m_bi != 0:
        denom = _iv(m_bi, alpha, xp=xp)
    else:
        denom = _i0(alpha, xp=xp)
    if denom == 0:
        print("m=%g alpha=%g" % (m, alpha))
    kb = xp.zeros_like(x)
    if m_bi != 0:
        kb[ii] = (f**m * _iv(m_bi, alpha * f, xp=xp)) / float(denom)
    else:
        kb[ii] = _i0(alpha * f, xp=xp) / float(denom)
    kb = kb.real
    return kb

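# Minimal usage sketch for kaiser_bessel (illustrative; the sample grid is
# an assumption). Evaluates the default order-0 KB kernel on its support.
def _demo_kaiser_bessel():
    import numpy as np

    J = 6
    x = np.linspace(-J / 2, J / 2, 101)
    kb = kaiser_bessel(x, J=J)  # peaks at x=0, zero at |x| = J/2
    assert kb.argmax() == 50
    return kb
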
def kaiser_bessel_ft(u, J=6, alpha=None, m=0, d=1):
    """Fourier transform of generalized Kaiser-Bessel function, in
    dimension d.

    Parameters
    ----------
    u : array_like
        [M,1] frequency arguments
    J : int, optional
        kernel size in each dimension
    alpha : float, optional
        shape parameter (default: 2.34 * J)
    m : float, optional
        order parameter (default: 0)
    d : int, optional
        dimension (default: 1)

    Returns
    -------
    y : array_like
        [M,1] transform values if x is an array of numbers,
        or string for kernel_ft(k,J), if x is 'string',
        or inline function, if x is 'inline'

    Notes
    -----
    See (A3) in lewitt:90:mdi, JOSA-A, Oct. 1990.

    Matlab version: Copyright 2001-3-30, Jeff Fessler, The University of
    Michigan. Python adaptation: Gregory Lee.
    """
    xp, on_gpu = get_array_module(u)
    if alpha is None:
        alpha = 2.34 * J
    if m < -1:
        # check for validity of the FT formula
        wstr = "m=%g < -1" % (m)
        wstr += " in kaiser_bessel_ft()\n"
        wstr += " - validity of FT formula uncertain for m < -1\n"
        warnings.warn(wstr)
    elif (m < 0) and ((np.abs(np.round(m) - m)) > np.finfo(float).eps):
        wstr = "\nNeg NonInt m=%g in " % (m)
        wstr += "kaiser_bessel_ft()\n\t- validity of FT formula uncertain\n"
        warnings.warn(wstr)

    # trick: scipy.special.jv can handle complex args
    tmp = (np.pi * J) * u
    tmp *= tmp
    tmp -= alpha * alpha
    if xp is np:
        # lib.scimath.sqrt gives a complex value instead of NaN for
        # negative inputs
        z = np.lib.scimath.sqrt(tmp)
    else:
        # no cupy.lib.scimath.sqrt, but it is equivalent to converting tmp
        # to a complex dtype before calling xp.sqrt
        tmp_cplx = tmp.astype(xp.result_type(tmp.dtype, xp.complex64),
                              copy=False)
        z = xp.sqrt(tmp_cplx)
    nu = d / 2.0 + m
    const1 = (2 * np.pi)**(d / 2.0) * (J / 2.0)**d * alpha**m
    if m == 0:
        const1 /= _i0(alpha, xp=xp)
    else:
        const1 /= _iv(m, alpha, xp=xp)
    if nu == 0:
        y = const1 * _j0(z, xp=xp)
        y /= z
    else:
        y = const1 * _jv(nu, z, xp=xp)
        y /= z**nu
    y = reale(y.real)
    return y

def gradient_periodic(
    f,
    direction="forward",
    axes=None,
    deltas=None,
    grad_axis="last",
    mode="periodic",
):
    """Gradient computed via circular shifts.

    This version based on xp.roll is very simple, but a bit slower than the
    gradient defined in numpy.gradient and its variant above.

    Parameters
    ----------
    f : array
        n-dimensional array over which to compute the gradient
    direction : {'forward', 'backward'}
        whether to use forward or backward differencing
    axes : list or array, optional
        list of axes along which to compute the gradient (default = all)
    deltas : list or array, optional
        grid spacing along each dimension (defaults to 1 on all dimensions)
    grad_axis : {'last', 'first', 0, -1}
        output array has dimensions f.ndim + 1. `grad_axis` controls
        whether the extra dimension is added in the first or last position
    """
    xp, on_gpu = get_array_module(f)
    f = xp.asanyarray(f)
    N = f.ndim  # number of dimensions
    if direction.lower() in ["forward", "forw", "f"]:
        diff_func = functools.partial(forward_diff, xp=xp, mode=mode)
    elif direction.lower() in ["backward", "back", "b"]:
        if mode != "periodic":
            raise NotImplementedError("untested")
        diff_func = functools.partial(backward_diff, xp=xp)
    else:
        raise ValueError("direction must be 'forward' or 'backward'")
    if axes is None:
        axes = np.arange(N)
    else:
        if np.isscalar(axes):
            axes = (axes, )
        axes = np.asanyarray(axes)
        if axes.max() > (N - 1):
            raise ValueError("maximum axis = {}, but f.ndim is only "
                             "{}".format(axes.max(), N))
    if deltas is not None:
        try:
            if len(deltas) != len(axes):
                raise ValueError("deltas array length must match the "
                                 "number of axes")
        except TypeError:
            raise TypeError("deltas should be a sequence")
    otype = f.dtype.char
    if otype not in ["f", "d", "F", "D", "m", "M"]:
        otype = "d"
    if grad_axis in [0, "first"]:
        grad_axis = 0
        g = xp.empty((len(axes), ) + f.shape, dtype=otype)
    elif grad_axis in [-1, "last"]:
        grad_axis = -1
        g = xp.empty(f.shape + (len(axes), ), dtype=otype)
    else:
        raise ValueError("Unsupported grad_axis: {}".format(grad_axis))
    for n, axis in enumerate(axes):
        if grad_axis == 0:
            g[n, ...] = diff_func(f, axis=axis)
            if deltas is not None:
                g[n, ...] /= deltas[n]
        else:
            g[..., n] = diff_func(f, axis=axis)
            if deltas is not None:
                g[..., n] /= deltas[n]
    return g

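# Minimal usage sketch for gradient_periodic (illustrative; the test array
# and the assumed forward_diff semantics, roll(f, -1) - f, are
# assumptions).
def _demo_gradient_periodic():
    import numpy as np

    f = np.arange(4.0)  # [0, 1, 2, 3]
    g = gradient_periodic(f, direction="forward", grad_axis="last")
    # forward periodic difference: f[(n+1) % 4] - f[n]
    assert np.allclose(g[..., 0], [1.0, 1.0, 1.0, -3.0])
    return g
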
def test_get_array_module(xp):
    x = xp.arange(8)
    xp_detected, on_gpu = utils.get_array_module(x)
    assert xp is xp_detected

def dtft(x, omega, shape=None, n_shift=None, useloop=False, xp=None):
    """Compute exact (slow) n-dimensional non-uniform Fourier transform.

    This function is used as a reference for testing the NUFFT. It is not a
    fast transform.

    Parameters
    ----------
    x : ndarray
        The data to transform.
    omega : ndarray, optional
        Frequency locations (radians).
    shape : tuple of int, optional
        The shape of the transform. If not specified and
        ``x.ndim == omega.shape[1]``, all axes are transformed. Otherwise,
        all but the last axis is transformed. A user-specified shape allows
        operating on raveled input, ``x``, but only if
        ``x.ravel(order='F')`` was used.
    n_shift : tuple of int, optional
        Spatial indices correspond to ``np.arange(n) - n_shift`` where
        ``n`` is the size of x on a given axis.
    useloop : bool, optional
        If True, less memory is used (slower).
    xp : {numpy, cupy}
        The array module to use.

    Returns
    -------
    xk : array
        DTFT values

    Notes
    -----
    Requires enough memory to store M * prod(shape) size matrices
    (for testing only).

    Matlab version: Copyright 2001-9-17, Jeff Fessler, The University of
    Michigan.
    """
    xp, on_gpu = get_array_module(omega, xp)
    x = xp.asarray(x)
    omega = xp.asarray(omega)
    if omega.ndim != 2:
        raise ValueError("omega must be 2d")
    dd = omega.shape[1]
    if shape is None:
        if x.ndim == dd + 1:
            shape = x.shape[:-1]
        elif x.ndim == dd:
            shape = x.shape
        else:
            raise ValueError("shape must be specified")
    shape = xp.atleast_1d(shape)
    if len(shape) == dd:  # just one image
        x = x.ravel(order="F")
        x = x[:, xp.newaxis]
    elif len(shape) == dd + 1:  # multiple images
        shape = shape[:-1]
        x = xp.reshape(x, (xp.prod(shape), -1))  # [*shape, L]
    else:
        raise ValueError("bad input signal size")
    if n_shift is None:
        n_shift = np.zeros(dd)
    n_shift = np.atleast_1d(np.squeeze(n_shift))
    if len(n_shift) != dd:
        raise ValueError("must specify one shift per axis")
    if np.any(n_shift != 0):
        nng = []
        for d in range(dd):
            nng.append(xp.arange(0, shape[d]) - n_shift[d])
        nng = xp.meshgrid(*nng, indexing="ij")
    else:
        nng = xp.indices(shape)
    if useloop:
        # loop way: slower but less memory
        # Could make a numba version of this if desired
        m = len(omega)
        xk = xp.zeros(
            (x.size // xp.prod(shape), m),
            dtype=xp.result_type(x.dtype, omega.dtype, xp.complex64),
        )  # [L, m]
        if omega.shape[1] < 3:
            # trick: make '3d'
            omega = xp.hstack((omega,
                               xp.zeros(omega.shape[0])[:, xp.newaxis]))
        for d in range(dd):
            nng[d] = nng[d].ravel(order="F")
        for mm in range(0, m):
            tmp = omega[mm, 0] * nng[0]
            for d in range(1, dd):
                tmp += omega[mm, d] * nng[d]
            xk[:, mm] = xp.dot(xp.exp(-1j * tmp), x)
        xk = xk.T  # [m, L]
    else:
        xk = xp.outer(omega[:, 0], nng[0].ravel(order="F"))
        for d in range(1, dd):
            xk += xp.outer(omega[:, d], nng[d].ravel(order="F"))
        xk = xp.dot(xp.exp(-1j * xk), x)
    if xk.shape[-1] == 1:
        xk.shape = xk.shape[:-1]
    return xk

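# Minimal check sketch relating dtft to the FFT (illustrative; the random
# signal is an assumption). With n_shift=0 and uniformly spaced omega, the
# DTFT reduces to the DFT.
def _demo_dtft_matches_fft():
    import numpy as np

    n = 8
    x = np.random.randn(n) + 1j * np.random.randn(n)
    omega = (2 * np.pi * np.arange(n) / n)[:, np.newaxis]
    xk = dtft(x, omega, shape=(n,), n_shift=0)
    assert np.allclose(xk, np.fft.fft(x))
    return xk
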
def dtft_adj(xk, omega, shape=None, n_shift=None, useloop=False, xp=None):
    """Compute adjoint of the d-dim DTFT for spectrum xk at frequency
    locations omega.

    Parameters
    ----------
    xk : array
        DTFT values
    omega : array, optional
        frequency locations (radians)
    n_shift : array, optional
        indexed as range(0, N) - n_shift
    useloop : bool, optional
        True to reduce memory use (slower)

    Returns
    -------
    x : array
        signal values

    Requires enough memory to store m * prod(shape) size matrices.
    (For testing only)
    """
    if xp is None:
        xp, on_gpu = get_array_module(omega)
    else:
        on_gpu = xp != np
    dd = omega.shape[1]
    if shape is None:
        if xk.ndim == dd + 1:
            shape = xk.shape[:-1]
        elif xk.ndim == dd:
            shape = xk.shape
        else:
            raise ValueError("shape must be specified")
    shape = xp.atleast_1d(shape)
    if len(shape) == dd:  # just one image
        xk = xk.ravel(order="F")
        xk = xk[:, xp.newaxis]
    elif len(shape) == dd + 1:  # multiple images
        shape = shape[:-1]
        xk = xp.reshape(xk, (xp.prod(shape), -1))  # [*shape, L]
    else:
        raise ValueError("bad input signal size")
    if len(shape) != dd:
        raise ValueError(
            "length of shape must match number of columns in omega"
        )
    if n_shift is None:
        n_shift = (0,) * dd
    elif np.isscalar(n_shift):
        n_shift = (n_shift,) * dd
    if len(n_shift) != dd:
        raise ValueError("must specify one shift per axis")
    n_shift = xp.asarray(n_shift)
    if any(s != 0 for s in n_shift):
        nn = []
        for idx in range(dd):
            nn.append(xp.arange(0, shape[idx]) - n_shift[idx])
        nn = xp.meshgrid(*nn, indexing="ij")
    else:
        nn = xp.indices(shape)
    if on_gpu and isinstance(shape, xp.ndarray):
        shape = shape.get()
    shape = tuple(shape)
    if useloop:
        # slower, but low memory
        # Could make a numba version of this if desired
        m = omega.shape[0]
        x = xp.zeros(shape)  # [(shape)]
        for mm in range(0, m):
            t = omega[mm, 0] * nn[0]
            for d in range(1, dd):
                t += omega[mm, d] * nn[d]
            x = x + xp.exp(1j * t) * xk[mm]
    else:
        x = xp.outer(nn[0].ravel(order="F"), omega[:, 0])
        for d in range(1, dd):
            x += xp.outer(nn[d].ravel(order="F"), omega[:, d])
        x = xp.dot(xp.exp(1j * x), xk)  # [prod(shape), L]
        x = xp.reshape(x, shape + (-1,), order="F")  # [(shape), L]
    if x.shape[-1] == 1:
        x.shape = x.shape[:-1]
    return x

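# Minimal adjointness-check sketch pairing dtft and dtft_adj (illustrative;
# the random spectra are assumptions): <dtft(x), y> == <x, dtft_adj(y)>.
def _demo_dtft_adjoint():
    import numpy as np

    n, m = 8, 11
    rng = np.random.default_rng(0)
    x = rng.standard_normal(n) + 1j * rng.standard_normal(n)
    yk = rng.standard_normal(m) + 1j * rng.standard_normal(m)
    omega = rng.uniform(-np.pi, np.pi, size=(m, 1))
    lhs = np.vdot(dtft(x, omega, shape=(n,)), yk)
    rhs = np.vdot(x, dtft_adj(yk, omega, shape=(n,)))
    assert np.allclose(lhs, rhs)
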
def gradient_ravel_offsets(
    f,
    direction="forward",
    axes=None,
    deltas=None,
    grad_axis="last",
    offsets=None,
    use_corners=False,
):
    """Gradient over a raveled array at the specified offsets.

    Can use this version for various offsets as in Fessler's CDiff objects.
    Note: boundary conditions won't exactly match. Problematic?

    Parameters
    ----------
    f : array
        n-dimensional array over which to compute the gradient
    direction : {'forward', 'backward'}
        whether to use forward or backward differencing
    axes : list or array, optional
        list of axes along which to compute the gradient (default = all)
    deltas : list or array, optional
        grid spacing along each dimension (defaults to 1 on all dimensions)
    grad_axis : {'last', 'first', 0, -1}
        output array has dimensions f.ndim + 1. `grad_axis` controls
        whether the extra dimension is added in the first or last position
    """
    xp, on_gpu = get_array_module(f)
    f = xp.asanyarray(f)
    if offsets is None:
        offsets = compute_offsets(f.shape, use_corners)
    num_offsets = len(offsets)
    ndim = f.ndim
    if direction.lower() in ["forward", "forw", "f"]:
        n_roll = -1
    elif direction.lower() in ["backward", "back", "b"]:
        n_roll = 1
    else:
        raise ValueError("direction must be 'forward' or 'backward'")
    if deltas is not None:
        deltas = np.asanyarray(deltas)
        if len(deltas) != num_offsets:
            raise ValueError("deltas array length must match the number of "
                             "offsets")
    if axes is None:
        axes = np.arange(ndim)
    else:
        axes = np.asanyarray(axes)
        if axes.max() > (ndim - 1):
            raise ValueError("maximum axis = {}, but f.ndim is only "
                             "{}".format(axes.max(), ndim))
    otype = f.dtype.char
    if otype not in ["f", "d", "F", "D", "m", "M"]:
        otype = "d"
    fshape = f.shape
    f = f.ravel(order="F")
    if grad_axis in [0, "first"]:
        grad_axis = 0
        g = xp.empty((num_offsets, ) + f.shape, dtype=otype)
    elif grad_axis in [-1, "last"]:
        grad_axis = -1
        g = xp.empty(f.shape + (num_offsets, ), dtype=otype)
    else:
        raise ValueError("Unsupported grad_axis: {}".format(grad_axis))
    for n, off in enumerate(offsets):
        if grad_axis == 0:
            g[n, ...] = xp.roll(f, n_roll * off, axis=0) - f
            if deltas is not None:
                g[n, ...] /= deltas[n]
        else:
            g[..., n] = xp.roll(f, n_roll * off, axis=0) - f
            if deltas is not None:
                g[..., n] /= deltas[n]
    if grad_axis == 0:
        g = g.reshape((num_offsets, ) + fshape, order="F")
    else:
        g = g.reshape(fshape + (num_offsets, ), order="F")
    return g

def divergence_periodic(
    g,
    direction="forward",
    axes=None,
    deltas=None,
    grad_axis="last",
    mode="periodic",
):
    xp, on_gpu = get_array_module(g)
    g = xp.asanyarray(g)
    n = g.ndim - 1  # number of spatial dimensions
    # Note: direction here is opposite of the corresponding gradient_periodic
    if direction.lower() == "forward":
        diff_func = functools.partial(backward_diff, xp=xp)
    elif direction.lower() == "backward":
        diff_func = functools.partial(forward_diff, xp=xp)
    else:
        raise ValueError("direction must be 'forward' or 'backward'")
    if axes is None:
        axes = np.arange(n)
    else:
        if np.isscalar(axes):
            axes = (axes, )
        axes = np.asanyarray(axes)
        if axes.max() > (n - 1):
            raise ValueError("maximum axis = {}, but g has only {} spatial "
                             "dimensions".format(axes.max(), n))
    if deltas is not None:
        try:
            if len(deltas) != len(axes):
                raise ValueError("deltas array length must match the "
                                 "number of axes")
        except TypeError:
            raise TypeError("deltas should be a sequence")
    otype = g.dtype.char
    if otype not in ["f", "d", "F", "D", "m", "M"]:
        otype = "d"
    f = xp.empty(g.shape, dtype=otype)
    if grad_axis in [0, "first"]:
        grad_axis = 0
    elif grad_axis in [-1, "last"]:
        grad_axis = -1
    else:
        raise ValueError("Unsupported grad_axis: {}".format(grad_axis))
    # i indexes the gradient components (avoid reusing n from above)
    for i, axis in enumerate(axes):
        if grad_axis == 0:
            f[i, ...] = diff_func(g[i, ...], axis=axis)
            if deltas is not None:
                f[i, ...] /= deltas[i]
        else:
            f[..., i] = diff_func(g[..., i], axis=axis)
            if deltas is not None:
                f[..., i] /= deltas[i]
        if mode == "edge":
            if grad_axis == 0:
                sl_axis = axis + 1
            else:
                sl_axis = axis
            sl_beg = _slice_at_axis(slice(0, 1), sl_axis)
            sl_end = _slice_at_axis(slice(-1, None), sl_axis)
            f[sl_beg] = -g[sl_end]
            f[sl_end] = g[sl_beg]
    div = f.sum(axis=grad_axis)
    return div

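# Minimal adjointness-check sketch (illustrative; the random arrays and the
# assumed forward_diff semantics are assumptions). The negative divergence
# defined here should act as the adjoint of gradient_periodic, so
# <grad(f), g> == <f, -div(g)>.
def _demo_divergence_adjoint():
    import numpy as np

    rng = np.random.default_rng(0)
    f = rng.standard_normal((8, 8))
    g = rng.standard_normal((8, 8, 2))  # gradient-shaped array
    gf = gradient_periodic(f, direction="forward", grad_axis="last")
    div_g = divergence_periodic(g, direction="forward", grad_axis="last")
    lhs = np.vdot(gf, g)
    rhs = np.vdot(f, -div_g)
    assert np.allclose(lhs, rhs)
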
def mri_partial_fourier_nd(
    partial_kspace,
    pf_mask,
    niter=5,
    tw_inner=8,
    tw_outer=3,
    fill_conj=False,
    init=None,
    verbose=False,
    show=False,
    return_all_estimates=False,
    xp=None,
):
    """Partial Fourier reconstruction.

    Parameters
    ----------

    Returns
    -------

    Notes
    -----
    The implementation is based on a multi-dimensional iterative
    reconstruction technique as described in [1]_. This is an extension of
    the 1D iterative methods described in [2]_ and [3]_. The concept of
    partial Fourier imaging was first proposed in [4]_, [5]_.

    References
    ----------
    .. [1] Xu, Y. and Haacke, E. M. Partial Fourier imaging in
       multi-dimensions: A means to save a full factor of two in time.
       J. Magn. Reson. Imaging, 2001; 14:628-635. doi:10.1002/jmri.1228
    .. [2] Haacke, E.; Lindskogj, E. & Lin, W. A fast, iterative,
       partial-Fourier technique capable of local phase recovery.
       Journal of Magnetic Resonance, 1991; 92:126-145.
    .. [3] Liang, Z.-P.; Boada, F.; Constable, R. T.; Haacke, E.;
       Lauterbur, P. & Smith, M. Constrained reconstruction methods in MR
       imaging. Rev Magn Reson Med, 1992; 4:67-185.
    .. [4] Margosian, P.; Schmitt, F. & Purdy, D. Faster MR imaging:
       imaging with half the data. Health Care Instrum, 1986; 1:195.
    .. [5] Feinberg, D. A.; Hale, J. D.; Watts, J. C.; Kaufman, L. &
       Mark, A. Halving MR imaging time by conjugation: demonstration at
       3.5 kG. Radiology, 1986; 161:527-531.
    """
    xp, on_gpu = get_array_module(partial_kspace, xp)
    partial_kspace = xp.asarray(partial_kspace)
    pf_mask = xp.asarray(pf_mask)
    if pf_mask.dtype != bool:
        # use the builtin bool: np.bool was removed from recent numpy
        pf_mask = pf_mask.astype(bool)
    im_shape = pf_mask.shape
    ndim = pf_mask.ndim
    if not xp.all(xp.asarray(im_shape) % 2 == 0):
        raise ValueError(
            "This function assumes all k-space dimensions have even "
            "length.")
    if partial_kspace.size != xp.count_nonzero(pf_mask):
        raise ValueError(
            "partial kspace should have total size equal to the number of "
            "non-zeros in pf_mask")
    kspace_init = embed(partial_kspace, pf_mask)
    img_est = ifftnc(kspace_init)
    lr_kspace = xp.zeros_like(kspace_init)
    nz = xp.where(pf_mask)
    lr_slices = [slice(None)] * ndim
    pf_slices = [slice(None)] * ndim
    win2_slices = [slice(None)] * ndim
    lr_shape = [0] * ndim
    win2_shape = [0] * ndim
    for d in range(ndim):
        nz_min = xp.min(nz[d])
        nz_max = xp.max(nz[d])
        if hasattr(nz_min, "get"):
            # 0-dim GPU array to scalar
            nz_min, nz_max = nz_min.get(), nz_max.get()
        i_mid = im_shape[d] // 2
        if nz_min == 0:
            i_end = nz_max
            width = i_end - i_mid
        else:
            i_start = nz_min
            width = i_mid - i_start
        lr_slices[d] = slice(i_mid - width, i_mid + width + 1)
        lr_shape[d] = 2 * width + 1
        pf_shape = nz_max - nz_min + 1
        pf_slices[d] = slice(nz_min, nz_max + 1)
        win2_shape[d] = pf_shape + tw_outer
        if nz_min == 0:
            win2_slices[d] = slice(tw_outer, tw_outer + pf_shape)
        else:
            win2_slices[d] = slice(pf_shape)
    lr_slices = tuple(lr_slices)
    win2_slices = tuple(win2_slices)
    pf_slices = tuple(pf_slices)

    lr_win = hanning_apodization_window(lr_shape, tw_inner, xp)
    lr_kspace[lr_slices] = kspace_init[lr_slices] * lr_win

    img_lr = ifftnc(lr_kspace)
    phi = xp.angle(img_lr)

    pf_win = hanning_apodization_window(win2_shape, tw_outer, xp)[win2_slices]
    lr_mask = xp.zeros(pf_mask.shape, dtype=xp.float32)
    lr_mask[lr_slices] = lr_win
    win2_mask = xp.zeros(pf_mask.shape, dtype=xp.float32)
    win2_mask[pf_slices] = pf_win

    if show and ndim == 2:
        from matplotlib import pyplot as plt
        fig, axes = plt.subplots(2, 2)
        axes = axes.ravel()
        axes[0].imshow(pf_mask)
        axes[0].set_title("PF mask")
        axes[1].imshow(lr_mask)
        axes[1].set_title("LR Filter")
        axes[2].imshow(win2_mask)
        axes[2].set_title("Filter")
        axes[3].imshow(xp.abs(img_est).T)
        axes[3].set_title("Initial Estimate")
        for ax in axes:
            ax.set_xticklabels("")
            ax.set_yticklabels("")

    if verbose:
        norm0 = xp.linalg.norm(img_est)
        max0 = xp.max(xp.abs(img_est))

    if return_all_estimates:
        all_img_est = [img_est]

    # POCS iterations
    for i in range(niter):
        # step 5
        rho1 = xp.abs(img_est) * xp.exp(1j * phi)
        if verbose:
            change2 = xp.linalg.norm(rho1 - img_est) / norm0
            change1 = xp.max(xp.abs(rho1 - img_est)) / max0
            print("change = {}% {}%".format(100 * change2, 100 * change1))
        # step 6
        s1 = fftnc(rho1)
        # steps 7 & 8
        full_kspace = win2_mask * kspace_init + (1 - win2_mask) * s1
        # step 9
        img_est = ifftnc(full_kspace)
        if return_all_estimates:
            all_img_est.append(img_est)
    if return_all_estimates:
        return all_img_est
    return img_est

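# Minimal usage sketch for mri_partial_fourier_nd (illustrative; the
# synthetic object, the partial-Fourier mask, and the use of this module's
# fftnc helper here are assumptions).
def _demo_partial_fourier():
    import numpy as np

    shape = (64, 64)
    img = np.zeros(shape, dtype=np.complex64)
    img[16:48, 16:48] = 1.0
    kspace = fftnc(img)
    # sample slightly more than half of k-space along the first axis
    pf_mask = np.zeros(shape, dtype=bool)
    pf_mask[:40, :] = True
    partial_kspace = kspace[pf_mask]
    img_est = mri_partial_fourier_nd(partial_kspace, pf_mask, niter=5)
    return img_est
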