class TestAMP(unittest.TestCase):
    '''Make sure we line up with Stanford results.'''

    def setUp(self):
        data = load_test_data(
            'mr_utils/test_data/tests/cs/thresholding/amp',
            ['cdf97', 'mask', 'x0', 'y'])
        self.cdf97, self.mask, self.x0, self.y = data[:] #pylint: disable=W0632
        self.uft = UFT(self.mask)
        self.level = 5

    def test_uft(self):
        '''Test undersampled fourier encoding.'''
        y0 = self.uft.forward_ortho(self.x0)
        self.assertTrue(np.allclose(self.y, y0))

    @unittest.skip(
        'Currently do not know how to match cdf97 using pywavelets')
    def test_wavelet_decomposition(self):
        '''Make sure we decompose using the same wavelet transformation.'''
        wavelet_transform, locations = cdf97_2d_forward(self.x0, self.level)

        # # Check 'em out
        # view(np.stack((np.log(np.abs(self.cdf97)),
        #                np.log(np.abs(wavelet_transform)))))
        # view(np.stack((self.cdf97 - wavelet_transform)), log=True)

        # Make sure we can go back
        inverse = cdf97_2d_inverse(wavelet_transform, locations)
        self.assertTrue(np.allclose(self.x0, inverse))
        # view(self.x0 - inverse)
        # view(cdf97_2d_inverse(wavelet_transform, locations))

        # Currently failing...
        self.assertTrue(np.allclose(wavelet_transform, self.cdf97))
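# For reference, forward_ortho above is an undersampled, orthonormal Fourier
# encoding. Below is a minimal numpy sketch of that kind of operation; it is
# an illustrative assumption about the convention (exact fftshift placement
# may differ) and not the actual UFT implementation.
def _forward_ortho_sketch(x, mask):
    '''Masked, orthonormal 2D FFT (illustrative sketch only).'''
    return mask*np.fft.fftshift(
        np.fft.fft2(np.fft.ifftshift(x), norm='ortho'))

def _inverse_ortho_sketch(y, mask):
    '''Adjoint of the sketch above (zero-filled inverse FFT).'''
    return np.fft.fftshift(
        np.fft.ifft2(np.fft.ifftshift(y*mask), norm='ortho'))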
# Undersampling pattern
samp0 = np.zeros((sx, sy, st))
desc = 'Making sampling mask'
num_spokes = 16
offsets = np.random.randint(0, high=st, size=st)
for ii in trange(st, leave=False, desc=desc):
    samp0[..., ii] = radial(
        (sx, sy), num_spokes, offset=offsets[ii], extend=True,
        skinny=False)
# view(samp0)

# Set up the recon
x = imspace_true.copy()
ax = (0, 1)
uft = UFT(samp0, axes=ax, scale=True)
forward = uft.forward_ortho
inverse = uft.inverse_ortho
y = forward(x)
# view(y, log=True)
imspace_u = inverse(y)

# from mr_utils.cs import SpatioTemporalTVSB
# recon, err = SpatioTemporalTVSB(
#     samp0, y, betaxy=1/4, betat=1, mu=1, lam=1, gamma=1/2,
#     nInner=1, niter=3, x=x)
# view(recon)

w = 50
recon_l1 = ptv.tvgen(
from mr_utils import view
from mr_utils.cs import proximal_GD
from mr_utils.cs.models import UFT
from mr_utils.utils.wavelet import cdf97_2d_forward, cdf97_2d_inverse
from mr_utils.utils.orderings import bulk_up, whittle_down, colwise, rowwise
from mr_utils.utils.sort2d import sort2d

if __name__ == '__main__':

    # We need a mask
    mask = load_test_data(
        'mr_utils/test_data/tests/recon/reordering', ['mask'])[0]
    mask = np.fft.fftshift(mask)

    # Get the encoding model
    uft = UFT(mask)

    # Load in the test data
    kspace = load_test_data(
        'mr_utils/test_data/tests/recon/reordering', ['coil1'])[0]
    kspace = np.fft.fftshift(kspace)
    imspace = uft.inverse(kspace)

    # Undersample data to get prior
    kspace_u = kspace * mask
    imspace_u = uft.inverse(kspace_u)

    # Sparsifying transforms
    level = 3
    wvlt, locations = cdf97_2d_forward(imspace, level)
    sparsify = lambda x: cdf97_2d_forward(x, level)[0]
class TestTemporalGDTV(unittest.TestCase):
    '''Make sure output of function matches MATLAB output.'''

    @classmethod
    def setUpClass(cls):

        # Point self to cls so we can use it like in a normal method
        self = cls

        # Load in test data
        self.path = ('mr_utils/test_data/tests/cs/convex/'
                     'temporal_gd_tv')
        self.Coil, self.mask = load_test_data(self.path, ['Coil6', 'mask'])

        # For some reason they are the wrong dimensions coming out
        # of MATLAB, probably because they are old format
        self.Coil = self.Coil.T
        self.mask = self.mask.T

        # Set the recon parameters
        self.weight_fidelity = 1
        self.weight_temporal = .01
        self.beta_sqrd = 0.0000001
        self.niter = 200

        # Get an encoding model
        self.uft = UFT(self.mask, axes=(0, 1))

        # Compute reduced kspace
        self.reduced_kspace = self.Coil * self.mask

        # Compute prior
        self.prior = generate_prior(self.Coil * self.mask,
                                    self.uft.inverse_s)

        # Compute monotonic sort order
        (self.sort_order_real,
         self.sort_order_imag) = sort_real_imag_parts(self.prior, axis=-1)

        # Compute measured image domain
        self.measuredImgDomain = self.uft.inverse_s(self.reduced_kspace)

        # Get reduced data, whatever that is
        self.reduced_data = np.abs(self.measuredImgDomain)

        # Get initial estimates
        self.img_est = self.measuredImgDomain.copy()
        self.W_img_est = self.measuredImgDomain.copy()

        # Construct R and C (rows and columns, I assume)
        rows, cols, pages = self.img_est.shape[:]
        self.R = np.tile(np.arange(rows), (cols, pages, 1)).transpose(
            (2, 0, 1))
        self.C = np.tile(np.arange(cols), (rows, pages, 1)).transpose(
            (0, 2, 1))

        # From R and C get the indices we'll actually use
        self.nIdx_real = self.R + self.C * rows + (
            self.sort_order_real) * rows * cols
        self.nIdx_imag = self.R + self.C * rows + (
            self.sort_order_imag) * rows * cols

    def test_reduced_kspace(self):
        '''Verify reduced_kspace variable is the same as MATLAB'''
        reduced_kspace_true = load_test_data(self.path, ['reduced_kspace'])
        self.assertTrue(
            np.allclose(self.reduced_kspace, reduced_kspace_true))

    def test_generate_prior(self):
        '''Verify prior variable is the same as MATLAB'''
        prior_true = load_test_data(self.path, ['prior'])
        self.assertTrue(np.allclose(self.prior, prior_true))

    def test_monotonic_sort_real_imag_parts(self):
        '''Verify sort orders are the same as MATLAB'''
        real_true, imag_true = load_test_data(
            self.path, ['sort_order_real', 'sort_order_imag'])

        # 0-based indexing
        real_true -= 1
        imag_true -= 1
        self.assertTrue(np.alltrue(self.sort_order_real == real_true))
        self.assertTrue(np.alltrue(self.sort_order_imag == imag_true))

    def test_measuredImgDomain(self):
        '''Verify measuredImgDomain variable same as MATLAB'''
        mid = load_test_data(self.path, ['measuredImgDomain'])
        self.assertTrue(np.allclose(self.measuredImgDomain, mid))

    def test_reduced_data(self):
        '''Verify reduced_data variable same as MATLAB'''
        reduced_data = load_test_data(self.path, ['reduced_data'])
        self.assertTrue(np.allclose(self.reduced_data, reduced_data))

    def test_R_and_C(self):
        '''Verify R, C variables are the same as MATLAB'''
        R, C = load_test_data(self.path, ['R', 'C'])
        self.assertTrue(np.allclose(self.R, R - 1))
        self.assertTrue(np.allclose(self.C, C - 1))

    def test_nIdx_real_and_imag(self):
        '''Verify nIdx_real/imag variables are the same as MATLAB'''
        real, imag = load_test_data(self.path, ['nIdx_real', 'nIdx_imag'])
        # print(np.unravel_index(self.nIdx_real, self.prior.shape))
        self.assertTrue(np.allclose(self.nIdx_real, real - 1))
        self.assertTrue(np.allclose(self.nIdx_imag, imag - 1))

    def test_recon(self):
        '''See if the full recon matches the MATLAB output'''
        recon = GD_temporal_TV(
            self.prior, self.reduced_kspace, self.mask,
            self.weight_fidelity, self.weight_temporal,
            self.uft.forward_s, self.uft.inverse_s, x=self.Coil)
        view(recon)
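    def test_nIdx_column_major_equivalence(self):
        '''Illustrative added check: nIdx matches column-major raveling.

        Not a MATLAB comparison. It only documents that the construction
        R + C*rows + sort_order*rows*cols used in setUpClass is a 0-based,
        column-major (Fortran-order) linear index, i.e. the same thing
        np.ravel_multi_index computes with order='F'.
        '''
        rows, cols, pages = self.img_est.shape[:]
        nIdx2 = np.ravel_multi_index(
            (self.R.astype(int), self.C.astype(int),
             self.sort_order_real.astype(int)),
            (rows, cols, pages), order='F')
        self.assertTrue(np.allclose(self.nIdx_real, nIdx2))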
from mr_utils import view
from mr_utils.cs import GD_TV
from mr_utils.cs.models import UFT
from mr_utils.test_data.phantom import binary_smiley
from mr_utils.sim.traj import cartesian_pe

if __name__ == '__main__':

    # Same binary smiley face example
    do_reordering = True
    N = 1000
    x = binary_smiley(N)
    k = np.sum(np.abs(np.diff(x)) > 0)
    np.random.seed(5)
    samp = cartesian_pe(x.shape, undersample=.2, reflines=5)
    uft = UFT(samp)

    # Make the complex measurement in kspace
    # Note this is different than uft.forward, as fftshift must be performed
    y = uft.forward_ortho(x)

    # Solve inverse problem using gradient descent with TV sparsity constraint
    x_hat = GD_TV(
        y, forward_fun=uft.forward_ortho, inverse_fun=uft.inverse_ortho,
        alpha=.5, lam=.022, do_reordering=do_reordering, x=x,
        ignore_residual=True, disp=True,
plt.hist(im.imag.flatten(), density=True)
plt.title('Distribution of Imag')
plt.xlabel('Variance: %g' % laplace_sample_var(im.imag.flatten()))

plt.subplot(1, 3, 3)
plt.hist(np.abs(im.flatten()), density=True)
plt.title('Distribution of Mag')
plt.xlabel('Variance: %g' % exp_sample_var(np.abs(im).flatten()))
plt.show()

# Radial sampling pattern for retrospective undersampling
num_spokes = 16
samp = radial(im.shape, num_spokes, skinny=True, extend=True)
samp_percent = np.sum(samp.flatten()) / samp.size * 100
uft = UFT(samp)
kspace_u = np.fft.fft2(im) * samp
imspace_u = np.fft.ifft2(kspace_u)
# view(samp)
# view(imspace_u)
# view(kspace_u)

# Use wavelet transform
lvl = 3
_coeffs, locs = cdf97_2d_forward(im, lvl)
sparsify = lambda x0: cdf97_2d_forward(x0, lvl)[0]
unsparsify = lambda x0: cdf97_2d_inverse(x0, locs)
assert np.allclose(unsparsify(sparsify(im)), im)

# Recon params
ignore_mse = False
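# Hedged sketch of what the variance estimators used above might compute
# (the actual laplace_sample_var/exp_sample_var implementations are not
# shown in this fragment): fit the named distribution by maximum likelihood
# and report that distribution's variance.
def _laplace_sample_var_sketch(samples):
    '''Variance of a Laplace fit: 2*b**2, with b = mean(|x - median(x)|).'''
    b = np.mean(np.abs(samples - np.median(samples)))
    return 2*b**2

def _exp_sample_var_sketch(samples):
    '''Variance of an exponential fit: 1/lam**2, with lam = 1/mean(x).'''
    return np.mean(samples)**2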
from mr_utils.test_data.phantom import binary_smiley
from mr_utils.sim.traj import radial
from mr_utils.cs.models import UFT
from mr_utils.cs import proximal_GD
from mr_utils.utils.wavelet import cdf97_2d_forward, cdf97_2d_inverse
from mr_utils.utils.sort2d import sort2d

if __name__ == '__main__':

    # Phantom
    N = 2**9
    x = binary_smiley(N)

    # Sampling mask and encoding model
    mask = radial(x.shape, 16, extend=True)
    uft = UFT(mask)

    # Sample
    y = uft.forward_ortho(x)

    # Decide how we'll be selective in our updates
    percent_to_keep = .04
    num_to_keep = int(percent_to_keep * y.size)

    def select_n(x_hat, update):
        '''Return indices of n largest updates each iteration.'''
        return np.unravel_index(
            np.argpartition(np.abs(x_hat - update).flatten(),
                            -num_to_keep)[-num_to_keep:], x_hat.shape)

    idx = np.arange(y.size).reshape(y.shape)
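    # Quick standalone check of select_n (illustrative addition, not part
    # of the original script): starting from a zero estimate, the returned
    # indices point at the num_to_keep largest-magnitude entries of the
    # zero-filled reconstruction.
    x_hat_demo = np.zeros(y.shape, dtype='complex')
    update_demo = uft.inverse_ortho(y)
    keep_idx = select_n(x_hat_demo, update_demo)
    assert keep_idx[0].size == num_to_keep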
from mr_utils.utils.wavelet import cdf97_2d_forward, cdf97_2d_inverse
from mr_utils.sim.traj import radial
from mr_utils.cs.models import UFT
from mr_utils.cs import proximal_GD
from mr_utils import view

if __name__ == '__main__':

    # Get a phantom
    N = 64
    x = binary_smiley(N)

    # Do some sampling
    num_spokes = 12
    mask = radial(x.shape, num_spokes)
    uft = UFT(mask)
    y = uft.forward_ortho(x)

    # Sparsifying transforms
    level = 3
    wvlt, locations = cdf97_2d_forward(x, level)
    sparsify = lambda x0: cdf97_2d_forward(x0, level)[0]
    unsparsify = lambda x0: cdf97_2d_inverse(x0, locations)

    # Do the recon
    alpha = .15
    disp = True
    ignore = False
    maxiter = 200
    x_c = proximal_GD(
        y, forward_fun=uft.forward_ortho,
num_spokes = 16
run = ['monosort', 'lagrangian']  # none is always run to get prior

# Need a reasonable numerical phantom
x = np.rot90(modified_shepp_logan((N, N, N))[:, :, int(N / 2)])
# view(x)

# Sparsifying transform
level = 3
wvlt, locations = cdf97_2d_forward(x, level)
sparsify = lambda x: cdf97_2d_forward(x, level)[0]
unsparsify = lambda x: cdf97_2d_inverse(x, locations)

# Do radial golden-angle sampling
mask = radial(x.shape, num_spokes, skinny=True, extend=False)
uft = UFT(mask)
kspace_u = uft.forward_ortho(x)
view(kspace_u, fft=True)

# # We need to find the best alpha for the no ordering recon
# pGD = partial(
#     proximal_GD, y=kspace_u, forward_fun=uft.forward_ortho,
#     inverse_fun=uft.inverse_ortho, sparsify=sparsify,
#     unsparsify=unsparsify, mode='soft', thresh_sep=True,
#     selective=None, x=x, ignore_residual=False, disp=False,
#     maxiter=500)
# obj = lambda alpha0: compare_mse(
#     np.abs(x), np.abs(pGD(alpha=alpha0)))
# alpha0 = 0.05
# res = minimize(obj, alpha0)
# print(res)
import matplotlib.pyplot as plt
from skimage.measure import compare_mse

from mr_utils.cs import IHT_TV
from mr_utils.cs.models import UFT
from mr_utils.test_data.phantom import binary_smiley
from mr_utils.sim.traj import cartesian_pe

if __name__ == '__main__':

    do_reordering = True
    N = 1000
    x = binary_smiley(N)
    k = np.sum(np.abs(np.diff(x)) > 0)
    np.random.seed(5)
    samp = cartesian_pe(x.shape, undersample=.01, reflines=5)
    uft = UFT(samp)  # acquisition model

    # Show sampling pattern
    plt.imshow(samp, cmap='gray')
    plt.title('Sampling Pattern')
    plt.show()

    # Simulate acquisition
    kspace_u = uft.forward_ortho(x)
    imspace_u = uft.inverse_ortho(kspace_u)

    # Look at the aliased acquired signal
    plt.imshow(np.abs(imspace_u), cmap='gray')
    plt.title('Acquired')
    plt.show()
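    # The recon call itself is not shown in this fragment. A hedged sketch,
    # patterned on the GD_TV call earlier in this section, of how IHT_TV
    # might be invoked with the sparsity level k computed above -- the
    # keyword names below are assumptions, not the confirmed IHT_TV API:
    # x_hat = IHT_TV(
    #     kspace_u, forward_fun=uft.forward_ortho,
    #     inverse_fun=uft.inverse_ortho, k=k, x=x,
    #     do_reordering=do_reordering, disp=True)
    # plt.imshow(np.abs(x_hat), cmap='gray')
    # plt.title('IHT_TV Reconstruction')
    # plt.show()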