def test_14(self):
    Nr = 32
    Nc = 31
    Nd = 5
    M = 4
    D = np.random.randn(Nd, Nd, M).astype(np.float32)
    s = np.random.randn(Nr, Nc).astype(np.float32)
    frc = 0.5
    msk = su.rndmask(s.shape, frc, dtype=np.float32)
    s *= msk
    lmbda = 1e-1
    mu = 1e-2
    # Create a random ℓ2 of gradient term weighting array. There is no
    # need to extend this array to account for the AMS impulse filter
    # since this is taken care of automatically by cucbpdn.cbpdngrdmsk
    Wgrd = np.random.randn(M).astype(np.float32)
    # Append a zero entry to the GradWeight array, corresponding to the
    # impulse filter appended to the dictionary by cbpdn.AddMaskSim,
    # since AddMaskSim does not extend the weight array automatically
    Wgrdi = np.hstack((Wgrd, np.zeros((1,))))
    opt = cbpdn.ConvBPDNGradReg.Options({'Verbose': False,
                                         'MaxMainIter': 50,
                                         'AutoRho': {'Enabled': False}})
    opt['GradWeight'] = Wgrdi
    b = cbpdn.AddMaskSim(cbpdn.ConvBPDNGradReg, D, s, msk, lmbda, mu, opt)
    X1 = b.solve()
    opt['GradWeight'] = Wgrd
    X2 = cucbpdn.cbpdngrdmsk(D, s, msk, lmbda, mu, opt)
    assert sm.mse(X1, X2) < 1e-10
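# The weight-extension pattern above recurs throughout these tests: the
# same handling applies to the L1Weight arrays in the tests below. A
# minimal sketch of a helper capturing it; ams_extend_weights is
# hypothetical (not part of sporco) and relies on the numpy import used
# throughout this file.
def ams_extend_weights(W, value=0.0):
    # Append one entry (for the AMS impulse filter) to the final axis of a
    # per-filter weight array, so its length matches the extended dictionary
    W = np.asarray(W, dtype=np.float32)
    ext = np.full(W.shape[:-1] + (1,), value, dtype=W.dtype)
    return np.concatenate((W, ext), axis=-1)

# e.g. ams_extend_weights(Wgrd) is equivalent to
# np.hstack((Wgrd, np.zeros((1,)))) in test_14 above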
def test_13(self):
    Nr = 32
    Nc = 31
    Nd = 5
    M = 4
    D = np.random.randn(Nd, Nd, M).astype(np.float32)
    s = np.random.randn(Nr, Nc).astype(np.float32)
    frc = 0.5
    msk = su.rndmask(s.shape, frc, dtype=np.float32)
    s *= msk
    lmbda = 1e-1
    mu = 1e-2
    Wl1 = np.random.randn(1, 1, M).astype(np.float32)
    Wl1i = np.concatenate((Wl1, np.ones(Wl1.shape[0:-1] + (1,))), axis=-1)
    Wgrdi = np.hstack((np.ones((M,)), np.zeros((1,))))
    opt = cbpdn.ConvBPDNGradReg.Options({'Verbose': False,
                                         'MaxMainIter': 50,
                                         'AutoRho': {'Enabled': False}})
    opt['L1Weight'] = Wl1i
    opt['GradWeight'] = Wgrdi
    b = cbpdn.AddMaskSim(cbpdn.ConvBPDNGradReg, D, s, msk, lmbda, mu, opt)
    X1 = b.solve()
    opt['L1Weight'] = Wl1
    opt['GradWeight'] = 1.0
    X2 = cucbpdn.cbpdngrdmsk(D, s, msk, lmbda, mu, opt)
    assert sm.mse(X1, X2) < 1e-10
def test_11(self):
    Nr = 32
    Nc = 31
    Nd = 5
    M = 4
    D = np.random.randn(Nd, Nd, M).astype(np.float32)
    s = np.random.randn(Nr, Nc).astype(np.float32)
    frc = 0.5
    msk = su.rndmask(s.shape, frc, dtype=np.float32)
    s *= msk
    lmbda = 1e-1
    # Create a random ℓ1 term weighting array. There is no need to
    # extend this array to account for the AMS impulse filter since
    # this is taken care of automatically by cucbpdn.cbpdnmsk
    Wl1 = np.random.randn(1, 1, M).astype(np.float32)
    # Append a zero entry to the L1Weight array, corresponding to the
    # impulse filter appended to the dictionary by cbpdn.AddMaskSim,
    # since AddMaskSim does not extend the weight array automatically
    Wl1i = np.concatenate((Wl1, np.zeros(Wl1.shape[0:-1] + (1,))), axis=-1)
    opt = cbpdn.ConvBPDN.Options({'Verbose': False, 'MaxMainIter': 50,
                                  'AutoRho': {'Enabled': False}})
    opt['L1Weight'] = Wl1i
    b = cbpdn.AddMaskSim(cbpdn.ConvBPDN, D, s, msk, lmbda, opt=opt)
    X1 = b.solve()
    opt['L1Weight'] = Wl1
    X2 = cucbpdn.cbpdnmsk(D, s, msk, lmbda, opt)
    assert sm.mse(X1, X2) < 1e-10
def test_12(self):
    Nr = 32
    Nc = 31
    Nd = 5
    M = 4
    D = np.random.randn(Nd, Nd, M).astype(np.float32)
    s = np.random.randn(Nr, Nc).astype(np.float32)
    frc = 0.5
    msk = su.rndmask(s.shape, frc, dtype=np.float32)
    s *= msk
    lmbda = 1e-1
    mu = 1e-2
    # Since cucbpdn.cbpdngrdmsk automatically ensures that the ℓ2 of
    # gradient term is not applied to the AMS impulse filter, while
    # cbpdn.AddMaskSim does not, we have to pass a GradWeight array
    # with a zero entry corresponding to the AMS impulse filter to
    # cbpdn.AddMaskSim
    Wgrdi = np.hstack((np.ones((M,)), np.zeros((1,))))
    opt = cbpdn.ConvBPDNGradReg.Options({'Verbose': False,
                                         'MaxMainIter': 50,
                                         'AutoRho': {'Enabled': False}})
    opt['GradWeight'] = Wgrdi
    b = cbpdn.AddMaskSim(cbpdn.ConvBPDNGradReg, D, s, msk, lmbda, mu, opt)
    X1 = b.solve()
    opt['GradWeight'] = 1.0
    X2 = cucbpdn.cbpdngrdmsk(D, s, msk, lmbda, mu, opt)
    assert sm.mse(X1, X2) < 1e-10
def masked_transform(blob, pad_size=None, noise_fraction=0.5,
                     l2denoise=True, gray=False):
    """Apply a random mask to blob, optionally pad it, and split it into
    lowpass (sl) and highpass (sh) components via masked l2-TV denoising.
    Returns (sl, sh, mask)."""
    mask = su.rndmask(blob.shape, noise_fraction, dtype=blob.dtype)
    blobw = blob * mask
    if pad_size is not None:
        # Pad the two spatial axes only
        pad = [(pad_size, pad_size), (pad_size, pad_size)] + \
            [(0, 0) for _ in range(blob.ndim-2)]
        blobw = np.pad(blobw, pad, mode='constant')
        mask = np.pad(mask, pad, 'constant')
    if l2denoise:
        tvl2opt = tvl2.TVL2Denoise.Options({
            'Verbose': False, 'MaxMainIter': 200, 'gEvalY': False,
            'AutoRho': {'Enabled': True}, 'DFidWeight': mask})
        denoiser = tvl2.TVL2Denoise(blobw, 0.05, tvl2opt,
                                    caxis=None if gray else 2)
        sl = denoiser.solve()
        sh = mask * (blobw - sl)
    else:
        sl, sh = np.zeros_like(blobw), blobw
    return sl, sh, mask
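# Illustrative use of masked_transform (a sketch; the random array stands
# in for a real training image, and the imports used above are assumed):
blob = np.random.randn(32, 32, 3).astype(np.float32)
sl, sh, mask = masked_transform(blob, pad_size=4, noise_fraction=0.5)
# Padding by 4 on each side of the two spatial axes grows 32x32 to 40x40
assert sl.shape == sh.shape == mask.shape == (40, 40, 3)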
def test_10(self):
    Nr = 32
    Nc = 31
    Nd = 5
    M = 4
    D = np.random.randn(Nd, Nd, M).astype(np.float32)
    s = np.random.randn(Nr, Nc).astype(np.float32)
    frc = 0.5
    msk = su.rndmask(s.shape, frc, dtype=np.float32)
    s *= msk
    lmbda = 1e-1
    opt = cbpdn.ConvBPDN.Options({'Verbose': False, 'MaxMainIter': 50,
                                  'AutoRho': {'Enabled': False}})
    b = cbpdn.AddMaskSim(cbpdn.ConvBPDN, D, s, msk, lmbda, opt=opt)
    X1 = b.solve()
    X2 = cucbpdn.cbpdnmsk(D, s, msk, lmbda, opt)
    assert sm.mse(X1, X2) < 1e-10
def train_models(solvers, train_loader, args):
    """Train for all solvers."""
    dname = args.dataset if not args.use_gray else args.dataset+'.gray'
    masks = []
    shs = []
    for e, blob in enumerate(train_loader):
        mask = su.rndmask(blob.shape, args.noise_fraction, dtype=blob.dtype)
        blobw = blob * mask
        if not args.dont_pad_boundary:
            pad = [(0, args.patch_size-1), (0, args.patch_size-1)] + \
                [(0, 0) for _ in range(blob.ndim-2)]
            blobw = np.pad(blobw, pad, 'constant')
            mask = np.pad(mask, pad, 'constant')
        # l2-TV denoising
        tvl2opt = tvl2.TVL2Denoise.Options({
            'Verbose': False, 'MaxMainIter': 200, 'gEvalY': False,
            'AutoRho': {'Enabled': True}, 'DFidWeight': mask})
        denoiser = tvl2.TVL2Denoise(blobw, args.l2_lambda, tvl2opt,
                                    caxis=None if args.use_gray else 2)
        sl = denoiser.solve()
        sh = mask * (blobw - sl)
        # Save masks and highpass components
        masks.append(mask)
        shs.append(sh)
        # Update solvers
        for k, solver in solvers.items():
            solver.solve(sh, W=mask)
            np.save(os.path.join(args.output_path, k,
                                 '{}.{}.npy'.format(dname, e)),
                    solver.getdict().squeeze())
            if args.visdom is not None:
                tiled_dict = su.tiledict(solver.getdict().squeeze())
                if not args.use_gray:
                    tiled_dict = tiled_dict.transpose(2, 0, 1)
                args.visdom.image(tiled_dict, opts=dict(caption=f'{k}.{e}'))
    # Snapshot blobs and masks
    masks = np.concatenate(masks, axis=-1)
    shs = np.concatenate(shs, axis=-1)
    np.save(os.path.join(args.output_path, 'train_masks.npy'), masks)
    np.save(os.path.join(args.output_path, 'train_blobs.npy'), shs)
    return solvers, shs, masks
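# Illustrative construction of the args object consumed by train_models
# (a sketch: the field names are taken from the function body above, the
# values are placeholders, and any solver exposing solve(sh, W=mask) and
# getdict() can populate the solvers dict):
from types import SimpleNamespace
args = SimpleNamespace(dataset='fruit', use_gray=True, noise_fraction=0.5,
                       dont_pad_boundary=False, patch_size=8,
                       l2_lambda=0.05, output_path='./output', visdom=None)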
def test_13(self):
    Nr = 32
    Nc = 31
    Nd = 5
    M = 4
    D = np.random.randn(Nd, Nd, M).astype(np.float32)
    s = np.random.randn(Nr, Nc).astype(np.float32)
    frc = 0.5
    msk = su.rndmask(s.shape, frc, dtype=np.float32)
    s *= msk
    lmbda = 1e-1
    mu = 1e-2
    # Create a random ℓ1 term weighting array. There is no need to
    # extend this array to account for the AMS impulse filter since
    # this is taken care of automatically by cucbpdn.cbpdngrdmsk
    Wl1 = np.random.randn(1, 1, M).astype(np.float32)
    # Append a zero entry to the L1Weight array, corresponding to the
    # impulse filter appended to the dictionary by cbpdn.AddMaskSim,
    # since AddMaskSim does not extend the weight array automatically
    Wl1i = np.concatenate((Wl1, np.zeros(Wl1.shape[0:-1] + (1,))), axis=-1)
    # Since cucbpdn.cbpdngrdmsk automatically ensures that the ℓ2 of
    # gradient term is not applied to the AMS impulse filter, while
    # cbpdn.AddMaskSim does not, we have to pass a GradWeight array
    # with a zero entry corresponding to the AMS impulse filter to
    # cbpdn.AddMaskSim
    Wgrdi = np.hstack((np.ones((M,)), np.zeros((1,))))
    opt = cbpdn.ConvBPDNGradReg.Options({'Verbose': False,
                                         'MaxMainIter': 50,
                                         'AutoRho': {'Enabled': False}})
    opt['L1Weight'] = Wl1i
    opt['GradWeight'] = Wgrdi
    b = cbpdn.AddMaskSim(cbpdn.ConvBPDNGradReg, D, s, msk, lmbda, mu, opt)
    X1 = b.solve()
    opt['L1Weight'] = Wl1
    opt['GradWeight'] = 1.0
    X2 = cucbpdn.cbpdngrdmsk(D, s, msk, lmbda, mu, opt)
    assert sm.mse(X1, X2) < 1e-10
def test_11(self):
    Nr = 32
    Nc = 31
    Nd = 5
    M = 4
    D = np.random.randn(Nd, Nd, M).astype(np.float32)
    s = np.random.randn(Nr, Nc).astype(np.float32)
    frc = 0.5
    msk = su.rndmask(s.shape, frc, dtype=np.float32)
    s *= msk
    lmbda = 1e-1
    Wl1 = np.random.randn(1, 1, M).astype(np.float32)
    Wl1i = np.concatenate((Wl1, np.ones(Wl1.shape[0:-1] + (1,))), axis=-1)
    opt = cbpdn.ConvBPDN.Options({'Verbose': False, 'MaxMainIter': 50,
                                  'AutoRho': {'Enabled': False}})
    opt['L1Weight'] = Wl1i
    b = cbpdn.AddMaskSim(cbpdn.ConvBPDN, D, s, msk, lmbda, opt=opt)
    X1 = b.solve()
    opt['L1Weight'] = Wl1
    X2 = cucbpdn.cbpdnmsk(D, s, msk, lmbda, opt)
    assert sm.mse(X1, X2) < 1e-10
def test_11(self):
    msk = util.rndmask((16, 17), 0.25, dtype=np.float32)
def test_10(self):
    msk = util.rndmask((16, 17), 0.25)
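# A minimal sketch of what a random mask generator along the lines of
# util.rndmask could look like. This is an illustration, not the library
# implementation, under the assumption that frc is (approximately) the
# fraction of entries set to zero; rndmask_sketch is hypothetical.
def rndmask_sketch(shp, frc, dtype=None):
    # Each entry is zero with probability frc and one otherwise
    return (np.random.rand(*shp) > frc).astype(dtype or np.float32)

msk = rndmask_sketch((16, 17), 0.25, dtype=np.float32)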
exim = util.ExampleImages(scaled=True, zoom=0.25, gray=True)
S1 = exim.image('barbara.png', idxexp=np.s_[10:522, 100:612])
S2 = exim.image('kodim23.png', idxexp=np.s_[:, 60:572])
S = np.dstack((S1, S2))


"""
Construct initial dictionary.
"""

np.random.seed(12345)
D0 = np.random.randn(8, 8, 32)


"""
Create random mask and apply to training images.
"""

frc = 0.5
W = util.rndmask(S.shape[0:2] + (1,), frc, dtype=np.float32)
Sw = W * S


"""
$\ell_2$-TV denoising with a spatial mask as a non-linear lowpass filter.
"""

lmbda = 0.1
opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'MaxMainIter': 200,
                                'DFidWeight': W, 'gEvalY': False,
                                'AutoRho': {'Enabled': True}})
S5 = exim.image('tulips.png', idxexp=np.s_[:, 30:542])
S = np.dstack((S1, S2, S3, S4, S5))


"""
Highpass filter training images.
"""

npd = 16
fltlmbd = 5
sl, sh = util.tikhonov_filter(S, fltlmbd, npd)


"""
Create random mask and apply to highpass filtered training image set.
"""

np.random.seed(12345)
frc = 0.25
W = util.rndmask(S.shape, frc, dtype=np.float32)
shw = W * sh


"""
Construct initial dictionary.
"""

D0 = np.random.randn(8, 8, 32)


"""
Set regularization parameter and options for dictionary learning solver.
"""

lmbda = 0.1
opt = onlinecdl.OnlineConvBPDNMaskDictLearn.Options({
    'Verbose': True, 'ZeroMean': False, 'eta_a': 10.0,
""" Load a reference image. """ img = util.ExampleImages().image('monarch.png', zoom=0.5, scaled=True, idxexp=np.s_[:, 160:672]) """ Create random mask and apply to reference image to obtain test image. (The call to ``numpy.random.seed`` ensures that the pseudo-random noise is reproducible.) """ np.random.seed(12345) frc = 0.5 msk = util.rndmask(img.shape, frc, dtype=np.float32) imgw = msk * img """ Define pad and crop functions. """ pn = 8 spad = lambda x: np.pad(x, ((pn, pn), (pn, pn), (0, 0)), mode='symmetric') zpad = lambda x: np.pad(x, ((pn, pn), (pn, pn), (0, 0)), mode='constant') crop = lambda x: x[pn:-pn, pn:-pn] """ Construct padded mask and test image.
""" Construct initial dictionary. """ np.random.seed(12345) D0 = np.random.randn(8, 8, 32) """ Create random mask and apply to training images. """ frc = 0.5 W = util.rndmask(S.shape[0:2] + (1,), frc, dtype=np.float32) Sw = W * S """ $\ell_2$-TV denoising with a spatial mask as a non-linear lowpass filter. """ lmbda = 0.1 opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'MaxMainIter': 200, 'DFidWeight': W, 'gEvalY': False, 'AutoRho': {'Enabled': True}}) b = tvl2.TVL2Denoise(Sw, lmbda, opt) sl = b.solve() sh = Sw - sl
""" Load a reference image. """ img = util.ExampleImages().image('monarch.png', zoom=0.5, scaled=True, gray=True, idxexp=np.s_[:, 160:672]) """ Create random mask and apply to reference image to obtain test image. (The call to ``numpy.random.seed`` ensures that the pseudo-random noise is reproducible.) """ np.random.seed(12345) frc = 0.5 msk = util.rndmask(img.shape, frc, dtype=np.float32) imgw = msk * img """ Define pad and crop functions. """ pn = 8 spad = lambda x: np.pad(x, pn, mode='symmetric') zpad = lambda x: np.pad(x, pn, mode='constant') crop = lambda x: x[pn:-pn, pn:-pn] """ Construct padded mask and test image. """ mskp = zpad(msk) imgwp = spad(imgw)