Example #1
"""

lmbda = 0.05
opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'MaxMainIter': 200,
                    'DFidWeight': mskp, 'gEvalY': False,
                    'AutoRho': {'Enabled': True}})
b = tvl2.TVL2Denoise(imgwp, lmbda, opt, caxis=2)
sl = b.solve()
sh = mskp * (imgwp - sl)


"""
Load dictionary.
"""

D = util.convdicts()['RGB:8x8x3x64']


"""
Set up :class:`.admm.cbpdn.ConvBPDN` options.
"""

lmbda = 2e-2
opt = cbpdn.ConvBPDN.Options({'Verbose': True, 'MaxMainIter': 200,
                    'HighMemSolve': True, 'RelStopTol': 5e-3,
                    'AuxVarObj': False, 'RelaxParam': 1.8,
                    'rho': 5e1*lmbda + 1e-1, 'AutoRho': {'Enabled': False,
                    'StdResiduals': False}})
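"""
The snippet is truncated at this point. As a hedged sketch (not the original script's continuation, which may handle the mask ``mskp`` differently), the options above would typically be used to run :class:`.admm.cbpdn.ConvBPDN` on the highpass component ``sh``, with the reconstruction added back to the lowpass component ``sl``:
"""

# Sketch only: uses the D, sh, sl, lmbda and opt defined above.
b = cbpdn.ConvBPDN(D, sh, lmbda, opt)
X = b.solve()                       # coefficient maps
shr = b.reconstruct().squeeze()     # highpass reconstruction
imgr = sl + shr                     # denoised image estimate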


"""
Example #2

"""
Highpass filter test image.
"""

npd = 16
fltlmbd = 5.0
imgnl, imgnh = util.tikhonov_filter(imgn, fltlmbd, npd)


"""
Load dictionary.
"""

D = util.convdicts()['G:8x8x128']


"""
Set solver options. See Section 8 of :cite:`wohlberg-2017-convolutional2` for details of construction of $\ell_1$ weighting matrix $W$.
"""

imgnpl, imgnph = util.tikhonov_filter(pad(imgn), fltlmbd, npd)
W = spl.irfftn(np.conj(spl.rfftn(D, imgnph.shape, (0, 1))) *
               spl.rfftn(imgnph[..., np.newaxis], None, (0, 1)),
               imgnph.shape, (0,1))
W = W**2
W = 1.0/(np.maximum(np.abs(W), 1e-8))

lmbda = 4.8e-2
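"""
The snippet is truncated here. As a hedged sketch (not the original script's exact continuation), the weighting array ``W`` computed above would be passed to the solver through the ``L1Weight`` option of :class:`.admm.cbpdn.ConvBPDN`, after reshaping it to the (rows, columns, channel, signal, filter) layout of the coefficient maps; the remaining option values below are illustrative.
"""

Wl1 = W.reshape(W.shape[0:2] + (1, 1, W.shape[-1]))
opt = cbpdn.ConvBPDN.Options({'Verbose': True, 'MaxMainIter': 200,
                              'RelStopTol': 3e-3, 'AuxVarObj': False,
                              'L1Weight': Wl1})
b = cbpdn.ConvBPDN(D, imgnph, lmbda, opt)
X = b.solve()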
Example #3
    f.write(vid.read())
    f.close()

img = sio.loadmat(pth)['indian_pines'].astype(np.float32)
img = img[16:-17, 16:-17, 0:200:2]
img /= img.max()

np.random.seed(12345)
imgn = util.spnoise(img, 0.33)


"""
We use a product dictionary :cite:`garcia-2018-convolutional2` constructed from a single-channel convolutional dictionary for the spatial axes of the image, and a standard (non-convolutional) dictionary for the spectral axis of the image. The impulse denoising problem is solved by appending an additional filter to the learned dictionary ``D0``, which is one of those distributed with SPORCO. This additional component consists of an impulse filter that will represent the low frequency image components when used together with a gradient penalty on the coefficient maps, as discussed below. The spectral axis dictionary is learned from the noise-free ground-truth image since the primary purpose of this script is as a code usage example: in a real application, this dictionary would be estimated from a relevant noise-free image.
"""

D0 = util.convdicts()['G:8x8x32']
Di = np.zeros(D0.shape[0:2] + (1,), dtype=np.float32)
Di[0, 0] = 1.0
D = np.concatenate((Di, D0), axis=2)

S = img.reshape((-1, img.shape[-1])).T
np.random.seed(12345)
B0 = np.random.randn(S.shape[0], 20)
lmbda = 0.02
opt = bpdndl.BPDNDictLearn.Options({'Verbose': True, 'MaxMainIter': 100,
                                    'BPDN': {'rho': 10.0*lmbda + 0.1},
                                    'CMOD': {'rho': S.shape[1] / 2e2}})

d = bpdndl.BPDNDictLearn(B0, S, lmbda, opt)
B = d.solve()
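"""
A hedged illustration (not part of the original script) of the product dictionary structure described above: each effective atom is the product of a spatial filter from ``D`` and a spectral vector (a column of ``B``), so a single coefficient map contributes a rank-one spatio-spectral component. The indexing below is illustrative.
"""

atom_spatial = D[..., 1]     # first learned 8x8 spatial filter (index 0 is the impulse)
atom_spectral = B[:, 0]      # one spectral atom (one weight per spectral band)
atom = atom_spatial[..., np.newaxis] * atom_spectral   # 8 x 8 x (number of bands) product atom
print('Product dictionary atom shape:', atom.shape)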
Example #4
"""

blksz = (8, 8, 3)
stpsz = (2, 2, 1)

blocks = util.extractblocks(imgn, blksz, stpsz)
blockmeans = np.mean(blocks, axis=(0, 1))
blocks -= blockmeans
blocks = blocks.reshape(np.product(blksz), -1)


"""
Load dictionary.
"""

D = util.convdicts()['RGB:8x8x3x64'].reshape(np.product(blksz), -1)


"""
Set solver options.
"""

lmbda = 1e-1
opt = bpdn.BPDN.Options({'Verbose': True, 'MaxMainIter': 250,
                         'RelStopTol': 3e-3, 'AuxVarObj': False,
                         'AutoRho': {'Enabled': False}, 'rho':
                         1e1*lmbda})


"""
Initialise the :class:`.admm.bpdn.BPDN` object and call the ``solve`` method.
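"""
(The snippet is cut off at this point; the following is a hedged sketch of the step just described, using the variables defined above rather than the original script's exact code.)
"""

b = bpdn.BPDN(D, blocks, lmbda, opt)
X = b.solve()
# Per-block reconstruction with the block means added back
blocksr = D.dot(X).reshape(blksz + (-1,)) + blockmeans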
Example #5
                                 gray=True, idxexp=np.s_[160:416, 60:316])

"""
Highpass filter example image.
"""

npd = 16
fltlmbd = 10
sl, sh = util.tikhonov_filter(img, fltlmbd, npd)


"""
Load dictionary and display it.
"""

D = util.convdicts()['G:12x12x216']
plot.imview(util.tiledict(D), fgsz=(7, 7))

lmbda = 5e-2

"""
The RelStopTol option was chosen so that the two different methods stop with similar functional values.
"""

"""
Initialise and run standard serial CSC solver using ADMM with an equality constraint :cite:`wohlberg-2014-efficient`.
"""

opt = cbpdn.ConvBPDN.Options({'Verbose': True, 'MaxMainIter': 200,
                              'RelStopTol': 5e-3, 'AuxVarObj': False,
                              'AutoRho': {'Enabled': False}})
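"""
A hedged sketch (not the original script's continuation) of running the ADMM solver configured above and reporting its final functional value; the comparison solver mentioned in the text is omitted here.
"""

b = cbpdn.ConvBPDN(D, sh, lmbda, opt)
X = b.solve()
print('ADMM ConvBPDN functional value: %.3e' % b.getitstat().ObjFun[-1])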
Example #6
from sporco import util
from sporco import plot

# Training images
exim = util.ExampleImages(scaled=True, zoom=0.5)
img1 = exim.image('standard', 'lena.png')
img2 = exim.image('standard', 'mandrill.png')
S = np.concatenate((img1[..., np.newaxis], img2[..., np.newaxis]), axis=3)

# Highpass filter test images
npd = 16
fltlmbd = 5
sl, sh = util.tikhonov_filter(S, fltlmbd, npd)

# Construct initial dictionary
D0 = np.ones((1, 1, 3, 1)) * util.convdicts()['G:12x12x36'][..., np.newaxis, :]
D0 = ccmod.normalise(D0)

# Compute sparse representation on current dictionary
lmbda = 0.01
opt = cbpdn.ConvBPDN.Options({'Verbose': True, 'MaxMainIter': 200})
b = cbpdn.ConvBPDN(D0, sh, lmbda, opt)
b.solve()

# Update dictionary for training set sh
opt = ccmod.ConvCnstrMOD.Options({
    'Verbose': True,
    'MaxMainIter': 100,
    'rho': 5.0
})
c = ccmod.ConvCnstrMOD(b.Y, sh, D0.shape, opt)
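# The snippet ends here. A hedged sketch of the remaining step (not the
# original script's exact code): run the dictionary update and display the
# updated dictionary. The getdict() method is assumed to be available on
# ConvCnstrMOD.
c.solve()
D1 = c.getdict().squeeze()
plot.imview(util.tiledict(D1), fgsz=(7, 7))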
Example #7
"""
Load a reference image and corrupt it with 33% salt and pepper noise. (The call to ``np.random.seed`` ensures that the pseudo-random noise is reproducible.)
"""

img = util.ExampleImages().image('monarch.png', zoom=0.5, scaled=True,
                                 idxexp=np.s_[:, 160:672])
np.random.seed(12345)
imgn = util.spnoise(img, 0.33)


"""
We use a colour dictionary. The impulse denoising problem is solved by appending some additional filters to the learned dictionary ``D0``, which is one of those distributed with SPORCO. The first of these additional components is a set of three impulse filters, one per colour channel, that will represent the impulse noise, and the second is an identical set of impulse filters that will represent the low frequency image components when used together with a gradient penalty on the coefficient maps, as discussed below.
"""

D0 = util.convdicts()['RGB:8x8x3x64']
Di = np.zeros(D0.shape[0:2] + (3, 3))
np.fill_diagonal(Di[0, 0], 1.0)
D = np.concatenate((Di, Di, D0), axis=3)


"""
The problem is solved using class :class:`.admm.cbpdn.ConvBPDNGradReg`, which implements the form of CBPDN with an additional gradient regularization term, as defined above. The regularization parameters for the $\ell_1$ and gradient terms are ``lmbda`` and ``mu`` respectively. Setting correct weighting arrays for these regularization terms is critical to obtaining good performance. For the $\ell_1$ norm, the weights on the filters that are intended to represent the impulse noise are tuned to an appropriate value for the impulse noise density (this value sets the relative cost of representing an image feature by one of the impulses or by one of the filters in the learned dictionary), the weights on the filters that are intended to represent low frequency components are set to zero (we only want them penalised by the gradient term), and the weights of the remaining filters are set to unity. For the gradient penalty, all weights are set to zero except for those corresponding to the filters intended to represent low frequency components, which are set to unity.
"""

lmbda = 2.8e-2
mu = 3e-1
w1 = np.ones((1, 1, 1, 1, D.shape[-1]))
w1[..., 0:3] = 0.33
w1[..., 3:6] = 0.0
wg = np.zeros((D.shape[-1]))
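"""
The snippet stops before the gradient weights are filled in and the solver is run. A hedged sketch of that continuation follows (not the original script's exact code; the ``L1Weight`` and ``GradWeight`` option names and the :class:`.admm.cbpdn.ConvBPDNGradReg` argument order are assumptions).
"""

wg[3:6] = 1.0    # gradient penalty only on the low-frequency impulse filters
opt = cbpdn.ConvBPDNGradReg.Options({'Verbose': True, 'MaxMainIter': 100,
                    'RelStopTol': 5e-3, 'AuxVarObj': False,
                    'L1Weight': w1, 'GradWeight': wg})
b = cbpdn.ConvBPDNGradReg(D, imgn, lmbda, mu, opt=opt, dimK=0)
X = b.solve()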
Example #8
                                 gray=True,
                                 idxexp=np.s_[:, 160:672])
np.random.seed(12345)
imgn = img + np.random.normal(0.0, 0.1, img.shape)
"""
Highpass filter test image.
"""

npd = 16
fltlmbd = 5.0
imgnl, imgnh = util.tikhonov_filter(imgn, fltlmbd, npd)
"""
Load dictionary.
"""

D = util.convdicts()['G:8x8x128']
"""
Set solver options. See Section 8 of :cite:`wohlberg-2017-convolutional2` for details of construction of $\ell_1$ weighting matrix $W$.
"""

imgnpl, imgnph = util.tikhonov_filter(pad(imgn), fltlmbd, npd)
W = spl.irfftn(
    np.conj(spl.rfftn(D, imgnph.shape,
                      (0, 1))) * spl.rfftn(imgnph[..., np.newaxis], None,
                                           (0, 1)), imgnph.shape, (0, 1))
W = W**2
W = 1.0 / (np.maximum(np.abs(W), 1e-8))

lmbda = 4.8e-2

opt = cbpdn.ConvBPDN.Options({
Example #9
img = util.ExampleImages().image('kodim23.png',
                                 scaled=True,
                                 gray=True,
                                 idxexp=np.s_[160:416, 60:316])
"""
Highpass filter example image.
"""

npd = 16
fltlmbd = 10
sl, sh = signal.tikhonov_filter(img, fltlmbd, npd)
"""
Load dictionary and display it.
"""

D = util.convdicts()['G:12x12x36']
# Duplicate the dictionary, adding independent noise to each copy so that
# corresponding filters form near-identical pairs
D = np.append(D + 0.01 * np.random.randn(*D.shape),
              D + 0.01 * np.random.randn(*D.shape),
              axis=-1)
plot.imview(util.tiledict(D), fgsz=(10, 10))
"""
Set :class:`.admm.cbpdnin.ConvBPDNInhib` solver options.
"""

lmbda = 5e-2
mu = 5e-2
opt = cbpdnin.ConvBPDNInhib.Options({
    'Verbose': True,
    'MaxMainIter': 200,
    'RelStopTol': 5e-3,
Example #10
img = util.ExampleImages().image('kodim23.png',
                                 scaled=True,
                                 idxexp=np.s_[160:416, 60:316])
"""
Highpass filter example image.
"""

npd = 16
fltlmbd = 10
slc, shc = signal.tikhonov_filter(img, fltlmbd, npd)
"""
Load greyscale convolutional dictionary.
"""

D = util.convdicts()['G:8x8x64']
"""
Learn a standard dictionary $B$ to represent all pixel colours in the example image. Since the standard dictionary is a $3 \times 6$ matrix, the sparse representation $X$ has 6 pseudo-channels, which are converted to the 3 channels of the example image by right-multiplication by the dictionary $B$, giving $XB$.
"""

S = shc.reshape((-1, shc.shape[-1])).T
np.random.seed(12345)
B0 = np.random.randn(S.shape[0], 6)
lmbda = 1e-2
opt = bpdndl.BPDNDictLearn.Options({
    'Verbose': True,
    'MaxMainIter': 100,
    'BPDN': {
        'rho': 10.0 * lmbda,
        'AutoRho': {
            'Enabled': False
Example #11
    vid = util.netgetdata(url)
    f = open(pth, 'wb')
    f.write(vid.read())
    f.close()

img = sio.loadmat(pth)['indian_pines'].astype(np.float32)
img = img[16:-17, 16:-17, 0:200:2]
img /= img.max()

np.random.seed(12345)
imgn = signal.spnoise(img, 0.33)
"""
We use a product dictionary :cite:`garcia-2018-convolutional2` constructed from a single-channel convolutional dictionary for the spatial axes of the image, and a standard (non-convolutional) dictionary for the spectral axis of the image. The impulse denoising problem is solved by appending an additional filter to the learned dictionary ``D0``, which is one of those distributed with SPORCO. This additional component consists of an impulse filter that will represent the low frequency image components when used together with a gradient penalty on the coefficient maps, as discussed below. The spectral axis dictionary is learned from the noise-free ground-truth image since the primary purpose of this script is as a code usage example: in a real application, this dictionary would be estimated from a relevant noise-free image.
"""

D0 = util.convdicts()['G:8x8x32']
Di = np.zeros(D0.shape[0:2] + (1, ), dtype=np.float32)
Di[0, 0] = 1.0
D = np.concatenate((Di, D0), axis=2)

S = img.reshape((-1, img.shape[-1])).T
np.random.seed(12345)
B0 = np.random.randn(S.shape[0], 20)
lmbda = 0.02
opt = bpdndl.BPDNDictLearn.Options({
    'Verbose': True,
    'MaxMainIter': 100,
    'BPDN': {
        'rho': 10.0 * lmbda + 0.1
    },
    'CMOD': {
Example #12
 def test_16(self):
     D = util.convdicts()['G:12x12x72']
     assert D.shape == (12, 12, 72)
Example #13
 def test_16(self):
     D = util.convdicts()['G:12x12x72']
     assert(D.shape == (12,12,72))
Example #14
    'Verbose': False,
    'MaxMainIter': 200,
    'DFidWeight': mskp,
    'gEvalY': False,
    'AutoRho': {
        'Enabled': True
    }
})
b = tvl2.TVL2Denoise(imgwp, lmbda, opt)
sl = b.solve()
sh = mskp * (imgwp - sl)
"""
Load dictionary.
"""

D = util.convdicts()['G:12x12x216']
plot.imview(util.tiledict(D), fgsz=(7, 7))

lmbda = 2e-2
"""
The RelStopTol option was chosen so that the two different methods stop with similar functional values.
"""
"""
Initialise and run serial CSC solver using masked decoupling :cite:`heide-2015-fast`.
"""

opt = cbpdn.ConvBPDNMaskDcpl.Options({
    'Verbose': True,
    'MaxMainIter': 200,
    'HighMemSolve': True,
    'RelStopTol': 5e-2,
Example #15

"""
Highpass filter example image.
"""

npd = 16
fltlmbd = 10
sl, sh = util.tikhonov_filter(img, fltlmbd, npd)


"""
Load greyscale dictionary and display it.
"""

D = util.convdicts()['G:8x8x64']
plot.imview(util.tiledict(D), fgsz=(7, 7))


"""
Set :class:`.admm.cbpdn.ConvBPDNJoint` solver options.
"""

lmbda = 1e-1
mu = 1e-2
opt = cbpdn.ConvBPDNJoint.Options({'Verbose': True, 'MaxMainIter': 200,
                              'RelStopTol': 5e-3, 'AuxVarObj': False})


"""
Initialise and run CSC solver.
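"""
(Hedged sketch of the step just described, since the snippet is cut off here; the :class:`.admm.cbpdn.ConvBPDNJoint` argument order is assumed.)
"""

b = cbpdn.ConvBPDNJoint(D, sh, lmbda, mu, opt)
X = b.solve()
imgr = sl + b.reconstruct().squeeze()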
Example #16
# Load demo image
img = util.ExampleImages().image('barbara.png', scaled=True, gray=True)

# Highpass filter test image
npd = 16
fltlmbd = 5
sl, sh = util.tikhonov_filter(img, fltlmbd, npd)

# Apply random mask to highpass component
frc = 0.5
np.random.seed(12345)
msk = util.rndmask(img.shape, frc, dtype=np.float32)
shw = msk * sh

# Load dictionary
D = util.convdicts()['G:12x12x72']

# Set up ConvBPDN options
lmbda = 1e-2
opt = cbpdn.ConvBPDN.Options({
    'Verbose': True,
    'MaxMainIter': 20,
    'HighMemSolve': True,
    'LinSolveCheck': False,
    'RelStopTol': 2e-3,
    'AuxVarObj': False,
    'rho': 1.5e0,
    'AutoRho': {
        'Enabled': False
    }
})
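# The snippet ends after the options are set. A hedged sketch of one typical
# continuation (not necessarily the original script's code): solve the masked
# inpainting problem via the mask simulation wrapper cbpdn.AddMaskSim, whose
# availability and argument order are assumed here.
ams = cbpdn.AddMaskSim(cbpdn.ConvBPDN, D, shw, msk, lmbda, opt=opt)
ams.solve()
imgr = sl + ams.reconstruct().squeeze()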
Example #17

"""
Highpass filter example image.
"""

npd = 16
fltlmbd = 10
sl, sh = util.tikhonov_filter(img, fltlmbd, npd)


"""
Load dictionary and display it.
"""

D = util.convdicts()['G:12x12x36']
plot.imview(util.tiledict(D), fgsz=(7, 7))


"""
Set :class:`.admm.cbpdn.ConvBPDNProjL1` solver options.
"""

gamma = 4.05e2
opt = cbpdn.ConvBPDNProjL1.Options({'Verbose': True, 'MaxMainIter': 250,
                    'HighMemSolve': True, 'LinSolveCheck': False,
                    'RelStopTol': 5e-3, 'AuxVarObj': True, 'rho': 3e0,
                    'AutoRho': {'Enabled': True}})
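"""
A hedged sketch (not the original script's continuation) of initialising and running the constrained-form solver configured above; the :class:`.admm.cbpdn.ConvBPDNProjL1` argument order is assumed.
"""

b = cbpdn.ConvBPDNProjL1(D, sh, gamma, opt)
X = b.solve()
imgr = sl + b.reconstruct().squeeze()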


"""
Example #18
"""
Extract blocks and center each channel of image patches, taking steps of size 2.
"""

blksz = (8, 8, 3)
stpsz = (2, 2, 1)

blocks = array.extract_blocks(imgn, blksz, stpsz)
blockmeans = np.mean(blocks, axis=(0, 1))
blocks -= blockmeans
blocks = blocks.reshape(np.product(blksz), -1)
"""
Load dictionary.
"""

D = util.convdicts()['RGB:8x8x3x64'].reshape(np.product(blksz), -1)
"""
Set solver options.
"""

lmbda = 1e-1
opt = bpdn.BPDN.Options({
    'Verbose': True,
    'MaxMainIter': 250,
    'RelStopTol': 3e-3,
    'AuxVarObj': False,
    'AutoRho': {
        'Enabled': False
    },
    'rho': 1e1 * lmbda
})
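"""
A hedged sketch (not part of the original snippet) of the remaining steps: solve the BPDN problem on the extracted blocks, add the block means back, and reassemble an image estimate. The ``array.average_blocks`` function is assumed to be the counterpart of the ``array.extract_blocks`` call used above.
"""

b = bpdn.BPDN(D, blocks, lmbda, opt)
X = b.solve()
blocksr = D.dot(X).reshape(blksz + (-1,)) + blockmeans
imgr = array.average_blocks(blocksr, imgn.shape, stpsz)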