Example #1
def setup(data, noise):
    # Corrupt the test image with the requested noise model; seeds are fixed
    # for reproducibility. Relies on the module-level ImageGeometry `ig`.
    if noise == 's&p':
        n1 = TestData.random_noise(data.as_array(), mode=noise,
                                   salt_vs_pepper=0.9, amount=0.2, seed=10)
    elif noise == 'poisson':
        scale = 5
        n1 = TestData.random_noise(data.as_array() / scale, mode=noise,
                                   seed=10) * scale
    elif noise == 'gaussian':
        n1 = TestData.random_noise(data.as_array(), mode=noise, seed=10)
    else:
        raise ValueError('Unsupported noise: {}'.format(noise))
    noisy_data = ig.allocate()
    noisy_data.fill(n1)

    # Regularisation parameter depending on the noise distribution
    if noise == 's&p':
        alpha = 0.8
    elif noise == 'poisson':
        alpha = 1
    elif noise == 'gaussian':
        alpha = .3

    # Fidelity term matched to the noise model
    if noise == 's&p':
        g = L1Norm(b=noisy_data)
    elif noise == 'poisson':
        g = KullbackLeibler(b=noisy_data)
    elif noise == 'gaussian':
        g = 0.5 * L2NormSquared(b=noisy_data)
    return noisy_data, alpha, g
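
# A hypothetical usage sketch (assumes `data` is an ImageData and `ig` its
# matching ImageGeometry, both defined earlier in the full script):
# noisy_data, alpha, g = setup(data, 'gaussian')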
Example #2

plt.colorbar()
plt.show()

# Regularisation Parameter depending on the noise distribution
if noise == 's&p':
    alpha = 0.8
elif noise == 'poisson':
    alpha = .3
elif noise == 'gaussian':
    alpha = .2

beta = 2 * alpha
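# beta weights the second regularisation term (the SymmetrizedGradient block
# assembled under method '0' below).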

# Fidelity
if noise == 's&p':
    f3 = L1Norm(b=noisy_data)
elif noise == 'poisson':
    f3 = KullbackLeibler(b=noisy_data)
elif noise == 'gaussian':
    f3 = 0.5 * L2NormSquared(b=noisy_data)

if method == '0':

    # Create operators
    op11 = Gradient(ig)
    op12 = Identity(op11.range_geometry())

    op22 = SymmetrizedGradient(op11.domain_geometry())
    op21 = ZeroOperator(ig, op22.range_geometry())

    op31 = Identity(ig, ag)
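
    # Sketch of an assumed continuation (not in the original fragment): pad
    # the third block row with a ZeroOperator and assemble the 3x2
    # BlockOperator K = [[Grad, -I], [0, SymGrad], [I, 0]] used in CIL's
    # TGV denoising demos.
    op32 = ZeroOperator(op22.domain_geometry(), ag)
    operator = BlockOperator(op11, -1 * op12,
                             op21, op22,
                             op31, op32, shape=(3, 2))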
Example #3
# Display reconstruction and criterion
ff0, axarrf0 = plt.subplots(1, numchannels)
for k in range(numchannels):
    axarrf0[k].imshow(x_FISTA.as_array()[k], vmin=0, vmax=2.5)
plt.show()

plt.figure()
plt.semilogy(FISTA_alg.objective)
plt.title('Criterion vs iterations, least squares')
plt.show()

# FISTA can also solve regularised forms by specifying a second function object
# such as 1-norm regularisation with choice of regularisation parameter lam.
# Again the regulariser is over all channels:
lam = 10
g1 = lam * L1Norm()
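# With f the least-squares data term set up earlier in the script, FISTA below
# minimises f(x) + lam*||x||_1 jointly over all channels.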

# Run FISTA for least squares plus 1-norm regularisation.
FISTA_alg1 = FISTA()
FISTA_alg1.set_up(x_init=x_init, f=f, g=g1)
FISTA_alg1.max_iteration = 2000
FISTA_alg1.run(opt['iter'])
x_FISTA1 = FISTA_alg1.get_output()

# Display reconstruction and criterion
ff1, axarrf1 = plt.subplots(1, numchannels)
for k in range(numchannels):
    axarrf1[k].imshow(x_FISTA1.as_array()[k], vmin=0, vmax=2.5)
plt.show()

plt.figure()
Example #4
plt.imshow(x_FISTA.as_array())
plt.title('FISTA Least squares reconstruction')
plt.colorbar()
plt.show()

plt.figure()
plt.semilogy(FISTA_alg.objective)
plt.title('FISTA Least squares criterion')
plt.show()

# FISTA can also solve regularised forms by specifying a second function object
# such as 1-norm regularisation with choice of regularisation parameter lam:

# Create 1-norm function object
lam = 1.0
g0 = lam * L1Norm()

# Run FISTA for least squares plus 1-norm function.
FISTA_alg1 = FISTA()
FISTA_alg1.set_up(x_init=x_init, f=f, g=g0)
FISTA_alg1.max_iteration = 2000
FISTA_alg1.run(opt['iter'])
x_FISTA1 = FISTA_alg1.get_output()

plt.figure()
plt.imshow(x_FISTA1.as_array())
plt.title('FISTA LS+L1Norm reconstruction')
plt.colorbar()
plt.show()

plt.figure()
Example #5
ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N)
ag = ig

n1 = TestData.random_noise(data.as_array(),
                           mode='s&p',
                           salt_vs_pepper=0.9,
                           amount=0.2)
noisy_data = ImageData(n1)

# Regularisation Parameter
alpha = 5

###############################################################################
# Setup and run the FISTA algorithm
operator = Gradient(ig)
fidelity = L1Norm(b=noisy_data)
regulariser = FunctionOperatorComposition(alpha * L2NormSquared(), operator)
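# (FunctionOperatorComposition represents the smooth term as f(Ax), exposing
# the gradient A^T grad_f(Ax) that FISTA needs.)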

x_init = ig.allocate()
opt = {'memopt': True}
fista = FISTA(x_init=x_init, f=regulariser, g=fidelity, opt=opt)
fista.max_iteration = 2000
fista.update_objective_interval = 50
fista.run(2000, verbose=False)
###############################################################################

###############################################################################
# Setup and run the PDHG algorithm
op1 = Gradient(ig)
op2 = Identity(ig, ag)
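
# Sketch of an assumed continuation (not part of the original fragment),
# mirroring the pattern of CIL's PDHG denoising demos: TV regularisation plus
# the L1 fidelity defined above, solved with PDHG.
operator = BlockOperator(op1, op2, shape=(2, 1))
f = BlockFunction(alpha * MixedL21Norm(), fidelity)
g = ZeroFunction()

normK = operator.norm()
sigma = 1
tau = 1 / (sigma * normK ** 2)

pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
pdhg.max_iteration = 2000
pdhg.run(2000, verbose=False)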
Example #6
    # NB: the leading 's' in the name keeps unittest from collecting this test.
    def stest_FISTA_Norm1_cvx(self):
        if not cvx_not_installable:
            try:
                opt = {'memopt': True}
                # Problem data.
                m = 30
                n = 20
                np.random.seed(1)
                Amat = np.random.randn(m, n)
                A = LinearOperatorMatrix(Amat)
                bmat = np.random.randn(m)
                #bmat.shape = (bmat.shape[0], 1)

                # A = Identity()
                # Change n to equal to m.
                vgb = VectorGeometry(m)
                vgx = VectorGeometry(n)
                b = vgb.allocate()
                b.fill(bmat)
                #b = DataContainer(bmat)

                # Regularization parameter
                lam = 10
                # Create object instances with the test data A and b.
                f = LeastSquares(A, b, c=0.5)
                g0 = ZeroFunction()

                # Initial guess
                #x_init = DataContainer(np.zeros((n, 1)))
                x_init = vgx.allocate()

                # Create 1-norm object instance
                g1 = lam * L1Norm()

                g1(x_init)
                g1.prox(x_init, 0.02)

                # Combine with least squares and solve using generic FISTA implementation
                #x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1, opt=opt)
                fa = FISTA(x_init=x_init, f=f, g=g1)
                fa.max_iteration = 10
                fa.run(10)
                

                # Print for comparison
                print("FISTA least squares plus 1-norm solution and objective value:")
                print(fa.get_output())
                print(fa.get_last_objective())

                # Compare to CVXPY

                # Construct the problem.
                x1 = Variable(n)
                # bmat is 1-D (the reshape above is commented out), so compare
                # against bmat directly rather than bmat.T[0].
                objective1 = Minimize(
                    0.5*sum_squares(Amat*x1 - bmat) + lam*norm(x1, 1))
                prob1 = Problem(objective1)

                # The optimal objective is returned by prob.solve().
                result1 = prob1.solve(verbose=False, solver=SCS, eps=1e-9)

                # The optimal solution for x is stored in x.value and optimal objective value
                # is in result as well as in objective.value
                print("CVXPY least squares plus 1-norm solution and objective value:")
                print(x1.value)
                print(objective1.value)

                self.assertNumpyArrayAlmostEqual(
                    numpy.squeeze(fa.get_output().as_array()), x1.value, 6)
            except SolverError as se:
                print(str(se))
                self.assertTrue(True)
        else:
            self.assertTrue(cvx_not_installable)
Example #7
    numpy.testing.assert_array_almost_equal(res_no_out[0][1].as_array(),
                                            res_out[0][1].as_array(), decimal=4)

    numpy.testing.assert_array_almost_equal(res_no_out[1].as_array(),
                                            res_out[1].as_array(), decimal=4)

    ig1 = ImageGeometry(M, N)
    ig2 = ImageGeometry(2 * M, N)
    ig3 = ImageGeometry(3 * M, 4 * N)

    bg = BlockGeometry(ig1, ig2, ig3)

    z = bg.allocate('random_int')

    f1 = L1Norm()
    f2 = 5 * L2NormSquared()

    f = BlockFunction(f2, f2, f2 + 5, f1 - 4, f1)

    res = f.convex_conjugate(z)
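    # (BlockFunction.convex_conjugate sums the component functions' convex
    # conjugates, each evaluated on the corresponding block of z.)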
