Example #1
    def test_FBPD_Norm1_cvx(self):
        if not cvx_not_installable:
            opt = {'memopt': True}
            # Problem data.
            m = 30
            n = 20
            np.random.seed(1)
            Amat = np.random.randn(m, n)
            A = LinearOperatorMatrix(Amat)
            bmat = np.random.randn(m)
            bmat.shape = (bmat.shape[0], 1)

            # A = Identity()
            # Change n to equal m.

            b = DataContainer(bmat)

            # Regularization parameter
            lam = 10
            opt = {'memopt': True}
            # Create object instances with the test data A and b.
            f = Norm2sq(A, b, c=0.5, memopt=True)
            g0 = ZeroFun()

            # Initial guess
            x_init = DataContainer(np.zeros((n, 1)))

            # Create 1-norm object instance
            g1 = Norm1(lam)

            # Compare to CVXPY

            # Construct the problem.
            x1 = Variable(n)
            objective1 = Minimize(0.5 * sum_squares(Amat * x1 - bmat.T[0]) +
                                  lam * norm(x1, 1))
            prob1 = Problem(objective1)

            # The optimal objective is returned by prob.solve().
            result1 = prob1.solve(verbose=False, solver=SCS, eps=1e-9)

            # The optimal solution for x is stored in x.value and optimal objective value
            # is in result as well as in objective.value
            print(
                "CVXPY least squares plus 1-norm solution and objective value:"
            )
            print(x1.value)
            print(objective1.value)

            # Now try another algorithm FBPD for same problem:
            x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(
                x_init, Identity(), None, f, g1)
            print(x_fbpd1)
            print(criterfbpd1[-1])

            self.assertNumpyArrayAlmostEqual(numpy.squeeze(x_fbpd1.array),
                                             x1.value, 6)
        else:
            self.assertTrue(cvx_not_installable)
Example #2
    def test_FISTA_cvx(self):
        if not cvx_not_installable:
            # Problem data.
            m = 30
            n = 20
            np.random.seed(1)
            Amat = np.random.randn(m, n)
            A = LinearOperatorMatrix(Amat)
            bmat = np.random.randn(m)
            bmat.shape = (bmat.shape[0], 1)

            # A = Identity()
            # Change n to equal m.

            b = DataContainer(bmat)

            # Regularization parameter
            lam = 10
            opt = {'memopt': True}
            # Create object instances with the test data A and b.
            f = Norm2sq(A, b, c=0.5, memopt=True)
            g0 = ZeroFun()

            # Initial guess
            x_init = DataContainer(np.zeros((n, 1)))

            f.grad(x_init)

            # Run FISTA for least squares plus zero function.
            x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0, opt=opt)

            # Print solution and final objective/criterion value for comparison
            print(
                "FISTA least squares plus zero function solution and objective value:"
            )
            print(x_fista0.array)
            print(criter0[-1])

            # Compare to CVXPY

            # Construct the problem.
            x0 = Variable(n)
            objective0 = Minimize(0.5 * sum_squares(Amat * x0 - bmat.T[0]))
            prob0 = Problem(objective0)

            # The optimal objective is returned by prob.solve().
            result0 = prob0.solve(verbose=False, solver=SCS, eps=1e-9)

            # The optimal solution for x is stored in x.value and optimal objective value
            # is in result as well as in objective.value
            print(
                "CVXPY least squares plus zero function solution and objective value:"
            )
            print(x0.value)
            print(objective0.value)
            self.assertNumpyArrayAlmostEqual(numpy.squeeze(x_fista0.array),
                                             x0.value, 6)
        else:
            self.assertTrue(cvx_not_installable)
Example #3
    def test_GradientDescent(self):
        print("Test GradientDescent")
        ig = ImageGeometry(12, 13, 14)
        x_init = ImageData(geometry=ig)
        b = x_init.copy()
        # fill with random numbers
        b.fill(numpy.random.random(x_init.shape))

        identity = TomoIdentity(geometry=ig)

        norm2sq = Norm2sq(identity, b)

        alg = GradientDescent(x_init=x_init,
                              objective_function=norm2sq,
                              rate=0.3)
        alg.max_iteration = 20
        alg.run(20, verbose=True)
        self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())
Example #4
    def test_FISTA(self):
        print("Test FISTA")
        ig = ImageGeometry(127, 139, 149)
        x_init = ImageData(geometry=ig)
        b = x_init.copy()
        # fill with random numbers
        b.fill(numpy.random.random(x_init.shape))
        x_init = ImageData(geometry=ig)
        x_init.fill(numpy.random.random(x_init.shape))

        identity = TomoIdentity(geometry=ig)

        norm2sq = Norm2sq(identity, b)
        opt = {'tol': 1e-4, 'memopt': False}
        alg = FISTA(x_init=x_init, f=norm2sq, g=None, opt=opt)
        alg.max_iteration = 2
        alg.run(20, verbose=True)
        self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())
        alg.run(20, verbose=True)
        self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())
Example #5
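This snippet picks up partway through a script, so the imports and the problem size m, n are defined earlier. A minimal sketch of the assumed setup, using the same problem size as the tests above; the module paths follow the older CCPi-Framework layout these snippets appear to use, so adjust them to your installed version:

import numpy as np
from ccpi.framework import DataContainer                 # assumed import path
from ccpi.optimisation.ops import LinearOperatorMatrix   # assumed import path
from ccpi.optimisation.funcs import Norm2sq, ZeroFun     # assumed import path
from ccpi.optimisation.algs import FISTA                 # assumed import path

# Problem size, as in the related tests above (assumed here).
m = 30
n = 20
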
np.random.seed(1)
Amat = np.random.randn(m, n)
A = LinearOperatorMatrix(Amat)
bmat = np.random.randn(m)
bmat.shape = (bmat.shape[0], 1)

# A = Identity()
# Change n to equal m.

b = DataContainer(bmat)

# Regularization parameter
lam = 10
opt = {'memopt': True}
# Create object instances with the test data A and b.
f = Norm2sq(A, b, c=0.5, memopt=True)
g0 = ZeroFun()

# Initial guess
x_init = DataContainer(np.zeros((n, 1)))

f.grad(x_init)

# Run FISTA for least squares plus zero function.
x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0, opt=opt)

# Print solution and final objective/criterion value for comparison
print("FISTA least squares plus zero function solution and objective value:")
print(x_fista0.array)
print(criter0[-1])
Example #6
plt.imshow(x_CGLS.array)
plt.title('CGLS')
plt.show()

plt.semilogy(criter_CGLS)
plt.title('CGLS criterion')
plt.show()

# CGLS solves the simple least-squares problem. The same problem can be solved
# by FISTA by setting up explicitly a least squares function object and using
# no regularisation:

# Create least squares object instance with projector, test data and a constant
# coefficient of 0.5:
f = Norm2sq(Aop, b, c=0.5)

# Run FISTA for least squares without regularization
x_fista0, it0, timing0, criter0 = FISTA(x_init, f, None, opt)

plt.imshow(x_fista0.array)
plt.title('FISTA Least squares')
plt.show()

plt.semilogy(criter0)
plt.title('FISTA Least squares criterion')
plt.show()

# FISTA can also solve regularised forms by specifying a second function object
# such as 1-norm regularisation with choice of regularisation parameter lam:
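
The snippet stops at this comment. A minimal sketch (not part of the original script) of how the regularised run could continue, reusing x_init, f and opt from above and assuming Norm1 is imported as in the tests earlier on this page; the value of lam is illustrative:

# Hedged sketch: 1-norm regularised FISTA, continuing the script above.
lam = 0.1          # regularisation parameter (illustrative value)
g1 = Norm1(lam)    # 1-norm regulariser

x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1, opt)

plt.imshow(x_fista1.array)
plt.title('FISTA Least squares plus 1-norm regularisation')
plt.show()

plt.semilogy(criter1)
plt.title('FISTA Least squares plus 1-norm regularisation criterion')
plt.show()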
Example #7
ig = ImageGeometry(n, n)
ag = AcquisitionGeometry('parallel', '2D', angles, n, 1)
Aop = AstraProjectorSimple(ig, ag, 'gpu')

# loop to reconstruct energy channels
REC_chan = np.zeros((totChannels, n, n), 'float32')
for i in range(0, totChannels, 1):
    sino_channel = sino_all_channels[:, i, :]  # extract a sinogram for i-th channel

    print("Initial guess")
    x_init = ImageData(geometry=ig)

    # Create least squares object instance with projector and data.
    print("Create least squares object instance with projector and data.")
    f = Norm2sq(Aop, DataContainer(sino_channel), c=0.5)

    print("Run FISTA-TV for least squares")
    lamtv = 5
    opt = {'tol': 1e-4, 'iter': 200}
    g_fgp = FGP_TV(lambdaReg=lamtv,
                   iterationsTV=50,
                   tolerance=1e-6,
                   methodTV=0,
                   nonnegativity=0,
                   printing=0,
                   device='gpu')

    x_fista_fgp, it1, timing1, criter_fgp = FISTA(x_init, f, g_fgp, opt)
    REC_chan[i, :, :] = x_fista_fgp.array
    """
Example #8
Cop = AstraProjector3DSimple(ig, ag)

# Test backprojection and projection
z1 = Cop.adjoint(padded_data2)
z2 = Cop.direct(z1)

plt.imshow(z1.subset(vertical=68).array)
plt.show()

# Set initial guess
print("Initial guess")
x_init = ImageData(geometry=ig)

# Create least squares object instance with projector and data.
print("Create least squares object instance with projector and data.")
f = Norm2sq(Cop, padded_data2, c=0.5)

# Run FISTA reconstruction for least squares without regularization
print("Run FISTA for least squares")
opt = {'tol': 1e-4, 'iter': 100}
x_fista0, it0, timing0, criter0 = FISTA(x_init, f, None, opt=opt)

plt.imshow(x_fista0.subset(horizontal_x=80).array)
plt.title('FISTA LS')
plt.colorbar()
plt.show()

# Set up 1-norm function for FISTA least squares plus 1-norm regularisation
print("Run FISTA for least squares plus 1-norm regularisation")
lam = 0.1
g0 = Norm1(lam)
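
The call that uses g0 is cut off at this point. A minimal sketch (not from the original script) of how the regularised run would continue, reusing x_init, f and opt and following the same display pattern as the unregularised run above:

# Hedged sketch: FISTA for least squares plus 1-norm regularisation.
x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g0, opt=opt)

plt.imshow(x_fista1.subset(horizontal_x=80).array)
plt.title('FISTA LS+1')
plt.colorbar()
plt.show()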
Example #9
    def test_FISTA_denoise_cvx(self):
        if not cvx_not_installable:
            opt = {'memopt': True}
            N = 64
            ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N)
            Phantom = ImageData(geometry=ig)

            x = Phantom.as_array()

            x[int(round(N / 4)):int(round(3 * N / 4)),
              int(round(N / 4)):int(round(3 * N / 4))] = 0.5
            x[int(round(N / 8)):int(round(7 * N / 8)),
              int(round(3 * N / 8)):int(round(5 * N / 8))] = 1

            # Identity operator for denoising
            I = TomoIdentity(ig)

            # Data and add noise
            y = I.direct(Phantom)
            y.array = y.array + 0.1 * np.random.randn(N, N)

            # Data fidelity term
            f_denoise = Norm2sq(I, y, c=0.5, memopt=True)

            # 1-norm regulariser
            lam1_denoise = 1.0
            g1_denoise = Norm1(lam1_denoise)

            # Initial guess
            x_init_denoise = ImageData(np.zeros((N, N)))

            # Combine with least squares and solve using generic FISTA implementation
            x_fista1_denoise, it1_denoise, timing1_denoise, \
                criter1_denoise = \
                FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)

            print(x_fista1_denoise)
            print(criter1_denoise[-1])

            # Now denoise LS + 1-norm with FBPD
            x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise,\
                criterfbpd1_denoise = \
                FBPD(x_init_denoise, I, None, f_denoise, g1_denoise)
            print(x_fbpd1_denoise)
            print(criterfbpd1_denoise[-1])

            # Compare to CVXPY

            # Construct the problem.
            x1_denoise = Variable(N**2, 1)
            objective1_denoise = Minimize(
                0.5 * sum_squares(x1_denoise - y.array.flatten()) +
                lam1_denoise * norm(x1_denoise, 1))
            prob1_denoise = Problem(objective1_denoise)

            # The optimal objective is returned by prob.solve().
            result1_denoise = prob1_denoise.solve(verbose=False,
                                                  solver=SCS,
                                                  eps=1e-12)

            # The optimal solution for x is stored in x.value and optimal objective value
            # is in result as well as in objective.value
            print(
                "CVXPY least squares plus 1-norm solution and objective value:"
            )
            print(x1_denoise.value)
            print(objective1_denoise.value)
            self.assertNumpyArrayAlmostEqual(x_fista1_denoise.array.flatten(),
                                             x1_denoise.value, 5)

            self.assertNumpyArrayAlmostEqual(x_fbpd1_denoise.array.flatten(),
                                             x1_denoise.value, 5)
            x1_cvx = x1_denoise.value
            x1_cvx.shape = (N, N)

            # Now TV with FBPD
            lam_tv = 0.1
            gtv = TV2D(lam_tv)
            gtv(gtv.op.direct(x_init_denoise))

            opt_tv = {'tol': 1e-4, 'iter': 10000}

            x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise,\
                criterfbpdtv_denoise = \
                FBPD(x_init_denoise, gtv.op, None, f_denoise, gtv, opt=opt_tv)
            print(x_fbpdtv_denoise)
            print(criterfbpdtv_denoise[-1])

            # Compare to CVXPY

            # Construct the problem.
            xtv_denoise = Variable((N, N))
            objectivetv_denoise = Minimize(0.5 *
                                           sum_squares(xtv_denoise - y.array) +
                                           lam_tv * tv(xtv_denoise))
            probtv_denoise = Problem(objectivetv_denoise)

            # The optimal objective is returned by prob.solve().
            resulttv_denoise = probtv_denoise.solve(verbose=False,
                                                    solver=SCS,
                                                    eps=1e-12)

            # The optimal solution for x is stored in x.value and optimal objective value
            # is in result as well as in objective.value
            print(
                "CVXPY least squares plus 1-norm solution and objective value:"
            )
            print(xtv_denoise.value)
            print(objectivetv_denoise.value)

            self.assertNumpyArrayAlmostEqual(x_fbpdtv_denoise.as_array(),
                                             xtv_denoise.value, 1)

        else:
            self.assertTrue(cvx_not_installable)
Example #10
    def test_FISTA_Norm1_cvx(self):
        if not cvx_not_installable:
            try:
                opt = {'memopt': True}
                # Problem data.
                m = 30
                n = 20
                np.random.seed(1)
                Amat = np.random.randn(m, n)
                A = LinearOperatorMatrix(Amat)
                bmat = np.random.randn(m)
                bmat.shape = (bmat.shape[0], 1)

                # A = Identity()
                # Change n to equal m.

                b = DataContainer(bmat)

                # Regularization parameter
                lam = 10
                opt = {'memopt': True}
                # Create object instances with the test data A and b.
                f = Norm2sq(A, b, c=0.5, memopt=True)
                g0 = ZeroFun()

                # Initial guess
                x_init = DataContainer(np.zeros((n, 1)))

                # Create 1-norm object instance
                g1 = Norm1(lam)

                g1(x_init)
                g1.prox(x_init, 0.02)

                # Combine with least squares and solve using generic FISTA implementation
                x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1, opt=opt)

                # Print for comparison
                print(
                    "FISTA least squares plus 1-norm solution and objective value:"
                )
                print(x_fista1.as_array().squeeze())
                print(criter1[-1])

                # Compare to CVXPY

                # Construct the problem.
                x1 = Variable(n)
                objective1 = Minimize(0.5 *
                                      sum_squares(Amat * x1 - bmat.T[0]) +
                                      lam * norm(x1, 1))
                prob1 = Problem(objective1)

                # The optimal objective is returned by prob.solve().
                result1 = prob1.solve(verbose=False, solver=SCS, eps=1e-9)

                # The optimal solution for x is stored in x.value and optimal objective value
                # is in result as well as in objective.value
                print(
                    "CVXPY least squares plus 1-norm solution and objective value:"
                )
                print(x1.value)
                print(objective1.value)

                self.assertNumpyArrayAlmostEqual(numpy.squeeze(x_fista1.array),
                                                 x1.value, 6)
            except SolverError as se:
                print(str(se))
                self.assertTrue(True)
        else:
            self.assertTrue(cvx_not_installable)
Example #11
    resulttv_denoise = probtv_denoise.solve(verbose=False,
                                            solver=SCS,
                                            eps=1e-12)

    # The optimal solution for x is stored in x.value and optimal objective
    # value is in result as well as in objective.value

    # Display
    plt.figure()
    plt.imshow(xtv_denoise.value)
    plt.title('CVX TV with objective equal to {:.2f}'.format(
        objectivetv_denoise.value))
    plt.show()
    print(objectivetv_denoise.value)

#%%
# Data fidelity term
f_denoise = Norm2sq(I, y, c=0.5)

#%%

#%% Then run FBPD algorithm for TV  denoising

# Initial guess
x_init_denoise = ImageData(np.zeros((N, N)))

# Set up TV function
gtv = TV2D(lam_tv)

# Evaluate TV of noisy image.
gtv(gtv.op.direct(y))

# Specify FBPD options and run FBPD.
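
The snippet ends at this comment. A minimal sketch (not part of the original) of the step it announces, using the same FBPD call pattern as the TV test earlier on this page; the option values are illustrative:

# Hedged sketch: FBPD options and TV-regularised denoising run.
opt_tv = {'tol': 1e-4, 'iter': 10000}

x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, criterfbpdtv_denoise = \
    FBPD(x_init_denoise, gtv.op, None, f_denoise, gtv, opt=opt_tv)

plt.figure()
plt.imshow(x_fbpdtv_denoise.as_array())
plt.title('FBPD TV denoising')
plt.show()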
Example #12
plt.imshow(x_CGLS.subset(vertical=0).array)
plt.title('CGLS')
plt.show()

plt.semilogy(criter_CGLS)
plt.title('CGLS criterion')
plt.show()

# CGLS solves the simple least-squares problem. The same problem can be solved
# by FISTA by setting up explicitly a least squares function object and using
# no regularisation:

# Create least squares object instance with projector, test data and a constant
# coefficient of 0.5:
f = Norm2sq(Cop, b, c=0.5)

# Run FISTA for least squares without regularization
x_fista0, it0, timing0, criter0 = FISTA(x_init, f, None, opt=opt)

plt.imshow(x_fista0.subset(vertical=0).array)
plt.title('FISTA Least squares')
plt.show()

plt.semilogy(criter0)
plt.title('FISTA Least squares criterion')
plt.show()

# FISTA can also solve regularised forms by specifying a second function object
# such as 1-norm regularisation with choice of regularisation parameter lam:
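
As in the 2D example above, the regularised run itself is not shown here. A short sketch (not from the original) of how it could look for this 3D problem, reusing x_init, f and opt; lam and the displayed slice are illustrative:

# Hedged sketch: 1-norm regularised FISTA for the 3D problem.
lam = 0.1
g1 = Norm1(lam)

x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1, opt=opt)

plt.imshow(x_fista1.subset(vertical=0).array)
plt.title('FISTA Least squares plus 1-norm')
plt.show()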
Example #13
# Run CGLS algorithm and display one channel.
x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aall, data2d,
                                                 opt_CGLS)

plt.imshow(x_CGLS.subset(channel=100).array)
plt.title('CGLS')
plt.show()

plt.semilogy(criter_CGLS)
plt.title('CGLS Criterion vs iterations')
plt.show()

# Create least squares object instance with projector, test data and a constant
# coefficient of 0.5. Note it is least squares over all channels.
f = Norm2sq(Aall, data2d, c=0.5)

# Options for FISTA algorithm.
opt = {'tol': 1e-4, 'iter': 100}

# Run FISTA for least squares without regularization and display one channel
# reconstruction as image.
x_fista0, it0, timing0, criter0 = FISTA(x_init, f, None, opt)

plt.imshow(x_fista0.subset(channel=100).array)
plt.title('FISTA LS')
plt.show()

plt.semilogy(criter0)
plt.title('FISTA LS Criterion vs iterations')
plt.show()