Code example #1
    def test_FISTA_Denoising(self):
        print ("FISTA Denoising Poisson Noise Tikhonov")
        # adapted from demo FISTA_Tikhonov_Poisson_Denoising.py in CIL-Demos repository
        #loader = TestData(data_dir=os.path.join(sys.prefix, 'share','ccpi'))
        loader = TestData()
        data = loader.load(TestData.SHAPES)
        ig = data.geometry
        ag = ig
        N=300
        # Create Noisy data with Poisson noise
        scale = 5
        n1 = TestData.random_noise( data.as_array()/scale, mode = 'poisson', seed = 10)*scale
        noisy_data = ImageData(n1)

        # Regularisation Parameter
        alpha = 10

        # Setup and run the FISTA algorithm
        operator = Gradient(ig)
        fid = KullbackLeibler(b=noisy_data)
        reg = FunctionOperatorComposition(alpha * L2NormSquared(), operator)

        x_init = ig.allocate()
        fista = FISTA(x_init=x_init , f=reg, g=fid)
        fista.max_iteration = 3000
        fista.update_objective_interval = 500
        fista.run(verbose=True)
        rmse = (fista.get_output() - data).norm() / data.as_array().size
        print ("RMSE", rmse)
        self.assertLess(rmse, 4.2e-4)
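The Poisson noise above is generated by scaling the image down, drawing Poisson counts with that mean, and scaling back up. A rough NumPy-only analogue of the idea (TestData.random_noise has its own internal rescaling, so this shows the concept rather than the exact computation):

import numpy as np

rng = np.random.default_rng(10)
clean = np.full((4, 4), 20.0)                               # illustrative intensities
scale = 5
noisy = rng.poisson(clean / scale).astype(float) * scale    # treat clean/scale as the Poisson mean
print(noisy)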
Code example #2
    def test_Function(self):

        N = 3
        ig = ImageGeometry(N, N)
        ag = ig
        op1 = Gradient(ig)
        op2 = Identity(ig, ag)

        # Form Composite Operator
        operator = BlockOperator(op1, op2, shape=(2, 1))

        # Create functions
        noisy_data = ag.allocate(ImageGeometry.RANDOM_INT)

        d = ag.allocate(ImageGeometry.RANDOM_INT)
        alpha = 0.5
        # scaled function
        g = alpha * L2NormSquared(b=noisy_data)

        # Compare call of g
        a2 = alpha * (d - noisy_data).power(2).sum()
        #print(a2, g(d))
        self.assertEqual(a2, g(d))

        # Compare convex conjugate of g
        a3 = 0.5 * d.squared_norm() + d.dot(noisy_data)
        self.assertEqual(a3, g.convex_conjugate(d))
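The identity exercised above is the standard conjugate formula (alpha*||. - b||^2)*(d) = ||d||^2/(4*alpha) + <d, b>, which reduces to 0.5*||d||^2 + <d, b> for alpha = 0.5. A minimal NumPy-only sketch of the same algebra, independent of the framework (array contents are illustrative):

import numpy as np

alpha = 0.5
rng = np.random.default_rng(0)
b = rng.integers(0, 10, size=(3, 3)).astype(float)     # plays the role of noisy_data
d = rng.integers(0, 10, size=(3, 3)).astype(float)

g_d = alpha * np.sum((d - b) ** 2)                     # g(d) = alpha * ||d - b||^2
g_conj = np.sum(d ** 2) / (4 * alpha) + np.sum(d * b)  # g*(d) = ||d||^2 / (4*alpha) + <d, b>

print(g_d, g_conj)
assert np.isclose(g_conj, 0.5 * np.sum(d ** 2) + np.sum(d * b))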
Code example #3
 def setup(data, noise):
     if noise == 's&p':
         n1 = TestData.random_noise(data.as_array(), mode = noise, salt_vs_pepper = 0.9, amount=0.2, seed=10)
     elif noise == 'poisson':
         scale = 5
         n1 = TestData.random_noise( data.as_array()/scale, mode = noise, seed = 10)*scale
     elif noise == 'gaussian':
         n1 = TestData.random_noise(data.as_array(), mode = noise, seed = 10)
     else:
         raise ValueError('Unsupported noise: {}'.format(noise))
     noisy_data = ig.allocate()
     noisy_data.fill(n1)
 
     # Regularisation Parameter depending on the noise distribution
     if noise == 's&p':
         alpha = 0.8
     elif noise == 'poisson':
         alpha = 1
     elif noise == 'gaussian':
         alpha = .3

     # fidelity
     if noise == 's&p':
         g = L1Norm(b=noisy_data)
     elif noise == 'poisson':
         g = KullbackLeibler(b=noisy_data)
     elif noise == 'gaussian':
         g = 0.5 * L2NormSquared(b=noisy_data)
     return noisy_data, alpha, g
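A hypothetical call site for the helper above; data is the test image and ig its geometry, both assumed to exist at module level as in the original demo (setup reads ig from the enclosing scope):

# Illustrative usage only; `data` and `ig` come from the surrounding demo script.
noisy_data, alpha, g = setup(data, 'gaussian')
print(alpha)   # 0.3 for Gaussian noise, per the branches above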
Code example #4
    def test_FISTA(self):
        print ("Test FISTA")
        ig = ImageGeometry(127,139,149)
        x_init = ig.allocate()
        b = x_init.copy()
        # fill with random numbers
        b.fill(numpy.random.random(x_init.shape))
        x_init = ig.allocate(ImageGeometry.RANDOM)
        identity = Identity(ig)
        
        # it seems FISTA does not work with Norm2Sq, so FunctionOperatorComposition is used instead
        # norm2sq = Norm2Sq(identity, b)
        # norm2sq.L = 2 * norm2sq.c * identity.norm()**2
        norm2sq = FunctionOperatorComposition(L2NormSquared(b=b), identity)
        opt = {'tol': 1e-4, 'memopt':False}
        print ("initial objective", norm2sq(x_init))
        alg = FISTA(x_init=x_init, f=norm2sq, g=ZeroFunction())
        alg.max_iteration = 2
        alg.run(20, verbose=True)
        self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())

        alg = FISTA(x_init=x_init, f=norm2sq, g=ZeroFunction(), max_iteration=2, update_objective_interval=2)
        
        self.assertTrue(alg.max_iteration == 2)
        self.assertTrue(alg.update_objective_interval==2)

        alg.run(20, verbose=True)
        self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())
Code example #5
    def test_Norm2sq_as_FunctionOperatorComposition(self):

        print('Test for FunctionOperatorComposition')

        M, N = 50, 50
        ig = ImageGeometry(voxel_num_x=M, voxel_num_y=N)
        b = ig.allocate('random_int')

        print('Check call with Identity operator... OK\n')
        operator = 3 * Identity(ig)

        u = ig.allocate('random_int', seed=50)

        func1 = FunctionOperatorComposition(0.5 * L2NormSquared(b=b), operator)
        func2 = LeastSquares(operator, b, 0.5)

        self.assertNumpyArrayAlmostEqual(func1(u), func2(u))

        print('Check gradient with Identity operator... OK\n')

        tmp1 = ig.allocate()
        tmp2 = ig.allocate()
        res_gradient1 = func1.gradient(u)
        res_gradient2 = func2.gradient(u)
        func1.gradient(u, out=tmp1)
        func2.gradient(u, out=tmp2)

        self.assertNumpyArrayAlmostEqual(tmp1.as_array(), tmp2.as_array())
        self.assertNumpyArrayAlmostEqual(res_gradient1.as_array(),
                                         res_gradient2.as_array())

        print('Check call with LinearOperatorMatrix... OK\n')
        mat = np.random.randn(M, N)
        operator = LinearOperatorMatrix(mat)
        vg = VectorGeometry(N)
        b = vg.allocate('random_int')
        u = vg.allocate('random_int')

        func1 = FunctionOperatorComposition(0.5 * L2NormSquared(b=b), operator)
        func2 = LeastSquares(operator, b, 0.5)

        self.assertNumpyArrayAlmostEqual(func1(u), func2(u))

        self.assertNumpyArrayAlmostEqual(func1.L, func2.L)
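Both objects above evaluate F(u) = 0.5*||A u - b||^2 with gradient A^T (A u - b) and gradient Lipschitz constant ||A||^2 (i.e. 2*c*||A||^2 with c = 0.5). A NumPy-only sketch of that algebra, including a finite-difference check of the gradient (matrix and vectors are illustrative):

import numpy as np

rng = np.random.default_rng(1)
A = rng.standard_normal((50, 50))
b = rng.standard_normal(50)
u = rng.standard_normal(50)

f_val = 0.5 * np.sum((A @ u - b) ** 2)   # what func1(u) and func2(u) both compute
grad = A.T @ (A @ u - b)                 # gradient of the composition
L = np.linalg.norm(A, 2) ** 2            # Lipschitz constant of the gradient (2 * 0.5 * ||A||^2)
print(f_val, L)

# finite-difference check of the gradient in a random direction
eps, v = 1e-6, rng.standard_normal(50)
fd = (0.5 * np.sum((A @ (u + eps * v) - b) ** 2) - f_val) / eps
assert np.isclose(fd, grad @ v, rtol=1e-3)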
Code example #6
operator = BlockOperator(op1, op2, shape=(2, 1))

# Compute operator Norm
normK = operator.norm()

# Create functions
if noise == 'poisson':
    alpha = 20
    f2 = KullbackLeibler(noisy_data)
    g = IndicatorBox(lower=0)
    sigma = 1
    tau = 1 / (sigma * normK**2)

elif noise == 'gaussian':
    alpha = 200
    f2 = 0.5 * L2NormSquared(b=noisy_data)
    g = ZeroFunction()
    sigma = 10
    tau = 1 / (sigma * normK**2)

f1 = alpha * L2NormSquared()
f = BlockFunction(f1, f2)

# Setup and run the PDHG algorithm
pdhg = PDHG(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
pdhg.max_iteration = 1000
pdhg.update_objective_interval = 200
pdhg.run(1000)

plt.figure(figsize=(15, 15))
plt.subplot(3, 1, 1)
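The block operator K above stacks two operators (in the related demos, the Gradient and the Identity), and the step sizes are chosen on the boundary of PDHG's convergence condition sigma * tau * ||K||^2 <= 1. A quick check of that choice (the normK value here is illustrative; the demo computes operator.norm()):

import numpy as np

normK = 8.5                       # illustrative; the demo uses operator.norm()
sigma = 10
tau = 1 / (sigma * normK ** 2)
assert np.isclose(sigma * tau * normK ** 2, 1.0)   # sigma * tau * ||K||^2 <= 1 holds with equality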
Code example #7
fo, axarro = plt.subplots(1, numchannels)
for k in range(3):
    axarro[k].imshow(z.as_array()[k], vmin=0, vmax=3500)
plt.show()

# Using the test data b, different reconstruction methods can now be set up as
# demonstrated in the rest of this file. In general all methods need an initial
# guess and some algorithm options to be set:
x_init = ig.allocate(0.0)
opt = {'tol': 1e-4, 'iter': 200}

# Create least squares object instance with projector, test data and a constant
# coefficient of 0.5. Note it is least squares over all channels:
#f = Norm2Sq(Aop,b,c=0.5)
f = FunctionOperatorComposition(L2NormSquared(b=b), Aop)
# Run FISTA for least squares without regularization
FISTA_alg = FISTA()
FISTA_alg.set_up(x_init=x_init, f=f, g=ZeroFunction())
FISTA_alg.max_iteration = 2000
FISTA_alg.run(opt['iter'])
x_FISTA = FISTA_alg.get_output()

# Display reconstruction and criterion
ff0, axarrf0 = plt.subplots(1, numchannels)
for k in numpy.arange(3):
    axarrf0[k].imshow(x_FISTA.as_array()[k], vmin=0, vmax=2.5)
plt.show()

plt.figure()
plt.semilogy(FISTA_alg.objective)
Code example #8
    def test_L2NormSquaredOut(self):
        # TESTS for L2 and scalar * L2

        M, N, K = 2, 3, 5
        ig = ImageGeometry(voxel_num_x=M, voxel_num_y=N, voxel_num_z=K)
        u = ig.allocate(ImageGeometry.RANDOM_INT)
        b = ig.allocate(ImageGeometry.RANDOM_INT)

        # check grad/call no data
        f = L2NormSquared()
        a1 = f.gradient(u)
        a2 = a1 * 0.
        f.gradient(u, out=a2)
        numpy.testing.assert_array_almost_equal(a1.as_array(),
                                                a2.as_array(),
                                                decimal=4)
        #numpy.testing.assert_equal(f(u), u.squared_norm())

        # check grad/call with data
        f1 = L2NormSquared(b=b)
        b1 = f1.gradient(u)
        b2 = b1 * 0.
        f1.gradient(u, out=b2)

        numpy.testing.assert_array_almost_equal(b1.as_array(),
                                                b2.as_array(),
                                                decimal=4)
        #numpy.testing.assert_equal(f1(u), (u-b).squared_norm())

        # check proximal no data
        tau = 5
        e1 = f.proximal(u, tau)
        e2 = e1 * 0.
        f.proximal(u, tau, out=e2)
        numpy.testing.assert_array_almost_equal(e1.as_array(),
                                                e2.as_array(),
                                                decimal=4)

        # check proximal with data
        tau = 5
        h1 = f1.proximal(u, tau)
        h2 = h1 * 0.
        f1.proximal(u, tau, out=h2)
        numpy.testing.assert_array_almost_equal(h1.as_array(),
                                                h2.as_array(),
                                                decimal=4)

        # check proximal conjugate no data
        tau = 0.2
        k1 = f.proximal_conjugate(u, tau)
        k2 = k1 * 0.
        f.proximal_conjugate(u, tau, out=k2)

        numpy.testing.assert_array_almost_equal(k1.as_array(),
                                                k2.as_array(),
                                                decimal=4)

        # check proximal conjugate with data
        l1 = f1.proximal_conjugate(u, tau)
        l2 = l1 * 0.
        f1.proximal_conjugate(u, tau, out=l2)
        numpy.testing.assert_array_almost_equal(l1.as_array(),
                                                l2.as_array(),
                                                decimal=4)

        # check scaled function properties

        # scalar
        scalar = 100
        f_scaled_no_data = scalar * L2NormSquared()
        f_scaled_data = scalar * L2NormSquared(b=b)

        # grad
        w = f_scaled_no_data.gradient(u)
        ww = w * 0
        f_scaled_no_data.gradient(u, out=ww)

        numpy.testing.assert_array_almost_equal(w.as_array(),
                                                ww.as_array(),
                                                decimal=4)

        # numpy.testing.assert_array_almost_equal(f_scaled_data.gradient(u).as_array(), scalar*f1.gradient(u).as_array(), decimal=4)

        # # conj
        # numpy.testing.assert_almost_equal(f_scaled_no_data.convex_conjugate(u), \
        #                         f.convex_conjugate(u/scalar) * scalar, decimal=4)

        # numpy.testing.assert_almost_equal(f_scaled_data.convex_conjugate(u), \
        #                         scalar * f1.convex_conjugate(u/scalar), decimal=4)

        # # proximal
        w = f_scaled_no_data.proximal(u, tau)
        ww = w * 0
        f_scaled_no_data.proximal(u, tau, out=ww)
        numpy.testing.assert_array_almost_equal(w.as_array(), \
                                                ww.as_array())

        # numpy.testing.assert_array_almost_equal(f_scaled_data.proximal(u, tau).as_array(), \
        #                                         f1.proximal(u, tau*scalar).as_array())

        # proximal conjugate
        w = f_scaled_no_data.proximal_conjugate(u, tau)
        ww = w * 0
        f_scaled_no_data.proximal_conjugate(u, tau, out=ww)
        numpy.testing.assert_array_almost_equal(w.as_array(), \
                                                ww.as_array(), decimal=4)
Code example #9
    def test_L2NormSquared(self):
        # TESTS for L2 and scalar * L2
        print("Test L2NormSquared")

        M, N, K = 2, 3, 5
        ig = ImageGeometry(voxel_num_x=M, voxel_num_y=N, voxel_num_z=K)
        u = ig.allocate(ImageGeometry.RANDOM_INT)
        b = ig.allocate(ImageGeometry.RANDOM_INT)

        # check grad/call no data
        f = L2NormSquared()
        a1 = f.gradient(u)
        a2 = 2 * u
        numpy.testing.assert_array_almost_equal(a1.as_array(),
                                                a2.as_array(),
                                                decimal=4)
        numpy.testing.assert_equal(f(u), u.squared_norm())

        # check grad/call with data
        f1 = L2NormSquared(b=b)
        b1 = f1.gradient(u)
        b2 = 2 * (u - b)

        numpy.testing.assert_array_almost_equal(b1.as_array(),
                                                b2.as_array(),
                                                decimal=4)
        numpy.testing.assert_equal(f1(u), (u - b).squared_norm())

        #check convex conjugate no data
        c1 = f.convex_conjugate(u)
        c2 = 1 / 4. * u.squared_norm()
        numpy.testing.assert_equal(c1, c2)

        #check convex conjugate with data
        d1 = f1.convex_conjugate(u)
        d2 = (1. / 4.) * u.squared_norm() + (u * b).sum()
        numpy.testing.assert_equal(d1, d2)

        # check proximal no data
        tau = 5
        e1 = f.proximal(u, tau)
        e2 = u / (1 + 2 * tau)
        numpy.testing.assert_array_almost_equal(e1.as_array(),
                                                e2.as_array(),
                                                decimal=4)

        # check proximal with data
        tau = 5
        h1 = f1.proximal(u, tau)
        h2 = (u - b) / (1 + 2 * tau) + b
        numpy.testing.assert_array_almost_equal(h1.as_array(),
                                                h2.as_array(),
                                                decimal=4)

        # check proximal conjugate no data
        tau = 0.2
        k1 = f.proximal_conjugate(u, tau)
        k2 = u / (1 + tau / 2)
        numpy.testing.assert_array_almost_equal(k1.as_array(),
                                                k2.as_array(),
                                                decimal=4)

        # check proximal conjugate with data
        l1 = f1.proximal_conjugate(u, tau)
        l2 = (u - tau * b) / (1 + tau / 2)
        numpy.testing.assert_array_almost_equal(l1.as_array(),
                                                l2.as_array(),
                                                decimal=4)

        # check scaled function properties

        # scalar
        scalar = 100
        f_scaled_no_data = scalar * L2NormSquared()
        f_scaled_data = scalar * L2NormSquared(b=b)

        # call
        numpy.testing.assert_equal(f_scaled_no_data(u), scalar * f(u))
        numpy.testing.assert_equal(f_scaled_data(u), scalar * f1(u))

        # grad
        numpy.testing.assert_array_almost_equal(
            f_scaled_no_data.gradient(u).as_array(),
            scalar * f.gradient(u).as_array(),
            decimal=4)
        numpy.testing.assert_array_almost_equal(
            f_scaled_data.gradient(u).as_array(),
            scalar * f1.gradient(u).as_array(),
            decimal=4)

        # conj
        numpy.testing.assert_almost_equal(f_scaled_no_data.convex_conjugate(u), \
                                f.convex_conjugate(u/scalar) * scalar, decimal=4)

        numpy.testing.assert_almost_equal(f_scaled_data.convex_conjugate(u), \
                                scalar * f1.convex_conjugate(u/scalar), decimal=4)

        # proximal
        numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal(u, tau).as_array(), \
                                                f.proximal(u, tau*scalar).as_array())


        numpy.testing.assert_array_almost_equal(f_scaled_data.proximal(u, tau).as_array(), \
                                                f1.proximal(u, tau*scalar).as_array())

        # proximal conjugate
        numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal_conjugate(u, tau).as_array(), \
                                                (u/(1 + tau/(2*scalar) )).as_array(), decimal=4)

        numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \
                                                ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4)
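The closed forms asserted above are consistent with the Moreau decomposition x = prox_{tau f}(x) + tau * prox_{f*/tau}(x/tau). A NumPy-only sketch checking that identity for the no-data case f(x) = ||x||^2 (shape and seed are illustrative):

import numpy as np

rng = np.random.default_rng(2)
u = rng.standard_normal((2, 3, 5))
tau = 0.2

prox_f     = lambda x, t: x / (1 + 2 * t)   # prox_{t f}(x), as in e1/e2 above
prox_fconj = lambda x, t: x / (1 + t / 2)   # prox_{t f*}(x), as in k1/k2 above

# Moreau decomposition: x = prox_{t f}(x) + t * prox_{f*/t}(x / t)
np.testing.assert_allclose(u, prox_f(u, tau) + tau * prox_fconj(u / tau, 1 / tau))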
Code example #10
ag = ig

n1 = TestData.random_noise(data.as_array(),
                           mode='s&p',
                           salt_vs_pepper=0.9,
                           amount=0.2)
noisy_data = ImageData(n1)

# Regularisation Parameter
alpha = 5

###############################################################################
# Setup and run the FISTA algorithm
operator = Gradient(ig)
fidelity = L1Norm(b=noisy_data)
regulariser = FunctionOperatorComposition(alpha * L2NormSquared(), operator)

x_init = ig.allocate()
opt = {'memopt': True}
fista = FISTA(x_init=x_init, f=regulariser, g=fidelity, opt=opt)
fista.max_iteration = 2000
fista.update_objective_interval = 50
fista.run(2000, verbose=False)
###############################################################################

###############################################################################
# Setup and run the PDHG algorithm
op1 = Gradient(ig)
op2 = Identity(ig, ag)

operator = BlockOperator(op1, op2, shape=(2, 1))
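In the FISTA run earlier in this example the non-smooth term g is the L1 fidelity, whose proximal operator is soft-thresholding around the noisy data. A NumPy sketch of that closed form (values are illustrative):

import numpy as np

def prox_l1(x, b, tau):
    # prox of tau * ||. - b||_1: soft-threshold x - b by tau, then shift back by b
    d = x - b
    return b + np.sign(d) * np.maximum(np.abs(d) - tau, 0.0)

print(prox_l1(np.array([3.0, -1.0, 0.2]), np.zeros(3), 0.5))   # [ 2.5 -0.5  0. ]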
Code example #11
                             angles,
                             detectors,
                             det_w,
                             dist_source_center=SourceOrig,
                             dist_center_detector=OrigDetec)
else:
    raise NotImplementedError

Aop = AstraProjectorSimple(ig, ag, dev)
sin = Aop.direct(data)
eta = 0
noisy_data = AcquisitionData(sin.as_array() + np.random.normal(0, 1, ag.shape))
back_proj = Aop.adjoint(noisy_data)

# Define Least Squares
f = FunctionOperatorComposition(L2NormSquared(b=noisy_data), Aop)

# Allocate solution
x_init = ig.allocate()

# Run FISTA for least squares
fista = FISTA(x_init=x_init, f=f, g=ZeroFunction())
fista.max_iteration = 10
fista.update_objective_interval = 2
fista.run(100, verbose=True)

# Run FISTA for least squares with lower/upper bound
fista0 = FISTA(x_init=x_init, f=f, g=IndicatorBox(lower=0, upper=1))
fista0.max_iteration = 10
fista0.update_objective_interval = 2
fista0.run(100, verbose=True)
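The constrained run (fista0) uses IndicatorBox(lower=0, upper=1) as g; its proximal operator is independent of the step size and reduces to clipping into the box. A NumPy sketch (values are illustrative):

import numpy as np

def prox_indicator_box(x, lower=0.0, upper=1.0):
    # projection onto [lower, upper], applied elementwise
    return np.clip(x, lower, upper)

print(prox_indicator_box(np.array([-0.3, 0.4, 1.7])))   # -0.3 -> 0.0, 1.7 -> 1.0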
Code example #12
        out.sqrt(out=out)

        x.subtract(fid.bnoise, out=tmp)
        tmp -= tau

        out += tmp

        out *= 0.5

        # ADD the constraint here
        out.maximum(0, out=out)


fid.proximal = KL_Prox_PosCone

reg = FunctionOperatorComposition(alpha * L2NormSquared(), operator)

x_init = ig.allocate()
fista = FISTA(x_init=x_init, f=reg, g=fid)
fista.max_iteration = 3000
fista.update_objective_interval = 500
fista.run(3000, verbose=True)

# Show results
plt.figure(figsize=(15, 15))
plt.subplot(3, 1, 1)
plt.imshow(data.as_array())
plt.title('Ground Truth')
plt.colorbar()
plt.subplot(3, 1, 2)
plt.imshow(noisy_data.as_array())
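The patched proximal step above follows the standard closed form for the Kullback-Leibler fidelity with data b (plus, in the demo, an additional bnoise shift), prox_{tau KL}(x) = 0.5*((x - tau) + sqrt((x - tau)^2 + 4*tau*b)), followed by projection onto the positive cone. A NumPy sketch of that formula with the background term taken as zero (names and values are illustrative):

import numpy as np

def kl_prox_poscone(x, b, tau):
    out = 0.5 * ((x - tau) + np.sqrt((x - tau) ** 2 + 4.0 * tau * b))
    return np.maximum(out, 0.0)   # positivity constraint, as in KL_Prox_PosCone

x = np.array([0.1, 2.0, 5.0])
b = np.array([1.0, 1.0, 1.0])
print(kl_prox_poscone(x, b, tau=0.5))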
Code example #13
    from ccpi.framework import ImageGeometry, BlockGeometry
    from ccpi.optimisation.operators import Gradient, Identity, BlockOperator
    import numpy
    import numpy as np

    ig = ImageGeometry(M, N)
    BG = BlockGeometry(ig, ig)

    u = ig.allocate('random_int')
    B = BlockOperator(Gradient(ig), Identity(ig))

    U = B.direct(u)
    b = ig.allocate('random_int')

    f1 = 10 * MixedL21Norm()
    f2 = 0.5 * L2NormSquared(b=b)

    f = BlockFunction(f1, f2)
    tau = 0.3

    print(" without out ")
    res_no_out = f.proximal_conjugate(U, tau)
    res_out = B.range_geometry().allocate()
    f.proximal_conjugate(U, tau, out=res_out)

    numpy.testing.assert_array_almost_equal(res_no_out[0][0].as_array(), \
                                            res_out[0][0].as_array(), decimal=4)

    numpy.testing.assert_array_almost_equal(res_no_out[0][1].as_array(), \
                                            res_out[0][1].as_array(), decimal=4)
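The first block of f is 10 * MixedL21Norm(), whose proximal conjugate reduces to a pointwise projection onto the ball of radius 10 in the 2-norm taken across the gradient components, independent of tau. A NumPy sketch of that projection (dy, dx and their values are illustrative):

import numpy as np

def project_l21_ball(dy, dx, radius=10.0):
    # scale each pixel's gradient vector back onto the ball of the given radius
    scale = np.maximum(np.sqrt(dy ** 2 + dx ** 2) / radius, 1.0)
    return dy / scale, dx / scale

dy = np.array([12.0, 3.0])
dx = np.array([5.0, 4.0])
print(project_l21_ball(dy, dx))   # (12, 5) is rescaled onto the ball; (3, 4) is left unchanged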
Code example #14
x_init = ig.allocate()
cgls = CGLS()
cgls.set_up(x_init=x_init, operator=Aop, data=sinogram)
cgls.max_iteration = 500
cgls.update_objective_interval = 100
cgls.run(500, verbose=True)

#%%

###############################################################################
# Setup and run the PDHG algorithm
print("Running PDHG reconstruction")

operator = Aop
f = L2NormSquared(b=sinogram)
g = ZeroFunction()

## Compute operator Norm
normK = operator.norm()

## Primal & dual stepsizes
sigma = 0.02
tau = 1 / (sigma * normK**2)

pdhg = PDHG()
pdhg.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
pdhg.max_iteration = 1000
pdhg.update_objective_interval = 100
pdhg.run(1000, verbose=True)
Code example #15
if noise == 's&p':
    alpha = 0.8
elif noise == 'poisson':
    alpha = .3
elif noise == 'gaussian':
    alpha = .2

beta = 2 * alpha

# Fidelity
if noise == 's&p':
    f3 = L1Norm(b=noisy_data)
elif noise == 'poisson':
    f3 = KullbackLeibler(noisy_data)
elif noise == 'gaussian':
    f3 = 0.5 * L2NormSquared(b=noisy_data)

if method == '0':

    # Create operators
    op11 = Gradient(ig)
    op12 = Identity(op11.range_geometry())

    op22 = SymmetrizedGradient(op11.domain_geometry())
    op21 = ZeroOperator(ig, op22.range_geometry())

    op31 = Identity(ig, ag)
    op32 = ZeroOperator(op22.domain_geometry(), ag)

    operator = BlockOperator(op11,
                             -1 * op12,
Code example #16
# Form Tikhonov as a Block CGLS structure
op_CGLS = BlockOperator(Aop, alpha * Grad, shape=(2, 1))
block_data = BlockDataContainer(noisy_data, Grad.range_geometry().allocate())

x_init = ig.allocate()
cgls = CGLS(x_init=x_init, operator=op_CGLS, data=block_data)
cgls.max_iteration = 1000
cgls.update_objective_interval = 200
cgls.run(1000, verbose=False)

#Setup and run the PDHG algorithm

# Create BlockOperator
op_PDHG = BlockOperator(Grad, Aop, shape=(2, 1))
# Create functions
f1 = 0.5 * alpha**2 * L2NormSquared()
f2 = 0.5 * L2NormSquared(b=noisy_data)
f = BlockFunction(f1, f2)
g = ZeroFunction()

## Compute operator Norm
normK = op_PDHG.norm()

## Primal & dual stepsizes
sigma = 10
tau = 1 / (sigma * normK**2)

pdhg = PDHG(f=f, g=g, operator=op_PDHG, tau=tau, sigma=sigma)
pdhg.max_iteration = 1000
pdhg.update_objective_interval = 200
pdhg.run(1000, verbose=False)
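The Block CGLS formulation above solves the stacked least-squares problem min_x || [A; alpha*Grad] x - [b; 0] ||^2, which is algebraically the Tikhonov problem min_x ||A x - b||^2 + alpha^2 * ||Grad x||^2 targeted by the PDHG setup. A small dense NumPy check of that identity (A, D, b, x are illustrative stand-ins for Aop, Grad and the data):

import numpy as np

rng = np.random.default_rng(3)
A = rng.standard_normal((6, 4))
D = rng.standard_normal((4, 4))   # stands in for the Gradient operator
b = rng.standard_normal(6)
x = rng.standard_normal(4)
alpha = 0.7

stacked = np.concatenate([A @ x - b, alpha * (D @ x)])
assert np.isclose(np.sum(stacked ** 2),
                  np.sum((A @ x - b) ** 2) + alpha ** 2 * np.sum((D @ x) ** 2))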
Code example #17
printing = 0  # (0 is OFF, 1 is ON)
device = 'gpu'  # or cpu

g = FGP_TV(alpha, inner_TV_iter, tolerance, methodTV, nonnegativity_constraint,
           printing, device)

x_init = A3D.volume_geometry.allocate()

# Allocate space for the channel-wise reconstruction
fista_sol_TV_channel_wise = A3D_chan.volume_geometry.allocate()

for i in range(ag.channels):

    # Set up the L2NormSquared fidelity term for each channel
    f = FunctionOperatorComposition(
        0.5 * L2NormSquared(b=data.subset(channel=i)), A3D)

    # Run FISTA
    fista = FISTA(x_init=x_init, f=f, g=g)
    fista.max_iteration = 100
    fista.update_objective_interval = 50
    fista.run(400, verbose=True, callback=show_data_3D)
    np.copyto(fista_sol_TV_channel_wise.array[i], fista.get_output().array)

#%% show reconstruction

show_4D_channel_slice(fista_sol_TV_channel_wise, 5,
                      'FISTA TV channel-wise reconstruction')
show_4D_channel_slice(fista_sol_TV_channel_wise, 10,
                      'FISTA TV channel-wise reconstruction')
show_4D_channel_slice(fista_sol_TV_channel_wise, 15,