    def test_cil_vs_cvxpy_totalvariation_isotropic(self):

        # solution
        u_cvx = cvxpy.Variable(self.data.shape)

        # regularisation parameter
        alpha = 0.1

        # fidelity term
        fidelity = 0.5 * cvxpy.sum_squares(u_cvx - self.data.array)   
        regulariser = (alpha**2) * self.tv_cvxpy_regulariser(u_cvx)

        # objective
        obj = cvxpy.Minimize(regulariser + fidelity)
        prob = cvxpy.Problem(obj, constraints=[])

        # Choose solver (SCS here; MOSEK also works but requires a licence)
        tv_cvxpy = prob.solve(verbose=True, solver=cvxpy.SCS)

        # use TotalVariation from CIL (with Fast Gradient Projection algorithm)
        TV = TotalVariation(max_iteration=200)
        tv_cil = TV.proximal(self.data, tau=alpha**2)     

        # compare solution
        np.testing.assert_allclose(tv_cil.array, u_cvx.value,atol=1e-3)  

        # compare objectives
        f = 0.5 * L2NormSquared(b=self.data)
        cil_objective = f(tv_cil) + TV(tv_cil) * (alpha**2)
        np.testing.assert_allclose(cil_objective, obj.value, atol=1e-3)  
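
    def tv_cvxpy_regulariser_sketch(self, u):
        # Illustrative only: the test above calls self.tv_cvxpy_regulariser, which
        # the real test class defines elsewhere. This sketch shows one way to
        # express isotropic TV in CVXPY, using forward differences with a Neumann
        # boundary (zero difference at the last index of each axis).
        dx = cvxpy.vstack([u[1:, :] - u[:-1, :], np.zeros((1, u.shape[1]))])
        dy = cvxpy.hstack([u[:, 1:] - u[:, :-1], np.zeros((u.shape[0], 1))])
        # pointwise Euclidean norm of the gradient, summed over all pixels
        return cvxpy.sum(
            cvxpy.norm(cvxpy.vstack([cvxpy.vec(dx), cvxpy.vec(dy)]), 2, axis=0))
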
    def test_TotalVariation_vs_FGP_TV_gpu(self):   

        # Isotropic TV cil
        TV_cil_iso = self.alpha * TotalVariation(max_iteration=self.iterations)
        res_TV_cil_iso = TV_cil_iso.proximal(self.data, tau=1.0)        

        # Anisotropic TV cil
        TV_cil_aniso = self.alpha * TotalVariation(max_iteration=self.iterations, isotropic=False) 
        res_TV_cil_aniso = TV_cil_aniso.proximal(self.data, tau=1.0)               
        
        # Isotropic FGP_TV CCPiReg toolkit (gpu)
        TV_regtoolkit_gpu_iso = self.alpha * FGP_TV(max_iteration=self.iterations, device = 'gpu') 
        res_TV_regtoolkit_gpu_iso = TV_regtoolkit_gpu_iso.proximal(self.data, tau=1.0)

        # Anisotropic FGP_TV CCPiReg toolkit (gpu)
        TV_regtoolkit_gpu_aniso = self.alpha * FGP_TV(max_iteration=self.iterations, device = 'gpu', isotropic=False)  
        res_TV_regtoolkit_gpu_aniso = TV_regtoolkit_gpu_aniso.proximal(self.data, tau=1.0)  

        # Anisotropic FGP_TV CCPiReg toolkit (cpu)
        TV_regtoolkit_cpu_aniso = self.alpha * FGP_TV(max_iteration=self.iterations, device = 'cpu', isotropic=False)         
        res_TV_regtoolkit_cpu_aniso = TV_regtoolkit_cpu_aniso.proximal(self.data, tau=1.0)          

        # Isotropic FGP_TV CCPiReg toolkit (cpu)
        TV_regtoolkit_cpu_iso = self.alpha * FGP_TV(max_iteration=self.iterations, device = 'cpu')
        res_TV_regtoolkit_cpu_iso = TV_regtoolkit_cpu_iso.proximal(self.data, tau=1.0)        

        np.testing.assert_array_almost_equal(res_TV_cil_iso.array, res_TV_regtoolkit_gpu_iso.array, decimal=3)
        np.testing.assert_array_almost_equal(res_TV_regtoolkit_cpu_iso.array, res_TV_regtoolkit_gpu_iso.array, decimal=3)

        np.testing.assert_array_almost_equal(res_TV_cil_aniso.array, res_TV_regtoolkit_gpu_aniso.array, decimal=3)
        np.testing.assert_array_almost_equal(res_TV_regtoolkit_cpu_aniso.array, res_TV_regtoolkit_gpu_aniso.array, decimal=3)              
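
    def _setUp_sketch(self):
        # Illustrative only: the tests above rely on fixtures created in the real
        # setUp (self.data, self.alpha, self.iterations). All concrete values here
        # are assumptions, not taken from the original test class.
        from cil.framework import ImageGeometry
        ig = ImageGeometry(voxel_num_x=64, voxel_num_y=64)
        self.data = ig.allocate('random', seed=5)   # random test image
        self.alpha = 0.15                           # regularisation weight
        self.iterations = 100                       # proximal / FGP iterations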
Example #3
    def test_regularisation_parameter3(self):
        tv = TotalVariation()
        try:
            tv.regularisation_parameter = 'string'
            assert False
        except TypeError as te:
            print(te)
            assert True
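
    # Illustrative variant (not in the original): the same check written with
    # unittest's context manager, assuming the class derives from unittest.TestCase.
    def test_regularisation_parameter3_assertRaises(self):
        tv = TotalVariation()
        with self.assertRaises(TypeError):
            tv.regularisation_parameter = 'string'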
Example #4
    def test_rmul2(self):
        alpha = 'string'
        try:
            tv = alpha * TotalVariation()
            assert False
        except TypeError as te:
            print(te)
            assert True
Example #5
    def test_SPDHG_vs_PDHG_implicit(self):

        data = dataexample.SIMPLE_PHANTOM_2D.get(size=(128, 128))

        ig = data.geometry
        ig.voxel_size_x = 0.1
        ig.voxel_size_y = 0.1

        detectors = ig.shape[0]
        angles = np.linspace(0, np.pi, 90)
        ag = AcquisitionGeometry('parallel',
                                 '2D',
                                 angles,
                                 detectors,
                                 pixel_size_h=0.1,
                                 angle_unit='radian')
        # Select device
        dev = 'cpu'

        Aop = AstraProjectorSimple(ig, ag, dev)

        sin = Aop.direct(data)
        # Create noisy data: apply Poisson noise ('gaussian' exercises the other branch)
        noises = ['gaussian', 'poisson']
        noise = noises[1]
        noisy_data = ag.allocate()
        if noise == 'poisson':
            np.random.seed(10)
            scale = 20
            eta = 0
            noisy_data.fill(
                np.random.poisson(scale * (eta + sin.as_array())) / scale)
        elif noise == 'gaussian':
            np.random.seed(10)
            n1 = np.random.normal(0, 0.1, size=ag.shape)
            noisy_data.fill(n1 + sin.as_array())

        else:
            raise ValueError('Unsupported Noise ', noise)

        # Create BlockOperator
        operator = Aop
        f = KullbackLeibler(b=noisy_data)
        alpha = 0.005
        g = alpha * TotalVariation(50, 1e-4, lower=0)
        normK = operator.norm()

        # 'implicit' PDHG with preconditioned step sizes
        tau_tmp = 1.
        sigma_tmp = 1.
        tau = sigma_tmp / operator.adjoint(
            tau_tmp * operator.range_geometry().allocate(1.))
        sigma = tau_tmp / operator.direct(
            sigma_tmp * operator.domain_geometry().allocate(1.))
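        # With tau_tmp = sigma_tmp = 1 these are the diagonal preconditioned step
        # sizes tau = 1 / (A^T 1) and sigma = 1 / (A 1), evaluated elementwise
        # (Pock-Chambolle preconditioning).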
        # Setup and run the PDHG algorithm
        pdhg = PDHG(f=f,
                    g=g,
                    operator=operator,
                    tau=tau,
                    sigma=sigma,
                    max_iteration=1000,
                    update_objective_interval=500)
        pdhg.run(verbose=0)

        subsets = 10
        size_of_subsets = int(len(angles) / subsets)
        # split the angles into uniform, sequential subsets
        list_angles = [
            angles[i:i + size_of_subsets]
            for i in range(0, len(angles), size_of_subsets)
        ]
        # create an acquisition geometry for each subset of angles
        list_geoms = [
            AcquisitionGeometry('parallel',
                                '2D',
                                list_angles[i],
                                detectors,
                                pixel_size_h=0.1,
                                angle_unit='radian')
            for i in range(len(list_angles))
        ]
        # create one projection operator per subset
        A = BlockOperator(*[
            AstraProjectorSimple(ig, list_geoms[i], dev)
            for i in range(subsets)
        ])
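        # each sub-operator projects the same image geometry onto its own subset
        # of angles, so A[i] pairs with the i-th block of acquisition data below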
        # acquisition data: one block per subset, slicing the noisy sinogram to
        # match the angles of the corresponding geometry
        AD_list = []
        for sub_num in range(subsets):
            arr = noisy_data.as_array()[
                sub_num * size_of_subsets:(sub_num + 1) * size_of_subsets, :]
            AD_list.append(AcquisitionData(arr, geometry=list_geoms[sub_num]))

        g = BlockDataContainer(*AD_list)

        # block function: one KullbackLeibler term per subset
        F = BlockFunction(*[KullbackLeibler(b=g[i]) for i in range(subsets)])
        G = alpha * TotalVariation(50, 1e-4, lower=0)

        prob = [1 / len(A)] * len(A)
        spdhg = SPDHG(f=F,
                      g=G,
                      operator=A,
                      max_iteration=1000,
                      update_objective_interval=200,
                      prob=prob)
        spdhg.run(1000, verbose=0)
        from cil.utilities.quality_measures import mae, mse, psnr
        qm = (mae(spdhg.get_output(),
                  pdhg.get_output()), mse(spdhg.get_output(),
                                          pdhg.get_output()),
              psnr(spdhg.get_output(), pdhg.get_output()))
        if debug_print:
            print("Quality measures", qm)

        np.testing.assert_almost_equal(mae(spdhg.get_output(),
                                           pdhg.get_output()),
                                       0.000335,
                                       decimal=3)
        np.testing.assert_almost_equal(mse(spdhg.get_output(),
                                           pdhg.get_output()),
                                       5.51141e-06,
                                       decimal=3)
Example #6
    def test_rmul(self):
        alpha = 0.15
        tv = alpha * TotalVariation()
        assert isinstance(tv, TotalVariation)
Example #7
    def test_regularisation_parameter2(self):
        alpha = 0.15
        tv = alpha * TotalVariation()
        np.testing.assert_almost_equal(tv.regularisation_parameter, alpha)
Example #8
    def test_regularisation_parameter(self):
        tv = TotalVariation()
        np.testing.assert_almost_equal(tv.regularisation_parameter, 1.)
Example #9
    def test_compare_regularisation_toolkit_tomophantom(self):

        print("Compare CIL_FGP_TV vs CCPiReg_FGP_TV no tolerance (3D)")

        print("Building 3D phantom using TomoPhantom software")
        model = 13  # select a model number from the library
        N_size = 64  # Define phantom dimensions using a scalar value (cubic phantom)
        path = os.path.dirname(tomophantom.__file__)
        path_library3D = os.path.join(path, "Phantom3DLibrary.dat")
        #This will generate a N_size x N_size x N_size phantom (3D)
        phantom_tm = TomoP3D.Model(model, N_size, path_library3D)

        ig = ImageGeometry(N_size, N_size, N_size)
        data = ig.allocate()
        data.fill(phantom_tm)

        noisy_data = noise.gaussian(data, seed=10)

        alpha = 0.1
        iters = 1000

        print("Use tau as an array of ones")
        # CIL_TotalVariation no tolerance
        g_CIL = alpha * TotalVariation(iters, tolerance=None, info=True)
        res1 = g_CIL.proximal(noisy_data, ig.allocate(1.))  # warm-up call, not timed
        t0 = timer()
        res1 = g_CIL.proximal(noisy_data, ig.allocate(1.))
        t1 = timer()
        print(t1 - t0)

        # CCPi Regularisation toolkit, tight tolerance (1e-9)
        r_alpha = alpha
        r_iterations = iters
        r_tolerance = 1e-9
        r_iso = 0
        r_nonneg = 0
        r_printing = 0
        g_CCPI_reg_toolkit = CCPiReg_FGP_TV(r_alpha, r_iterations, r_tolerance,
                                            r_iso, r_nonneg, r_printing, 'cpu')

        t2 = timer()
        res2 = g_CCPI_reg_toolkit.proximal(noisy_data, 1.)
        t3 = timer()
        print(t3 - t2)
        np.testing.assert_array_almost_equal(res1.as_array(),
                                             res2.as_array(),
                                             decimal=3)

        # CIL_FGP_TV no tolerance
        g_CIL.tolerance = None
        t0 = timer()
        res1 = g_CIL.proximal(noisy_data, 1.)
        t1 = timer()
        print(t1 - t0)

        ###################################################################
        ###################################################################
        ###################################################################
        ###################################################################

        data = dataexample.PEPPERS.get(size=(256, 256))
        ig = data.geometry
        ag = ig

        noisy_data = noise.gaussian(data, seed=10)

        alpha = 0.1
        iters = 1000

        # CIL_FGP_TV no tolerance
        g_CIL = alpha * TotalVariation(iters, tolerance=None)
        t0 = timer()
        res1 = g_CIL.proximal(noisy_data, 1.)
        t1 = timer()
        print(t1 - t0)

        # CCPi Regularisation toolkit, tight tolerance (1e-8)
        r_alpha = alpha
        r_iterations = iters
        r_tolerance = 1e-8
        r_iso = 0
        r_nonneg = 0
        r_printing = 0
        g_CCPI_reg_toolkit = CCPiReg_FGP_TV(r_alpha, r_iterations, r_tolerance,
                                            r_iso, r_nonneg, r_printing, 'cpu')

        t2 = timer()
        res2 = g_CCPI_reg_toolkit.proximal(noisy_data, 1.)
        t3 = timer()
        print(t3 - t2)
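
        # Note (not part of the original snippet): this second comparison on the
        # PEPPERS data only times the two implementations; a check analogous to the
        # one above would be, for example:
        # np.testing.assert_array_almost_equal(res1.as_array(), res2.as_array(),
        #                                      decimal=3)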
Example #10
    def test_compare_regularisation_toolkit(self):

        print("Compare CIL_FGP_TV vs CCPiReg_FGP_TV no tolerance (2D)")

        data = dataexample.SHAPES.get()
        ig = data.geometry
        ag = ig

        # Create noisy data.
        n1 = np.random.normal(0, 0.1, size=ig.shape)
        noisy_data = ig.allocate()
        noisy_data.fill(n1 + data.as_array())

        alpha = 0.1
        iters = 1000

        # CIL_FGP_TV no tolerance
        g_CIL = alpha * TotalVariation(
            iters, tolerance=None, lower=0, info=True)
        t0 = timer()
        res1 = g_CIL.proximal(noisy_data, 1.)
        t1 = timer()
        print(t1 - t0)

        # CCPi Regularisation toolkit, tight tolerance (1e-9)
        r_alpha = alpha
        r_iterations = iters
        r_tolerance = 1e-9
        r_iso = 0
        r_nonneg = 1
        r_printing = 0
        g_CCPI_reg_toolkit = CCPiReg_FGP_TV(r_alpha, r_iterations, r_tolerance,
                                            r_iso, r_nonneg, r_printing, 'cpu')

        t2 = timer()
        res2 = g_CCPI_reg_toolkit.proximal(noisy_data, 1.)
        t3 = timer()
        print(t3 - t2)

        np.testing.assert_array_almost_equal(res1.as_array(),
                                             res2.as_array(),
                                             decimal=4)

        ###################################################################
        ###################################################################
        ###################################################################
        ###################################################################

        print("Compare CIL_FGP_TV vs CCPiReg_FGP_TV with iterations.")
        iters = 408
        # CIL TotalVariation with tight tolerance (1e-9)
        g_CIL = alpha * TotalVariation(iters, tolerance=1e-9, lower=0.)
        t0 = timer()
        res1 = g_CIL.proximal(noisy_data, 1.)
        t1 = timer()
        print(t1 - t0)

        # CCPi Regularisation toolkit, tight tolerance (1e-9)
        r_alpha = alpha
        r_iterations = iters
        r_tolerance = 1e-9
        r_iso = 0
        r_nonneg = 1
        r_printing = 0
        g_CCPI_reg_toolkit = CCPiReg_FGP_TV(r_alpha, r_iterations, r_tolerance,
                                            r_iso, r_nonneg, r_printing, 'cpu')

        t2 = timer()
        res2 = g_CCPI_reg_toolkit.proximal(noisy_data, 1.)
        t3 = timer()
        print(t3 - t2)

        print(mae(res1, res2))
        np.testing.assert_array_almost_equal(res1.as_array(),
                                             res2.as_array(),
                                             decimal=3)
Example #11
    # NB: this fragment assumes ig_cs, ag_shift, ldata and sparse_beads are defined
    # earlier in the script
    from cil.optimisation.functions import LeastSquares
    from cil.plugins.astra.operators import ProjectionOperator as A
    from cil.plugins.ccpi_regularisation.functions import FGP_TV

    K = A(ig_cs, ag_shift)
    # c compensates for the 0.5 factor in LeastSquares so the fidelity matches the
    # L2NormSquared scaling used in the PDHG formulation
    c = 2
    f = LeastSquares(K, ldata, c=0.5 * c)
    if sparse_beads:
        f.L = 1071.1967 * c
    else:
        f.L = 24.4184 * c
    alpha_rgl = 0.003
    alpha = alpha_rgl * ig_cs.voxel_size_x
    # either the CIL TotalVariation or the CCPi-Regularisation FGP_TV can serve as
    # the non-smooth term; the FGP_TV line below overrides the assignment above
    g = c * alpha * TotalVariation(lower=0.)
    g = FGP_TV(alpha, 100, 1e-5, 1, 1, 0, 'gpu')

    algo = FISTA(initial=K.domain.allocate(0),
                 f=f,
                 g=g,
                 max_iteration=10000,
                 update_objective_interval=2)
    #%%
    import cProfile
    algo.update_objective_interval = 10
    cProfile.run('algo.run(100, verbose=1)')
    #%%
    plotter2D(algo.solution, cmap='gist_earth')

    #%%
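    # A minimal sketch (not in the original script; assumes matplotlib is
    # installed) for inspecting the objective values that FISTA records every
    # update_objective_interval iterations:
    import matplotlib.pyplot as plt
    plt.semilogy(algo.objective)
    plt.xlabel('objective evaluations')
    plt.ylabel('objective value')
    plt.show()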