Example #1
    def process_frames(self, data):

        import numpy as np
        self.pars['input'] = np.nan_to_num(data[0])
        # Running Ccpi-RGLTK modules on GPU
        if (self.parameters['method'] == 'ROF_TV'):
            (im_res,
             infogpu) = ROF_TV(self.pars['input'],
                               self.pars['regularisation_parameter'],
                               self.pars['number_of_iterations'],
                               self.pars['time_marching_parameter'],
                               self.pars['tolerance_constant'], self.device)
        if (self.parameters['method'] == 'FGP_TV'):
            (im_res, infogpu) = FGP_TV(self.pars['input'],
                                       self.pars['regularisation_parameter'],
                                       self.pars['number_of_iterations'],
                                       self.pars['tolerance_constant'],
                                       self.pars['methodTV'],
                                       self.pars['nonneg'], self.device)
        if (self.parameters['method'] == 'SB_TV'):
            (im_res, infogpu) = SB_TV(self.pars['input'],
                                      self.pars['regularisation_parameter'],
                                      self.pars['number_of_iterations'],
                                      self.pars['tolerance_constant'],
                                      self.pars['methodTV'], self.device)
        if (self.parameters['method'] == 'TGV'):
            (im_res,
             infogpu) = TGV(self.pars['input'],
                            self.pars['regularisation_parameter'],
                            self.pars['alpha1'], self.pars['alpha0'],
                            self.pars['number_of_iterations'],
                            self.pars['LipshitzConstant'],
                            self.pars['tolerance_constant'], self.device)
        if (self.parameters['method'] == 'LLT_ROF'):
            (im_res,
             infogpu) = LLT_ROF(self.pars['input'],
                                self.pars['regularisation_parameter'],
                                self.pars['regularisation_parameterLLT'],
                                self.pars['number_of_iterations'],
                                self.pars['time_marching_parameter'],
                                self.pars['tolerance_constant'], self.device)
        if (self.parameters['method'] == 'NDF'):
            (im_res, infogpu) = NDF(
                self.pars['input'], self.pars['regularisation_parameter'],
                self.pars['edge_parameter'], self.pars['number_of_iterations'],
                self.pars['time_marching_parameter'],
                self.pars['penalty_type'], self.pars['tolerance_constant'],
                self.device)
        if (self.parameters['method'] == 'DIFF4th'):
            (im_res,
             infogpu) = Diff4th(self.pars['input'],
                                self.pars['regularisation_parameter'],
                                self.pars['edge_parameter'],
                                self.pars['number_of_iterations'],
                                self.pars['time_marching_parameter'],
                                self.pars['tolerance_constant'], self.device)

        # print("calling process frames", data[0].shape)
        return im_res
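A small defensive check could be added before the dispatch above, since im_res is only assigned when one of the listed method names matches and any other value of self.parameters['method'] would raise UnboundLocalError at the return statement. A sketch of such a guard (the supported_methods name is ours, not part of the original plugin):

        supported_methods = ('ROF_TV', 'FGP_TV', 'SB_TV', 'TGV', 'LLT_ROF', 'NDF', 'DIFF4th')
        if self.parameters['method'] not in supported_methods:
            # fail early with a clear message instead of an UnboundLocalError
            raise ValueError("Unsupported regularisation method: %s"
                             % self.parameters['method'])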
Example #2
    def test_LLT_ROF_CPU(self):
        # set parameters
        Im, input, ref = self.getPars()
        # call routine
        sb_cpu, info = LLT_ROF(input, 0.01, 0.008, 1000, 0.001, 0.0, 'cpu')

        rms = rmse(Im, sb_cpu)

        # now test that it generates some expected output
        self.assertAlmostEqual(rms, 0.02, delta=0.01)
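For reference, the positional signature exercised by the test above is: input image, ROF regularisation parameter, LLT regularisation parameter, number of iterations, time-marching step, tolerance and device, with the routine returning the denoised image and an info vector. A minimal self-contained sketch of the same call on synthetic data, assuming the CCPi-RGL toolkit (ccpi.filters.regularisers) is installed; the image and parameter values are illustrative only:

import numpy as np
from ccpi.filters.regularisers import LLT_ROF

# synthetic test image: a smooth ramp plus Gaussian noise, cast to float32 as in the tests above
np.random.seed(0)
clean = np.tile(np.linspace(0.0, 1.0, 256, dtype='float32'), (256, 1))
noisy = (clean + np.random.normal(0, 0.05, clean.shape)).astype('float32')

# (input, ROF parameter, LLT parameter, iterations, time step, tolerance, device)
denoised, info = LLT_ROF(noisy, 0.01, 0.008, 1000, 0.001, 0.0, 'cpu')
print("RMSE:", np.sqrt(np.mean((clean - denoised) ** 2)))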
Example #3
# set parameters
pars = {'algorithm': LLT_ROF,
        'input': u0,
        'regularisation_parameterROF': 0.01,
        'regularisation_parameterLLT': 0.0085,
        'number_of_iterations': 1000,
        'time_marching_parameter': 0.0001,
        'tolerance_constant': 0.0}


print("#############LLT-ROF CPU####################")
start_time = timeit.default_timer()
(lltrof_cpu, info_vec_cpu) = LLT_ROF(pars['input'], 
              pars['regularisation_parameterROF'],
              pars['regularisation_parameterLLT'],
              pars['number_of_iterations'],
              pars['time_marching_parameter'],
              pars['tolerance_constant'], 'cpu')

Qtools = QualityTools(Im, lltrof_cpu)
pars['rmse'] = Qtools.rmse()

txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time', timeit.default_timer() - start_time)
print(txtstr)
a = fig.add_subplot(1, 4, 2)

# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
       verticalalignment='top', bbox=props)
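QualityTools.rmse() above is assumed to return the usual root-mean-square error between the ground-truth image Im and the denoised result; a plain NumPy equivalent for reference (a sketch, not the toolkit's implementation; the function name is ours):

import numpy as np

def rmse_reference(im, im_rec):
    # root-mean-square error between two arrays of equal shape
    diff = np.asarray(im, dtype=np.float64) - np.asarray(im_rec, dtype=np.float64)
    return np.sqrt(np.mean(diff ** 2))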
Example #4
    def process_frames(self, data):
        import numpy as np
        #input_temp = data[0]
        #indices = np.where(np.isnan(input_temp))
        #input_temp[indices] = 0.0
        #self.pars['input'] = input_temp
        input_temp = np.nan_to_num(data[0])
        input_temp[input_temp > 10**15] = 0.0
        self.pars['input'] = input_temp
        #self.pars['input'] = np.nan_to_num(data[0])

        # Running Ccpi-RGLTK modules on GPU
        if (self.parameters['method'] == 'ROF_TV'):
            (im_res,
             infogpu) = ROF_TV(self.pars['input'],
                               self.pars['regularisation_parameter'],
                               self.pars['number_of_iterations'],
                               self.pars['time_marching_parameter'],
                               self.pars['tolerance_constant'], self.device)
        if (self.parameters['method'] == 'FGP_TV'):
            (im_res, infogpu) = FGP_TV(self.pars['input'],
                                       self.pars['regularisation_parameter'],
                                       self.pars['number_of_iterations'],
                                       self.pars['tolerance_constant'],
                                       self.pars['methodTV'],
                                       self.pars['nonneg'], self.device)
        if (self.parameters['method'] == 'SB_TV'):
            (im_res, infogpu) = SB_TV(self.pars['input'],
                                      self.pars['regularisation_parameter'],
                                      self.pars['number_of_iterations'],
                                      self.pars['tolerance_constant'],
                                      self.pars['methodTV'], self.device)
        if (self.parameters['method'] == 'TGV'):
            (im_res,
             infogpu) = TGV(self.pars['input'],
                            self.pars['regularisation_parameter'],
                            self.pars['alpha1'], self.pars['alpha0'],
                            self.pars['number_of_iterations'],
                            self.pars['LipshitzConstant'],
                            self.pars['tolerance_constant'], self.device)
        if (self.parameters['method'] == 'LLT_ROF'):
            (im_res,
             infogpu) = LLT_ROF(self.pars['input'],
                                self.pars['regularisation_parameter'],
                                self.pars['regularisation_parameterLLT'],
                                self.pars['number_of_iterations'],
                                self.pars['time_marching_parameter'],
                                self.pars['tolerance_constant'], self.device)
        if (self.parameters['method'] == 'NDF'):
            (im_res, infogpu) = NDF(
                self.pars['input'], self.pars['regularisation_parameter'],
                self.pars['edge_parameter'], self.pars['number_of_iterations'],
                self.pars['time_marching_parameter'],
                self.pars['penalty_type'], self.pars['tolerance_constant'],
                self.device)
        if (self.parameters['method'] == 'DIFF4th'):
            (im_res,
             infogpu) = Diff4th(self.pars['input'],
                                self.pars['regularisation_parameter'],
                                self.pars['edge_parameter'],
                                self.pars['number_of_iterations'],
                                self.pars['time_marching_parameter'],
                                self.pars['tolerance_constant'], self.device)
        if (self.parameters['method'] == 'NLTV'):
            pars_NLTV = {'algorithm': PatchSelect,
                         'input': self.pars['input'],
                         'searchwindow': 9,
                         'patchwindow': 2,
                         'neighbours': 17,
                         'edge_parameter': self.pars['edge_parameter']}
            H_i, H_j, Weights = PatchSelect(pars_NLTV['input'],
                                            pars_NLTV['searchwindow'],
                                            pars_NLTV['patchwindow'],
                                            pars_NLTV['neighbours'],
                                            pars_NLTV['edge_parameter'],
                                            self.device)
            parsNLTV_init = {'algorithm': NLTV,
                             'input': pars_NLTV['input'],
                             'H_i': H_i,
                             'H_j': H_j,
                             'H_k': 0,
                             'Weights': Weights,
                             'regularisation_parameter': self.pars['regularisation_parameter'],
                             'iterations': self.pars['number_of_iterations']}
            im_res = NLTV(parsNLTV_init['input'], parsNLTV_init['H_i'],
                          parsNLTV_init['H_j'], parsNLTV_init['H_k'],
                          parsNLTV_init['Weights'],
                          parsNLTV_init['regularisation_parameter'],
                          parsNLTV_init['iterations'])
            del H_i, H_j, Weights
        #print "calling process frames", data[0].shape
        return im_res
Example #5
    def test_LLT_ROF_CPU_vs_GPU(self):
        filename = os.path.join("lena_gray_512.tif")
        plt = TiffReader()
        # read image
        Im = plt.imread(filename)
        Im = np.asarray(Im, dtype='float32')

        Im = Im / 255
        perc = 0.05
        u0 = Im + np.random.normal(loc=0, scale=perc * Im, size=np.shape(Im))
        u_ref = Im + np.random.normal(
            loc=0, scale=0.01 * Im, size=np.shape(Im))

        # map u0 -> max(u0, 0), i.e. clip negative values (disabled below)
        # f = np.frompyfunc(lambda x: 0 if x < 0 else x, 1,1)
        u0 = u0.astype('float32')
        u_ref = u_ref.astype('float32')

        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
        print("____________LLT-ROF bench___________________")
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")

        # set parameters
        pars = {'algorithm': LLT_ROF,
                'input': u0,
                'regularisation_parameterROF': 0.04,
                'regularisation_parameterLLT': 0.01,
                'number_of_iterations': 1000,
                'time_marching_parameter': 0.0001}

        print("#############LLT-ROF CPU####################")
        start_time = timeit.default_timer()
        lltrof_cpu = LLT_ROF(pars['input'],
                             pars['regularisation_parameterROF'],
                             pars['regularisation_parameterLLT'],
                             pars['number_of_iterations'],
                             pars['time_marching_parameter'], 'cpu')

        rms = rmse(Im, lltrof_cpu)
        pars['rmse'] = rms

        txtstr = printParametersToString(pars)
        txtstr += "%s = %.3fs" % ('elapsed time',
                                  timeit.default_timer() - start_time)
        print(txtstr)
        print("#############LLT-ROF GPU####################")
        start_time = timeit.default_timer()
        try:
            lltrof_gpu = LLT_ROF(pars['input'],
                                 pars['regularisation_parameterROF'],
                                 pars['regularisation_parameterLLT'],
                                 pars['number_of_iterations'],
                                 pars['time_marching_parameter'], 'gpu')

        except ValueError as ve:
            self.assertTrue(True)
            return
        rms = rmse(Im, lltrof_gpu)
        pars['rmse'] = rms
        pars['algorithm'] = LLT_ROF
        txtstr = printParametersToString(pars)
        txtstr += "%s = %.3fs" % ('elapsed time',
                                  timeit.default_timer() - start_time)
        print(txtstr)
        print("--------Compare the results--------")
        tolerance = 1e-04
        diff_im = np.zeros(np.shape(lltrof_gpu))
        diff_im = abs(lltrof_cpu - lltrof_gpu)
        diff_im[diff_im > tolerance] = 1
        self.assertLessEqual(diff_im.sum(), 1)
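The comparison above marks pixels whose absolute CPU/GPU difference exceeds 1e-04 and accepts the result if at most one pixel disagrees. The same criterion, factored into a small reusable helper (a sketch; the helper name and defaults are ours):

import numpy as np

def images_agree(img_a, img_b, tolerance=1e-04, max_mismatches=1):
    # count pixels whose absolute difference exceeds the tolerance
    mismatches = np.count_nonzero(np.abs(img_a - img_b) > tolerance)
    return mismatches <= max_mismatches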
Example #6
    def ADMM(
        self,
        projdata,  # tomographic projection data in 2D (sinogram) or 3D array
        InitialObject=0,  # initialise reconstruction with an array
        iterationsADMM=15,  # the number of outer ADMM iterations
        rho_const=1000.0,  # augmented Lagrangian parameter
        alpha=1.0,  # over-relaxation parameter (ADMM)
        regularisation=None,  # enable regularisation with the CCPi-RGL toolkit
        regularisation_parameter=0.01,  # regularisation parameter if regularisation is not None
        regularisation_parameter2=0.01,  # 2nd regularisation parameter (LLT_ROF method)
        regularisation_iterations=100,  # the number of INNER iterations for regularisation
        tolerance_regul=0.0,  # tolerance to stop inner (regularisation) iterations / e.g. 1e-06
        time_marching_parameter=0.0025,  # gradient step parameter for the ROF_TV, LLT_ROF, NDF, DIFF4th penalties
        TGV_alpha1=1.0,  # TGV specific parameter for the 1st order term
        TGV_alpha2=2.0,  # TGV specific parameter for the 2nd order term
        TGV_LipschitzConstant=12.0,  # TGV specific parameter for convergence
        edge_param=0.01,  # edge (noise) threshold parameter for NDF and DIFF4th
        NDF_penalty=1,  # NDF specific penalty type: 1 - Huber, 2 - Perona-Malik, 3 - Tukey Biweight
        NLTV_H_i=0,  # NLTV-specific parameter, the array of i-related indices
        NLTV_H_j=0,  # NLTV-specific parameter, the array of j-related indices
        NLTV_Weights=0,  # NLTV-specific parameter, the array of weights
        methodTV=0  # 0/1 - isotropic/anisotropic TV
    ):
        def ADMM_Ax(x):
            data_upd = self.Atools.A_optomo(x)
            x_temp = self.Atools.A_optomo.transposeOpTomo(data_upd)
            x_upd = x_temp + self.rho_const * x
            return x_upd

        def ADMM_Atb(b):
            b = self.Atools.A_optomo.transposeOpTomo(b)
            return b

        self.rho_const = rho_const
        (data_dim, rec_dim) = np.shape(self.Atools.A_optomo)

        # The dependency on the CCPi-RGL toolkit for regularisation
        if regularisation is not None:
            if regularisation not in ('ROF_TV', 'FGP_TV', 'SB_TV', 'LLT_ROF',
                                      'TGV', 'NDF', 'Diff4th', 'NLTV'):
                raise ValueError(
                    'Unknown regularisation method, select: ROF_TV, FGP_TV, '
                    'SB_TV, LLT_ROF, TGV, NDF, Diff4th, NLTV')
            else:
                from ccpi.filters.regularisers import ROF_TV, FGP_TV, SB_TV, LLT_ROF, TGV, NDF, Diff4th, NLTV

        # initialise the solution and other ADMM variables
        if (np.size(InitialObject) == rec_dim):
            # the object has been initialised with an array
            X = InitialObject.ravel()
            del InitialObject
        else:
            X = np.zeros(rec_dim, 'float32')

        denomN = 1.0 / np.size(X)
        z = np.zeros(rec_dim, 'float32')
        u = np.zeros(rec_dim, 'float32')
        b_to_solver_const = self.Atools.A_optomo.transposeOpTomo(
            projdata.ravel())

        # Outer ADMM iterations
        for iter in range(0, iterationsADMM):
            X_old = X
            # solving quadratic problem using linalg solver
            A_to_solver = scipy.sparse.linalg.LinearOperator(
                (rec_dim, rec_dim), matvec=ADMM_Ax, rmatvec=ADMM_Atb)
            b_to_solver = b_to_solver_const + self.rho_const * (z - u)
            outputSolver = scipy.sparse.linalg.gmres(A_to_solver,
                                                     b_to_solver,
                                                     tol=self.tolerance,
                                                     maxiter=20)
            X = np.float32(outputSolver[0])  # get gmres solution
            if (self.nonnegativity == 1):
                X[X < 0.0] = 0.0
            # ADMM iterations stopping criteria
            nrm = LA.norm(X - X_old) * denomN
            if nrm > self.tolerance:
                # z-update with relaxation
                zold = z.copy()
                x_hat = alpha * X + (1.0 - alpha) * zold
                if (self.geom == '2D'):
                    x_prox_reg = (x_hat + u).reshape(
                        [self.ObjSize, self.ObjSize])
                if (self.geom == '3D'):
                    x_prox_reg = (x_hat + u).reshape(
                        [self.DetectorsDimV, self.ObjSize, self.ObjSize])
                # Apply regularisation using CCPi-RGL toolkit. The proximal operator of the chosen regulariser
                if (regularisation == 'ROF_TV'):
                    # Rudin - Osher - Fatemi Total variation method
                    (z, info_vec) = ROF_TV(x_prox_reg,
                                           regularisation_parameter,
                                           regularisation_iterations,
                                           time_marching_parameter,
                                           tolerance_regul, self.device)
                if (regularisation == 'FGP_TV'):
                    # Fast-Gradient-Projection Total variation method
                    (z, info_vec) = FGP_TV(x_prox_reg,
                                           regularisation_parameter,
                                           regularisation_iterations,
                                           tolerance_regul, methodTV, 0,
                                           self.device)
                if (regularisation == 'SB_TV'):
                    # Split Bregman Total variation method
                    (z, info_vec) = SB_TV(x_prox_reg, regularisation_parameter,
                                          regularisation_iterations,
                                          tolerance_regul, methodTV,
                                          self.device)
                if (regularisation == 'LLT_ROF'):
                    # Lysaker-Lundervold-Tai + ROF Total variation method
                    (z,
                     info_vec) = LLT_ROF(x_prox_reg, regularisation_parameter,
                                         regularisation_parameter2,
                                         regularisation_iterations,
                                         time_marching_parameter,
                                         tolerance_regul, self.device)
                if (regularisation == 'TGV'):
                    # Total Generalised Variation method
                    (z, info_vec) = TGV(x_prox_reg, regularisation_parameter,
                                        TGV_alpha1, TGV_alpha2,
                                        regularisation_iterations,
                                        TGV_LipschitzConstant, tolerance_regul,
                                        self.device)
                if (regularisation == 'NDF'):
                    # Nonlinear isotropic diffusion method
                    (z, info_vec) = NDF(x_prox_reg, regularisation_parameter,
                                        edge_param, regularisation_iterations,
                                        time_marching_parameter, NDF_penalty,
                                        tolerance_regul, self.device)
                if (regularisation == 'Diff4th'):
                    # Anisotropic diffusion of higher order
                    (z,
                     info_vec) = Diff4th(x_prox_reg, regularisation_parameter,
                                         edge_param, regularisation_iterations,
                                         time_marching_parameter,
                                         tolerance_regul, self.device)
                if (regularisation == 'NLTV'):
                    # Non-local Total Variation / 2D only
                    z = NLTV(x_prox_reg, NLTV_H_i, NLTV_H_j, NLTV_H_i,
                             NLTV_Weights, regularisation_parameter,
                             regularisation_iterations)
                z = z.ravel()
                # update u variable
                u = u + (x_hat - z)
            else:
                print('ADMM stopped at iteration', iter)
                break
        if (self.geom == '2D'):
            return X.reshape([self.ObjSize, self.ObjSize])
        if (self.geom == '3D'):
            return X.reshape([self.DetectorsDimV, self.ObjSize, self.ObjSize])
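The quadratic sub-problem inside each ADMM iteration is solved matrix-free: ADMM_Ax applies the normal operator plus rho times the identity through the projector, and SciPy's gmres only needs that action via a LinearOperator. A stripped-down sketch of the same pattern on a small dense stand-in problem (illustrative only; a random matrix replaces the tomographic projector):

import numpy as np
import scipy.sparse.linalg

rng = np.random.default_rng(0)
A = rng.standard_normal((60, 40))      # stand-in for the forward projector
b = rng.standard_normal(60)            # stand-in for the projection data
rho = 10.0

def normal_matvec(x):
    # applies A^T A x + rho * x, mirroring ADMM_Ax above
    return A.T.dot(A.dot(x)) + rho * x

op = scipy.sparse.linalg.LinearOperator((40, 40), matvec=normal_matvec)
rhs = A.T.dot(b)   # mirrors b_to_solver_const; the rho*(z - u) term is omitted in this sketch
x, exit_code = scipy.sparse.linalg.gmres(op, rhs, maxiter=20)
print("gmres exit code:", exit_code)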
Example #7
    def FISTA(
        self,
        projdata,  # tomographic projection data in 2D (sinogram) or 3D array
        weights=None,  # raw projection data for PWLS model
        lambdaR_L1=0.0,  # regularisation parameter for GH data model
        alpha_ring=50,  # GH data model convergence accelerator (use carefully!)
        huber_data_threshold=0.0,  # threshold parameter for the Huber data fidelity
        student_data_threshold=0.0,  # threshold parameter for the Student's t data fidelity
        InitialObject=None,  # initialise reconstruction with an array
        lipschitz_const=5e+06,  # can be a given value or calculated using Power method
        iterationsFISTA=100,  # the number of OUTER FISTA iterations
        regularisation=None,  # enable regularisation with the CCPi-RGL toolkit
        regularisation_parameter=0.01,  # regularisation parameter if regularisation is not None
        regularisation_parameter2=0.01,  # 2nd regularisation parameter (LLT_ROF method)
        regularisation_iterations=100,  # the number of INNER iterations for regularisation
        tolerance_regul=0.0,  # tolerance to stop inner (regularisation) iterations / e.g. 1e-06
        time_marching_parameter=0.0025,  # gradient step parameter for the ROF_TV, LLT_ROF, NDF, DIFF4th penalties
        TGV_alpha1=1.0,  # TGV specific parameter for the 1st order term
        TGV_alpha2=2.0,  # TGV specific parameter for the 2nd order term
        TGV_LipschitzConstant=12.0,  # TGV specific parameter for convergence
        edge_param=0.01,  # edge (noise) threshold parameter for NDF and DIFF4th
        NDF_penalty='Huber',  # NDF specific penalty type: Huber (default), Perona, Tukey
        NLTV_H_i=0,  # NLTV-specific parameter, the array of i-related indices
        NLTV_H_j=0,  # NLTV-specific parameter, the array of j-related indices
        NLTV_Weights=0,  # NLTV-specific parameter, the array of weights
        methodTV=0  # 0/1 - TV specific isotropic/anisotropic choice
    ):

        L_const_inv = 1.0 / lipschitz_const  # inverted Lipschitz constant
        if (self.geom == '2D'):
            # 2D reconstruction
            # initialise the solution
            if (np.size(InitialObject) == self.ObjSize**2):
                # the object has been initialised with an array
                X = InitialObject
                del InitialObject
            else:
                X = np.zeros((self.ObjSize, self.ObjSize),
                             'float32')  # initialise with zeros
                r = np.zeros(
                    (self.DetectorsDimH, 1),
                    'float32')  # 1D array of sparse "ring" variables (GH)
        if (self.geom == '3D'):
            # initialise the solution
            if (np.size(InitialObject) == self.ObjSize**3):
                # the object has been initialised with an array
                X = InitialObject
                del InitialObject
            else:
                X = np.zeros((self.DetectorsDimV, self.ObjSize, self.ObjSize),
                             'float32')  # initialise with zeros
                r = np.zeros(
                    (self.DetectorsDimV, self.DetectorsDimH),
                    'float32')  # 2D array of sparse "ring" variables (GH)
        if (self.OS_number > 1):
            regularisation_iterations = int(regularisation_iterations /
                                            self.OS_number)
        if (NDF_penalty == 'Huber'):
            NDF_penalty = 1
        elif (NDF_penalty == 'Perona'):
            NDF_penalty = 2
        elif (NDF_penalty == 'Tukey'):
            NDF_penalty = 3
        else:
            raise ValueError("For NDF_penalty choose Huber, Perona or Tukey")

        # The dependency on the CCPi-RGL toolkit for regularisation
        if regularisation is not None:
            if regularisation not in ('ROF_TV', 'FGP_TV', 'SB_TV', 'LLT_ROF',
                                      'TGV', 'NDF', 'Diff4th', 'NLTV'):
                raise ValueError(
                    'Unknown regularisation method, select: ROF_TV, FGP_TV, '
                    'SB_TV, LLT_ROF, TGV, NDF, Diff4th, NLTV')
            else:
                from ccpi.filters.regularisers import ROF_TV, FGP_TV, SB_TV, LLT_ROF, TGV, NDF, Diff4th, NLTV

#****************************************************************************#
# FISTA (model-based modification) algorithm begins here:
        t = 1.0
        denomN = 1.0 / np.size(X)
        X_t = np.copy(X)
        r_x = r.copy()
        # Outer FISTA iterations
        for iter in range(0, iterationsFISTA):
            r_old = r
            # Do G-H fidelity pre-calculations using the full projections dataset for OS version
            if ((self.OS_number > 1) and (lambdaR_L1 > 0.0) and (iter > 0)):
                if (self.geom == '2D'):
                    vec = np.zeros((self.DetectorsDimH))
                else:
                    vec = np.zeros((self.DetectorsDimV, self.DetectorsDimH))
                for sub_ind in range(self.OS_number):
                    # select a specific set of indices for the subset (OS)
                    indVec = self.Atools.newInd_Vec[sub_ind, :]
                    if (indVec[self.Atools.NumbProjBins - 1] == 0):
                        indVec = indVec[:-1]  #shrink vector size
                    if (self.geom == '2D'):
                        res = self.Atools.forwprojOS(
                            X_t, sub_ind) - projdata[indVec, :]
                        res[:, 0:None] = res[:, 0:None] + alpha_ring * r_x[:, 0]
                        vec = vec + (1.0 / self.OS_number) * res.sum(axis=0)
                    else:
                        res = self.Atools.forwprojOS(
                            X_t, sub_ind) - projdata[:, indVec, :]
                        for ang_index in range(len(indVec)):
                            res[:, ang_index, :] = res[:, ang_index, :] + alpha_ring * r_x
                        vec = res.sum(axis=1)
                if (self.geom == '2D'):
                    r[:, 0] = r_x[:, 0] - np.multiply(L_const_inv, vec)
                else:
                    r = r_x - np.multiply(L_const_inv, vec)
            # loop over subsets (OS)
            for sub_ind in range(self.OS_number):
                X_old = X
                t_old = t
                if (self.OS_number > 1):
                    # select a specific set of indices for the subset (OS)
                    indVec = self.Atools.newInd_Vec[sub_ind, :]
                    if (indVec[self.Atools.NumbProjBins - 1] == 0):
                        indVec = indVec[:-1]  #shrink vector size
                    if (self.OS_number > 1):
                        # OS-reduced residuals
                        if (self.geom == '2D'):
                            if (self.datafidelity == 'LS'):
                                # 2D Least-squares (LS) data fidelity - OS (linear)
                                res = self.Atools.forwprojOS(
                                    X_t, sub_ind) - projdata[indVec, :]
                            elif (self.datafidelity == 'PWLS'):
                                # 2D Penalised Weighted Least-squares - OS data fidelity (approximately linear)
                                res = np.multiply(
                                    weights[indVec, :],
                                    (self.Atools.forwprojOS(X_t, sub_ind) -
                                     projdata[indVec, :]))
                            else:
                                raise ValueError(
                                    "Choose the data fidelity term: LS, PWLS")
                            # ring removal part for Group-Huber (GH) fidelity (2D)
                            if ((lambdaR_L1 > 0.0) and (iter > 0)):
                                res[:, 0:None] = res[:, 0:None] + alpha_ring * r_x[:, 0]
                        else:  # 3D
                            if (self.datafidelity == 'LS'):
                                # 3D Least-squares (LS) data fidelity - OS (linear)
                                res = self.Atools.forwprojOS(
                                    X_t, sub_ind) - projdata[:, indVec, :]
                            elif (self.datafidelity == 'PWLS'):
                                # 3D Penalised Weighted Least-squares - OS data fidelity (approximately linear)
                                res = np.multiply(
                                    weights[:, indVec, :],
                                    (self.Atools.forwprojOS(X_t, sub_ind) -
                                     projdata[:, indVec, :]))
                            else:
                                raise ValueError(
                                    "Choose the data fidelity term: LS, PWLS")
                            # GH - fidelity part (3D)
                            if ((lambdaR_L1 > 0.0) and (iter > 0)):
                                for ang_index in range(len(indVec)):
                                    res[:, ang_index, :] = res[:, ang_index, :] + alpha_ring * r_x
                else:  # non-OS (classical all-data approach)
                    if (self.datafidelity == 'LS'):
                        # full residual for LS fidelity
                        res = self.Atools.forwproj(X_t) - projdata
                    elif (self.datafidelity == 'PWLS'):
                        # full gradient for the PWLS fidelity
                        res = np.multiply(
                            weights, (self.Atools.forwproj(X_t) - projdata))
                    else:
                        raise ValueError("Choose the data fidelity term: LS, PWLS")
                    if ((self.geom == '2D') and (lambdaR_L1 > 0.0)
                            and (iter > 0)):  # GH 2D part
                        res[0:None, :] = res[0:None, :] + alpha_ring * r_x[:, 0]
                        vec = res.sum(axis=0)
                        r[:, 0] = r_x[:, 0] - np.multiply(L_const_inv, vec)
                    if ((self.geom == '3D') and (lambdaR_L1 > 0.0)
                            and (iter > 0)):  # GH 3D part
                        for ang_index in range(self.angles_number):
                            res[:, ang_index, :] = res[:, ang_index, :] + alpha_ring * r_x
                            vec = res.sum(axis=1)
                            r = r_x - np.multiply(L_const_inv, vec)
                if (huber_data_threshold > 0.0):
                    # apply Huber penalty
                    multHuber = np.ones(np.shape(res))
                    idx = np.where(np.abs(res) > huber_data_threshold)
                    multHuber[idx] = np.divide(huber_data_threshold,
                                               np.abs(res[idx]))
                    if (self.OS_number > 1):
                        # OS-Huber-gradient
                        grad_fidelity = self.Atools.backprojOS(
                            np.multiply(multHuber, res), sub_ind)
                    else:
                        # full Huber gradient
                        grad_fidelity = self.Atools.backproj(
                            np.multiply(multHuber, res))
                elif (student_data_threshold > 0.0):
                    # apply the Student's t penalty
                    multStudent = np.ones(np.shape(res))
                    multStudent = np.divide(2.0,
                                            student_data_threshold**2 + res**2)
                    if (self.OS_number > 1):
                        # OS Student's t gradient
                        grad_fidelity = self.Atools.backprojOS(
                            np.multiply(multStudent, res), sub_ind)
                    else:
                        # full Student's t gradient
                        grad_fidelity = self.Atools.backproj(
                            np.multiply(multStudent, res))
                else:
                    if (self.OS_number > 1):
                        # OS reduced gradient
                        grad_fidelity = self.Atools.backprojOS(res, sub_ind)
                    else:
                        # full gradient
                        grad_fidelity = self.Atools.backproj(res)

                X = X_t - L_const_inv * grad_fidelity
                if (self.nonnegativity == 1):
                    X[X < 0.0] = 0.0
                # The proximal operator of the chosen regulariser
                if (regularisation == 'ROF_TV'):
                    # Rudin - Osher - Fatemi Total variation method
                    (X, info_vec) = ROF_TV(X, regularisation_parameter,
                                           regularisation_iterations,
                                           time_marching_parameter,
                                           tolerance_regul, self.device)
                if (regularisation == 'FGP_TV'):
                    # Fast-Gradient-Projection Total variation method
                    (X, info_vec) = FGP_TV(X, regularisation_parameter,
                                           regularisation_iterations,
                                           tolerance_regul, methodTV, 0,
                                           self.device)
                if (regularisation == 'SB_TV'):
                    # Split Bregman Total variation method
                    (X, info_vec) = SB_TV(X, regularisation_parameter,
                                          regularisation_iterations,
                                          tolerance_regul, methodTV,
                                          self.device)
                if (regularisation == 'LLT_ROF'):
                    # Lysaker-Lundervold-Tai + ROF Total variation method
                    (X, info_vec) = LLT_ROF(X, regularisation_parameter,
                                            regularisation_parameter2,
                                            regularisation_iterations,
                                            time_marching_parameter,
                                            tolerance_regul, self.device)
                if (regularisation == 'TGV'):
                    # Total Generalised Variation method
                    (X, info_vec) = TGV(X, regularisation_parameter,
                                        TGV_alpha1, TGV_alpha2,
                                        regularisation_iterations,
                                        TGV_LipschitzConstant, tolerance_regul,
                                        self.device)
                if (regularisation == 'NDF'):
                    # Nonlinear isotropic diffusion method
                    (X, info_vec) = NDF(X, regularisation_parameter,
                                        edge_param, regularisation_iterations,
                                        time_marching_parameter, NDF_penalty,
                                        tolerance_regul, self.device)
                if (regularisation == 'Diff4th'):
                    # Anisotropic diffusion of higher order
                    (X, info_vec) = Diff4th(X, regularisation_parameter,
                                            edge_param,
                                            regularisation_iterations,
                                            time_marching_parameter,
                                            tolerance_regul, self.device)
                if (regularisation == 'NLTV'):
                    # Non-local Total Variation
                    X = NLTV(X, NLTV_H_i, NLTV_H_j, NLTV_H_i, NLTV_Weights,
                             regularisation_parameter,
                             regularisation_iterations)
                t = (1.0 + np.sqrt(1.0 + 4.0 * t**2)) * 0.5  # updating the t variable
                X_t = X + ((t_old - 1.0) / t) * (X - X_old)  # updating the extrapolated point X_t
            if ((lambdaR_L1 > 0.0) and (iter > 0)):
                r = np.maximum((np.abs(r) - lambdaR_L1), 0.0) * np.sign(
                    r)  # soft-thresholding operator for ring vector
                r_x = r + ((t_old - 1.0) / t) * (r - r_old)  # updating r
            # stopping criteria
            nrm = LA.norm(X - X_old) * denomN
            if nrm < self.tolerance:
                print('FISTA stopped at iteration', iter)
                break
        return X
Example #8
a.set_title('Noisy Image')
imgplot = plt.imshow(noisyVol[10, :, :], cmap="gray")

# set parameters
pars = {'algorithm': LLT_ROF,
        'input': noisyVol,
        'regularisation_parameterROF': 0.04,
        'regularisation_parameterLLT': 0.015,
        'number_of_iterations': 300,
        'time_marching_parameter': 0.0025}

print("#############LLT-ROF GPU####################")
start_time = timeit.default_timer()
lltrof_gpu3D = LLT_ROF(pars['input'], pars['regularisation_parameterROF'],
                       pars['regularisation_parameterLLT'],
                       pars['number_of_iterations'],
                       pars['time_marching_parameter'], 'gpu')

rms = rmse(idealVol, lltrof_gpu3D)
pars['rmse'] = rms

txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time', timeit.default_timer() - start_time)
print(txtstr)
a = fig.add_subplot(1, 2, 2)

# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
       verticalalignment='top', bbox=props)
Example #9
    def FISTA(
        self,
        projdata,  # tomographic projection data in 2D (sinogram) or 3D array
        weights=None,  # raw projection data for PWLS model
        InitialObject=0,  # initialise reconstruction with an array
        lipschitz_const=5e+06,  # can be a given value or calculated using Power method
        iterationsFISTA=100,  # the number of OUTER FISTA iterations
        regularisation=None,  # enable regularisation with the CCPi-RGL toolkit
        regularisation_parameter=0.01,  # regularisation parameter if regularisation is not None
        regularisation_parameter2=0.01,  # 2nd regularisation parameter (LLT_ROF method)
        regularisation_iterations=100,  # the number of INNER iterations for regularisation
        time_marching_parameter=0.0025,  # gradient step parameter for the ROF_TV, LLT_ROF, NDF, DIFF4th penalties
        tolerance_regul=1e-06,  # tolerance to stop regularisation
        TGV_alpha1=1.0,  # TGV specific parameter for the 1st order term
        TGV_alpha2=0.8,  # TGV specific parameter for the 2nd order term
        TGV_LipschitzConstant=12.0,  # TGV specific parameter for convergence
        edge_param=0.01,  # edge (noise) threshold parameter for NDF and DIFF4th
        NDF_penalty=1,  # NDF specific penalty type: 1 - Huber, 2 - Perona-Malik, 3 - Tukey Biweight
        NLTV_H_i=0,  # NLTV-specific parameter, the array of i-related indices
        NLTV_H_j=0,  # NLTV-specific parameter, the array of j-related indices
        NLTV_Weights=0,  # NLTV-specific parameter, the array of weights
        methodTV=0,  # 0/1 - isotropic/anisotropic TV
        nonneg=0  # 0/1 disabled/enabled nonnegativity (for FGP_TV currently)
    ):

        L_const_inv = 1.0 / lipschitz_const  # inverted Lipschitz constant
        if (self.geom == '2D'):
            # 2D reconstruction
            # initialise the solution
            if (np.size(InitialObject) == self.ObjSize**2):
                # the object has been initialised with an array
                X = InitialObject
                del InitialObject
            else:
                X = np.zeros((self.ObjSize, self.ObjSize), 'float32')
        if (self.geom == '3D'):
            # initialise the solution
            if (np.size(InitialObject) == self.ObjSize**3):
                # the object has been initialised with an array
                X = InitialObject
                del InitialObject
            else:
                X = np.zeros((self.ObjSize, self.ObjSize, self.ObjSize),
                             'float32')
        if (self.OS_number > 1):
            regularisation_iterations = int(regularisation_iterations /
                                            self.OS_number)

        # The dependency on the CCPi-RGL toolkit for regularisation
        if regularisation is not None:
            if regularisation not in ('ROF_TV', 'FGP_TV', 'SB_TV', 'LLT_ROF',
                                      'TGV', 'NDF', 'DIFF4th', 'NLTV'):
                raise ValueError(
                    'Unknown regularisation method, select: ROF_TV, FGP_TV, '
                    'SB_TV, LLT_ROF, TGV, NDF, DIFF4th, NLTV')
            else:
                from ccpi.filters.regularisers import ROF_TV, FGP_TV, SB_TV, LLT_ROF, TGV, NDF, DIFF4th, NLTV

#****************************************************************************#
# FISTA algorithm begins here:
        t = 1.0
        denomN = 1.0 / np.size(X)
        X_t = np.copy(X)
        # Outer FISTA iterations
        for iter in range(0, iterationsFISTA):
            for sub_ind in range(self.OS_number):
                # loop over subsets
                X_old = X
                t_old = t
                if (self.OS_number > 1):
                    # select a specific set of indices for the subset (OS)
                    indVec = self.Atools.newInd_Vec[sub_ind, :]
                    if (indVec[self.Atools.NumbProjBins - 1] == 0):
                        indVec = indVec[:-1]  #shrink vector size

                if (self.datafidelity == 'LS'):
                    # Least-squares data fidelity (linear)
                    if (self.OS_number > 1):
                        # OS-reduced gradient for LS fidelity
                        if (self.geom == '2D'):
                            grad_fidelity = self.Atools.backprojOS(
                                self.Atools.forwprojOS(X_t, sub_ind) -
                                projdata[indVec, :], sub_ind)
                        else:
                            grad_fidelity = self.Atools.backprojOS(
                                self.Atools.forwprojOS(X_t, sub_ind) -
                                projdata[:, indVec, :], sub_ind)
                    else:
                        # full gradient for LS fidelity
                        grad_fidelity = self.Atools.backproj(
                            self.Atools.forwproj(X_t) - projdata)
                elif (self.datafidelity == 'PWLS'):
                    # Penalised Weighted Least-squares data fidelity (approximately linear)
                    if (self.OS_number > 1):
                        # OS-reduced gradient for PWLS fidelity
                        if (self.geom == '2D'):
                            grad_fidelity = self.Atools.backprojOS(
                                np.multiply(
                                    weights[indVec, :],
                                    (self.Atools.forwprojOS(X_t, sub_ind) -
                                     projdata[indVec, :])), sub_ind)
                        else:
                            grad_fidelity = self.Atools.backprojOS(
                                np.multiply(
                                    weights[:, indVec, :],
                                    (self.Atools.forwprojOS(X_t, sub_ind) -
                                     projdata[:, indVec, :])), sub_ind)
                    else:
                        # full gradient for PWLS fidelity
                        grad_fidelity = self.Atools.backproj(
                            np.multiply(
                                weights,
                                (self.Atools.forwproj(X_t) - projdata)))
                else:
                    raise ValueError("Choose the data fidelity term: LS, PWLS")
                X = X_t - L_const_inv * grad_fidelity
                # stopping criteria
                nrm = LA.norm(X - X_old) * denomN
                if nrm > self.tolerance:
                    # The proximal operator of the chosen regulariser
                    if (regularisation == 'ROF_TV'):
                        # Rudin - Osher - Fatemi Total variation method
                        X = ROF_TV(X, regularisation_parameter,
                                   regularisation_iterations,
                                   time_marching_parameter, self.device)
                    if (regularisation == 'FGP_TV'):
                        # Fast-Gradient-Projection Total variation method
                        X = FGP_TV(X, regularisation_parameter,
                                   regularisation_iterations, tolerance_regul,
                                   methodTV, nonneg, 0, self.device)
                    if (regularisation == 'SB_TV'):
                        # Split Bregman Total variation method
                        X = SB_TV(X, regularisation_parameter,
                                  regularisation_iterations, tolerance_regul,
                                  methodTV, 0, self.device)
                    if (regularisation == 'LLT_ROF'):
                        # Lysaker-Lundervold-Tai + ROF Total variation method
                        X = LLT_ROF(X, regularisation_parameter,
                                    regularisation_parameter2,
                                    regularisation_iterations,
                                    time_marching_parameter, self.device)
                    if (regularisation == 'TGV'):
                        # Total Generalised Variation method (2D only currently)
                        X = TGV(X, regularisation_parameter, TGV_alpha1,
                                TGV_alpha2, regularisation_iterations,
                                TGV_LipschitzConstant,
                                'cpu')  # till gpu version is fixed
                    if (regularisation == 'NDF'):
                        # Nonlinear isotropic diffusion method
                        X = NDF(X, regularisation_parameter, edge_param,
                                regularisation_iterations,
                                time_marching_parameter, NDF_penalty,
                                self.device)
                    if (regularisation == 'DIFF4th'):
                        # Anisotropic diffusion of higher order
                        X = DIFF4th(X, regularisation_parameter, edge_param,
                                    regularisation_iterations,
                                    time_marching_parameter, self.device)
                    if (regularisation == 'NLTV'):
                        # Non-local Total Variation
                        X = NLTV(X, NLTV_H_i, NLTV_H_j, NLTV_H_i, NLTV_Weights,
                                 regularisation_parameter,
                                 regularisation_iterations)
                    t = (1.0 + np.sqrt(1.0 + 4.0 * t**2)) * 0.5  # updating the t variable
                    X_t = X + ((t_old - 1.0) / t) * (X - X_old)  # updating the extrapolated point X_t
                else:
                    #print('FISTA stopped at iteration', iter)
                    break
#****************************************************************************#
        return X
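Both FISTA variants above share the same acceleration bookkeeping: after each gradient (and proximal) step the scalar t is updated as t = (1 + sqrt(1 + 4*t_old**2)) / 2 and the extrapolated point X_t = X + ((t_old - 1)/t) * (X - X_old) is used for the next gradient evaluation. A minimal sketch of that scheme on a toy least-squares problem, with no regularisation or ordered subsets (data and names are illustrative):

import numpy as np

rng = np.random.default_rng(1)
A = rng.standard_normal((80, 50))
b = A.dot(rng.standard_normal(50))
L = np.linalg.norm(A, 2) ** 2          # Lipschitz constant of the least-squares gradient

X = np.zeros(50)
X_t = X.copy()
t = 1.0
for it in range(200):
    X_old, t_old = X, t
    grad = A.T.dot(A.dot(X_t) - b)                   # gradient of 0.5*||A x - b||^2 at X_t
    X = X_t - grad / L                               # gradient step (a proximal step would follow here)
    t = (1.0 + np.sqrt(1.0 + 4.0 * t ** 2)) * 0.5    # same t-update as in the code above
    X_t = X + ((t_old - 1.0) / t) * (X - X_old)      # momentum / extrapolation step
print("final residual norm:", np.linalg.norm(A.dot(X) - b))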