Example no. 1
 def __init__(self, mesh, k, regularization='tikhonov'):
     """
     Inputs:
         pbtype = 'denoising' or 'deblurring'
         mesh = Fenics mesh
         k = Fenics Expression of the blurring kernel; must have parameter t
         f = target image
     """
     self.mesh = mesh
     self.V = dl.FunctionSpace(self.mesh, 'Lagrange', 1)
     self.dimV = self.V.dim()
     self.xx = self.V.dofmap().tabulate_all_coordinates(self.mesh)
     self.test, self.trial = dl.TestFunction(self.V), dl.TrialFunction(
         self.V)
     # Target data:
     self.f_true = 0.75 * (self.xx >= .1) * (self.xx <= .25)
     self.f_true += (self.xx >= 0.28) * (self.xx <= 0.3) * (15 * self.xx -
                                                            15 * 0.28)
     self.f_true += (self.xx > 0.3) * (self.xx < 0.33) * 0.3
     self.f_true += (self.xx >= 0.33) * (self.xx <= 0.35) * (-15 * self.xx +
                                                             15 * 0.35)
     self.f_true += (self.xx >= .4) * (self.xx <= .9) * (
         self.xx - .4)**2 * (self.xx - 0.9)**2 / .25**4
     self.g = None  # current iterate
     # kernel operator
     self.k = k
     self.Kweak = dl.inner(self.k, self.test) * dl.dx
     self.assembleK()
     # mass matrix
     self.Mweak = dl.inner(self.test, self.trial) * dl.dx
     self.M = dl.assemble(self.Mweak)
     # regularization
     self.parameters = {}   # options dict (line-search settings are added below)
     self.parameters['regularization'] = regularization
     if regularization == 'tikhonov':
         self.RegTikh = LaplacianPrior({
             'gamma': 1.0,
             'beta': 0.0,
             'Vm': self.V
         })
         self.R = self.RegTikh.Minvprior.array()
     elif regularization == 'TV':
         self.RegTV = TV({'eps': 1e-2, 'Vm': self.V})
     # line search parameters
     self.parameters['alpha0'] = 1.0
     self.parameters['rho'] = 0.5
     self.parameters['c'] = 5e-5
     self.parameters['max_backtrack'] = 12
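
The LaplacianPrior class itself is not shown in these excerpts. As a rough sketch of what the 'tikhonov' branch above relies on, a Laplacian (Tikhonov) regularization operator of the form gamma*R + beta*M, with R a stiffness matrix and M a mass matrix, can be assembled directly in legacy FEniCS; this is the same combination the unit tests in the final example below check get_precond() against. The mesh, function space, and parameter values here are placeholders, not taken from the repository.

import dolfin as dl

# Assumed sketch, not repository code: assemble the gamma*R + beta*M operator
# that a Laplacian/Tikhonov regularization term combines.
mesh = dl.UnitIntervalMesh(100)                 # placeholder 1D mesh
V = dl.FunctionSpace(mesh, 'Lagrange', 1)
test, trial = dl.TestFunction(V), dl.TrialFunction(V)
R = dl.assemble(dl.inner(dl.nabla_grad(trial), dl.nabla_grad(test)) * dl.dx)   # stiffness
M = dl.assemble(dl.inner(trial, test) * dl.dx)                                 # mass
gamma, beta = 1.0, 0.0                          # values used in the 'tikhonov' branch above
A = gamma * R + beta * M                        # regularization operator / preconditioner
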
Example no. 2
    def __init__(self,
                 CGdeg,
                 regularizationtype,
                 h=1.0,
                 parameters=[],
                 image='image.dat'):
        class Image(dl.Expression):
            def __init__(self, Lx, Ly, data):
                self.data = data
                self.hx = Lx / float(self.data.shape[1] - 1)
                self.hy = Ly / float(self.data.shape[0] - 1)

            def eval(self, values, x):
                # nearest lower-left pixel of the image grid
                j = int(math.floor(x[0] / self.hx))
                i = int(math.floor(x[1] / self.hy))
                values[0] = self.data[i, j]

        data = np.loadtxt(image, delimiter=',')
        #Lx, Ly = float(data.shape[1])/float(data.shape[0]), 1.
        Lx, Ly = 2., 1.
        scaling = 100. * h  # =1.0 => h~0.01
        Lx, Ly = scaling * Lx, scaling * Ly
        np.random.seed(seed=1)
        noise_std_dev = 0.3
        noise = noise_std_dev * np.random.randn(data.shape[0], data.shape[1])
        print '||noise||={}'.format(np.linalg.norm(noise))
        mesh = dl.RectangleMesh(dl.Point(0, 0), dl.Point(Lx, Ly), 200, 100)
        mcoord = mesh.coordinates()
        print 'hx={}, hy={}'.format((mcoord[-1][0] - mcoord[0][0]) / 200.,
                                    (mcoord[-1][1] - mcoord[0][1]) / 100.)
        V = dl.FunctionSpace(mesh, 'Lagrange', CGdeg)
        trueImage = Image(Lx, Ly, data)
        noisyImage = Image(Lx, Ly, data + noise)
        print 'min(data)={}, max(data)={}'.format(np.amin(data), np.amax(data))
        print 'min(data+noise)={}, max(data+noise)={}'.format(
            np.amin(data + noise), np.amax(data + noise))
        self.u_true = dl.interpolate(trueImage, V)
        self.u_0 = dl.interpolate(noisyImage, V)

        self.u = dl.Function(V)
        self.ucopy = dl.Function(V)
        self.G = dl.Function(V)
        self.du = dl.Function(V)
        u_test = dl.TestFunction(V)
        u_trial = dl.TrialFunction(V)

        Mweak = dl.inner(u_test, u_trial) * dl.dx
        self.M = dl.assemble(Mweak)
        self.solverM = dl.LUSolver('petsc')
        self.solverM.parameters['symmetric'] = True
        self.solverM.parameters['reuse_factorization'] = True
        self.solverM.set_operator(self.M)

        self.regul = regularizationtype
        if self.regul == 'tikhonov':
            self.Regul = LaplacianPrior({'Vm': V, 'gamma': 1.0, 'beta': 0.0})
        elif self.regul == 'TV':
            paramTV = {'Vm': V, 'k': 1.0, 'eps': 1e-4, 'GNhessian': True}
            paramTV.update(parameters)
            self.Regul = TV(paramTV)
            self.inexact = False
        elif self.regul == 'TVPD':
            paramTV = {'Vm': V, 'k': 1.0, 'eps': 1e-4, 'exact': False}
            paramTV.update(parameters)
            self.Regul = TVPD(paramTV)
            self.inexact = False
        self.alpha = 1.0

        self.Hess = self.M

        self.parametersLS = {'alpha0': 1.0, 'rho': 0.5, 'c': 5e-5,
                             'max_backtrack': 12, 'cgtol': 0.5, 'maxiter': 50000}

        filename, ext = os.path.splitext(sys.argv[0])
        #if os.path.isdir(filename + '/'):   shutil.rmtree(filename + '/')
        self.myplot = PlotFenics(filename)

        try:
            # probe whether the ML algebraic multigrid preconditioner is available
            solver = dl.PETScKrylovSolver('cg', 'ml_amg')
            self.precond = 'ml_amg'
        except:
            print '*** WARNING: ML not installed -- using petsc_amg instead'
            self.precond = 'petsc_amg'
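
The 'alpha0', 'rho', 'c', and 'max_backtrack' entries of self.parametersLS (and of self.parameters in the first example) are the usual ingredients of an Armijo backtracking line search; the solver that consumes them is not part of these excerpts. Below is a minimal, self-contained sketch of such a backtracking loop, with an illustrative cost function and search direction (none of the names come from the repository).

import numpy as np

def backtracking_linesearch(cost, x, p, grad, params):
    # Armijo backtracking: shrink the step 'alpha' by 'rho' until the
    # sufficient-decrease condition with constant 'c' holds, giving up
    # after 'max_backtrack' reductions.  Illustrative sketch only.
    alpha = params['alpha0']
    cost0 = cost(x)
    slope = np.dot(grad, p)          # directional derivative along p (< 0 for a descent direction)
    for _ in range(params['max_backtrack']):
        if cost(x + alpha*p) <= cost0 + params['c']*alpha*slope:
            return alpha, True       # sufficient decrease achieved
        alpha *= params['rho']
    return alpha, False              # backtracking failed

# Toy usage on a quadratic cost with a steepest-descent direction:
cost = lambda x: 0.5*np.dot(x, x)
x0 = np.array([1.0, -2.0])
g0 = x0                              # gradient of the quadratic at x0
LSparams = {'alpha0': 1.0, 'rho': 0.5, 'c': 5e-5, 'max_backtrack': 12}
alpha, ok = backtracking_linesearch(cost, x0, -g0, g0, LSparams)
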
Example no. 3
class TestGaussianprior(unittest.TestCase):

    def setUp(self):
        mesh = UnitSquareMesh(5, 5, 'crossed')
        self.Vm = FunctionSpace(mesh, 'Lagrange', 2)
        self.m = Function(self.Vm)
        self.m.vector()[:] = np.random.randn(self.Vm.dim())
        self.Priorg = LaplacianPrior({'Vm': self.Vm, 'gamma': 1e-5})
        self.Priorb = LaplacianPrior({'Vm': self.Vm, 'gamma': 0.0, 'beta': 1e-5})
        self.Prior = LaplacianPrior({'Vm': self.Vm, 'gamma': 1e-5,
                                     'beta': 1e-10, 'm0': self.m})
        self.M = [Function(self.Vm) for ii in range(10)]
        self.lenm = self.Vm.dim()
        for mm in self.M:
            mm.vector()[:] = np.random.randn(self.lenm)

    def test00a_inst(self):
        """Default instantiation and check default values"""
        Prior = LaplacianPrior({'Vm': self.Vm, 'gamma': 1e-5})
        error = Prior.beta + np.linalg.norm(Prior.m0.vector().array())
        self.assertTrue(error < 1e-16, error)

    def test00b_inst(self):
        """Default instantiation"""
        Prior = LaplacianPrior({'Vm': self.Vm, 'gamma': 1e-5,
                                'beta': 1e-7, 'm0': self.m})

    def test01_gamma(self):
        """Check gamma applied consistently"""
        Prior10 = LaplacianPrior({'Vm': self.Vm, 'gamma': 1e-10})
        error = 0.0
        for mm in self.M:
            r1 = (self.Priorg.grad(mm)).array()
            r10 = (Prior10.grad(mm)).array()
            err = np.linalg.norm(r1 - 1e5*r10)/np.linalg.norm(r1)
            error = max(error, err)
        self.assertTrue(error < 3e-16, error)

    def test01_beta(self):
        """Check beta applied consistently"""
        Prior10 = LaplacianPrior({'Vm': self.Vm, 'gamma': 0.0, 'beta':1e-10})
        error = 0.0
        for mm in self.M:
            r1 = (self.Priorb.grad(mm)).array()
            r10 = (Prior10.grad(mm)).array()
            err = np.linalg.norm(r1 - 1e5*r10)/np.linalg.norm(r1)
            error = max(error, err)
        self.assertTrue(error < 3e-16, error)
        
    def test02_precond(self):
        """Check preconditioner when beta is defined"""
        prec = self.Prior.get_precond()
        gR = self.Prior.gamma*self.Prior.R + self.Prior.beta*self.Prior.M
        error = 0.0
        for mm in self.M:
            r1 = (prec * mm.vector()).array()
            r2 = (gR * mm.vector()).array()
            err = np.linalg.norm(r1 - r2)/np.linalg.norm(r1)
            error = max(error, err)
        self.assertTrue(error < 1e-16, error)

    def test02_precond2(self):
        """Check preconditioner when beta is not defined"""
        prec = self.Priorg.get_precond()
        gR = self.Priorg.R
        gM = self.Priorg.M
        gRM = self.Priorg.gamma*gR + (1e-14)*gM
        error = 0.0
        for mm in self.M:
            r1 = (prec * mm.vector()).array()
            r2 = (gRM * mm.vector()).array()
            err = np.linalg.norm(r1 - r2)/np.linalg.norm(r1)
            error = max(error, err)
        self.assertTrue(error < 1e-16, error)

    def test03_costnull(self):
        """Check null space of regularization"""
        self.m.vector()[:] = np.ones(self.lenm)
        cost = self.Priorg.cost(self.m) 
        self.assertTrue(cost < 1e-16, cost)

    def test03_costposit(self):
        """Check cost is nonnegative"""
        mincost = 1.0
        for mm in self.M:
            cost = self.Priorg.cost(mm)
            mincost = min(mincost, cost)
        self.assertTrue(mincost > 0.0, mincost)
        
    def test04_grad(self):
        """Check cost and gradient are consistent"""
        error = 0.0
        h = 1e-5
        for mm in self.M:
            grad = self.Prior.grad(mm)
            mm_arr = mm.vector().array()
            for dm in self.M:
                dm_arr = dm.vector().array()
                dm_arr /= np.linalg.norm(dm_arr)
                gradxdm = np.dot(grad.array(), dm_arr)
                self.m.vector()[:] = mm_arr + h*dm_arr
                cost1 = self.Prior.cost(self.m)
                self.m.vector()[:] = mm_arr - h*dm_arr
                cost2 = self.Prior.cost(self.m)
                gradxdm_fd = (cost1-cost2) / (2.*h)
                err = abs(gradxdm - gradxdm_fd) / abs(gradxdm)
                error = max(error, err)
        self.assertTrue(error < 2e-7, error)

    def test05_hess(self):
        """Check gradient and Hessian do the same"""
        error = 0.0
        for mm in self.M:
            gradm = self.Prior.grad(mm).array()
            hessm = self.Prior.hessian(
                mm.vector() - self.Prior.m0.vector()).array()
            err = np.linalg.norm(gradm-hessm)/np.linalg.norm(gradm)
            error = max(error, err)
        self.assertTrue(error < 1e-16, error)
        
    def test06_hess(self):
        """Check gradient and hessian are consistent"""
        error = 0.0
        h = 1e-5
        for mm in self.M:
            for dm in self.M:
                Hdm = self.Prior.hessian(dm.vector()).array()
                self.m.vector()[:] = mm.vector().array() + h*dm.vector().array()
                G1dm = self.Prior.grad(self.m).array()
                self.m.vector()[:] = mm.vector().array() - h*dm.vector().array()
                G2dm = self.Prior.grad(self.m).array()
                HFDdm = (G1dm-G2dm)/(2*h)
                err = np.linalg.norm(HFDdm-Hdm)/np.linalg.norm(Hdm)
                error = max(error, err)
        self.assertTrue(error < 1e-8, error)
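
test04_grad and test06_hess above rely on the centered finite-difference identity grad(m).dm ~ (cost(m + h*dm) - cost(m - h*dm)) / (2h), accurate to O(h^2). Here is a small self-contained illustration of that check on a plain NumPy quadratic cost; nothing below comes from the repository.

import numpy as np

# Centered finite-difference check of a gradient, mirroring test04_grad.
A = np.diag(np.arange(1., 6.))                  # simple SPD matrix -> quadratic cost
cost = lambda m: 0.5 * m.dot(A.dot(m))
grad = lambda m: A.dot(m)

h = 1e-5
m = np.linspace(1., 2., 5)                      # arbitrary evaluation point
dm = np.ones(5) / np.sqrt(5.)                   # normalized direction, as in test04_grad

gradxdm = grad(m).dot(dm)                                  # analytical directional derivative
gradxdm_fd = (cost(m + h*dm) - cost(m - h*dm)) / (2.*h)    # centered difference
err = abs(gradxdm - gradxdm_fd) / abs(gradxdm)
assert err < 1e-7, err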