def run_problem():
    # Domain, f-e spaces and boundary conditions:
    mesh = UnitSquareMesh(20,20)
    V = FunctionSpace(mesh, 'Lagrange', 2)  # space for state and adjoint variables
    Vm = FunctionSpace(mesh, 'Lagrange', 1) # space for medium parameter
    Vme = FunctionSpace(mesh, 'Lagrange', 5)    # space for target medium parameter
    # Define zero Boundary conditions:
    def u0_boundary(x, on_boundary):
        return on_boundary
    u0 = Constant(0.0)
    bc = DirichletBC(V, u0, u0_boundary)
    # Define target medium and rhs:
    mtrue_exp = Expression('1 + 7*(pow(pow(x[0] - 0.5,2) + pow(x[1] - 0.5,2),0.5) > 0.2)')
    mtrue = interpolate(mtrue_exp, Vme)
    f = Expression("1.0")

    # Compute target data:
    noisepercent = 0.05   # noise level (0.05 = 5%)
    ObsOp = ObsEntireDomain({'V': V,'noise':noisepercent})
    goal = ObjFctalElliptic(V, Vme, bc, bc, [f], ObsOp)
    goal.update_m(mtrue)
    goal.solvefwd()
    UDnoise = goal.U

    # Solve reconstruction problem:
    Regul = LaplacianPrior({'Vm':Vm,'gamma':1e-3,'beta':1e-14})
    ObsOp.noise = False
    InvPb = ObjFctalElliptic(V, Vm, bc, bc, [f], ObsOp, UDnoise, Regul, [], False)
    InvPb.update_m(1.0) # Set initial medium
    InvPb.solvefwd_cost()
    # Choose between steepest descent and Newton's method:
    METHODS = ['sd','Newt']
    meth = METHODS[1]
    if meth == 'sd':    alpha_init = 1e3
    elif meth == 'Newt':    alpha_init = 1.0
    nbcheck = 0 # finite-difference gradient and Hessian checks (0 = skip)
    nbLS = 20   # max number of line-search iterations
    # Prepare results outputs:
    PP = PostProcessor(meth, Vm, mtrue)
    PP.getResults0(InvPb)    # Get results for index 0 (before first iteration)
    PP.printResults()
    # Start iteration:
    maxiter = 100 
    for it in range(1, maxiter+1):
        InvPb.solveadj_constructgrad()
        # Check gradient and Hessian:
        if nbcheck and (it == 1 or it % 10 == 0): 
            checkgradfd(InvPb, nbcheck)
            checkhessfd(InvPb, nbcheck)
        # Compute search direction:
        if it == 1: gradnorm_init = InvPb.getGradnorm()
        if meth == 'Newt':
            # CG tolerance for the inexact Newton solve: 0.5 at the first
            # iteration, afterwards taken from the previous
            # compute_searchdirection results
            if it == 1: maxtolcg = .5
            else:   maxtolcg = CGresults[3]
        else:   maxtolcg = None
        CGresults = compute_searchdirection(InvPb, meth, gradnorm_init, maxtolcg)
        # Compute line search:
        LSresults = bcktrcklinesearch(InvPb, nbLS, alpha_init)
        InvPb.plotm(it) # Plot current medium reconstruction
        # Print results:
        PP.getResults(InvPb, LSresults, CGresults)
        PP.printResults()
        if PP.Stop():   break   # Stopping criterion
        alpha_init = PP.alpha_init()    # Initialize next alpha when using sd
    InvPb.gatherm() # Create one plot for all intermediate reconstructions
    if it == maxiter:   print "Maximum number of iterations reached."
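
As written, run_problem assumes that DOLFIN and the fenicstools helpers it relies on have already been imported. A minimal preamble might look like the sketch below; only the ObsEntireDomain path is confirmed by Example #3, and the module paths of the remaining fenicstools helpers are not shown in these snippets.

from dolfin import *  # UnitSquareMesh, FunctionSpace, Expression, Constant, DirichletBC, interpolate
from fenicstools.observationoperator import ObsEntireDomain
# ObjFctalElliptic, LaplacianPrior, PostProcessor, compute_searchdirection,
# bcktrcklinesearch, checkgradfd and checkhessfd must also be imported from
# fenicstools; their module paths are not shown in these examples.
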
Example #2
def runcontobs():
    mesh = dl.UnitSquareMesh(100, 100)
    V = dl.FunctionSpace(mesh, 'Lagrange', 1)
    myn = 1
    m_exp = dl.Expression('sin(n*pi*x[0])*sin(n*pi*x[1])', n=myn)
    m = dl.interpolate(m_exp, V)
    m_in = dl.Function(V)
    mv = m.vector()
    shm = mv.array().shape
    HH = [1e-4, 1e-5, 1e-6]

    # CONTINUOUS obsop:
    # Cost:
    obsopcont = ObsEntireDomain({'V': V}, None)
    cost_ex = (.5 - np.sin(2 * np.pi * myn) / (4 * np.pi * myn))**2
    print 'relative error on cost: {:.2e}'.format(\
    np.abs(2*obsopcont.costfct(mv.array(), np.zeros(shm)) - cost_ex) / cost_ex)
    print 'relative error on cost_F: {:.2e}'.format(\
    np.abs(2*obsopcont.costfct_F(m, dl.Function(V)) - cost_ex) / cost_ex)
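    # Where cost_ex comes from (assuming the misfit implemented by ObsEntireDomain
    # is J(m) = 0.5*||m - md||_L2^2 over the unit square): with md = 0 and
    # m = sin(n*pi*x[0])*sin(n*pi*x[1]),
    #   2*J = (int_0^1 sin(n*pi*t)^2 dt)^2 = (0.5 - sin(2*n*pi)/(4*n*pi))^2,
    # which is the value of cost_ex checked above.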

    md_exp = dl.Expression('sin(n*pi*x[0])*sin(n*pi*x[1])', n=3)
    md = dl.interpolate(md_exp, V)
    cost = obsopcont.costfct(mv.array(), md.vector().array())
    cost_F = obsopcont.costfct_F(m, md)
    print 'cost={}, cost_F={}, rel_err={:.2e}'.format(cost, cost_F,\
    np.abs(cost-cost_F)/np.abs(cost_F))

    # Gradient:
    print '\nGradient:'
    failures = 0
    for nn in range(8):
        print '\ttest ' + str(nn + 1)
        dm_exp = dl.Expression('sin(n*pi*x[0])*sin(n*pi*x[1])', n=nn + 1)
        dm = dl.interpolate(dm_exp, V)

        success = False
        for h in HH:
            setfct(m_in, m)
            m_in.vector().axpy(h, dm.vector())
            cost1 = obsopcont.costfct_F(m_in, md)

            setfct(m_in, m)
            m_in.vector().axpy(-h, dm.vector())
            cost2 = obsopcont.costfct_F(m_in, md)

            cost = obsopcont.costfct_F(m, md)

            # forward and central finite-difference approximations of the
            # directional derivative of the cost in the direction dm
            GradFD1 = (cost1 - cost) / h
            GradFD2 = (cost1 - cost2) / (2. * h)

            # directional derivative from the analytical gradient
            Gradm = obsopcont.grad(m, md)
            Gradm_h = Gradm.inner(dm.vector())

            err1 = np.abs(GradFD1 - Gradm_h) / np.abs(Gradm_h)
            err2 = np.abs(GradFD2 - Gradm_h) / np.abs(Gradm_h)
            print 'h={}, GradFD1={:.5e}, GradFD2={:.5e} Gradm_h={:.5e}, err1={:.2e}, err2={:.2e}'.format(\
            h, GradFD1, GradFD2, Gradm_h, err1, err2)
            if err2 < 1e-6:
                print 'test {}: OK!'.format(nn + 1)
                success = True
                break
        if not success: failures += 1
    print '\nTest gradient -- Summary: {} test(s) failed'.format(failures)

    if failures < 5:
        print '\n\nHessian:'
        failures = 0
        for nn in range(8):
            print '\ttest ' + str(nn + 1)
            dm_exp = dl.Expression('sin(n*pi*x[0])*sin(n*pi*x[1])', n=nn + 1)
            dm = dl.interpolate(dm_exp, V)

            success = False
            for h in HH:
                setfct(m_in, m)
                m_in.vector().axpy(h, dm.vector())
                grad1 = obsopcont.grad(m_in, md)
                #
                setfct(m_in, m)
                m_in.vector().axpy(-h, dm.vector())
                grad2 = obsopcont.grad(m_in, md)
                # central finite-difference approximation of the Hessian action on dm
                HessFD = (grad1 - grad2) / (2. * h)

                # analytical Hessian of the misfit applied to dm
                Hessmdm = obsopcont.hessian(dm.vector())

                err = (HessFD - Hessmdm).norm('l2') / Hessmdm.norm('l2')
                print 'h={}, err={}'.format(h, err)

                if err < 1e-6:
                    print 'test {}: OK!'.format(nn + 1)
                    success = True
                    break
            if not success: failures += 1
        print '\nTest Hessian --  Summary: {} test(s) failed\n'.format(
            failures)
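
For reference, if the misfit behind ObsEntireDomain is the usual J(m) = 1/2 * int_Omega (m - m_d)^2 dx (an assumption; these snippets do not show the implementation), the quantities verified above are the directional derivative J'(m)[dm] = int_Omega (m - m_d) dm dx, which GradFD1 and GradFD2 approximate by finite differences, and the Hessian action in the direction dm, whose weak form is int_Omega dm v dx for a test direction v (the mass matrix applied to dm). Since this misfit is quadratic in m, the Hessian does not depend on the base point, which is consistent with obsopcont.hessian being called with dm.vector() only.
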
Example #3
import numpy as np
import dolfin as dl

from fenicstools.observationoperator import ObsEntireDomain
from fenicstools.miscfenics import setfct

mesh = dl.UnitSquareMesh(100, 100)
V = dl.FunctionSpace(mesh, 'Lagrange', 1)
myn = 1
m_exp = dl.Expression('sin(n*pi*x[0])*sin(n*pi*x[1])', n=myn)
m = dl.interpolate(m_exp, V)
m_in = dl.Function(V)
mv = m.vector()
shm = mv.array().shape
HH = [1e-4, 1e-5, 1e-6]

# CONTINUOUS obsop:
# Cost:
obsopcont = ObsEntireDomain({'V':V}, None)
cost_ex = (.5-np.sin(2*np.pi*myn)/(4*np.pi*myn))**2
print 'relative error on cost: {:.2e}'.format(\
np.abs(2*obsopcont.costfct(mv.array(), np.zeros(shm)) - cost_ex) / cost_ex)
print 'relative error on cost_F: {:.2e}'.format(\
np.abs(2*obsopcont.costfct_F(m, dl.Function(V)) - cost_ex) / cost_ex)

md_exp = dl.Expression('sin(n*pi*x[0])*sin(n*pi*x[1])', n=3)
md = dl.interpolate(md_exp, V)
cost = obsopcont.costfct(mv.array(), md.vector().array())
cost_F = obsopcont.costfct_F(m, md)
print 'cost={}, cost_F={}, rel_err={:.2e}'.format(cost, cost_F,\
np.abs(cost-cost_F)/np.abs(cost_F))

# Gradient:
print '\nGradient:'
Example #4
if PLOT:
    filename, ext = splitext(sys.argv[0])
    if mpirank == 0 and isdir(filename + '/'):
        rmtree(filename + '/')
    MPI.barrier(mpicomm)
    myplot = PlotFenics(filename)
    MPI.barrier(mpicomm)
    myplot.set_varname('m_target')
    myplot.plot_vtk(mtrue)
    myplot.set_varname('m_targetVm')
    myplot.plot_vtk(mtrueVm)
else:
    myplot = None

if mpirank == 0: print 'Compute noisy data'
ObsOp = ObsEntireDomain({'V': V}, mpicomm)
ObsOp.noise = False
goal = ObjFctalHelmholtz(V,
                         Vme,
                         bc,
                         bc,
                         f,
                         ObsOp,
                         Data={'k': 1.0},
                         plot=False,
                         mycomm=mpicomm)
goal.update_m(mtrue)
goal.solvefwd()
# noise
np.random.seed(11)
noisepercent = 0.02  # e.g., 0.02 = 2% noise level
Vm = FunctionSpace(mesh, 'Lagrange', 1) # space for medium parameter
Vme = FunctionSpace(mesh, 'Lagrange', 5)    # space for target medium parameter
# Define zero Boundary conditions:
def u0_boundary(x, on_boundary):
    return on_boundary
u0 = Constant(0.0)
bc = DirichletBC(V, u0, u0_boundary)
# Define target medium and rhs:
mtrue_exp = Expression('1 + 7*(pow(pow(x[0] - 0.5,2) + pow(x[1] - 0.5,2),0.5) > 0.2)')
mtrue = interpolate(mtrue_exp, Vme)
f = Expression("1.0")

print 'p{}: Compute target data'.format(myrank)
noisepercent = 0.00   # noise level (0.00 = noise-free; e.g., 0.02 = 2%)
ObsOp = ObsEntireDomain({'V': V,'noise':noisepercent}, mycomm)
goal = ObjFctalElliptic(V, Vme, bc, bc, [f], ObsOp, [], [], [], False, mycomm)
goal.update_m(mtrue)
goal.solvefwd()
print 'p{}'.format(myrank)
UDnoise = goal.U

print 'p{}: Solve reconstruction problem'.format(myrank)
Regul = LaplacianPrior({'Vm':Vm,'gamma':1e-5,'beta':1e-14})
ObsOp.noise = False
InvPb = ObjFctalElliptic(V, Vm, bc, bc, [f], ObsOp, UDnoise, Regul, [], False, mycomm)
InvPb.update_m(1.0) # Set initial medium
InvPb.solvefwd_cost()
# Choose between steepest descent and Newton's method:
METHODS = ['sd','Newt']
meth = METHODS[1]