Example #1
    def J(f):
        a = f * inner(grad(u), grad(v)) * dx
        L = inner(f, v) * dx + inner(g1, v) * ds(1) + inner(g2, v) * ds(2)

        solve(a == L, u_, bc)

        return assemble(u_**2 * dx)
Example #2
def compute_errors(u_e, u):
    """Compute various measures of the error u - u_e, where
    u is a finite element Function and u_e is an Expression.

    Adapted from https://fenicsproject.org/pub/tutorial/html/._ftut1020.html
    """
    print('u_e', u_e.ufl_element().degree())
    # Get function space
    V = u.function_space()

    # Explicit computation of L2 norm
    error = (u - u_e)**2 * dl.dx
    E1 = np.sqrt(abs(dla.assemble(error)))

    # Explicit interpolation of u_e onto the same space as u
    u_e_ = dla.interpolate(u_e, V)
    error = (u - u_e_)**2 * dl.dx
    E2 = np.sqrt(abs(dla.assemble(error)))

    # Explicit interpolation of u_e to higher-order elements.
    # u will also be interpolated to the space Ve before integration
    Ve = dl.FunctionSpace(V.mesh(), 'P', 5)
    u_e_ = dla.interpolate(u_e, Ve)
    error = (u - u_e_)**2 * dl.dx
    E3 = np.sqrt(abs(dla.assemble(error)))

    # Infinity norm based on nodal values
    u_e_ = dla.interpolate(u_e, V)
    E4 = abs(u_e_.vector().get_local() - u.vector().get_local()).max()

    # L2 norm
    E5 = dl.errornorm(u_e, u, norm_type='L2', degree_rise=3)

    # H1 seminorm
    E6 = dl.errornorm(u_e, u, norm_type='H10', degree_rise=3)

    # Collect error measures in a dictionary with self-explanatory keys
    errors = {
        'u - u_e': E1,
        'u - interpolate(u_e, V)': E2,
        'interpolate(u, Ve) - interpolate(u_e, Ve)': E3,
        'infinity norm (of dofs)': E4,
        'L2 norm': E5,
        'H10 seminorm': E6
    }

    return errors
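A minimal driver for compute_errors is sketched below. It assumes dl and dla both alias dolfin (in the original module dla is an adjoint-aware wrapper with the same interface) and that np is numpy; the Poisson setup with the manufactured solution 1 + x^2 + 2y^2 is purely illustrative and not part of the original listing.

# Hypothetical usage sketch for compute_errors (assumptions noted above).
import numpy as np
import dolfin as dl
import dolfin as dla   # assumption: same interface as the dla used above

mesh = dl.UnitSquareMesh(16, 16)
V = dl.FunctionSpace(mesh, 'P', 1)
u_e = dl.Expression('1 + x[0]*x[0] + 2*x[1]*x[1]', degree=2)   # exact solution
bc = dl.DirichletBC(V, u_e, 'on_boundary')
u, v = dl.TrialFunction(V), dl.TestFunction(V)
f = dl.Constant(-6.0)                                          # -laplace(u_e) = -6
u_h = dl.Function(V)
dl.solve(dl.inner(dl.grad(u), dl.grad(v))*dl.dx == f*v*dl.dx, u_h, bc)
for name, value in compute_errors(u_e, u_h).items():
    print('%-45s %g' % (name, value))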
def eval_cost_fem(w, rho):
    u, _ = fenics.split(w)
    J_form = (
        0.5 * ufl.inner(alpha(rho) * u, u) * ufl.dx
        + mu * ufl.inner(ufl.grad(u), ufl.grad(u)) * ufl.dx
    )
    J = fenics_adjoint.assemble(J_form)
    return J
Example #4
def fenics_cost(u, f):
    # Target state: with w = sin(pi*x[0])*sin(pi*x[1]) we have
    # -laplace(w) = 2*pi**2*w, so d = w/(2*pi**2) satisfies -laplace(d) = w.
    x = ufl.SpatialCoordinate(mesh)
    w = ufl.sin(ufl.pi * x[0]) * ufl.sin(ufl.pi * x[1])
    d = 1 / (2 * ufl.pi ** 2) * w
    # Tracking term plus Tikhonov regularisation of the control f
    alpha = fa.Constant(1e-6)
    J_form = (0.5 * ufl.inner(u - d, u - d)) * ufl.dx + alpha / 2 * f ** 2 * ufl.dx
    J = fa.assemble(J_form)
    return J
Example #5
def assemble_fenics(u, kappa0, kappa1):

    f = fa.Expression(
        "10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)", degree=2)

    # Energy-type functional J(u) = 1/2*(kappa0*grad(u), grad(u)) - (kappa1*f, u)
    inner, grad, dx = ufl.inner, ufl.grad, ufl.dx
    J_form = 0.5 * inner(kappa0 * grad(u), grad(u)) * dx - kappa1 * f * u * dx
    J = fa.assemble(J_form)
    return J
Example #6
def test_solver_ident_zeros():
    """
    Test using ident zeros to restrict half of the domain
    """
    from fenics_adjoint import (UnitSquareMesh, Function, assemble, solve,
                                project, Expression, DirichletBC)
    mesh = UnitSquareMesh(10, 10)
    cf = MeshFunction("size_t", mesh, mesh.topology().dim(), 0)
    top_half().mark(cf, 1)

    ff = MeshFunction("size_t", mesh, mesh.topology().dim() - 1, 0)
    top_boundary().mark(ff, 1)

    dx = Measure("dx", domain=mesh, subdomain_data=cf)

    V = FunctionSpace(mesh, "CG", 1)
    u, v = TrialFunction(V), TestFunction(V)
    a = inner(grad(u), grad(v)) * dx(1)
    w = Function(V)

    with stop_annotating():
        w.assign(project(Expression("x[0]", degree=1), V))
    rhs = w**3 * v * dx(1)
    A = assemble(a, keep_diagonal=True)
    A.ident_zeros()
    b = assemble(rhs)
    bc = DirichletBC(V, Constant(1), ff, 1)
    bc.apply(A, b)
    uh = Function(V)
    solve(A, uh.vector(), b, "umfpack")

    J = assemble(inner(uh, uh) * dx(1))

    Jhat = ReducedFunctional(J, Control(w))
    with stop_annotating():
        w1 = project(Expression("x[0]*x[1]", degree=2), V)
    results = taylor_to_dict(Jhat, w, w1)
    assert (min(results["R0"]["Rate"]) > 0.95)
    assert (min(results["R1"]["Rate"]) > 1.95)
    assert (min(results["R2"]["Rate"]) > 2.95)
Example #7
    def J(f):
        u_1 = Function(V)
        u_1.vector()[:] = 1

        a = u_1 * u * v * dx + dt * f * inner(grad(u), grad(v)) * dx
        L = u_1 * v * dx

        # Time loop
        t = dt
        while t <= T:
            solve(a == L, u_, bc)
            u_1.assign(u_)
            t += dt

        return assemble(u_1**2 * dx)
def run_model(function_space,
              kappa,
              forcing,
              init_condition,
              dt,
              final_time,
              boundary_conditions=None,
              second_order_timestepping=False,
              exact_sol=None,
              velocity=None,
              point_sources=None,
              intermediate_times=None):
    """
    Use implicit euler to solve transient advection diffusion equation

    du/dt = grad (k* grad u) - vel*grad u + f

    WARNING: when point sources solution changes significantly when mesh is 
    varied
    """
    mesh = function_space.mesh()

    time_independent_boundaries = False
    if boundary_conditions is None:
        bndry_obj = dl.CompiledSubDomain("on_boundary")
        boundary_conditions = [['dirichlet', bndry_obj, dla.Constant(0)]]
        time_independent_boundaries = True

    num_bndrys = len(boundary_conditions)
    boundaries = mark_boundaries(mesh, boundary_conditions)
    dirichlet_bcs = collect_dirichlet_boundaries(function_space,
                                                 boundary_conditions,
                                                 boundaries)

    # To express integrals over the boundary parts using ds(i), we must first
    # redefine the measure ds in terms of our boundary markers:
    ds = dl.Measure('ds', domain=mesh, subdomain_data=boundaries)
    dx = dl.Measure('dx', domain=mesh)

    # Variational problem at each time
    u = dl.TrialFunction(function_space)
    v = dl.TestFunction(function_space)

    # Previous solution
    if hasattr(init_condition, 't'):
        assert init_condition.t == 0
    u_1 = dla.interpolate(init_condition, function_space)

    if not second_order_timestepping:
        theta = 1
    else:
        theta = 0.5

    if hasattr(forcing, 't'):
        forcing_1 = copy_expression(forcing)
    else:
        forcing_1 = forcing

    def steady_state_form(u, v, f):
        F = kappa * dl.inner(dl.grad(u), dl.grad(v)) * dx
        F -= f * v * dx
        if velocity is not None:
            F += dl.dot(velocity, dl.grad(u)) * v * dx
        return F

    F = u*v*dx-u_1*v*dx + dt*theta*steady_state_form(u, v, forcing) + \
        dt*(1.-theta)*steady_state_form(u_1, v, forcing_1)
    a, L = dl.lhs(F), dl.rhs(F)

    # a = u*v*dx + theta*dt*kappa*dl.inner(dl.grad(u), dl.grad(v))*dx
    # L = (u_1 + dt*theta*forcing)*v*dx

    # if velocity is not None:
    #     a += theta*dt*v*dl.dot(velocity,dl.grad(u))*dx

    # if second_order_timestepping:
    #     L -= (1-theta)*dt*dl.inner(kappa*dl.grad(u_1), dl.grad(v))*dx
    #     L += (1-theta)*dt*forcing_1*v*dx

    #     if velocity is not None:
    #         L -= (1-theta)*dt*(v*dl.dot(velocity,dl.grad(u_1)))*dx

    beta_1_list = []
    alpha_1_list = []
    for ii in range(num_bndrys):
        if (boundary_conditions[ii][0] == 'robin'):
            alpha = boundary_conditions[ii][3]
            a += theta * dt * alpha * u * v * ds(ii)
            if second_order_timestepping:
                if hasattr(alpha, 't'):
                    alpha_1 = copy_expression(alpha)
                    alpha_1_list.append(alpha_1)
                else:
                    alpha_1 = alpha
                L -= (1 - theta) * dt * alpha_1 * u_1 * v * ds(ii)

        if ((boundary_conditions[ii][0] == 'robin')
                or (boundary_conditions[ii][0] == 'neumann')):
            beta = boundary_conditions[ii][2]
            L -= theta * dt * beta * v * ds(ii)
            if second_order_timestepping:
                if hasattr(beta, 't'):
                    beta_1 = copy_expression(beta)
                    beta_1_list.append(beta_1)
                else:
                    # boundary condition is constant in time
                    beta_1 = beta
                L -= (1 - theta) * dt * beta_1 * v * ds(ii)

    if time_independent_boundaries:
        # TODO this can be used if dirichlet and robin conditions are not
        # time dependent.
        A = dla.assemble(a)
        for bc in dirichlet_bcs:
            bc.apply(A)
        solver = dla.LUSolver(A)
        #solver.parameters["reuse_factorization"] = True
    else:
        solver = None

    u_2 = dla.Function(function_space)
    u_2.assign(u_1)
    t = 0.0

    dt_tol = 1e-12
    n_time_steps = 0
    if intermediate_times is not None:
        intermediate_u = []
        intermediate_cnt = 0
        # intermediate times must be in chronological order and before final_time
        assert np.allclose(intermediate_times, np.sort(intermediate_times))
        assert np.all(intermediate_times < final_time)

    while t < final_time - dt_tol:
        # Update current time
        prev_t = t
        forcing_1.t = prev_t
        t += dt
        t = min(t, final_time)
        forcing.t = t

        # set current time for time varying boundary conditions
        for ii in range(num_bndrys):
            if hasattr(boundary_conditions[ii][2], 't'):
                boundary_conditions[ii][2].t = t

        # set previous time for time varying boundary conditions when
        # using second order timestepping. lists will be empty if using
        # first order timestepping
        for jj in range(len(beta_1_list)):
            beta_1_list[jj].t = prev_t
        for jj in range(len(alpha_1_list)):
            alpha_1_list[jj].t = prev_t

        #A, b = dl.assemble_system(a, L, dirichlet_bcs)
        # for bc in dirichlet_bcs:
        #    bc.apply(A,b)
        # Reassemble the lhs only when a pre-factorized solver is not available,
        # i.e. when the boundary conditions are time dependent
        if solver is None:
            A = dla.assemble(a)
            for bc in dirichlet_bcs:
                bc.apply(A)

        b = dla.assemble(L)
        for bc in dirichlet_bcs:
            bc.apply(b)

        if point_sources is not None:
            ps_list = []
            for ii in range(len(point_sources)):
                point, expr = point_sources[ii]
                ps_list.append((dl.Point(point[0], point[1]), expr(t)))
            ps = dla.PointSource(function_space, ps_list)
            ps.apply(b)

        if solver is None:
            dla.solve(A, u_2.vector(), b)
        else:
            solver.solve(u_2.vector(), b)

        # tape = dla.get_working_tape()
        # tape.visualise()

        #print ("t =", t, "end t=", final_time)

        # Update previous solution
        u_1.assign(u_2)
        # import matplotlib.pyplot as plt
        # plt.subplot(131)
        # pp=dl.plot(u_1)
        # plt.subplot(132)
        # dl.plot(forcing,mesh=mesh)
        # plt.subplot(133)
        # dl.plot(forcing_1,mesh=mesh)
        # plt.colorbar(pp)
        # plt.show()

        # compute error
        if exact_sol is not None:
            exact_sol.t = t
            error = dl.errornorm(exact_sol, u_2)
            print('t = %.2f: error = %.3g' % (t, error))
            # dl.plot(exact_sol,mesh=mesh)
            # plt.show()

        if (intermediate_times is not None
                and intermediate_cnt < intermediate_times.shape[0]
                and t >= intermediate_times[intermediate_cnt]):
            # save solution closest to intermediate time
            u_t = dla.Function(function_space)
            u_t.assign(u_2)
            intermediate_u.append(u_t)
            intermediate_cnt += 1
        n_time_steps += 1
    # print ("t =", t, "end t=", final_time,"# time steps", n_time_steps)

    if intermediate_times is None:
        return u_2
    else:
        return intermediate_u + [u_2]
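A minimal way to call run_model is sketched below. It assumes dl/dla alias dolfin (or its dolfin-adjoint wrapper) and that the module-level helpers run_model depends on (mark_boundaries, collect_dirichlet_boundaries and, for time-dependent data, copy_expression) are available; the constant data and time-step values are illustrative only.

# Hypothetical driver for run_model (assumptions noted above).
import dolfin as dl
import dolfin as dla

mesh = dl.UnitSquareMesh(32, 32)
function_space = dl.FunctionSpace(mesh, "CG", 1)
kappa = dla.Constant(0.1)           # constant diffusivity
forcing = dla.Constant(1.0)         # time-independent source
init_condition = dla.Constant(0.0)  # start from u = 0
u_final = run_model(function_space, kappa, forcing, init_condition,
                    dt=0.01, final_time=0.5,
                    second_order_timestepping=True)
print(u_final.vector().norm("l2"))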
def eval_volume_fem(rho):
    # We want V - \int rho dx >= 0. Assuming delta is the volume of the domain
    # (so that \int V/delta dx = V), this is equivalent to \int (V/delta - rho) dx >= 0
    J_form = (V / delta - rho) * ufl.dx
    J = fenics_adjoint.assemble(J_form)
    return J
Example #10
def eval_volume(rho):
    J_form = rho * ufl.dx
    J = fa.assemble(J_form)
    return J
Example #11
def eval_cost(u, x):
    J_form = dot(b, u) * dx + fa.Constant(1.0e-8) * dot(grad(x), grad(x)) * dx
    J = fa.assemble(J_form)
    return J
Example #12
 def J(f):
     a = f * inner(grad(u), grad(v)) * dx + u**2 * v * dx - f * v * dx
     L = 0
     solve(a == L, u, bc)
     return assemble(u**2 * dx)
Example #13
 def J(c):
     a = inner(grad(u), grad(v)) * dx
     L = c * v * dx
     solve(a == L, u_, bc)
     return assemble(u_**2 * dx)
Example #14
def constrained_newton_energy_solve(F,
                                    uh,
                                    dirichlet_bcs=None,
                                    bc0=None,
                                    linear_solver=None,
                                    opts=dict(),
                                    C=None,
                                    constraint_vec=None):
    """
    See https://uvilla.github.io/inverse15/UnconstrainedMinimization.html

    F: dl.Expression
        The energy functional.

    uh : dl.Function
        Final solution. The initial state on entry to the function 
        will be used as initial guess and then overwritten

    dirichlet_bcs : list
        The Dirichlet boundary conditions on the unknown u.

    bc0 : list
        The Dirichlet boundary conditions for the step (du) in the Newton 
        iterations.
    
    """
    max_iter = opts.get("max_iter", 20)
    # exit when sqrt(g,g)/sqrt(g_0,g_0) <= rel_tolerance
    rtol = opts.get("rel_tolerance", 1e-8)
    # exit when sqrt(g,g) <= abs_tolerance
    atol = opts.get("abs_tolerance", 1e-9)
    # exit when (g,du) <= gdu_tolerance
    gdu_tol = opts.get("gdu_tolerance", 1e-14)
    # define armijo sufficient decrease
    c_armijo = opts.get("c_armijo", 1e-4)
    # exit if max backtracking steps reached
    max_backtrack = opts.get("max_backtracking_iter", 20)
    # define verbosity
    prt_level = opts.get("print_level", 0)

    termination_reasons = [
        "Maximum number of Iteration reached",  #0
        "Norm of the gradient less than tolerance",  #1
        "Maximum number of backtracking reached",  #2
        "Norm of (g, du) less than tolerance"  #3
    ]
    it = 0
    total_cg_iter = 0
    converged = False
    reason = 0

    L = F
    if C is not None:
        L += C
        if prt_level > 0:
            print("Solving Constrained Nonlinear Problem")
    else:
        if prt_level > 0:
            print("Solving Unconstrained Nonlinear Problem")

    # Compute gradient and hessian
    grad = dla.derivative(L, uh)
    H = dla.derivative(grad, uh)

    # Applying boundary conditions
    if dirichlet_bcs is not None:
        if type(dirichlet_bcs) is dla.DirichletBC:
            bcsl = [dirichlet_bcs]
        else:
            bcsl = dirichlet_bcs
        [bc.apply(uh.vector()) for bc in bcsl]

    if constraint_vec is not None:
        assert C is not None
        dcd_state = dla.assemble(dla.derivative(C, uh))
        dcd_lagrangeMult = dcd_state * constraint_vec
        if not dcd_lagrangeMult.norm("l2") < 1.e-14:
            msg = "The initial guess does not satisfy the constraint."
            raise ValueError(msg)

    # Setting variables
    Fn = dla.assemble(F)
    gn = dla.assemble(grad)
    g0_norm = gn.norm("l2")
    gn_norm = g0_norm
    tol = max(g0_norm * rtol, atol)
    du = dla.Function(uh.function_space()).vector()

    #if linear_solver =='PETScLU':
    #    linear_solver = dl.PETScLUSolver(uh.function_space().mesh().mpi_comm())
    #else:
    #    assert linear_solver is None

    if prt_level > 0:
        print("{0:>3}  {1:>6} {2:>15} {3:>15} {4:>15} {5:>15}".format(
            "Nit", "CGit", "Energy", "||g||", "(g,du)", "alpha"))
        print("{0:3d} {1:6d}    {2:15e} {3:15e}     {4:15}   {5:15}".format(
            0, 0, Fn, g0_norm, "    NA    ", "    NA"))

    converged = False
    reason = 0

    for it in range(max_iter):
        if bc0 is not None:
            [Hn, gn] = dla.assemble_system(H, grad, bc0)
        else:
            Hn = dla.assemble(H)
            gn = dla.assemble(grad)

        Hn.init_vector(du, 1)
        if linear_solver is None:
            lin_it = dla.solve(Hn, du, -gn, "cg", "petsc_amg")
        else:
            lin_it = dla.solve(Hn, du, -gn, "lu")
            #linear_solver.set_operator(Hn)
            #lin_it = linear_solver.solve(du, -gn)
        total_cg_iter += lin_it

        du_gn = du.inner(gn)

        alpha = 1.0
        if (np.abs(du_gn) < gdu_tol):
            converged = True
            reason = 3
            uh.vector().axpy(alpha, du)
            Fn = dla.assemble(F)
            gn_norm = gn.norm("l2")
            break

        uh_backtrack = uh.copy(deepcopy=True)
        bk_converged = False

        #Backtrack
        for j in range(max_backtrack):
            uh.assign(uh_backtrack)
            uh.vector().axpy(alpha, du)
            Fnext = dla.assemble(F)
            #print(Fnext,Fn + alpha*c_armijo*du_gn)
            if Fnext < Fn + alpha * c_armijo * du_gn:
                Fn = Fnext
                bk_converged = True
                break
            alpha /= 2.

        if not bk_converged:
            reason = 2
            break

        gn_norm = gn.norm("l2")

        if prt_level > 0:
            print("{0:3d} {1:6d}    {2:15e} {3:15e} {4:15e} {5:15e}".format(
                it + 1, lin_it, Fn, gn_norm, du_gn, alpha))

        if gn_norm < tol:
            converged = True
            reason = 1
            break

    if prt_level > 0:
        if reason == 3:
            print("{0:3d} {1:6d}    {2:15e} {3:15e} {4:15e} {5:15e}".format(
                it + 1, lin_it, Fn, gn_norm, du_gn, alpha))
        print(termination_reasons[reason])
        if converged:
            print("Newton converged in ", it, \
                  "nonlinear iterations and ", total_cg_iter,
                  "linear iterations." )
        else:
            print("Newton did NOT converge in ", it, "iterations.")
        print("Final norm of the gradient: ", gn_norm)
        print("Value of the cost functional: ", Fn)

    if reason in [0, 2]:
        raise Exception(termination_reasons[reason])

    return uh
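For reference, constrained_newton_energy_solve can be exercised on a simple convex energy whose stationary point is a Poisson solve. The sketch below assumes dl/dla alias dolfin (or its adjoint-aware wrapper) and is not from the original source.

# Hypothetical usage sketch: minimise 1/2*|grad(u)|^2 - f*u with u = 0 on the boundary.
import dolfin as dl
import dolfin as dla

mesh = dl.UnitSquareMesh(32, 32)
V = dl.FunctionSpace(mesh, "CG", 1)
uh = dla.Function(V)
f = dla.Constant(1.0)
F = 0.5*dl.inner(dl.grad(uh), dl.grad(uh))*dl.dx - f*uh*dl.dx
bc = dla.DirichletBC(V, dla.Constant(0.0), "on_boundary")
bc0 = dla.DirichletBC(V, dla.Constant(0.0), "on_boundary")  # homogeneous BCs for the step du
uh = constrained_newton_energy_solve(F, uh, dirichlet_bcs=[bc], bc0=[bc0],
                                     opts={"print_level": 1})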
Example #15
def unconstrained_newton_solve(F,
                               J,
                               uh,
                               dirichlet_bcs=None,
                               bc0=None,
                               linear_solver=None,
                               opts=dict()):
    """
    F: dl.Expression
        The variational form.

    uh : dl.Function
        Final solution. The initial state on entry to the function 
        will be used as initial guess and then overwritten

    dirichlet_bcs : list
        The Dirichlet boundary conditions on the unknown u.

    bc0 : list
        The Dirichlet boundary conditions for the step (du) in the Newton 
        iterations.
    
    """
    max_iter = opts.get("max_iter", 50)
    # exit when sqrt(g,g)/sqrt(g_0,g_0) <= rel_tolerance
    rtol = opts.get("rel_tolerance", 1e-8)
    # exit when sqrt(g,g) <= abs_tolerance
    atol = opts.get("abs_tolerance", 1e-9)
    # exit when (g,du) <= gdu_tolerance
    gdu_tol = opts.get("gdu_tolerance", 1e-14)
    # define armijo sufficient decrease
    c_armijo = opts.get("c_armijo", 1e-4)
    # exit if max backtracking steps reached
    max_backtrack = opts.get("max_backtracking_iter", 20)
    # define verbosity
    prt_level = opts.get("print_level", 0)

    termination_reasons = [
        "Maximum number of iterations reached",  #0
        "Norm of the gradient less than tolerance",  #1
        "Maximum number of backtracking reached",  #2
        "Norm of (g, du) less than tolerance",  #3
        "Norm of residual less than tolerance"  #4
    ]
    it = 0
    total_cg_iter = 0
    converged = False
    reason = 0

    if prt_level > 0:
        print("Solving Nonlinear Problem")

    # Applying boundary conditions
    if dirichlet_bcs is not None:
        if type(dirichlet_bcs) is dla.DirichletBC:
            bcsl = [dirichlet_bcs]
        else:
            bcsl = dirichlet_bcs
        [bc.apply(uh.vector()) for bc in bcsl]

    if type(bc0) is dla.DirichletBC:
        bc0 = [bc0]

    # Setting variables
    gn = dla.assemble(F)
    res_func = dla.Function(uh.function_space())
    res_func.assign(dla.Function(uh.function_space(), gn))
    res = res_func.vector()
    if bc0 is not None:
        for bc in bc0:
            bc.apply(res)
    Fn = res.norm("l2")
    g0_norm = gn.norm("l2")
    gn_norm = g0_norm
    tol = max(g0_norm * rtol, atol)
    res_tol = max(Fn * rtol, atol)
    du = dla.Function(uh.function_space()).vector()

    if linear_solver == 'PETScLU':
        linear_solver = dla.PETScLUSolver(
            uh.function_space().mesh().mpi_comm())
    else:
        assert linear_solver is None

    if prt_level > 0:
        print("{0:>3}  {1:>6} {2:>15} {3:>15} {4:>15} {5:>15} {6:>6}".format(
            "Nit", "CGit", "||r||", "||g||", "(g,du)", "alpha", "Nbt"))
        print("{0:3d} {1:6d}    {2:15e} {3:15e}     {4:15}   {5:10} {6:s}".
              format(0, 0, Fn, g0_norm, "    NA    ", "    NA", "NA"))

    converged = False
    reason = 0
    nbt = 0

    for it in range(max_iter):
        if bc0 is not None:
            [Hn, gn] = dla.assemble_system(J, F, bc0)
        else:
            Hn = dla.assemble(J)
            gn = dla.assemble(F)

        Hn.init_vector(du, 1)
        if linear_solver is None:
            lin_it = dla.solve(Hn, du, -gn, "cg", "petsc_amg")
        else:
            linear_solver.set_operator(Hn)
            lin_it = linear_solver.solve(du, -gn)
        total_cg_iter += lin_it

        du_gn = du.inner(gn)

        alpha = 1.0
        if (np.abs(du_gn) < gdu_tol):
            converged = True
            reason = 3
            uh.vector().axpy(alpha, du)
            gn_norm = gn.norm("l2")
            Fn = gn_norm
            break

        uh_backtrack = uh.copy(deepcopy=True)
        bk_converged = False

        #Backtrack
        for nbt in range(max_backtrack):
            uh.assign(uh_backtrack)
            uh.vector().axpy(alpha, du)
            res = dla.assemble(F)
            if bc0 is not None:
                for bc in bc0:
                    bc.apply(res)
            Fnext = res.norm("l2")
            #print(Fn,Fnext,Fn + alpha*c_armijo*du_gn)
            if Fnext < Fn + alpha * c_armijo * du_gn:
                #if True:
                Fn = Fnext
                bk_converged = True
                break
            alpha /= 2.

        if not bk_converged:
            reason = 2
            break

        gn_norm = gn.norm("l2")

        if prt_level > 0:
            print("{0:3d} {1:6d}    {2:15e} {3:15e} {4:15e} {5:15e} {6:3d}".
                  format(it + 1, lin_it, Fn, gn_norm, du_gn, alpha, nbt + 1))

        if gn_norm < tol:
            converged = True
            reason = 1
            break

        if Fn < res_tol:
            converged = True
            reason = 4
            break

    if prt_level > 0:
        if reason == 3:
            print("{0:3d} {1:6d}    {2:15e} {3:15e} {4:15e} {5:15e} {6:3d}".
                  format(it + 1, lin_it, Fn, gn_norm, du_gn, alpha, nbt + 1))
        print(termination_reasons[reason])
        if converged:
            print("Newton converged in ", it, \
                  "nonlinear iterations and ", total_cg_iter,
                  "linear iterations." )
        else:
            print("Newton did NOT converge in ", it, "iterations.")
        print("Final norm of the gradient: ", gn_norm)
        print("Value of the cost functional: ", Fn)

    if reason in [0, 2]:
        raise Exception(termination_reasons[reason])

    return uh
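Similarly, unconstrained_newton_solve takes the residual form F and its Jacobian J directly; below is a sketch on a semilinear Poisson problem (same aliasing assumptions as above, not from the original source).

# Hypothetical usage sketch: solve -laplace(u) + u^3 = f with u = 0 on the boundary.
import dolfin as dl
import dolfin as dla

mesh = dl.UnitSquareMesh(32, 32)
V = dl.FunctionSpace(mesh, "CG", 1)
uh = dla.Function(V)
v = dl.TestFunction(V)
f = dla.Constant(1.0)
F = dl.inner(dl.grad(uh), dl.grad(v))*dl.dx + uh**3*v*dl.dx - f*v*dl.dx  # residual
J = dl.derivative(F, uh)                                                 # Jacobian form
bc = dla.DirichletBC(V, dla.Constant(0.0), "on_boundary")
bc0 = dla.DirichletBC(V, dla.Constant(0.0), "on_boundary")               # for the step du
uh = unconstrained_newton_solve(F, J, uh, dirichlet_bcs=[bc], bc0=[bc0],
                                opts={"print_level": 1})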