Example #1
 def __init__(self, V, **kwargs):
     # Call parent
     ParametrizedProblem.__init__(
         self,
         os.path.join("test_eim_approximation_17_tempdir",
                      expression_type, basis_generation,
                      "mock_problem"))
     # Minimal subset of a ParametrizedDifferentialProblem
     self.V = V
     self._solution = Function(V)
     self.components = ["u", "s", "p"]
     # Parametrized function to be interpolated
     x = SpatialCoordinate(V.mesh())
     mu = SymbolicParameters(self, V, (-1., -1.))
     self.f00 = 1. / sqrt(
         pow(x[0] - mu[0], 2) + pow(x[1] - mu[1], 2) + 0.01)
     self.f01 = 1. / sqrt(
         pow(x[0] - mu[0], 4) + pow(x[1] - mu[1], 4) + 0.01)
     # Inner product
     f = TrialFunction(self.V)
     g = TestFunction(self.V)
     self.inner_product = assemble(inner(f, g) * dx)
     # Collapsed vector and space
     self.V0 = V.sub(0).collapse()
     self.V00 = V.sub(0).sub(0).collapse()
     self.V1 = V.sub(1).collapse()
Example #2
    def linear_solver(self, phi_k):

        # cast params as constant functions so that, if they are set to 0, FEniCS still understands
        # what is being integrated
        mu, M = Constant(self.physics.mu), Constant(self.physics.M)
        lam = Constant(self.physics.lam)
        mn, mf = Constant(self.mn), Constant(self.mf)

        D = self.physics.D

        # boundary condition
        Dirichlet_bc = self.get_Dirichlet_bc()

        # trial and test function
        phi = d.TrialFunction(self.fem.S)
        v = d.TestFunction(self.fem.S)

        # r^(D-1)
        rD = Expression('pow(x[0],D-1)', D=D, degree=self.fem.func_degree)

        # bilinear form a and linear form L
        a = - inner( grad(phi), grad(v) ) * rD * dx + ( - 3.*lam*(mf/mn)**2*phi_k**2 \
            - 6.*d.sqrt(lam)*(mu*mf/mn**2)*phi_k - 2.*(mu/mn)**2 ) * phi * v * rD * dx
        L = ( (mn**(D-2.)/(mf*M))*self.source.rho - 2.*lam*(mf/mn)**2*phi_k**3 \
            - 3.*d.sqrt(lam)*(mu*mf/mn**2)*phi_k**2 ) * v * rD * dx

        # define a vector with the solution
        sol = d.Function(self.fem.S)

        # solve linearised system
        pde = d.LinearVariationalProblem(a, L, sol, Dirichlet_bc)
        solver = d.LinearVariationalSolver(pde)
        solver.solve()

        return sol
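Read back from the UFL above, the bilinear and linear forms being assembled appear to be (a sketch inferred from the code, with r^{D-1} the radial measure and phi_k the previous iterate):

\[
a(\phi, v) = \int \Big[ -\nabla\phi\cdot\nabla v
  - \Big( 3\lambda \tfrac{m_f^2}{m_n^2}\,\phi_k^2
        + 6\sqrt{\lambda}\,\tfrac{\mu m_f}{m_n^2}\,\phi_k
        + 2\tfrac{\mu^2}{m_n^2} \Big)\,\phi\, v \Big]\, r^{D-1}\,dr,
\qquad
L(v) = \int \Big[ \tfrac{m_n^{D-2}}{m_f M}\,\rho
  - 2\lambda \tfrac{m_f^2}{m_n^2}\,\phi_k^3
  - 3\sqrt{\lambda}\,\tfrac{\mu m_f}{m_n^2}\,\phi_k^2 \Big]\, v\, r^{D-1}\,dr.
\]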
Example #3
def mesh_Triangle(n=10):
    dom = mshr.Polygon(
        [Point(0., -1.),
         Point(sqrt(3) / 2, 1. / 2),
         Point(-sqrt(3) / 2, 1. / 2)])
    mesh = mshr.generate_mesh(dom, n, "cgal")
    return mesh
Example #4
def les_update(nut_, nut_form, A_mass, At, u_, dt, bc_ksgs, bt, ksgs_sol,
               KineticEnergySGS, CG1, ksgs, delta, **NS_namespace):

    p, q = TrialFunction(CG1), TestFunction(CG1)

    Ck = KineticEnergySGS["Ck"]
    Ce = KineticEnergySGS["Ce"]

    Sij = sym(grad(u_))
    assemble(dt * inner(dot(u_, 0.5 * grad(p)), q) * dx + inner(
        (dt * Ce * sqrt(ksgs) / delta) * 0.5 * p, q) * dx +
             inner(dt * Ck * sqrt(ksgs) * delta * grad(0.5 * p), grad(q)) * dx,
             tensor=At)

    assemble(dt * 2 * Ck * delta * sqrt(ksgs) * inner(Sij, grad(u_)) * q * dx,
             tensor=bt)
    bt.axpy(1.0, A_mass * ksgs.vector())
    bt.axpy(-1.0, At * ksgs.vector())
    At.axpy(1.0, A_mass, True)

    # Solve for ksgs
    bc_ksgs.apply(At, bt)
    ksgs_sol.solve(At, ksgs.vector(), bt)
    ksgs.vector().set_local(ksgs.vector().array().clip(min=1e-7))
    ksgs.vector().apply("insert")

    # Update nut_
    nut_()
Example #5
def les_update(nut_, nut_form, A_mass, At, u_, dt, bc_ksgs, bt, ksgs_sol,
        KineticEnergySGS, CG1, ksgs, delta, **NS_namespace):

    p, q = TrialFunction(CG1), TestFunction(CG1)

    Ck = KineticEnergySGS["Ck"]
    Ce = KineticEnergySGS["Ce"]

    Sij = sym(grad(u_))
    assemble(dt*inner(dot(u_, 0.5*grad(p)), q)*dx
             + inner((dt*Ce*sqrt(ksgs)/delta)*0.5*p, q)*dx
             + inner(dt*Ck*sqrt(ksgs)*delta*grad(0.5*p), grad(q))*dx, tensor=At)
    
    assemble(dt*2*Ck*delta*sqrt(ksgs)*inner(Sij,grad(u_))*q*dx, tensor=bt)
    bt.axpy(1.0, A_mass*ksgs.vector())
    bt.axpy(-1.0, At*ksgs.vector())
    At.axpy(1.0, A_mass, True)

    # Solve for ksgs
    bc_ksgs.apply(At, bt)
    ksgs_sol.solve(At, ksgs.vector(), bt)
    ksgs.vector().set_local(ksgs.vector().array().clip(min=1e-7))
    ksgs.vector().apply("insert")

    # Update nut_
    nut_()
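A possible reading of the two assemble calls above (a sketch inferred from the forms; the source does not state the model): with nu_t = Ck * delta * sqrt(ksgs), the step advances a one-equation SGS kinetic-energy model,

\[
\frac{\partial k}{\partial t} + u\cdot\nabla k
 = \nabla\cdot\big(\nu_t \nabla k\big) + 2\,\nu_t\, S_{ij}\!:\!\nabla u
   - C_\varepsilon\,\frac{k^{3/2}}{\Delta},
 \qquad \nu_t = C_k\,\Delta\,\sqrt{k},
\]

where the transport, dissipation, and diffusion terms are treated in a Crank-Nicolson-like fashion (the factors of 0.5 and the A_mass/At updates) and sqrt(k) is lagged at the previous value.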
Example #6
    def Displacement2Epsilon0(self, U):
        """Convert the displacement localizer into a prestrain field
        to be applied to the subsequent auxiliary problem."""
        Epsilon0 = []

        for i in range(len(U)):
            Epsilon0 = Epsilon0 + [
                fe.as_vector((
                    U[i][0],
                    fe.interpolate(fe.Constant(0.0), self.X),
                    U[i][1] / fe.sqrt(2),
                ))
            ]
        # zero_ = fe.interpolate(fe.Constant(0.0), self.X)
        # # Could fe.Constant(0.0) be used directly?
        # prestrain_ = (U[i][0], zero_, U[i][1] / fe.sqrt(2))
        # prestrain_ = fe.as_vector(prestrain_)
        # Epsilon0.append(prestrain_)

        for i in range(len(U)):
            Epsilon0 = Epsilon0 + [
                fe.as_vector((
                    fe.interpolate(fe.Constant(0.0), self.X),
                    U[i][1],
                    U[i][0] / fe.sqrt(2),
                ))
            ]
        # zero_ = fe.interpolate(fe.Constant(0.0), self.X)
        # # Could fe.Constant(0.0) be used directly?
        # prestrain_ = (zero_, U[i][1], U[i][0] / fe.sqrt(2))
        # prestrain_ = fe.as_vector(prestrain_)
        # Epsilon0.append(prestrain_)
        return Epsilon0
Example #7
def err_est_fun(mesh_name2, hol_cyl, T_sol1, T_sol2, deg_choice2, hol_cyl2,
                count_it):
    '''
    Error estimate.
    '''
    #comm1 = MPI.COMM_WORLD

    #rank1 = comm1.Get_rank()

    #e1 = sqrt(int_Omega (T - T_ex)**2. dx)/sqrt(int_Omega T**2. dx)
    #do.assemble yields integration
    e1 = do.sqrt(do.assemble(pow(T_sol2 - T_sol1, 2.) * do.dx(domain = hol_cyl))) / \
         do.sqrt(do.assemble(pow(T_sol1, 2.) * do.dx(domain = hol_cyl)))

    #sometimes e1 does not work properly (it might be unstable) -> e2
    #the degree of piecewise polynomials used to approximate T_th and T...
    #...will be the degree of T + degree_rise
    e2 = do.errornorm(T_sol1, T_sol2, norm_type = 'l2', degree_rise = 2, mesh = hol_cyl2) / \
         do.sqrt(do.assemble(pow(T_sol1, 2.) * do.dx(domain = hol_cyl)))

    print('err: count_it = {}, error_1 = {}, error_2 = {}'.format(
        count_it, e1, e2))

    #print('rank = ', rank1)
    print('max(T_sol) = ', max(T_sol2.vector().array()))
    #print('rank = ', rank1)
    print('min(T_sol) = ', min(T_sol2.vector().array()), '\n')

    return e1, e2
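In formulas, the two estimates returned above are relative L2 discrepancies (restating the in-code comment):

\[
e_1 = \frac{\sqrt{\int_\Omega (T_2 - T_1)^2\,dx}}{\sqrt{\int_\Omega T_1^2\,dx}},
\qquad
e_2 = \frac{\|T_1 - T_2\|_{L^2(\Omega)}}{\sqrt{\int_\Omega T_1^2\,dx}},
\]

where e_2 uses dolfin's errornorm with degree_rise = 2.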
Example #8
    def strong_residual_form(self, sol, units):

        if units == 'rescaled':
            resc = 1.
        elif units == 'physical':
            resc = self.mn**2 * self.mf
        else:
            message = "Invalid choice of units: valid choices are 'physical' or 'rescaled'."
            raise ValueError(message)

        # cast params as constant functions so that, if they are set to 0, FEniCS still understands
        # what is being integrated
        mu, M = Constant(self.physics.mu), Constant(self.physics.M)
        lam = Constant(self.physics.lam)
        mn, mf = Constant(self.mn), Constant(self.mf)

        D = self.physics.D

        # define r for use in the computation of the Laplacian
        r = Expression('x[0]', degree=self.fem.func_degree)

        # I expand manually the Laplacian into (D-1)/r df/dr + d2f/dr2
        f = sol.dx(0).dx(0) + Constant(D-1.)/r * sol.dx(0) - lam*(mf/mn)**2*sol**3 \
            - 3.*d.sqrt(lam)*(mu*mf/mn**2)*sol**2 - 2.*(mu/mn)**2*sol - mn**(D-2.)/M**2*self.source.rho*sol \
            - (mu*mn**(D-2.)/(mf*M**2))*self.source.rho/d.sqrt(lam)
        f *= resc
        F = project(f, self.fem.dS, self.physics.D, self.fem.func_degree)

        return F
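Written out, the strong residual projected above is (a sketch transcribed from the UFL, with primes denoting d/dr):

\[
F = \phi'' + \frac{D-1}{r}\,\phi'
  - \lambda\Big(\frac{m_f}{m_n}\Big)^{2}\phi^{3}
  - 3\sqrt{\lambda}\,\frac{\mu m_f}{m_n^{2}}\,\phi^{2}
  - 2\Big(\frac{\mu}{m_n}\Big)^{2}\phi
  - \frac{m_n^{D-2}}{M^{2}}\,\rho\,\phi
  - \frac{\mu\, m_n^{D-2}}{\sqrt{\lambda}\, m_f M^{2}}\,\rho,
\]

multiplied by m_n^2 m_f when physical units are requested.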
Example #9
    def evaluateError(cls, w, coeff_field, pde, f, zeta, gamma, ceta, cQ, newmi_add_maxm, maxh=0.1, quadrature_degree= -1, projection_degree_increase=1, refine_projection_mesh=1):
        """Evaluate EGSZ Error (7.5)."""
        logger.debug("starting evaluateError")

        # define store function for timings
        from functools import partial
        def _store_stats(val, key, stats):
            stats[key] = val

        timing_stats = {}
        with timing(msg="ResidualEstimator.evaluateResidualEstimator", logfunc=logger.info, store_func=partial(_store_stats, key="TIME-RESIDUAL", stats=timing_stats)):
            resind, reserror = ResidualEstimator.evaluateResidualEstimator(w, coeff_field, pde, f, quadrature_degree)

        logger.debug("starting evaluateProjectionEstimator")
        with timing(msg="ResidualEstimator.evaluateProjectionError", logfunc=logger.info, store_func=partial(_store_stats, key="TIME-PROJECTION", stats=timing_stats)):
            projind, projerror = ResidualEstimator.evaluateProjectionError(w, coeff_field, pde, maxh, True, projection_degree_increase, refine_projection_mesh)

        logger.debug("starting evaluateInactiveProjectionError")
        with timing(msg="ResidualEstimator.evaluateInactiveMIProjectionError", logfunc=logger.info, store_func=partial(_store_stats, key="TIME-INACTIVE-MI", stats=timing_stats)):
            mierror = ResidualEstimator.evaluateInactiveMIProjectionError(w, coeff_field, pde, maxh, newmi_add_maxm) 

        eta = sum(reserror[mu] ** 2 for mu in reserror)
        delta = sum(projerror[mu] ** 2 for mu in projerror)
        delta_inactive_mi = sum(v[1] ** 2 for v in mierror)
        est1 = ceta / sqrt(1 - gamma) * sqrt(eta)
        est2 = cQ / sqrt(1 - gamma) * sqrt(delta + delta_inactive_mi)
        est3 = cQ * sqrt(zeta / (1 - gamma))
        est4 = zeta / (1 - gamma)
#        xi = (ceta / sqrt(1 - gamma) * sqrt(eta) + cQ / sqrt(1 - gamma) * sqrt(delta)
#              + cQ * sqrt(zeta / (1 - gamma))) ** 2 + zeta / (1 - gamma)
        xi = (est1 + est2 + est3) ** 2 + est4
        logger.info("Total Residual ERROR Factors: A1=%s  A2=%s  A3=%s  A4=%s", ceta / sqrt(1 - gamma), cQ / sqrt(1 - gamma), cQ * sqrt(zeta / (1 - gamma)), zeta / (1 - gamma))
        return (xi, resind, projind, mierror, (est1, est2, est3, est4), (eta, delta, zeta), timing_stats)
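The returned total estimator therefore combines the four factors as (matching the commented-out formula and the est1..est4 definitions):

\[
\xi = \Big( \frac{c_\eta}{\sqrt{1-\gamma}}\sqrt{\eta}
      + \frac{c_Q}{\sqrt{1-\gamma}}\sqrt{\delta + \delta_{\mathrm{inactive}}}
      + c_Q\sqrt{\tfrac{\zeta}{1-\gamma}} \Big)^{2}
      + \frac{\zeta}{1-\gamma},
\]

with eta, delta, delta_inactive the summed squares of the residual, projection, and inactive-multiindex contributions.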
Example #10
def _tau_b():
    P_i = rho * g * Hmid  # Overburden, or hydrostatic pressure of ice
    P_w = Max(-rho_w * g * B,
              thklim)  # Water pressure is either ocean pressure or zero
    N = Max((P_i - P_w) / (rho * g),
            thklim)  # Effective pressure is P_i - P_w, rho*g appears in A_s
    normalx = (B.dx(0)) / sqrt((B.dx(0)**2 + 1.0))
    normalz = sqrt(1 - normalx**2)
    return mu * A_s * (abs(u(1)) + 1.0)**(1. / n - 1) * u(1) * (
        abs(N) + 1.0)**(1. / m) * (1 - normalx**2)
Example #11
 def nu_t(self, m):
     """
     The turbulent viscosity nu_t = C*sqrt(inletMomentum)*(x+a*inletWidth)^{1/2}
     where C = exp(m[0]) and a = exp( m[1] )
     """
     #C = dl.exp(m[0])
     #a = dl.exp(m[1])
     C = self._C(m)
     a = self._a(m)
     return C * dl.sqrt(
         self.inletMomentun) * dl.sqrt(self.xfun + a * self.inletWidth)
Example #12
    def get_lambdas(self):
        C_n = self.F_n.T * self.F_n
        C_0 = self.F_0.T * self.F_0
        I1 = inner(inv(C_0), C_n)
        I2 = det(C_n) / det(C_0)
        delta = sqrt(I1**2 - 4 * I2)

        lambda1 = sqrt(0.5 * (I1 + delta))
        lambda2 = sqrt(0.5 * (I1 - delta))
        lambda3 = sqrt(det(self.C_0) / det(self.C_n))

        return lambda1, lambda2, lambda3
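Equivalently, with I1 = C_0^{-1} : C_n and I2 = det C_n / det C_0, the stretches returned above are

\[
\lambda_{1,2} = \sqrt{\tfrac12\Big(I_1 \pm \sqrt{I_1^{2} - 4 I_2}\Big)},
\qquad
\lambda_3 = \sqrt{\frac{\det C_0}{\det C_n}}.
\]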
Example #13
def d2_dr2(du):
    d2u_dx2 = df.Dx(df.Dx(du, 0), 0)
    du_dxdy = df.Dx(df.Dx(du, 0), 1)
    d2u_dy2 = df.Dx(df.Dx(du, 1), 1)
    du_dx = d_dx(du)
    du_dy = d_dy(du)
    d2u_dr2 = (x *
               (-x * (x * du_dx + y * du_dy) / (x**2 + y**2)**(3 / 2) +
                (x * d2u_dx2 + y * du_dxdy + du_dx) / df.sqrt(x**2 + y**2)) +
               y * (-y * (x * du_dx + y * du_dy) / (x**2 + y**2)**(3 / 2) +
                    (x * du_dxdy + y * d2u_dy2 + du_dy) / df.sqrt(x**2 + y**2))
               ) / df.sqrt(x**2 + y**2)
    return d2u_dr2
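The expression above is the chain rule applied twice; with r = sqrt(x^2 + y^2) and du/dr = (x u_x + y u_y)/r, it evaluates

\[
\frac{\partial^{2} u}{\partial r^{2}}
 = \frac{1}{r}\Big( x\,\frac{\partial}{\partial x} + y\,\frac{\partial}{\partial y} \Big)
   \Big( \frac{x\,u_x + y\,u_y}{r} \Big).
\]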
Example #14
    def __init__(self, mesh, parameters=[], isprint=False):
        self.parameters = {}
        self.parameters['eps'] = 0.0
        self.parameters['k'] = 1.0
        self.parameters['correctcost'] = True
        self.parameters.update(parameters)
        eps = self.parameters['eps']
        k = self.parameters['k']

        self.V = FunctionSpace(mesh, 'CG', 1)
        self.tmp1, self.tmp2 = Function(self.V), Function(self.V)

        self.VV = VectorFunctionSpace(mesh, 'CG', 1, 2)
        self.m = Function(self.VV)
        self.mtest = TestFunction(self.VV)
        self.mtrial = TrialFunction(self.VV)
        self.m1, self.m2 = split(self.m)
        self.mh = Function(self.VV)

        normg1 = inner(nabla_grad(self.m1), nabla_grad(self.m1))
        normg2 = inner(nabla_grad(self.m2), nabla_grad(self.m2))
        if self.parameters['correctcost']:
            meshtmp = UnitSquareMesh(mesh.mpi_comm(), 10, 10)
            Vtmp = FunctionSpace(meshtmp, 'CG', 1)
            x = SpatialCoordinate(meshtmp)
            self.correctioncost = 1. / assemble(sqrt(4.0 * x[0] * x[0]) * dx)
            print('[NuclearNormformula] Correction cost with factor={}'.format(
                self.correctioncost))
        else:
            self.correctioncost = 1.0
        self.cost = 1./np.sqrt(2.0) * Constant(k) * (\
        sqrt(normg1 + normg2 + Constant(np.sqrt(eps)) +
        sqrt((normg1 - normg2)**2 + Constant(eps) +
        4.0*inner(nabla_grad(self.m1), nabla_grad(self.m2))**2))
        + sqrt(normg1 + normg2 + Constant(np.sqrt(eps)*(1.0+1e-15)) -
        sqrt((normg1 - normg2)**2 + Constant(eps) +
        4.0*inner(nabla_grad(self.m1), nabla_grad(self.m2))**2)))*dx

        self.grad = derivative(self.cost, self.m, self.mtest)

        self.hessian = derivative(self.grad, self.m, self.mtrial)

        M = assemble(inner(self.mtest, self.mtrial) * dx)
        factM = 1e-2 * k
        self.sMass = M * factM

        if isprint:
            print('[NuclearNormformula] eps={}, k={}'.format(eps, k))

        self.amgprecond = amg_solver()
Example #15
    def _compute_global_averages(self, velocity, temperature):
        assert hasattr(self, "_mesh")
        dV = dlfn.Measure("dx", domain=self._mesh)
        V = dlfn.assemble(self._one * dV)

        from dolfin import dot
        velocity_sqrd = dlfn.assemble(dot(velocity, velocity) * dV)
        temperature_sqrd = dlfn.assemble(temperature * temperature * dV)

        from math import sqrt
        kinetic_energy = 0.5 * velocity_sqrd / V
        velocity_rms = sqrt(velocity_sqrd / V)
        temperature_rms = sqrt(temperature_sqrd / V)

        return velocity_rms, kinetic_energy, temperature_rms
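In other words the returned diagnostics are volume averages over the mesh, with V the total mesh volume:

\[
u_{\mathrm{rms}} = \sqrt{\frac{1}{V}\int_\Omega u\cdot u\,dV},\qquad
E_{\mathrm{kin}} = \frac{1}{2V}\int_\Omega u\cdot u\,dV,\qquad
T_{\mathrm{rms}} = \sqrt{\frac{1}{V}\int_\Omega T^{2}\,dV}.
\]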
Example #16
    def updateCoefficients(self):

        x, y = SpatialCoordinate(self.mesh)

        self.a = as_matrix([[-5. / ln(sqrt(pow(x, 2) + pow(y, 2))) + 15, 1.0],
                            [1.0, -1. / ln(sqrt(pow(x, 2) + pow(y, 2))) + 3]])

        # Init exact solution
        self.u_ = pow(sqrt(pow(x, 2) + pow(y, 2)), 7. / 4)

        # Init right-hand side
        self.f = inner(self.a, grad(grad(self.u_)))

        # Set boundary conditions to exact solution
        self.g = self.u_
Example #17
def setup_NSu(u, v, u_, p_, bcs_NSu, u_1, p_1, phi_, rho_, rho_1, g_, M_, nu_,
              rho_e_, V_, dt, drho, sigma_bar, eps, dveps, grav, enable_PF,
              enable_EC):
    """ Set up the Navier-Stokes subproblem. """
    # Crank-Nicolson velocity
    # u_CN = 0.5*(u_1 + u)

    F_predict = (
        1. / dt * df.sqrt(rho_) *
        df.dot(df.sqrt(rho_) * u - df.sqrt(rho_1) * u_1, v) * df.dx
        # + rho_*df.inner(df.grad(u), df.outer(u_1, v))*df.dx
        # + 2*nu_*df.inner(df.sym(df.grad(u)), df.grad(v))*df.dx
        # - p_1 * df.div(v)*df.dx
        # + df.div(u)*q*df.dx
        + rho_ * df.dot(df.dot(u_1, df.nabla_grad(u)), v) * df.dx +
        2 * nu_ * df.inner(df.sym(df.grad(u)), df.sym(df.grad(v))) * df.dx -
        p_1 * df.div(v) * df.dx - df.dot(rho_ * grav, v) * df.dx)

    phi_filtered = unit_interval_filter(phi_)
    if enable_PF:
        F_predict += -drho * M_ * df.dot(
            df.dot(df.nabla_grad(g_), df.nabla_grad(u)), v) * df.dx
        F_predict += -sigma_bar * eps * df.inner(
            df.outer(df.grad(phi_filtered), df.grad(phi_filtered)),
            df.grad(v)) * df.dx
    if enable_EC and rho_e_ != 0:
        F_predict += rho_e_ * df.dot(df.grad(V_), v) * df.dx
    if enable_PF and enable_EC:
        F_predict += dveps * df.dot(df.grad(phi_filtered), v) * df.dot(
            df.grad(V_), df.grad(V_)) * df.dx

    # a1, L1 = df.lhs(F_predict), df.rhs(F_predict)

    F_correct = (df.inner(u - u_, v) * df.dx +
                 dt / rho_ * df.inner(df.grad(p_ - p_1), v) * df.dx)
    # a3 = df.dot(u, v)*df.dx
    # L3 = df.dot(u_, v)*df.dx - dt*df.dot(df.grad(p_), v)*df.dx
    # a3, L3 = df.lhs(F_correct), df.rhs(F_correct)

    solver = dict()
    # solver["a1"] = a1
    # solver["L1"] = L1
    solver["Fu"] = F_predict
    solver["Fu_corr"] = F_correct
    # solver["a3"] = a3
    # solver["L3"] = L3
    solver["bcs"] = bcs_NSu
    return solver
Example #18
    def updateCoefficients(self):

        # Init coefficient matrix
        x, y = SpatialCoordinate(self.mesh)

        self.a = 0.5 * as_matrix(
            [[cos(self.gamma[0]), sin(self.gamma[0])],
             [- sin(self.gamma[0]), cos(self.gamma[0])]]) \
            * as_matrix([[1, sin(self.gamma[1])], [0, cos(self.gamma[1])]]) \
            * as_matrix([[1, 0], [sin(self.gamma[1]), cos(self.gamma[1])]]) \
            * as_matrix(
            [[cos(self.gamma[0]), - sin(self.gamma[0])],
             [sin(self.gamma[0]), cos(self.gamma[0])]])
        self.b = as_vector([Constant(0.0), Constant(0.0)])
        self.c = -pi**2

        self.u_ = exp(x * y) * sin(pi * x) * sin(pi * y)

        # Init right-hand side
        self.f = - sqrt(3) * (sin(self.gamma[1])/pi)**2 \
            + 111111
        # TODO work here

        # Set boundary conditions
        self.g = Constant(0.0)
Example #19
def append_fun(v, p, T):
    # time
    t_list.append(t)
    # save values at time history point 1
    vx_list1.append(v(np.array(point1))[0])
    vy_list1.append(v(np.array(point1))[1])
    T_list1.append(T(np.array(point1)))
    p_list1.append(p(np.array(point1)))
    # pressure differences
    p_diff_list14.append(p(np.array(point1)) - p(np.array(point4)))
    p_diff_list51.append(p(np.array(point5)) - p(np.array(point1)))
    p_diff_list35.append(p(np.array(point3)) - p(np.array(point5)))
    # average velocity metric
    v_metric_list.append(dlfn.sqrt(kin_e))
    # skewness metric 
    skew_metric_list12.append(T(np.array(point1)) + T(np.array(point2)))
    skew_metric_vx_list12.append(v(np.array(point1))[0]
                                + v(np.array(point2))[0])
    # average vorticity metric
    omega_metric_list.append(omega_metric)
    # nusselt lists
    nusselt_left_list.append(nusselt_left)
    nusselt_right_list.append(nusselt_right)
    # list of iteration
    iteration_list.append(cnt)
    return 
Example #20
 def update_multimesh(self, step):
     move_norm = []
     hmins = []
     move_max = []
     for i in range(1, self.N):
         s_move = self.deformation[i - 1].copy(True)
         s_move.vector()[:] *= step
         # Approximate geodesic distance
         dDeform = Measure("ds", subdomain_data=self.mfs[i])
         n_i = FacetNormal(self.multimesh.part(i))
         geo_dist_i = inner(s_move, s_move)*\
                      dDeform(self.move_dict[i]["Deform"])
         move_norm.append(assemble(geo_dist_i))
         # move_max.append(project(sqrt(s_move[0]**2 + s_move[1]**2),
         #                         FunctionSpace(self.multimesh.part(i),"CG",1)
         #                         ).vector().max())
         # hmins.append(self.multimesh.part(i).hmin())
         ALE.move(self.multimesh.part(i), s_move)
     # Compute L2 norm of movement
     self.move_norm = sqrt(sum(move_norm))
     # self.move_max = max(move_max)
     # print(hmins, move_max)
     self.multimesh.build()
     for key in self.cover_points.keys():
         self.multimesh.auto_cover(key, self.cover_points[key])
Example #21
def identity_test(n, shape, subd=I_curve):
    '''Averaging over indep coords of f'''
    true = df.Expression('x[2]*x[2]', degree=2)

    mesh = df.UnitCubeMesh(n, n, n)
    V = df.FunctionSpace(mesh, 'CG', 2)
    v = df.interpolate(true, V)

    f = df.MeshFunction('size_t', mesh, 1, 0)
    subd.mark(f, 1)

    line_mesh = EmbeddedMesh(f, 1)
    Q = average_space(V, line_mesh)
    q = df.Function(Q)

    Pi = avg_mat(V, Q, line_mesh, {'shape': shape})
    Pi.mult(v.vector(), q.vector())

    q0 = true
    # Error
    L = df.inner(q0 - q, q0 - q) * df.dx

    e = q.vector().copy()
    e.axpy(-1, df.interpolate(q0, Q).vector())

    return df.sqrt(abs(df.assemble(L)))
Example #22
def checktheboundarycoordinates(bcsd, femp, plot=False):
    g1 = dolfin.Constant((0, 0))
    for bc in bcsd:
        bcrl = dolfin.DirichletBC(femp['V'], g1, bc())
        bcdict = bcrl.get_boundary_values()
        print(list(bcdict.keys()))

    bcinds = list(bcdict.keys())

    V = femp['V']

    cylmesh = femp['V'].mesh()
    if plot:
        dolfin.plot(cylmesh)
        dolfin.interactive(True)

    gdim = cylmesh.geometry().dim()
    dofmap = V.dofmap()

    # Get coordinates as len(dofs) x gdim array
    dofs_x = dofmap.tabulate_all_coordinates(cylmesh).reshape((-1, gdim))

    # for dof, dof_x in zip(dofs, dofs_x):
    #     print dof, ':', dof_x
    xcenter = 0.2
    ycenter = 0.2
    for bcind in bcinds:
        dofx = dofs_x[bcind, :]
        dx = dofx[0] - xcenter
        dy = dofx[1] - ycenter
        r = dolfin.sqrt(dx*dx + dy*dy)
        print(bcind, ':', dofx, r)
Example #23
def les_setup(u_, mesh, KineticEnergySGS, assemble_matrix, CG1Function, nut_krylov_solver, bcs, **NS_namespace):
    """
    Set up for solving the Kinetic Energy SGS-model.
    """
    DG = FunctionSpace(mesh, "DG", 0)
    CG1 = FunctionSpace(mesh, "CG", 1)
    dim = mesh.geometry().dim()
    delta = Function(DG)
    delta.vector().zero()
    delta.vector().axpy(1.0, assemble(TestFunction(DG)*dx))
    delta.vector().set_local(delta.vector().array()**(1./dim))
    delta.vector().apply('insert')
    
    Ck = KineticEnergySGS["Ck"]
    ksgs = interpolate(Constant(1E-7), CG1)
    bc_ksgs = DirichletBC(CG1, 0, "on_boundary")
    A_mass = assemble_matrix(TrialFunction(CG1)*TestFunction(CG1)*dx)
    nut_form = Ck * delta * sqrt(ksgs)
    bcs_nut = derived_bcs(CG1, bcs['u0'], u_)
    nut_ = CG1Function(nut_form, mesh, method=nut_krylov_solver, bcs=bcs_nut, bounded=True, name="nut")
    At = Matrix()
    bt = Vector(nut_.vector())
    ksgs_sol = KrylovSolver("bicgstab", "additive_schwarz")
    ksgs_sol.parameters["preconditioner"]["structure"] = "same_nonzero_pattern"
    ksgs_sol.parameters["error_on_nonconvergence"] = False
    ksgs_sol.parameters["monitor_convergence"] = False
    ksgs_sol.parameters["report"] = False
    del NS_namespace
    return locals()
Example #24
def les_setup(u_, mesh, Smagorinsky, CG1Function, nut_krylov_solver, bcs,
              **NS_namespace):
    """
    Set up for solving Smagorinsky-Lilly LES model.
    """
    DG = FunctionSpace(mesh, "DG", 0)
    CG1 = FunctionSpace(mesh, "CG", 1)

    # Compute cell size and put in delta
    dim = mesh.geometry().dim()
    delta = Function(DG)
    delta.vector().zero()
    delta.vector().set_local(
        assemble(TestFunction(DG) * dx).array()**(1. / dim))
    delta.vector().apply('insert')

    # Set up Smagorinsky form
    Sij = sym(grad(u_))
    magS = sqrt(2 * inner(Sij, Sij))
    nut_form = Smagorinsky['Cs']**2 * delta**2 * magS
    bcs_nut = derived_bcs(CG1, bcs['u0'], u_)
    nut_ = CG1Function(nut_form,
                       mesh,
                       method=nut_krylov_solver,
                       bcs=bcs_nut,
                       bounded=True,
                       name="nut")
    return dict(Sij=Sij, nut_=nut_, delta=delta, bcs_nut=bcs_nut)
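The eddy viscosity assembled above is the standard Smagorinsky-Lilly closure (restating nut_form):

\[
\nu_t = (C_s\,\Delta)^{2}\,|S|,\qquad
|S| = \sqrt{2\,S_{ij}S_{ij}},\qquad
S_{ij} = \tfrac12\big(\partial_j u_i + \partial_i u_j\big),
\]

with Delta taken as the local cell volume raised to the power 1/dim, as computed into delta above.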
Example #25
 def eval(self, value, x):
     value[0], value[1] = 1., 1.
     if verbose:
         dx = x[0] - xcenter
         dy = x[1] - ycenter
         r = dolfin.sqrt(dx*dx + dy*dy)
         print(x, dx, dy, r)
Example #26
    def update(self):
        if not self.active:
            return

        with Timer('Ocellaris update hydrostatic pressure'):
            A = self.tensor_lhs
            b = assemble(self.form_rhs)

            if self.null_space is None:
                # Create vector that spans the null space, and normalize
                null_space_vector = b.copy()
                null_space_vector[:] = sqrt(1.0 / null_space_vector.size())

                # Create null space basis object and attach to PETSc matrix
                self.null_space = VectorSpaceBasis([null_space_vector])
                as_backend_type(A).set_nullspace(self.null_space)

            self.null_space.orthogonalize(b)
            self.solver.solve(A, self.func.vector(), b)

        if not self.every_timestep:
            # Give initial values for p, but do not continuously compute p_hydrostatic
            sim = self.simulation
            p = sim.data['p']
            if p.vector().max() == p.vector().min() == 0.0:
                sim.log.info(
                    'Initial pressure field is identically zero, initializing to hydrostatic'
                )
                p.interpolate(self.func)

            # Disable further hydrostatic pressure calculations
            self.func.vector().zero()
            del sim.data['p_hydrostatic']
            del self.func
            self.active = False
Example #27
 def eval(self, value, x):
     value[0], value[1] = 1., 1.
     if verbose:
         dx = x[0] - xcenter
         dy = x[1] - ycenter
         r = dolfin.sqrt(dx * dx + dy * dy)
         print(x, dx, dy, r)
Example #28
def disk_test(n, subd=I_curve):
    '''Averaging over indep coords of f'''
    shape = Disk(radius=lambda x0: 0.1 + 0.0 * x0[2] / 2, degree=10)
    foo = df.Expression('x[2]*((x[0]-0.5)*(x[0]-0.5) + (x[1]-0.5)*(x[1]-0.5))',
                        degree=3)

    mesh = df.UnitCubeMesh(n, n, n)
    V = df.FunctionSpace(mesh, 'CG', 3)
    v = df.interpolate(foo, V)

    f = df.MeshFunction('size_t', mesh, 1, 0)
    subd.mark(f, 1)

    true = df.Expression('x[2]*(0.1+0.0*x[2]/2)*(0.1+0.0*x[2]/2)/2', degree=4)

    line_mesh = EmbeddedMesh(f, 1)
    Q = average_space(V, line_mesh)
    q = df.Function(Q)

    Pi = avg_mat(V, Q, line_mesh, {'shape': shape})
    Pi.mult(v.vector(), q.vector())

    q0 = true
    # Error
    L = df.inner(q0 - q, q0 - q) * df.dx

    e = q.vector().copy()
    e.axpy(-1, df.interpolate(q0, Q).vector())

    return df.sqrt(abs(df.assemble(L)))
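The `true` expression encodes the analytic cross-section average: for a disk of radius R centred on the line, the mean of (x-1/2)^2 + (y-1/2)^2 is

\[
\frac{1}{\pi R^{2}}\int_{0}^{R} \rho^{2}\, 2\pi\rho\, d\rho = \frac{R^{2}}{2},
\]

so the averaged field is x_2 R^2/2 with R = 0.1 here.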
Example #29
    def test_to_DG0_subdomain(self):
        mesh = df.UnitSquareMesh(4, 4)
        cell_f = df.MeshFunction('size_t', mesh, 2, 0)
        df.CompiledSubDomain('x[0] < 0.5 + DOLFIN_EPS').mark(cell_f, 1)

        submesh = EmbeddedMesh(cell_f, 1)

        transfer = SubMeshTransfer(mesh, submesh)

        V = df.FunctionSpace(mesh, 'DG', 0)
        Vsub = df.FunctionSpace(submesh, 'DG', 0)

        to_Vsub = transfer.compute_map(Vsub, V, strict=True)
        # Set degree 0 to get the quad order right
        f = df.Expression('x[0] + 2*x[1]', degree=0)

        fV = df.interpolate(f, V)
        fsub = df.Function(Vsub)

        to_Vsub(fsub, fV)

        error = df.inner(fsub - f, fsub - f)*df.dx(domain=submesh)
        error = df.sqrt(abs(df.assemble(error)))

        self.assertTrue(error < 1E-13)
Example #30
    def evalGradientParameter(self, x, mg, misfit_only=False):
        """
        Evaluate the gradient for the variational parameter equation at the point x=[u,a,p].
        Parameters:
        - x = [u,a,p] the point at which to evaluate the gradient.
        - mg the variational gradient (g, atest), where atest is a test function in the parameter space
          (output parameter)
        
        Returns the norm of the gradient in the correct inner product g_norm = sqrt(g,g)
        """
        C = self.assembleC(x)

        self.prior.init_vector(mg, 0)
        C.transpmult(x[ADJOINT], mg)

        if misfit_only == False:
            Rdx = dl.Vector()
            self.prior.init_vector(Rdx, 0)
            dx = x[PARAMETER] - self.prior.mean
            self.prior.R.mult(dx, Rdx)
            mg.axpy(1., Rdx)

        g = dl.Vector()
        self.prior.init_vector(g, 1)

        self.prior.Msolver.solve(g, mg)
        g_norm = dl.sqrt(g.inner(mg))

        return g_norm
Example #31
    def evaluateEstimator(cls, w, coeff_field, pde, f, quadrature_degree= -1, osc_quadrature_degree = 15):
        """Evaluate patch local equilibration estimator for all active mu of w."""

        # TODO: determine oscillations of coeff_field and calculate with projected coefficients?!

        # use uBLAS backend for conversion to scipy sparse matrices
        backup_backend = parameters.linear_algebra_backend
        parameters.linear_algebra_backend = "uBLAS"

        # determine rhs oscillations
        mu0 = Multiindex()
        mesh = w[mu0]._fefunc.function_space().mesh()
        degree = element_degree(w[mu0]._fefunc)
        DG0 = FunctionSpace(mesh, 'DG', 0)
#        DG0_dofs = dict([(c.index(),DG0.dofmap().cell_dofs(c.index())[0]) for c in cells(mesh)])
        dg0 = TestFunction(DG0)
        osc_global, osc_local, Pf = evaluate_oscillations(f, mesh, degree - 1, dg0, osc_quadrature_degree)

        # evaluate global equilibration estimators
        eta_local = MultiVector()
        eta = {}
        for mu in w.active_indices():
            eta[mu], eta_local[mu] = cls._evaluateLocalEstimator(mu, w, coeff_field, pde, Pf, quadrature_degree)
        global_eta = sqrt(sum([v ** 2 for v in eta.values()]))

        # restore backend and return estimator
        parameters.linear_algebra_backend = backup_backend
        return global_eta, eta, eta_local, osc_global, osc_local
Example #32
def les_setup(u_, mesh, KineticEnergySGS, assemble_matrix, CG1Function,
              nut_krylov_solver, bcs, **NS_namespace):
    """
    Set up for solving the Kinetic Energy SGS-model.
    """
    DG = FunctionSpace(mesh, "DG", 0)
    CG1 = FunctionSpace(mesh, "CG", 1)
    dim = mesh.geometry().dim()
    delta = Function(DG)
    delta.vector().zero()
    delta.vector().axpy(1.0, assemble(TestFunction(DG) * dx))
    delta.vector().set_local(delta.vector().array()**(1. / dim))
    delta.vector().apply('insert')

    Ck = KineticEnergySGS["Ck"]
    ksgs = interpolate(Constant(1E-7), CG1)
    bc_ksgs = DirichletBC(CG1, 0, "on_boundary")
    A_mass = assemble_matrix(TrialFunction(CG1) * TestFunction(CG1) * dx)
    nut_form = Ck * delta * sqrt(ksgs)
    bcs_nut = derived_bcs(CG1, bcs['u0'], u_)
    nut_ = CG1Function(nut_form,
                       mesh,
                       method=nut_krylov_solver,
                       bcs=bcs_nut,
                       bounded=True,
                       name="nut")
    At = Matrix()
    bt = Vector(nut_.vector())
    ksgs_sol = KrylovSolver("bicgstab", "additive_schwarz")
    #ksgs_sol.parameters["preconditioner"]["structure"] = "same_nonzero_pattern"
    ksgs_sol.parameters["error_on_nonconvergence"] = False
    ksgs_sol.parameters["monitor_convergence"] = False
    ksgs_sol.parameters["report"] = False
    del NS_namespace
    return locals()
Example #33
def test_failsafe_sweep():
    interpolate_expression = Expression('x[0]', degree=1)
    mesh = UnitSquareMesh(5, 5)
    V = FunctionSpace(mesh, "DG", 1)

    v = Function(V)
    v.assign(interpolate_expression)

    np_min, np_max = 1, 2
    np_failsafe = 4

    # Initialize particles
    x = RandomRectangle(Point(0.0, 0.0), Point(1., 1.)).generate([100, 100])
    s = assign_particle_values(x, interpolate_expression)
    # Broadcast to other procs
    x = comm.bcast(x, root=0)
    s = comm.bcast(s, root=0)

    property_idx = 1
    p = particles(x, [s], mesh)
    AD = AddDelete(p, np_min, np_max, [v])
    AD.do_sweep_failsafe(np_failsafe)

    # Must recover linear
    lstsq_rho = l2projection(p, V, property_idx)
    lstsq_rho.project(v.cpp_object())

    error = sqrt(
        assemble(
            (v - interpolate_expression) * (v - interpolate_expression) * dx))

    assert len(p.positions()) == mesh.num_cells() * np_failsafe
    assert error < 1e-12
Example #34
def checktheboundarycoordinates(bcsd, femp, plot=False):
    g1 = dolfin.Constant((0, 0))
    for bc in bcsd:
        bcrl = dolfin.DirichletBC(femp['V'], g1, bc())
        bcdict = bcrl.get_boundary_values()
        print(bcdict.keys())

    bcinds = bcdict.keys()

    V = femp['V']

    cylmesh = femp['V'].mesh()
    if plot:
        dolfin.plot(cylmesh)
        dolfin.interactive(True)

    gdim = cylmesh.geometry().dim()
    dofmap = V.dofmap()

    # Get coordinates as len(dofs) x gdim array
    dofs_x = dofmap.tabulate_all_coordinates(cylmesh).reshape((-1, gdim))

    # for dof, dof_x in zip(dofs, dofs_x):
    #     print dof, ':', dof_x
    xcenter = 0.2
    ycenter = 0.2
    for bcind in bcinds:
        dofx = dofs_x[bcind, :]
        dx = dofx[0] - xcenter
        dy = dofx[1] - ycenter
        r = dolfin.sqrt(dx*dx + dy*dy)
        print(bcind, ':', dofx, r)
Example #35
    def updateCoefficients(self):

        x, y, z = SpatialCoordinate(self.mesh)

        # Init coefficient matrix
        self.a = as_matrix([[Constant(1.),
                             Constant(0.),
                             Constant(0.)],
                            [Constant(0.),
                             Constant(1.),
                             Constant(0.)],
                            [Constant(0.),
                             Constant(0.),
                             Constant(1.)]])

        # Set up explicit solution
        print('Chosen alpha is {}'.format(self.alpha))
        print('Solution is in H^{}'.format(1.5 + self.alpha))

        r = sqrt((x - .5)**2 + (y - .5)**2 + (z - .5)**2)
        self.u_ = r**self.alpha

        # Init right-hand side
        self.f = inner(self.a, grad(grad(self.u_)))

        # Set boundary conditions to exact solution
        self.g = self.u_
Example #36
def square_test(n, subd=I_curve):
    '''Averaging over indep coords of f'''
    size = 0.1
    shape = Square(P=lambda x0: x0 - np.array(
        [size + size * x0[2], size + size * x0[2], 0]),
                   degree=10)
    foo = df.Expression('x[2]*((x[0]-0.5)*(x[0]-0.5) + (x[1]-0.5)*(x[1]-0.5))',
                        degree=3)

    mesh = df.UnitCubeMesh(n, n, n)
    V = df.FunctionSpace(mesh, 'CG', 3)
    v = df.interpolate(foo, V)

    f = df.MeshFunction('size_t', mesh, 1, 0)
    subd.mark(f, 1)

    true = df.Expression('x[2]*2./3*(size+size*x[2])*(size+size*x[2])',
                         degree=4,
                         size=size)

    line_mesh = EmbeddedMesh(f, 1)
    Q = average_space(V, line_mesh)
    q = df.Function(Q)

    Pi = avg_mat(V, Q, line_mesh, {'shape': shape})
    Pi.mult(v.vector(), q.vector())

    q0 = true
    # Error
    L = df.inner(q0 - q, q0 - q) * df.dx

    e = q.vector().copy()
    e.axpy(-1, df.interpolate(q0, Q).vector())

    return df.sqrt(abs(df.assemble(L)))
Example #37
def _adaptive_mesh_refinement(dx, phi, mu, sigma, omega, conv, voltages):
    from dolfin import cells, refine
    eta = _error_estimator(dx, phi, mu, sigma, omega, conv, voltages)
    mesh = phi.function_space().mesh()
    level = 0
    TOL = 1.0e-4
    E = sum([e * e for e in eta])
    E = sqrt(MPI.sum(E))
    info('Level %d: E = %g (TOL = %g)' % (level, E, TOL))
    # Mark cells for refinement
    REFINE_RATIO = 0.5
    cell_markers = MeshFunction('bool', mesh, mesh.topology().dim())
    eta_0 = sorted(eta, reverse=True)[int(len(eta) * REFINE_RATIO)]
    eta_0 = MPI.max(eta_0)
    for c in cells(mesh):
        cell_markers[c] = eta[c.index()] > eta_0
    # Refine mesh
    mesh = refine(mesh, cell_markers)
    # Plot mesh
    plot(mesh)
    interactive()
    exit()
    ## Compute error indicators
    #K = array([c.volume() for c in cells(mesh)])
    #R = array([abs(source([c.midpoint().x(), c.midpoint().y()])) for c in cells(mesh)])
    #gam = h*R*sqrt(K)
    return
Example #38
 def update(self, parameters=None):
     """ Update the parameters.
     parameters should be:
         - k(x) = factor inside TV
         - eps = regularization parameter
         - Vm = FunctionSpace for parameter. 
     ||f||_TV = int k(x) sqrt{|grad f|^2 + eps} dx
     """
     # reset some variables
     self.H = None
      # update parameters
     if parameters == None:  
         parameters = self.parameters
     else:
         self.parameters.update(parameters)
     GN = self.parameters['GNhessian']
     self.Vm = self.parameters['Vm']
     eps = self.parameters['eps']
     self.k = self.parameters['k']
     # define functions
     self.m = Function(self.Vm)
     self.test, self.trial = TestFunction(self.Vm), TrialFunction(self.Vm)
     # frequently-used variable
     self.fTV = inner(nabla_grad(self.m), nabla_grad(self.m)) + Constant(eps)
     self.kovsq = self.k / sqrt(self.fTV)
     #
     # cost functional
     self.wkformcost = self.k*sqrt(self.fTV)*dx
     # gradient
     self.wkformgrad = self.kovsq*inner(nabla_grad(self.m), nabla_grad(self.test))*dx
     # Hessian
     self.wkformGNhess = self.kovsq*inner(nabla_grad(self.trial), nabla_grad(self.test))*dx
     self.wkformFhess = self.kovsq*( \
     inner(nabla_grad(self.trial), nabla_grad(self.test)) - \
     inner(nabla_grad(self.m), nabla_grad(self.test))*\
     inner(nabla_grad(self.trial), nabla_grad(self.m))/self.fTV )*dx
     if self.isPD(): 
         self.updatePD()
         self.wkformhess = self.wkformPDhess
          print('TV regularization -- primal-dual Newton')
     else:
         if GN: 
             self.wkformhess = self.wkformGNhess
              print('TV regularization -- GN Hessian')
         else:   
             self.wkformhess = self.wkformFhess
              print('TV regularization -- full Hessian')
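Restated, the forms defined above discretise the smoothed total-variation functional and its first two variations:

\[
J(m) = \int_\Omega k\,\sqrt{|\nabla m|^{2} + \varepsilon}\;dx,\qquad
J'(m)[\tilde m] = \int_\Omega \frac{k\,\nabla m\cdot\nabla \tilde m}{\sqrt{|\nabla m|^{2}+\varepsilon}}\;dx,
\]
\[
J''(m)[\hat m,\tilde m] = \int_\Omega \frac{k}{\sqrt{|\nabla m|^{2}+\varepsilon}}
  \Big( \nabla\hat m\cdot\nabla\tilde m
  - \frac{(\nabla m\cdot\nabla\tilde m)\,(\nabla\hat m\cdot\nabla m)}{|\nabla m|^{2}+\varepsilon} \Big)\,dx,
\]

and the Gauss-Newton variant keeps only the first term of the Hessian integrand.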
Example #39
 def inside(self, x, on_boundary):
     dx = x[0] - xcenter
     dy = x[1] - ycenter
     r = dolfin.sqrt(dx*dx + dy*dy)
     oncyl = on_boundary and r < radius + bmarg and dy < 0 and dx > 0
     if r < radius + bmarg:
          print(r, oncyl, on_boundary)
     return oncyl
Example #40
 def set_abc(self, mesh, class_bc_abc, lumpD=False):
     self.abc = True # False means zero-Neumann all-around
     if lumpD:    self.lumpD = True
     abc_boundaryparts = FacetFunction("size_t", mesh)
     class_bc_abc.mark(abc_boundaryparts, 1)
     self.ds = Measure("ds")[abc_boundaryparts]
     self.weak_d = inner(sqrt(self.lam*self.rho)*self.trial, self.test)*self.ds(1)
     self.class_bc_abc = class_bc_abc    # to make copies
Example #41
    def test_L2_norm(self):
        asbl_norm = dol.sqrt(dol.assemble(dol.inner(self.dolf_sin, self.dolf_sin)*dol.dx))

        # FLAG THIS! tol isn't *that* close!

        # the L2 norm of sin(x) should be sqrt(pi)
        npt.assert_allclose(asbl_norm, np.sqrt(np.pi), rtol=1e-2, atol=1e-2, \
                                err_msg="Assembled L2 norm check failed")
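A quick check of the expected value, assuming (not stated in the snippet) that self.dolf_sin interpolates sin(x) on a 1D mesh spanning an interval of length 2*pi:

\[
\int_{0}^{2\pi} \sin^{2}x \; dx = \pi
\quad\Longrightarrow\quad
\|\sin\|_{L^{2}} = \sqrt{\pi}.
\]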
Example #42
 def inside(self, x, on_boundary):
     dx = x[0] - xcenter
     dy = x[1] - ycenter
     r = dolfin.sqrt(dx*dx + dy*dy)
     if bccontrol:
         notinbbx = not insidebbox(x)
         return on_boundary and r < radius + bmarg and notinbbx
     else:
         return on_boundary and r < radius + bmarg
Example #43
 def evaluateResidualEstimator(cls, w, coeff_field, pde, f, quadrature_degree= -1):
     """Evaluate residual estimator EGSZ2 (4.1) for all active mu of w."""
     # evaluate residual estimator for all multiindices
     eta_local = MultiVector()
     eta = {}
     for mu in w.active_indices():
         eta[mu], eta_local[mu] = cls._evaluateResidualEstimator(mu, w, coeff_field, pde, f, quadrature_degree)
     global_eta = sqrt(sum([v ** 2 for v in eta.values()]))
     return global_eta, eta, eta_local
Example #44
        def eval_zeta_bar(mu, suppLambda, coeff_field, normw, V, M):
            assert mu in normw.keys()
            zz = 0
#            print "====zeta bar Z1", mu, M
            for m in range(M):
                if m in suppLambda:
                    continue
                _, am_rv = coeff_field[m]
                beta = am_rv.orth_polys.get_beta(mu[m])
                ainfty = get_ainfty(m, V)
                zz += (beta[1] * ainfty) ** 2
            return normw[mu] * sqrt(zz)
Example #45
    def compute(self, get):
        t1 = get("t")
        t0 = get("t", -1)
        dt = Constant(t1 - t0)
        u = get("Velocity")

        hF = self._hF
        hK = self._hK
        scaling = 1.0 / hK
        assemble((dt * sqrt(u**2) / hF)*self._v*scaling*dx(),
                 tensor=self._cfl.vector())

        return self._cfl
Example #46
    def evaluateProjectionError(cls, w, coeff_field, pde, maxh=0.0, local=True, projection_degree_increase=1, refine_mesh=1):
        """Evaluate the projection error according to EGSZ (4.8).

        The global projection error
        ..math::
            \delta_\mu(w_N) := \sum_{m=1}^\infty ||a_m/\overline{a}||_{L^\infty(D)}
            \left\{ \int_D \overline{a}\alpha_{\mu_m+1}^\mu |\nabla(\Pi_{\mu+e_m}^\mu(\Pi_\mu^{\mu+e_m}w_{N,\mu+e_m}))|^2\;dx \right\}^{1/2}
            + \left\{ \int_D \overline{a}\alpha_{\mu_m-1}^\mu |\nabla(\Pi_{\mu-e_m}^\mu(\Pi_\mu^{\mu-e_m}w_{N,\mu-e_m}))|^2\;dx \right\}^{1/2}

        is localised by (6.4)
        ..math::
            \zeta_{\mu,T,m}^{\mu\pm e_m} := ||a_m/\overline{a}||_{L^\infty(D)} \alpha_{\mu_m\pm 1}\int_T | \nabla( \Pi_{\mu\pm e_m}^\mu(\Pi_\mu^{\mu\pm e_m} w_{N,\mu\pm e_m})) - w_{N,\mu\pm e_m} |^2\;dx
        """
        
        if logger.isEnabledFor(logging.DEBUG):
            for mu in w.active_indices():
                logger.debug("[projection error] mesh for %s has %s cells", mu, w[mu]._fefunc.function_space().mesh().num_cells())
    
        global_error = {}
        if local:
            proj_error = MultiVector()
        else:
            proj_error = {}
        Lambda = w.active_indices()
        if len(Lambda) > 1:
            for mu in Lambda:
                maxm = w.max_order
                if len(coeff_field) < maxm:
                    logger.warning("insufficient length of coefficient field for MultiVector (%i < %i)",
                        len(coeff_field), maxm)
                    maxm = len(coeff_field)
                zeta_mu = [cls.evaluateLocalProjectionError(w, mu, m, coeff_field, pde, Lambda, maxh, local, projection_degree_increase, refine_mesh)
                                for m in range(maxm)]
                dmu = sum(zeta_mu)
                if local:
                    proj_error[mu] = FlatVector(dmu)
#                    global_error[mu] = sqrt(sum(e ** 2 for e in dmu))
                    global_error[mu] = sum(sqrt(sum(e ** 2 for e in perr)) for perr in zeta_mu)
                else:
                    proj_error[mu] = dmu
                    global_error = dmu
        else:
            mu = Lambda[0]
            if local:
                proj_error[mu] = FlatVector(np.zeros(w[mu].coeffs.size()))
            else:
                proj_error[mu] = 0
            global_error = {mu: 0}
        return proj_error, global_error
Example #47
                def eval(self, value, x):
                    xvec = x - centvec.flatten()
                    aang = np.arccos(np.dot(xvec, b2base)
                                     / (np.linalg.norm(xvec)
                                        * np.linalg.norm(b2base)))
                    s = aang/extensrad

                    vls = _csf(s, b2normal)
                    value[0], value[1] = vls[0], vls[1]
                    if verbose:
                        dx = x[0] - xcenter
                        dy = x[1] - ycenter
                        r = dolfin.sqrt(dx*dx + dy*dy)
                        print(x - centvec.flatten(), ': s=', s, ': r=', r,
                              ':', np.linalg.norm(np.array(vls)))
Example #48
 def updatePD(self):
     """ Update the parameters.
     parameters should be:
         - k(x) = factor inside TV
         - eps = regularization parameter
         - Vm = FunctionSpace for parameter. 
     ||f||_TV = int k(x) sqrt{|grad f|^2 + eps} dx
     """
     # primal dual variables
     self.Vw = FunctionSpace(self.Vm.mesh(), 'DG', 0)
     self.wH = Function(self.Vw*self.Vw)  # dual variable used in Hessian (re-scaled)
     #self.wH = nabla_grad(self.m)/sqrt(self.fTV) # full Hessian
     self.w = Function(self.Vw*self.Vw)  # dual variable for primal-dual, initialized at 0
     self.dm = Function(self.Vm)
     self.dw = Function(self.Vw*self.Vw)  
     self.testw = TestFunction(self.Vw*self.Vw)
     self.trialw = TrialFunction(self.Vw*self.Vw)
     # investigate convergence of dual variable
     self.dualres = self.w*sqrt(self.fTV) - nabla_grad(self.m)
     self.dualresnorm = inner(self.dualres, self.dualres)*dx
     self.normgraddm = inner(nabla_grad(self.dm), nabla_grad(self.dm))*dx
     # Hessian
     self.wkformPDhess = self.kovsq * ( \
     inner(nabla_grad(self.trial), nabla_grad(self.test)) - \
     0.5*( inner(self.wH, nabla_grad(self.test))*\
     inner(nabla_grad(self.trial), nabla_grad(self.m)) + \
     inner(nabla_grad(self.m), nabla_grad(self.test))*\
     inner(nabla_grad(self.trial), self.wH) ) / sqrt(self.fTV) \
     )*dx
     # update dual variable
     self.Mw = assemble(inner(self.trialw, self.testw)*dx)
     self.rhswwk = inner(-self.w, self.testw)*dx + \
     inner(nabla_grad(self.m)+nabla_grad(self.dm), self.testw) \
     /sqrt(self.fTV)*dx + \
     inner(-inner(nabla_grad(self.m),nabla_grad(self.dm))* \
     self.wH/sqrt(self.fTV), self.testw)*dx
Example #49
def compute_velocity_correction(
    ui, p0, p1, u_bcs, rho, mu, dt, rotational_form, my_dx, tol, verbose
):
    """Compute the velocity correction according to

    .. math::

        U = u_0 - \\frac{dt}{\\rho} \\nabla (p_1-p_0).
    """
    W = ui.function_space()
    P = p1.function_space()

    u = TrialFunction(W)
    v = TestFunction(W)
    a3 = dot(u, v) * my_dx
    phi = Function(P)
    phi.assign(p1)
    if p0:
        phi -= p0
    if rotational_form:
        r = SpatialCoordinate(W.mesh())[0]
        div_ui = 1 / r * (r * ui[0]).dx(0) + ui[1].dx(1)
        phi += mu * div_ui
    L3 = dot(ui, v) * my_dx - dt / rho * (phi.dx(0) * v[0] + phi.dx(1) * v[1]) * my_dx
    u1 = Function(W)
    solve(
        a3 == L3,
        u1,
        bcs=u_bcs,
        solver_parameters={
            "linear_solver": "iterative",
            "symmetric": True,
            "preconditioner": "hypre_amg",
            "krylov_solver": {
                "relative_tolerance": tol,
                "absolute_tolerance": 0.0,
                "maximum_iterations": 100,
                "monitor_convergence": verbose,
            },
        },
    )
    # u = project(ui - k/rho * grad(phi), V)
    # div_u = 1/r * div(r*u)
    r = SpatialCoordinate(W.mesh())[0]
    div_u1 = 1.0 / r * (r * u1[0]).dx(0) + u1[1].dx(1)
    info("||u||_div = {:e}".format(sqrt(assemble(div_u1 * div_u1 * my_dx))))
    return u1
Example #50
def les_setup(u_, mesh, Smagorinsky, CG1Function, nut_krylov_solver, bcs, **NS_namespace):
    """
    Set up for solving Smagorinsky-Lilly LES model.
    """
    DG = FunctionSpace(mesh, "DG", 0)
    CG1 = FunctionSpace(mesh, "CG", 1)
    dim = mesh.geometry().dim()
    delta = Function(DG)
    delta.vector().zero()
    delta.vector().axpy(1.0, assemble(TestFunction(DG)*dx))
    delta.vector().apply('insert')
    Sij = sym(grad(u_))
    magS = sqrt(2*inner(Sij,Sij))    
    nut_form = Smagorinsky['Cs']**2 * delta**2 * magS
    bcs_nut = derived_bcs(CG1, bcs['u0'], u_)
    nut_ = CG1Function(nut_form, mesh, method=nut_krylov_solver, bcs=bcs_nut, bounded=True, name="nut")
    return dict(Sij=Sij, nut_=nut_, delta=delta, bcs_nut=bcs_nut)    
Example #51
File: state.py  Project: getzze/magnum.fe
def _normalize(self, graceful = True):
  if not isinstance(self, Function):
    raise Exception("Cannot normalize '%s'." % type(self))
  f = self.copy(True)
  v = TestFunction(f.function_space())

  # if graceful = True: (0,0,0) -> (1,0,0)
  #              False: (0,0,0) -> (0,0,0)
  fx = 1.0 if graceful else 0.0

  expr = conditional(eq(inner(f, f), 0.0), \
    Constant((fx, 0.0, 0.0)),           \
    f / sqrt(inner(f, f)))

  result = Function(f.function_space())
  assemble(inner(v, expr) * dP, result.vector())
  setattr(self._state, self.name(), result)
Example #52
def les_setup(u_, mesh, Smagorinsky, CG1Function, nut_krylov_solver, bcs, **NS_namespace):
    """
    Set up for solving Smagorinsky-Lilly LES model.
    """
    DG = FunctionSpace(mesh, "DG", 0)
    CG1 = FunctionSpace(mesh, "CG", 1)

    # Compute cell size and put in delta
    delta = Function(DG)
    delta.vector().zero()
    delta.vector().axpy(1.0, assemble(TestFunction(DG) * dx))
    delta.vector().apply("insert")

    # Set up Smagorinsky form
    Sij = sym(grad(u_))
    magS = sqrt(2 * inner(Sij, Sij))
    nut_form = Smagorinsky["Cs"] ** 2 * delta ** 2 * magS
    bcs_nut = derived_bcs(CG1, bcs["u0"], u_)
    nut_ = CG1Function(nut_form, mesh, method=nut_krylov_solver, bcs=bcs_nut, bounded=True, name="nut")
    return dict(Sij=Sij, nut_=nut_, delta=delta, bcs_nut=bcs_nut)
Example #53
 def adaptive(self, mesh, eigv, eigf):
     """Refine mesh based on residual errors."""
     fraction = 0.1
     C = FunctionSpace(mesh, "DG", 0)  # constants on triangles
     w = TestFunction(C)
     h = CellSize(mesh)
     n = FacetNormal(mesh)
     marker = CellFunction("bool", mesh)
      print(len(marker))
     indicators = np.zeros(len(marker))
     for e, u in zip(eigv, eigf):
         errform = avg(h) * jump(grad(u), n) ** 2 * avg(w) * dS \
             + h * (inner(grad(u), n) - Constant(e) * u) ** 2 * w * ds
         if self.degree > 1:
             errform += h ** 2 * div(grad(u)) ** 2 * w * dx
         indicators[:] += assemble(errform).array()  # errors for each cell
      print("Residual error: ", sqrt(sum(indicators) / len(eigv)))
     cutoff = sorted(
         indicators, reverse=True)[
         int(len(indicators) * fraction) - 1]
     marker.array()[:] = indicators > cutoff  # mark worst errors
     mesh = refine(mesh, marker)
     return mesh
Example #54
 def __init__(self, acousticwavePDE, regularization=None):
     """ 
     Input:
         acousticwavePDE should be an instantiation from class AcousticWave
     """
     self.PDE = acousticwavePDE
     self.PDE.exact = None
     self.fwdsource = self.PDE.ftime
     self.MG = Function(self.PDE.Vl)
     self.MGv = self.MG.vector()
     self.Grad = Function(self.PDE.Vl)
     self.Gradv = self.Grad.vector()
     self.srchdir = Function(self.PDE.Vl)
     self.delta_m = Function(self.PDE.Vl)
     LinearOperator.__init__(self, self.MG.vector(), self.MG.vector())
     self.obsop = None   # Observation operator
     self.dd = None  # observations
     if regularization == None:  self.regularization = ZeroRegularization()
     else:   self.regularization = regularization
     self.alpha_reg = 1.0
     # gradient
     self.lamtest, self.lamtrial = TestFunction(self.PDE.Vl), TrialFunction(self.PDE.Vl)
     self.p, self.v = Function(self.PDE.V), Function(self.PDE.V)
     self.wkformgrad = inner(self.lamtest*nabla_grad(self.p), nabla_grad(self.v))*dx
     # incremental rhs
     self.lamhat = Function(self.PDE.Vl)
     self.ptrial, self.ptest = TrialFunction(self.PDE.V), TestFunction(self.PDE.V)
     self.wkformrhsincr = inner(self.lamhat*nabla_grad(self.ptrial), nabla_grad(self.ptest))*dx
     # Hessian
     self.phat, self.vhat = Function(self.PDE.V), Function(self.PDE.V)
     self.wkformhess = inner(self.lamtest*nabla_grad(self.phat), nabla_grad(self.v))*dx \
     + inner(self.lamtest*nabla_grad(self.p), nabla_grad(self.vhat))*dx
     # Mass matrix:
     weak_m =  inner(self.lamtrial,self.lamtest)*dx
     Mass = assemble(weak_m)
     self.solverM = LUSolver()
     self.solverM.parameters['reuse_factorization'] = True
     self.solverM.parameters['symmetric'] = True
     self.solverM.set_operator(Mass)
     # Time-integration factors
     self.factors = np.ones(self.PDE.times.size)
     self.factors[0], self.factors[-1] = 0.5, 0.5
     self.factors *= self.PDE.Dt
     self.invDt = 1./self.PDE.Dt
     # Absorbing BCs
     if self.PDE.abc:
         #TODO: should probably be tested in other situations
         if self.PDE.lumpD:
              print('*** Warning: Damping matrix D is lumped. '
                    'Make sure gradient is consistent.')
         self.vD, self.pD, self.p1D, self.p2D = Function(self.PDE.V), \
         Function(self.PDE.V), Function(self.PDE.V), Function(self.PDE.V)
         self.wkformgradD = inner(0.5*sqrt(self.PDE.rho/self.PDE.lam)\
         *self.pD, self.vD*self.lamtest)*self.PDE.ds(1)
         self.wkformDprime = inner(0.5*sqrt(self.PDE.rho/self.PDE.lam)\
         *self.lamhat*self.ptrial, self.ptest)*self.PDE.ds(1)
         self.dp, self.dph, self.vhatD = Function(self.PDE.V), \
         Function(self.PDE.V), Function(self.PDE.V)
         self.p1hatD, self.p2hatD = Function(self.PDE.V), Function(self.PDE.V)
         self.wkformhessD = inner(-0.25*sqrt(self.PDE.rho)/(self.PDE.lam*sqrt(self.PDE.lam))\
         *self.lamhat*self.dp, self.vD*self.lamtest)*self.PDE.ds(1) \
         + inner(0.5*sqrt(self.PDE.rho/self.PDE.lam)\
         *self.dph, self.vD*self.lamtest)*self.PDE.ds(1)\
         + inner(0.5*sqrt(self.PDE.rho/self.PDE.lam)\
         *self.dp, self.vhatD*self.lamtest)*self.PDE.ds(1)
Example #55
    def _evaluateGlobalMixedEstimator(cls, mu, w, coeff_field, pde, f, quadrature_degree, vectorspace_type='BDM'):
        """Evaluation of global mixed equilibrated estimator."""
        # set quadrature degree
#        quadrature_degree_old = parameters["form_compiler"]["quadrature_degree"]
#        parameters["form_compiler"]["quadrature_degree"] = quadrature_degree
#        logger.debug("residual quadrature order = " + str(quadrature_degree))

        # prepare numerical flux and f
        sigma_mu, f_mu = evaluate_numerical_flux(w, mu, coeff_field, f)

        # ###################
        # ## MIXED PROBLEM ##
        # ###################

        # get setup data for mixed problem
        V = w[mu]._fefunc.function_space()
        mesh = V.mesh()
        degree = element_degree(w[mu]._fefunc)

        # create function spaces
        DG0 = FunctionSpace(mesh, 'DG', 0)
        DG0_dofs = [DG0.dofmap().cell_dofs(c.index())[0] for c in cells(mesh)]
        RT = FunctionSpace(mesh, vectorspace_type, degree)
        W = RT * DG0

        # setup boundary conditions
#        bcs = pde.create_dirichlet_bcs(W.sub(1))

        # debug ===
        # from dolfin import DOLFIN_EPS, DirichletBC
        # def boundary(x):
        #     return x[0] < DOLFIN_EPS or x[0] > 1.0 + DOLFIN_EPS or x[1] < DOLFIN_EPS or x[1] > 1.0 + DOLFIN_EPS
        # bcs = [DirichletBC(W.sub(1), Constant(0.0), boundary)]
        # === debug

        # create trial and test functions
        (sigma, u) = TrialFunctions(W)
        (tau, v) = TestFunctions(W)

        # define variational form
        a_eq = (dot(sigma, tau) + div(tau) * u + div(sigma) * v) * dx
        L_eq = (- f_mu * v + dot(sigma_mu, tau)) * dx

        # compute solution
        w_eq = Function(W)
        solve(a_eq == L_eq, w_eq)
        (sigma_mixed, u_mixed) = w_eq.split()

        # #############################
        # ## EQUILIBRATION ESTIMATOR ##
        # #############################

        # evaluate error estimator
        dg0 = TestFunction(DG0)
        eta_mu = inner(sigma_mu, sigma_mu) * dg0 * dx
        eta_T = assemble(eta_mu, form_compiler_parameters={'quadrature_degree': quadrature_degree})
        eta_T = np.array([sqrt(e) for e in eta_T])

        # evaluate global error
        eta = sqrt(sum(i**2 for i in eta_T))
        # reorder array entries for local estimators
        eta_T = eta_T[DG0_dofs]

        # restore quadrature degree
#        parameters["form_compiler"]["quadrature_degree"] = quadrature_degree_old

        return eta, FlatVector(eta_T)
Example #56
def les_setup(u_, mesh, assemble_matrix, CG1Function, nut_krylov_solver, bcs, **NS_namespace):
    """
    Set up for solving the Germano Dynamic LES model applying
    Lagrangian Averaging.
    """

    # Create function spaces
    CG1 = FunctionSpace(mesh, "CG", 1)
    p, q = TrialFunction(CG1), TestFunction(CG1)
    dim = mesh.geometry().dim()

    # Define delta and project delta**2 to CG1
    delta = pow(CellVolume(mesh), 1. / dim)
    delta_CG1_sq = project(delta, CG1)
    delta_CG1_sq.vector().set_local(delta_CG1_sq.vector().array()**2)
    delta_CG1_sq.vector().apply("insert")

    # Define nut_
    Sij = sym(grad(u_))
    magS = sqrt(2 * inner(Sij, Sij))
    Cs = Function(CG1)
    nut_form = Cs**2 * delta**2 * magS
    # Create nut_ BCs
    ff = MeshFunction("size_t", mesh, mesh.topology().dim() - 1, 0)
    bcs_nut = []
    for i, bc in enumerate(bcs['u0']):
        bc.apply(u_[0].vector())  # Need to initialize bc
        m = bc.markers()  # Get facet indices of boundary
        ff.array()[m] = i + 1
        bcs_nut.append(DirichletBC(CG1, Constant(0), ff, i + 1))
    nut_ = CG1Function(nut_form, mesh, method=nut_krylov_solver,
                       bcs=bcs_nut, bounded=True, name="nut")

    # Create functions for holding the different velocities
    u_CG1 = as_vector([Function(CG1) for i in range(dim)])
    u_filtered = as_vector([Function(CG1) for i in range(dim)])
    dummy = Function(CG1)
    ll = LagrangeInterpolator()

    # Assemble required filter matrices and functions
    G_under = Function(CG1, assemble(TestFunction(CG1) * dx))
    G_under.vector().set_local(1. / G_under.vector().array())
    G_under.vector().apply("insert")
    G_matr = assemble(inner(p, q) * dx)
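    # G_matr is the CG1 mass matrix and G_under holds the reciprocal of the
    # assembled test-function integrals; scaling (G_matr * u) componentwise by
    # G_under therefore gives a local volume average of u, which is the building
    # block of the top-hat test filter applied in the update step (not shown here).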

    # Set up functions for Lij and Mij
    Lij = [Function(CG1) for i in range(dim * dim)]
    Mij = [Function(CG1) for i in range(dim * dim)]
    # Check if case is 2D or 3D and set up uiuj product pairs and
    # Sij forms, assemble required matrices
    Sijcomps = [Function(CG1) for i in range(dim * dim)]
    Sijfcomps = [Function(CG1) for i in range(dim * dim)]
    # Assemble some required matrices for solving for rate of strain terms
    Sijmats = [assemble_matrix(p.dx(i) * q * dx) for i in range(dim)]
    if dim == 3:
        tensdim = 6
        uiuj_pairs = ((0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2))
    else:
        tensdim = 3
        uiuj_pairs = ((0, 0), (0, 1), (1, 1))

    # Set up Lagrange functions
    JLM = Function(CG1)
    JLM.vector()[:] += 1E-32
    JMM = Function(CG1)
    JMM.vector()[:] += 1
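    # JLM and JMM hold the Lagrangian averages of Lij*Mij and Mij*Mij (seeded with
    # tiny/unit values to avoid division by zero). In the Lagrangian-averaged
    # dynamic model the coefficient is then typically recovered in the update step
    # (not shown here) from Cs**2 = JLM / JMM, which nut_form above turns into
    # nut = Cs**2 * delta**2 * |S|.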

    return dict(Sij=Sij, nut_form=nut_form, nut_=nut_, delta=delta, bcs_nut=bcs_nut,
                delta_CG1_sq=delta_CG1_sq, CG1=CG1, Cs=Cs, u_CG1=u_CG1,
                u_filtered=u_filtered, ll=ll, Lij=Lij, Mij=Mij, Sijcomps=Sijcomps,
                Sijfcomps=Sijfcomps, Sijmats=Sijmats, JLM=JLM, JMM=JMM, dim=dim,
                tensdim=tensdim, G_matr=G_matr, G_under=G_under, dummy=dummy,
                uiuj_pairs=uiuj_pairs)
Example #57
0
    def _evaluateResidualEstimator(cls, mu, w, coeff_field, pde, f, quadrature_degree):
        """Evaluate the residual error according to EGSZ (5.7) which consists of volume terms (5.3) and jump terms (5.5).

            .. math:: \eta_{\mu,T}(w_N) &:= h_T || \overline{a}^{-1/2} (f\delta_{\mu,0} + \nabla\overline{a}\cdot\nabla w_{N,\mu}
                                + \sum_{m=1}^\infty \nabla a_m\cdot\nabla( \alpha^m_{\mu_m+1}\Pi_\mu^{\mu+e_m} w_{N,\mu+e_m}
                                - \alpha_{\mu_m}^m w_{N,\mu} + \alpha_{\mu_m-1}^m\Pi_\mu^{\mu-e_m} w_{N,\mu-e_m})) ||_{L^2(T)}\\
          \eta_{\mu,S}(w_N) &:= h_S^{-1/2} || \overline{a}^{-1/2} [(\overline{a}\nabla w_{N,\mu} + \sum_{m=1}^\infty a_m\nabla
                                  ( \alpha_{\mu_m+1}^m\Pi_\mu^{\mu+e_m} w_{N,\mu+e_m} - \alpha_{\mu_m}^m w_{N,\mu}
                                  + \alpha_{\mu_m-1}^m\Pi_\mu^{\mu-e_m} w_{N,\mu-e_m})\cdot\nu] ||_{L^2(S)}
        """
        # set quadrature degree
        quadrature_degree_old = parameters["form_compiler"]["quadrature_degree"]
        parameters["form_compiler"]["quadrature_degree"] = quadrature_degree
        logger.debug("residual quadrature order = " + str(quadrature_degree))
    
        # get pde residual terms
        r_T = pde.volume_residual
        r_E = pde.edge_residual
        r_Nb = pde.neumann_residual
        
        # get mean field of coefficient
        a0_f = coeff_field.mean_func

        # prepare some FEM variables
        V = w[mu]._fefunc.function_space()
        mesh = V.mesh()
        nu = FacetNormal(mesh)

        # initialise volume and edge residual with deterministic part
#        R_T = dot(nabla_grad(a0_f), nabla_grad(w[mu]._fefunc))
        R_T = r_T(a0_f, w[mu]._fefunc)
        if not mu:
            R_T = R_T + f
#        R_E = a0_f * dot(nabla_grad(w[mu]._fefunc), nu)
        R_E = r_E(a0_f, w[mu]._fefunc, nu)
        # get Neumann residual
        homogeneousNBC = mu.order != 0
        R_Nb = r_Nb(a0_f, w[mu]._fefunc, nu, mesh, homogeneous=homogeneousNBC)

        # iterate m
        Lambda = w.active_indices()
        maxm = w.max_order
        if len(coeff_field) < maxm:
            logger.warning("insufficient length of coefficient field for MultiVector (%i < %i)", len(coeff_field), maxm)
            maxm = len(coeff_field)
            #        assert coeff_field.length >= maxm        # ensure coeff_field expansion is sufficiently long
        for m in range(maxm):
            am_f, am_rv = coeff_field[m]

            # prepare polynom coefficients
            beta = am_rv.orth_polys.get_beta(mu[m])

            # mu
            res = -beta[0] * w[mu]

            # mu+1
            mu1 = mu.inc(m)
            if mu1 in Lambda:
                w_mu1 = w.get_projection(mu1, mu)
                res += beta[1] * w_mu1

            # mu-1
            mu2 = mu.dec(m)
            if mu2 in Lambda:
                w_mu2 = w.get_projection(mu2, mu)
                res += beta[-1] * w_mu2

            # add volume contribution for m
#            r_t = dot(nabla_grad(am_f), nabla_grad(res._fefunc))
            R_T = R_T + r_T(am_f, res._fefunc)
            # add edge contribution for m
#            r_e = am_f * dot(nabla_grad(res._fefunc), nu)
            R_E = R_E + r_E(am_f, res._fefunc, nu)

        # prepare more FEM variables for residual assembly
        DG = FunctionSpace(mesh, "DG", 0)
        s = TestFunction(DG)
        h = CellSize(mesh)

        # scaling of residual terms and definition of residual form
        a0_s = a0_f[0] if isinstance(a0_f, tuple) else a0_f     # required for elasticity parameters
        res_form = (h ** 2 * (1 / a0_s) * dot(R_T, R_T) * s * dx
                    + avg(h) * dot(avg(R_E) / avg(a0_s), avg(R_E)) * 2 * avg(s) * dS)
        
        resT = h ** 2 * (1 / a0_s) * dot(R_T, R_T) * s * dx
        resE = 0 * s * dx + avg(h) * dot(avg(R_E) / avg(a0_s), avg(R_E)) * 2 * avg(s) * dS
        resNb = 0 * s * dx
        
        # add Neumann residuals
        if R_Nb is not None:
            for rj, dsj in R_Nb:
                res_form = res_form + h * (1 / a0_s) * dot(rj, rj) * s * dsj
                resNb += h * (1 / a0_s) * dot(rj, rj) * s * dsj

        # FEM evaluate residual on mesh
        eta = assemble(res_form)
        eta_indicator = np.array([sqrt(e) for e in eta])
        # map DG dofs to cell indices
        dofs = [DG.dofmap().cell_dofs(c.index())[0] for c in cells(mesh)]
        eta_indicator = eta_indicator[dofs]
        global_error = sqrt(sum(e for e in eta))

        # debug ---
        if False:
            etaT = assemble(resT)
            etaT_indicator = etaT #np.array([sqrt(e) for e in etaT])
            etaT = sqrt(sum(e for e in etaT))
            etaE = assemble(resE)
            etaE_indicator = etaE #np.array([sqrt(e) for e in etaE])
            etaE = sqrt(sum(e for e in etaE))
            etaNb = assemble(resNb)
            etaNb_indicator = etaNb #np.array([sqrt(e) for e in etaNb])
            etaNb = sqrt(sum(e for e in etaNb))
        
            print "==========RESIDUAL ESTIMATOR============"
            print "eta", eta
            print "eta_indicator", eta_indicator
            print "global =", global_error
            print "volume =", etaT
            print "edge =", etaE
            print "Neumann =", etaNb

            if False:        
                plot_indicators(((eta, "overall residual"), (etaT_indicator, "volume residual"), (etaE_indicator, "edge residual"), (etaNb_indicator, "Neumann residual")), mesh)
        # ---debug
        
        # restore quadrature degree
        parameters["form_compiler"]["quadrature_degree"] = quadrature_degree_old

        return (FlatVector(eta_indicator), global_error)
Example #58
0
def solve_maxwell(V, dx,
                  Mu, Sigma,  # dictionaries
                  omega,
                  f_list,  # list of dictionaries
                  convections,  # dictionary
                  bcs=None,
                  tol=1.0e-12,
                  compute_residuals=True,
                  verbose=False
                  ):
    '''Solve the complex-valued time-harmonic Maxwell system in 2D cylindrical
    coordinates.

    :param V: function space for potentials
    :param dx: measure
    :param Mu: magnetic permeability by subdomain
    :type Mu: dictionary
    :param Sigma: electrical conductivity by subdomain
    :type Sigma: dictionary
    :param omega: current frequency
    :type omega: float
    :param f_list: list of right-hand sides
    :param convections: convection terms by subdomain
    :type convections: dictionary
    :param bcs: Dirichlet boundary conditions
    :param tol: solver tolerance
    :type tol: float
    :param compute_residuals: if True, also assemble and report residuals
    :type compute_residuals: boolean
    :param verbose: solver verbosity
    :type verbose: boolean
    :rtype: list of functions
    '''
    # For the exact solution of the magnetic scalar potential, see
    # <http://www.physics.udel.edu/~jim/PHYS809_10F/Class_Notes/Class_26.pdf>.
    # Here, the value of \phi along the rotational axis is specified as
    #
    #    phi(z) = 2 pi I / c * (z/|z| - z/sqrt(z^2 + a^2))
    #
    # where 'a' is the radius of the coil. This expression contradicts what is
    # specified by [Chaboudez97]_ who claim that phi=0 is the natural value
    # at the symmetry axis.
    #
    # For more analytic expressions, see
    #
    #     Simple Analytic Expressions for the Magnetic Field of a Circular
    #     Current Loop;
    #     James Simpson, John Lane, Christopher Immer, and Robert Youngquist;
    #     <http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20010038494_2001057024.pdf>.
    #
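    # A rough numerical sanity check of the on-axis expression above
    # (hypothetical helper, not used anywhere in this module):
    #
    #     from math import pi, sqrt
    #     def phi_on_axis(z, I, a, c=1.0):
    #         # on-axis potential of a loop of radius a carrying current I
    #         return 2 * pi * I / c * (z / abs(z) - z / sqrt(z**2 + a**2))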

    # Check if boundary conditions on phi are explicitly provided.
    if not bcs:
        # Create Dirichlet boundary conditions.
        # In the cylindrically symmetric formulation, the magnetic vector
        # potential is given by
        #
        #    A = e^{i omega t} phi(r,z) e_{theta}.
        #
        # It is natural to demand phi=0 along the symmetry axis r=0 to avoid
        # discontinuities there.
        # Also, this makes sure that the system is well-defined (see comment
        # below).
        #
        def xzero(x, on_boundary):
            return on_boundary and abs(x[0]) < DOLFIN_EPS
        bcs = DirichletBC(V * V, (0.0, 0.0), xzero)
        #
        # Concerning the boundary conditions for the rest of the system:
        # At the other boundaries, it is not uncommon (?) to set so-called
        # impedance boundary conditions; see, e.g.,
        #
        #    Chaboudez et al.,
        #    Numerical Modeling in Induction Heating for Axisymmetric
        #    Geometries,
        #    IEEE Transactions on Magnetics, vol. 33, no. 1, Jan 1997,
        #    <http://www.esi-group.com/products/casting/publications/Articles_PDF/InductionaxiIEEE97.pdf>.
        #
        # or
        #
        #    <ftp://ftp.math.ethz.ch/pub/sam-reports/reports/reports2010/2010-39.pdf>.
        #
        # TODO review those, references don't seem to be too accurate
        # Those translate into Robin-type boundary conditions (and are in fact
        # sometimes called that, cf.
        # https://en.wikipedia.org/wiki/Robin_boundary_condition).
        # The classical reference is
        #
        #     Impedance boundary conditions for imperfectly conducting
        #     surfaces,
        #     T.B.A. Senior,
        #     <http://link.springer.com/content/pdf/10.1007/BF02920074>.
        #
        #class OuterBoundary(SubDomain):
        #    def inside(self, x, on_boundary):
        #        return on_boundary and abs(x[0]) > DOLFIN_EPS
        #boundaries = FacetFunction('size_t', mesh)
        #boundaries.set_all(0)
        #outer_boundary = OuterBoundary()
        #outer_boundary.mark(boundaries, 1)
        #ds = Measure('ds')[boundaries]
        ##n = FacetNormal(mesh)
        ##a += - 1.0/Mu[i] * dot(grad(r*ur), n) * vr * ds(1) \
        ##     - 1.0/Mu[i] * dot(grad(r*ui), n) * vi * ds(1)
        ##L += - 1.0/Mu[i] * 1.0 * vr * ds(1) \
        ##     - 1.0/Mu[i] * 1.0 * vi * ds(1)
        ## This is -n.grad(r u) = u:
        #a += 1.0/Mu[i] * ur * vr * ds(1) \
        #   + 1.0/Mu[i] * ui * vi * ds(1)

    # Create the system matrix, preconditioner, and the right-hand sides.
    # For preconditioners, there are two approaches. The first one, described
    # in
    #
    #     Algebraic Multigrid for Complex Symmetric Systems;
    #     D. Lahaye, H. De Gersem, S. Vandewalle, and K. Hameyer;
    #     <https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=877730>
    #
    # doesn't work too well here.
    # The matrix P, created in _build_system(), provides a better alternative.
    # For more details, see documentation in _build_system().
    #
    A, P, b_list, M, W = _build_system(V, dx,
                                       Mu, Sigma,  # dictionaries
                                       omega,
                                       f_list,  # list of dicts
                                       convections,  # dict
                                       bcs
                                       )

    #from matplotlib import pyplot as pp
    #rows, cols, values = M.data()
    #from scipy.sparse import csr_matrix
    #M_matrix = csr_matrix((values, cols, rows))
    ##from matplotlib import pyplot as pp
    ###pp.spy(M_matrix, precision=1e-3, marker='.', markersize=5)
    ##pp.spy(M_matrix)
    ##pp.show()
    ## colormap
    #cmap = pp.cm.gray_r
    #M_dense = M_matrix.todense()
    #from matplotlib.colors import LogNorm
    #im = pp.imshow(abs(M_dense), cmap=cmap, interpolation='nearest', norm=LogNorm())
    ##im = pp.imshow(abs(M_dense), cmap=cmap, interpolation='nearest')
    ##im = pp.imshow(abs(A_r), cmap=cmap, interpolation='nearest')
    ##im = pp.imshow(abs(A_i), cmap=cmap, interpolation='nearest')
    #pp.colorbar()
    #pp.show()
    #exit()
    #print A
    #rows, cols, values = A.data()
    #from scipy.sparse import csr_matrix
    #A_matrix = csr_matrix((values, cols, rows))

    ###pp.spy(A_matrix, precision=1e-3, marker='.', markersize=5)
    ##pp.spy(A_matrix)
    ##pp.show()

    ## colormap
    #cmap = pp.cm.gray_r
    #A_dense = A_matrix.todense()
    ##A_r = A_dense[0::2][0::2]
    ##A_i = A_dense[1::2][0::2]
    #cmap.set_bad('r')
    ##im = pp.imshow(abs(A_dense), cmap=cmap, interpolation='nearest', norm=LogNorm())
    #im = pp.imshow(abs(A_dense), cmap=cmap, interpolation='nearest')
    ##im = pp.imshow(abs(A_r), cmap=cmap, interpolation='nearest')
    ##im = pp.imshow(abs(A_i), cmap=cmap, interpolation='nearest')
    #pp.colorbar()
    #pp.show()

    # prepare solver
    solver = KrylovSolver('gmres', 'amg')
    solver.set_operators(A, P)

    # The PDE for A has huge coefficients (order 10^8) all over. Hence, if
    # relative residual is set to 10^-6, the actual residual will still be of
    # the order 10^2. While this isn't too bad (after all the equations are
    # upscaled by a large factor), one can choose a very small relative
    # tolerance here to get a visually pleasing residual norm.
    solver.parameters['relative_tolerance'] = 1.0e-12
    solver.parameters['absolute_tolerance'] = 0.0
    solver.parameters['maximum_iterations'] = 100
    solver.parameters['report'] = verbose
    solver.parameters['monitor_convergence'] = verbose

    phi_list = []
    for k, b in enumerate(b_list):
        with Message('Computing coil ring %d/%d...' % (k + 1, len(b_list))):
            # Define goal functional for adaptivity.
            # Adaptivity not working for subdomains, cf.
            # https://bugs.launchpad.net/dolfin/+bug/872105.
            #(phi_r, phi_i) = split(phi)
            #M = (phi_r*phi_r + phi_i*phi_i) * dx(2)
            phi_list.append(Function(W))
            phi_list[-1].rename('phi%d' % k, 'phi%d' % k)
            solver.solve(phi_list[-1].vector(), b)

        ## Adaptive mesh refinement.
        #_adaptive_mesh_refinement(dx,
        #                          phi_list[-1],
        #                          Mu, Sigma, omega,
        #                          convections,
        #                          f_list[k]
        #                          )
        #exit()

        if compute_residuals:
            # Sanity check: Compute residuals.
            # This is quite the good test that we haven't messed up
            # real/imaginary in the above formulation.
            r_r, r_i = _build_residuals(V, dx, phi_list[-1],
                                        omega, Mu, Sigma,
                                        convections, voltages
                                        )

            def xzero(x, on_boundary):
                return on_boundary and abs(x[0]) < DOLFIN_EPS

            subdomain_indices = Mu.keys()

            # Solve an FEM problem to get the corresponding residual function
            # out.
            # This is exactly what we need here! :)
            u = TrialFunction(V)
            v = TestFunction(V)
            a = zero() * dx(0)
            for i in subdomain_indices:
                a += u * v * dx(i)

            # TODO don't hard code the boundary conditions like this
            R_r = Function(V)
            solve(a == r_r, R_r,
                  bcs=DirichletBC(V, 0.0, xzero)
                  )

            # TODO don't hard code the boundary conditions like this
            R_i = Function(V)
            solve(a == r_i, R_i,
                  bcs=DirichletBC(V, 0.0, xzero)
                  )

            nrm_r = norm(R_r)
            info('||r_r|| = %e' % nrm_r)
            nrm_i = norm(R_i)
            info('||r_i|| = %e' % nrm_i)
            res_norm = sqrt(nrm_r * nrm_r + nrm_i * nrm_i)
            info('||r|| = %e' % res_norm)

            plot(R_r, title='R_r')
            plot(R_i, title='R_i')
            interactive()
            #exit()
    return phi_list
Example #59
0
    def evaluateUpperTailBound(cls, w, coeff_field, pde, maxh=1.0 / 10, add_maxm=10):
        """Estimate upper tail bounds according to Section 3.2."""
        
        @cache
        def get_ainfty(m, V):
            a0_f = coeff_field.mean_func
            if isinstance(a0_f, tuple):
                a0_f = a0_f[0]
            # determine min \overline{a} on D (approximately)
            f = FEniCSVector.from_basis(V, sub_spaces=0)
            f.interpolate(a0_f)
            min_a0 = f.min_val
            am_f, _ = coeff_field[m]
            if isinstance(am_f, tuple):
                am_f = am_f[0]
            # determine ||a_m/\overline{a}||_{L\infty(D)} (approximately)
            try:
                # use exact bounds if defined
                max_am = am_f.max_val
            except AttributeError:
                # otherwise interpolate
                f.interpolate(am_f)
                max_am = f.max_val
            ainftym = max_am / min_a0
            assert isinstance(ainftym, float)
            return ainftym
        
        def prepare_norm_w(energynorm, w):
            normw = {}
            for mu in w.active_indices():
                normw[mu] = energynorm(w[mu]._fefunc)        
            return normw
        
        def LambdaBoundary(Lambda):
            suppLambda = supp(Lambda)
            for mu in Lambda:
                for m in suppLambda:
                    mu1 = mu.inc(m)
                    if mu1 not in Lambda:
                        yield mu1
                        
                    mu2 = mu.dec(m)
                    if mu2 not in Lambda and mu2 is not None:
                        yield mu2

        # evaluate (3.15)
        def eval_zeta_bar(mu, suppLambda, coeff_field, normw, V, M):
            assert mu in normw.keys()
            zz = 0
#            print "====zeta bar Z1", mu, M
            for m in range(M):
                if m in suppLambda:
                    continue
                _, am_rv = coeff_field[m]
                beta = am_rv.orth_polys.get_beta(mu[m])
                ainfty = get_ainfty(m, V)
                zz += (beta[1] * ainfty) ** 2
            return normw[mu] * sqrt(zz)
        
        # evaluate (3.11)
        def eval_zeta(mu, Lambda, coeff_field, normw, V, M=None, this_m=None):
            z = 0
            if this_m is None:
                for m in range(M):
                    _, am_rv = coeff_field[m]
                    beta = am_rv.orth_polys.get_beta(mu[m])
                    ainfty = get_ainfty(m, V)
                    mu1 = mu.inc(m)
                    if mu1 in Lambda:
#                        print "====zeta Z1", ainfty, beta[1], normw[mu1], " == ", ainfty * beta[1] * normw[mu1]
                        z += ainfty * beta[1] * normw[mu1]
                    mu2 = mu.dec(m)
                    if mu2 in Lambda:
#                        print "====zeta Z2", ainfty, beta[-1], normw[mu2], " == ", ainfty * beta[-1] * normw[mu2]
                        z += ainfty * beta[-1] * normw[mu2]
                return z
            else:
                m = this_m
                _, am_rv = coeff_field[m]
                beta = am_rv.orth_polys.get_beta(mu[m])
                ainfty = get_ainfty(m, V)
#                print "====zeta Z3", m, ainfty, beta[1], normw[mu], " == ", ainfty * beta[1] * normw[mu]
                return ainfty * beta[1] * normw[mu]
        
        # prepare some variables
        energynorm = pde.energy_norm
        Lambda = w.active_indices()
        suppLambda = supp(w.active_indices())
#        M = min(w.max_order + add_maxm, len(coeff_field))
        M = w.max_order + add_maxm
        normw = prepare_norm_w(energynorm, w)
        # retrieve (sufficiently fine) function space for maximum norm evaluation
        V = w[Multiindex()].basis.refine_maxh(maxh)[0]
        # evaluate estimator contributions of (3.16)
        from collections import defaultdict
        # === (a) zeta ===
        zeta = defaultdict(int)
        # iterate multiindex extensions
#        print "===A1 Lambda", Lambda
        for nu in LambdaBoundary(Lambda):
            assert nu not in Lambda
#            print "===A2 boundary nu", nu
            zeta[nu] += eval_zeta(nu, Lambda, coeff_field, normw, V, M)
        # === (b) zeta_bar ===
        zeta_bar = {}
        # iterate over active indices
        for mu in Lambda:
            zeta_bar[mu] = eval_zeta_bar(mu, suppLambda, coeff_field, normw, V, M)

        # evaluate summed estimator (3.16)
        global_zeta = sqrt(sum([v ** 2 for v in zeta.values()]) + sum([v ** 2 for v in zeta_bar.values()]))
        # also return zeta evaluation for single m (needed for refinement algorithm)
        eval_zeta_m = lambda mu, m: eval_zeta(mu=mu, Lambda=Lambda, coeff_field=coeff_field, normw=normw, V=V, M=M, this_m=m)
        logger.debug("=== ZETA  %s --- %s --- %s", global_zeta, zeta, zeta_bar)
        return global_zeta, zeta, zeta_bar, eval_zeta_m
Example #60
0
    def evaluateLocalProjectionError(cls, w, mu, m, coeff_field, pde, Lambda, maxh=0.0, local=True, projection_degree_increase=1, refine_mesh=1):
        """Evaluate the local projection error according to EGSZ (6.4).

        Localisation of the global projection error (4.8) by (6.4)
        .. math::
            \zeta_{\mu,T,m}^{\mu\pm e_m} := ||a_m/\overline{a}||_{L^\infty(D)} \alpha_{\mu_m\pm 1}\int_T | \nabla( \Pi_{\mu\pm e_m}^\mu(\Pi_\mu^{\mu\pm e_m} w_{N,\mu\pm e_m})) - w_{N,\mu\pm e_m} |^2\;dx

        The sum :math:`\zeta_{\mu,T,m}^{\mu+e_m} + \zeta_{\mu,T,m}^{\mu-e_m}` is returned.
        """

        # determine ||a_m/\overline{a}||_{L\infty(D)} (approximately)
        a0_f = coeff_field.mean_func
        am_f, _ = coeff_field[m]
        if isinstance(a0_f, tuple):
            assert isinstance(am_f, tuple)
            a0_f = a0_f[0]
            am_f = am_f[0]
        # create discretisation space
        V_coeff, _, _, _ = w[mu].basis.refine_maxh(maxh)
        # interpolate coefficient functions on mesh
        f_coeff = V_coeff.new_vector(sub_spaces=0)
#        print "evaluateLocalProjectionError"
#        print f_coeff.num_sub_spaces
#        print a0_f.value_shape()
        f_coeff.interpolate(a0_f)
        amin = f_coeff.min_val
        f_coeff.interpolate(am_f)
        ammax = f_coeff.max_val
        ainfty = ammax / amin
        assert isinstance(ainfty, float)
        logger.debug("==== local projection error for mu = %s ====", mu)
        logger.debug("amin = %f  amax = %f  ainfty = %f", amin, ammax, ainfty)

        # prepare polynom coefficients
        _, am_rv = coeff_field[m]
        beta = am_rv.orth_polys.get_beta(mu[m])

        # mu+1
        mu1 = mu.inc(m)
        if mu1 in Lambda:
            logger.debug("[LPE-A] local projection error for mu = %s with %s", mu, mu1)

            # debug---
#            if True:
#                from dolfin import Function, inner
#                V1 = w[mu]._fefunc.function_space();
#                ufl = V1.ufl_element();
#                V2 = FunctionSpace(V1.mesh(), ufl.family(), ufl.degree() + 1)
#                f1 = Function(V1)
#                f1.interpolate(w[mu1]._fefunc)
#                f12 = Function(V2)
#                f12.interpolate(f1)
#                f2 = Function(V2)
#                f2.interpolate(w[mu1]._fefunc)
#                err2 = Function(V2, f2.vector() - f12.vector())
#                aerr = a0_f * inner(nabla_grad(err2), nabla_grad(err2)) * dx
#                perr = sqrt(assemble(aerr))
#                logger.info("DEBUG A --- global projection error %s - %s: %s", mu1, mu, perr)
            # ---debug

            # evaluate H1 semi-norm of projection error
            error1, sum_up = w.get_projection_error_function(mu1, mu, 1 + projection_degree_increase, refine_mesh=refine_mesh)
            logger.debug("global projection error norms: L2 = %s and H1 = %s", norm(error1._fefunc, "L2"), norm(error1._fefunc, "H1"))
#            pe = weighted_H1_norm(a0_f, error1, local)  # TODO: this should be the energy error!
#            pe = sum_up(pe)     # summation for cells according to reference mesh refinement
            if local:
                energynorm = pde.get_energy_norm(mesh=error1._fefunc.function_space().mesh())
                pe = energynorm(error1._fefunc)
                pe = np.array([e ** 2 for e in pe])     # square norms
                pe = sum_up(pe)                         # summation for cells according to reference mesh refinement
                pe = np.sqrt(pe)                        # take square root again for summed norm
                logger.debug("summed local projection errors: %s", sqrt(sum([e ** 2 for e in pe])))
#                # DEBUG---                
#                pe2 = weighted_H1_norm(a0_f, error1, local)  # TODO: this should be the energy error!
#                pe2 = np.array([e ** 2 for e in pe2])     # square norms
#                pe2 = sum_up(pe2)                         # summation for cells according to reference mesh refinement
#                pe2 = np.sqrt(pe2)                        # take square root again for summed norm
#                logger.warn("[A] summed local projection errors: %s = %s \t weights: %s = %s", sqrt(sum([e ** 2 for e in pe])), sqrt(sum([e2 ** 2 for e2 in pe2])), a0_f((0, 0)), pde._a0((0, 0)))
#                # ---DEBUG
            else:
                pe = pde.energy_norm(error1._fefunc)
                logger.debug("global projection error: %s", pe)
            zeta1 = beta[1] * pe
        else:
            if local:
                zeta1 = np.zeros(w[mu].basis.mesh.num_cells())
            else:
                zeta1 = 0

        # mu -1
        mu2 = mu.dec(m)
        if mu2 in Lambda:
            logger.debug("[LPE-B] local projection error for mu = %s with %s", mu, mu2)

            # debug---
#            if True:
#                from dolfin import Function, inner
#                V1 = w[mu]._fefunc.function_space();
#                ufl = V1.ufl_element();
#                V2 = FunctionSpace(V1.mesh(), ufl.family(), ufl.degree() + 1)
#                f1 = Function(V1)
#                f1.interpolate(w[mu2]._fefunc)
#                f12 = Function(V2)
#                f12.interpolate(f1)
#                f2 = Function(V2)
#                f2.interpolate(w[mu2]._fefunc)
#                err2 = Function(V2, f2.vector() - f12.vector())
#                aerr = a0_f * inner(nabla_grad(err2), nabla_grad(err2)) * dx
#                perr = sqrt(assemble(aerr))
#                logger.info("DEBUG B --- global projection error %s - %s: %s", mu2, mu, perr)
            # ---debug
            
            # evaluate H1 semi-norm of projection error
            error2, sum_up = w.get_projection_error_function(mu2, mu, 1 + projection_degree_increase, refine_mesh=refine_mesh)
            logger.debug("global projection error norms: L2 = %s and H1 = %s", norm(error2._fefunc, "L2"), norm(error2._fefunc, "H1"))
#            pe = weighted_H1_norm(a0_f, error2, local)  # TODO: this should be the energy error!
#            pe = sum_up(pe)     # summation for cells according to reference mesh refinement
            if local:
                energynorm = pde.get_energy_norm(mesh=error2._fefunc.function_space().mesh())
                pe = energynorm(error2._fefunc)
                pe = np.array([e ** 2 for e in pe])     # square norms
                pe = sum_up(pe)                         # summation for cells according to reference mesh refinement
                pe = np.sqrt(pe)                        # take square root again for summed norm
                logger.debug("summed local projection errors: %s", sqrt(sum([e ** 2 for e in pe])))
#                # DEBUG---
#                from dolfin import plot
##                plot(w[mu]._fefunc)
##                plot(error2._fefunc, interactive=True)                
#                pe2 = weighted_H1_norm(a0_f, error2, local)  # TODO: this should be the energy error!
#                pe2 = np.array([e ** 2 for e in pe2])     # square norms
#                pe2 = sum_up(pe2)                         # summation for cells according to reference mesh refinement
#                pe2 = np.sqrt(pe2)                        # take square root again for summed norm
#                logger.warn("[B] summed local projection errors: %s = %s \t weights: %s = %s", sqrt(sum([e ** 2 for e in pe])), sqrt(sum([e2 ** 2 for e2 in pe2])), a0_f((0, 0)), pde._a0((0, 0)))
#                # ---DEBUG                
            else:
                pe = pde.energy_norm(error2._fefunc)
                logger.debug("global projection error: %s", pe)
            zeta2 = beta[-1] * pe
        else:
            if local:
                zeta2 = np.zeros(w[mu].basis.mesh.num_cells())
            else:
                zeta2 = 0

        logger.debug("beta[-1] = %s  beta[1] = %s  ainfty = %s", beta[-1], beta[1], ainfty)
        zeta = ainfty * (zeta1 + zeta2)
        return zeta