def get_N_form(self):
    try:
        return self.N_form
    except AttributeError:
        pass
    # Set up magnetic field and equivalent electric current forms
    H_r = -curl(self.E_i)/(self.k0*Z0)
    H_i = curl(self.E_r)/(self.k0*Z0)
    J_r = cross(self.n, H_r)
    J_i = cross(self.n, H_i)
    #------------------------------
    # Set up form for far field potential N
    theta_hat = self.theta_hat
    phi_hat = self.phi_hat
    phase = self.phase
    N_r = J_r*dolfin.cos(phase) - J_i*dolfin.sin(phase)
    N_i = J_r*dolfin.sin(phase) + J_i*dolfin.cos(phase)
    # UFL does not seem to like vector valued functionals, so we split the
    # final functional form into theta and phi components
    self.N_form = dict(
        r_theta=dot(theta_hat, N_r)*ds,
        r_phi=dot(phi_hat, N_r)*ds,
        i_theta=dot(theta_hat, N_i)*ds,
        i_phi=dot(phi_hat, N_i)*ds)
    return self.N_form
def neumann_linear_form(self, V, neumann_boundary, g, L=None):
    """Return or add up the linear form L(v) coming from the Neumann boundary."""
    # A linear form is built with the test function of V.
    v = dolfin.TestFunction(V)
    for g_j, ds_j in self.neumann_form_list(neumann_boundary, g, V.mesh()):
        if L is None:
            L = dot(g_j, v) * ds_j
        else:
            L += dot(g_j, v) * ds_j
    return L
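# A minimal, self-contained sketch of the same Neumann contribution with plain
# DOLFIN objects. The neumann_form_list helper above is project-specific, so a
# single scalar flux g and the global ds measure are assumed here.
from dolfin import UnitSquareMesh, FunctionSpace, TestFunction, Constant, assemble, ds

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)
v = TestFunction(V)
g = Constant(2.0)        # assumed Neumann flux datum

L = g * v * ds           # linear form from the Neumann boundary
b = assemble(L)
print(b.sum())           # integral of g over the boundary: 2.0 * 4 = 8.0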
def force(self, state, turbine_field):
    """ Computes the force field over the turbine field.

    :param state: Current solution state.
    :type state: UFL
    :param turbine_field: Turbine friction field
    :type turbine_field: UFL
    """
    # Parenthesise the speed-squared sum so that the friction field scales
    # the full |u|^2, consistent with the power functional below.
    return self.rho * turbine_field * (dot(state[0], state[0]) +
                                       dot(state[1], state[1]))
def power(self, state, turbine_field):
    """ Computes the power field over the domain.

    :param state: Current solution state.
    :type state: UFL
    :param turbine_field: Turbine friction field
    :type turbine_field: UFL
    """
    return (self.rho * turbine_field *
            (dot(state[0], state[0]) + dot(state[1], state[1])) ** 1.5)
def _speed_squared(self, state):
    """ The squared velocity magnitude with turbine cut-in and cut-out speed limits """
    speed_sq = dot(state[0], state[0]) + dot(state[1], state[1])

    if self._cut_in_speed is not None:
        speed_sq *= conditional(speed_sq < self._cut_in_speed**2, self._eps, 1)

    if self._cut_out_speed is not None:
        speed_sq = conditional(speed_sq > self._cut_out_speed**2,
                               self._cut_out_speed**2, speed_sq)

    return speed_sq
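# Hedged sketch (made-up mesh, velocity and cut-out value) of the UFL
# `conditional` clipping used in _speed_squared: the squared speed is capped at
# the cut-out value before being used in a functional.
from dolfin import (UnitSquareMesh, VectorFunctionSpace, FunctionSpace, Expression,
                    interpolate, project, dot, conditional)

mesh = UnitSquareMesh(16, 16)
V = VectorFunctionSpace(mesh, "CG", 1)
u = interpolate(Expression(("x[0]", "2*x[1]"), degree=1), V)

cut_out_speed = 1.5                      # hypothetical cut-out speed
speed_sq = dot(u, u)
speed_sq_capped = conditional(speed_sq > cut_out_speed**2,
                              cut_out_speed**2, speed_sq)

# Project to inspect the capped field; its maximum is close to cut_out_speed**2 = 2.25.
print(project(speed_sq_capped, FunctionSpace(mesh, "CG", 1)).vector().max())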
def weak_F(t, u_t, u, v):
    # Define the differential equation.
    mesh = v.function_space().mesh()
    n = FacetNormal(mesh)
    r = Expression('x[0]', degree=1, cell=triangle)
    # All time-dependent components are evaluated at time t.
    f.t = t
    b.t = t
    kappa.t = t
    F = - inner(b, grad(u)) * v * dx \
        - 1.0 / (rho * cp) * dot(r * kappa * grad(u), grad(v / r)) * dx \
        + 1.0 / (rho * cp) * dot(r * kappa * grad(u), n) * v / r * ds \
        + 1.0 / (rho * cp) * f * v * dx
    return F
def flux_derivative(self, u, coeff):
    a = coeff
    Du = self.differential_op(u)
    Dsigma = dot(nabla_grad(a), Du)
    if element_degree(u) >= 2:
        Dsigma += a * div(Du)
    return Dsigma
def get_stiffness_form(self):
    """Get 'stiffness' / curl . curl matrix form"""
    u = self.trial_function
    v = self.test_function
    mu_r = self.material_functions['mu_r']
    s = dot(curl(v), curl(u))/mu_r*dx
    return s
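# Minimal, self-contained sketch of assembling the same curl-curl 'stiffness'
# form with legacy DOLFIN. The mesh, the Nedelec element order and mu_r = 1 are
# assumptions made only for this example.
from dolfin import (UnitCubeMesh, FunctionSpace, TrialFunction, TestFunction,
                    Constant, curl, dot, dx, assemble)

mesh = UnitCubeMesh(4, 4, 4)
V = FunctionSpace(mesh, "N1curl", 1)     # H(curl)-conforming edge elements
u, v = TrialFunction(V), TestFunction(V)
mu_r = Constant(1.0)                     # assumed relative permeability

S = assemble(dot(curl(v), curl(u)) / mu_r * dx)
print(S.norm("frobenius"))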
def test_pointsource_vector_fs(mesh, point):
    """Tests point source when given constructor PointSource(V, point, mag)
    with a vector for a vector function space that isn't placed at a node for
    1D, 2D and 3D. Global points given to constructor from rank 0 processor.

    """
    rank = MPI.rank(mesh.mpi_comm())
    V = VectorFunctionSpace(mesh, "CG", 1)
    v = TestFunction(V)
    b = assemble(dot(Constant([0.0]*mesh.geometry().dim()), v)*dx)
    if rank == 0:
        ps = PointSource(V, point, 10.0)
    else:
        ps = PointSource(V, [])
    ps.apply(b)

    # Checks array sums to correct value
    b_sum = b.sum()
    assert round(b_sum - 10.0*V.num_sub_spaces()) == 0

    # Checks point source is added to correct part of the array
    v2d = vertex_to_dof_map(V)
    for v in vertices(mesh):
        if near(v.midpoint().distance(point), 0.0):
            for spc_idx in range(V.num_sub_spaces()):
                ind = v2d[v.index()*V.num_sub_spaces() + spc_idx]
                if ind < len(b.get_local()):
                    assert np.round(b.get_local()[ind] - 10.0) == 0
def __init__(self, ui, time_step_method, rho, mu, u, p0, dt, bcs, f, my_dx):
    super(TentativeVelocityProblem, self).__init__()

    W = ui.function_space()
    v = TestFunction(W)

    self.bcs = bcs

    r = SpatialCoordinate(ui.function_space().mesh())[0]

    def me(uu, ff):
        return _momentum_equation(uu, v, p0, ff, rho, mu, my_dx)

    self.F0 = rho * dot(ui - u[0], v) / dt * 2 * pi * r * my_dx
    if time_step_method == "forward euler":
        self.F0 += me(u[0], f[0])
    elif time_step_method == "backward euler":
        self.F0 += me(ui, f[1])
    else:
        assert (
            time_step_method == "crank-nicolson"
        ), "Unknown time stepper '{}'".format(time_step_method)
        self.F0 += 0.5 * (me(u[0], f[0]) + me(ui, f[1]))

    self.jacobian = derivative(self.F0, ui)
    self.reset_sparsity = True
    return
def test_multi_ps_matrix(mesh):
    """Tests point source PointSource(V, source) for multiple point sources
    applied to a matrix for 1D, 2D and 3D. Global points given to constructor
    from rank 0 processor.

    """
    c_ids = [0, 1, 2]
    rank = MPI.rank(mesh.mpi_comm())
    V = VectorFunctionSpace(mesh, "CG", 1, dim=2)
    u, v = TrialFunction(V), TestFunction(V)
    A = assemble(Constant(0.0)*dot(u, v)*dx)

    source = []
    if rank == 0:
        for c_id in c_ids:
            cell = Cell(mesh, c_id)
            point = cell.midpoint()
            source.append((point, 10.0))
    ps = PointSource(V, source)
    ps.apply(A)

    # Checks matrix sums to correct value
    a_sum = MPI.sum(mesh.mpi_comm(), np.sum(A.array()))
    assert round(a_sum - 2*len(c_ids)*10) == 0
def test_pointsource_mixed_space(mesh, point):
    """Tests point source when given constructor PointSource(V, point, mag)
    with a vector for a mixed function space that isn't placed at a node for
    1D, 2D and 3D. Global points given to constructor from rank 0 processor.

    """
    rank = MPI.rank(mesh.mpi_comm())
    ele1 = FiniteElement("CG", mesh.ufl_cell(), 1)
    ele2 = FiniteElement("DG", mesh.ufl_cell(), 2)
    ele3 = VectorElement("CG", mesh.ufl_cell(), 2)
    V = FunctionSpace(mesh, MixedElement([ele1, ele2, ele3]))
    value_dimension = V.element().value_dimension(0)
    v = TestFunction(V)
    b = assemble(dot(Constant([0.0]*value_dimension), v)*dx)

    if rank == 0:
        ps = PointSource(V, point, 10.0)
    else:
        ps = PointSource(V, [])
    ps.apply(b)

    # Checks array sums to correct value
    b_sum = b.sum()
    assert round(b_sum - 10.0*value_dimension) == 0
def divergence_matrix(mesh):
    CR = VectorFunctionSpace(mesh, 'CR', 1)
    DG = FunctionSpace(mesh, 'DG', 0)
    A = cg1_cr_interpolation_matrix(mesh)
    M = assemble(dot(div(TrialFunction(CR)), TestFunction(DG))*dx())
    C = compiled_cr_module.cr_divergence_matrix(M, A, DG, CR)
    return C
def les_update(nut_, nut_form, A_mass, At, u_, dt, bc_ksgs, bt, ksgs_sol,
               KineticEnergySGS, CG1, ksgs, delta, **NS_namespace):
    p, q = TrialFunction(CG1), TestFunction(CG1)

    Ck = KineticEnergySGS["Ck"]
    Ce = KineticEnergySGS["Ce"]

    Sij = sym(grad(u_))
    assemble(dt*inner(dot(u_, 0.5*grad(p)), q)*dx
             + inner((dt*Ce*sqrt(ksgs)/delta)*0.5*p, q)*dx
             + inner(dt*Ck*sqrt(ksgs)*delta*grad(0.5*p), grad(q))*dx, tensor=At)
    assemble(dt*2*Ck*delta*sqrt(ksgs)*inner(Sij, grad(u_))*q*dx, tensor=bt)
    bt.axpy(1.0, A_mass*ksgs.vector())
    bt.axpy(-1.0, At*ksgs.vector())
    At.axpy(1.0, A_mass, True)

    # Solve for ksgs
    bc_ksgs.apply(At, bt)
    ksgs_sol.solve(At, ksgs.vector(), bt)
    ksgs.vector().set_local(ksgs.vector().array().clip(min=1e-7))
    ksgs.vector().apply("insert")

    # Update nut_
    nut_()
def __init__(self, V, u, v, b, kappa, rho, cp, source,
             dirichlet_bcs=[], neumann_bcs={}, robin_bcs={},
             dx=dx, ds=ds):
    super(HeatCylindrical, self).__init__()
    self.dirichlet_bcs = dirichlet_bcs

    r = Expression('x[0]', degree=1, domain=V.mesh())
    self.V = V
    self.dx_multiplier = 2*pi*r

    self.F0 = kappa * r * dot(grad(u), grad(v / (rho * cp))) \
        * 2*pi * dx
    #F -= dot(b, grad(u)) * v * 2*pi*r * dx_workpiece(0)
    if b:
        self.F0 += (b[0] * u.dx(0) + b[1] * u.dx(1)) * v * 2*pi*r * dx

    # Joule heat
    self.F0 -= 1.0 / (rho * cp) * source * v * 2*pi*r * dx

    # Neumann boundary conditions
    for k, nGradT in neumann_bcs.iteritems():
        self.F0 -= r * kappa * nGradT * v / (rho * cp) \
            * 2 * pi * ds(k)

    # Robin boundary conditions
    for k, value in robin_bcs.iteritems():
        alpha, u0 = value
        self.F0 -= r * kappa * alpha * (u - u0) * v / (rho * cp) \
            * 2 * pi * ds(k)
    return
def compute_velocity_correction(
    ui, p0, p1, u_bcs, rho, mu, dt, rotational_form, my_dx, tol, verbose
):
    """Compute the velocity correction according to

    .. math::

        U = u_0 - \\frac{dt}{\\rho} \\nabla (p_1-p_0).
    """
    W = ui.function_space()
    P = p1.function_space()

    u = TrialFunction(W)
    v = TestFunction(W)
    a3 = dot(u, v) * my_dx

    phi = Function(P)
    phi.assign(p1)
    if p0:
        phi -= p0

    if rotational_form:
        r = SpatialCoordinate(W.mesh())[0]
        div_ui = 1 / r * (r * ui[0]).dx(0) + ui[1].dx(1)
        phi += mu * div_ui

    L3 = dot(ui, v) * my_dx - dt / rho * (phi.dx(0) * v[0] + phi.dx(1) * v[1]) * my_dx

    u1 = Function(W)
    solve(
        a3 == L3,
        u1,
        bcs=u_bcs,
        solver_parameters={
            "linear_solver": "iterative",
            "symmetric": True,
            "preconditioner": "hypre_amg",
            "krylov_solver": {
                "relative_tolerance": tol,
                "absolute_tolerance": 0.0,
                "maximum_iterations": 100,
                "monitor_convergence": verbose,
            },
        },
    )

    # u = project(ui - k/rho * grad(phi), V)
    # div_u = 1/r * div(r*u)
    r = SpatialCoordinate(W.mesh())[0]
    div_u1 = 1.0 / r * (r * u1[0]).dx(0) + u1[1].dx(1)
    info("||u||_div = {:e}".format(sqrt(assemble(div_u1 * div_u1 * my_dx))))
    return u1
def weak_F(t, u_t, u, v):
    # All time-dependent components are evaluated at time t.
    u0.t = t
    f.t = t
    F = - 1.0 / (rho * cp) * kappa * dot(grad(u), grad(v)) * dx \
        + 1.0 / (rho * cp) * kappa * (u*u*u*u - u0*u0*u0*u0) * v * ds \
        + 1.0 / (rho * cp) * f * v * dx
    return F
def _get_form(self):
    n = self.function_space.cell().n
    k0 = self.k0
    E_i = self.E_i
    E_r = self.E_r
    mu_r = self._get_mur_function()
    return (1/k0/Z0)*dolfin.dot(
        n, (dolfin.cross(E_r, -dolfin.curl(E_i)/mu_r)
            + dolfin.cross(E_i, dolfin.curl(E_r)/mu_r)))*dolfin.ds
def __init__(self, ui, theta,
             rho, mu,
             u, p0, dt,
             bcs,
             f0, f1,
             stabilization=False,
             dx=dx
             ):
    super(TentativeVelocityProblem, self).__init__()

    W = ui.function_space()
    v = TestFunction(W)

    self.bcs = bcs

    r = Expression('x[0]', degree=1, domain=ui.function_space().mesh())

    #self.F0 = rho * dot(3*ui - 4*u[-1] + u[-2], v) / (2*Constant(dt)) \
    #    * 2*pi*r*dx
    #self.F0 += momentum_equation(ui, v, p0, f1, rho, mu,
    #                             stabilization=stabilization, dx=dx)

    self.F0 = rho * dot(ui - u[-1], v) / Constant(dt) \
        * 2*pi*r*dx
    if abs(theta) > DOLFIN_EPS:
        # Implicit terms.
        if f1 is None:
            raise RuntimeError('Implicit schemes need right-hand side '
                               'at target step (f1).')
        self.F0 += theta \
            * momentum_equation(ui, v, p0, f1, rho, mu,
                                stabilization=stabilization, dx=dx)
    if abs(1.0 - theta) > DOLFIN_EPS:
        # Explicit terms.
        if f0 is None:
            raise RuntimeError('Explicit schemes need right-hand side '
                               'at current step (f0).')
        self.F0 += (1.0 - theta) \
            * momentum_equation(u[-1], v, p0, f0, rho, mu,
                                stabilization=stabilization, dx=dx)

    self.jacobian = derivative(self.F0, ui)
    self.reset_sparsity = True
    return
def _build_residuals(V, dx, phi, omega, Mu, Sigma, convections, voltages):
    #class OuterBoundary(SubDomain):
    #    def inside(self, x, on_boundary):
    #        return on_boundary and abs(x[0]) > DOLFIN_EPS
    #boundaries = FacetFunction('size_t', mesh)
    #boundaries.set_all(0)
    #outer_boundary = OuterBoundary()
    #outer_boundary.mark(boundaries, 1)
    #ds = Measure('ds')[boundaries]

    r = Expression('x[0]', degree=1, domain=V.mesh())

    subdomain_indices = Mu.keys()

    #u = TrialFunction(V)
    v = TestFunction(V)

    # real part
    r_r = zero() * dx(0)
    for i in subdomain_indices:
        r_r += 1.0 / (Mu[i] * r) * dot(grad(r * phi[0]), grad(r * v)) * 2 * pi * dx(i) \
            - omega * Sigma[i] * phi[1] * v * 2 * pi * r * dx(i)
    # convections
    for i, conv in convections.items():
        r_r += dot(conv, grad(r * phi[0])) * v * 2 * pi * dx(i)
    # rhs
    for i, voltage in voltages.items():
        r_r -= Sigma[i] * voltage.real * v * dx(i)
    ## boundaries
    #r_r += 1.0/Mu[i] * phi[0] * v * 2*pi*ds(1)

    # imaginary part
    r_i = zero() * dx(0)
    for i in subdomain_indices:
        r_i += 1.0 / (Mu[i] * r) * dot(grad(r * phi[1]), grad(r * v)) * 2 * pi * dx(i) \
            + omega * Sigma[i] * phi[0] * v * 2 * pi * r * dx(i)
    # convections
    for i, conv in convections.items():
        r_i += dot(conv, grad(r * phi[1])) * v * 2 * pi * dx(i)
    # rhs
    for i, voltage in voltages.items():
        # This contribution belongs to the imaginary-part residual.
        r_i -= Sigma[i] * voltage.imag * v * dx(i)
    ## boundaries
    #r_i += 1.0/Mu[i] * phi[1] * v * 2*pi*ds(1)

    return r_r, r_i
def divergence_matrix(mesh):
    CR = VectorFunctionSpace(mesh, 'CR', 1)
    DG = FunctionSpace(mesh, 'DG', 0)
    A = cg1_cr_interpolation_matrix(mesh)
    M = assemble(dot(div(TrialFunction(CR)), TestFunction(DG))*dx())
    compiled_cr_module.cr_divergence_matrix(M, A, DG, CR)
    M_mat = as_backend_type(M).mat()
    M_mat.matMult(A.mat())
    return M
def compute(self, get):
    u = get("Velocity")

    assemble(dot(u[1].dx(0)-u[0].dx(1), self.q)*dx(), tensor=self.L)
    self.bc.apply(self.L)
    self.solver.solve(self.psi.vector(), self.L)
    #solve(self.A, self.psi.vector(), self.L)

    return self.psi
def flux_derivative(self, u, coeff):
    """First derivative of flux."""
    lmbda, mu = coeff
    Du = self.differential_op(u)
    I = Identity(u.cell().d)
    Dsigma = 2.0 * mu * div(Du) + dot(nabla_grad(lmbda), tr(Du) * I)
    if element_degree(u) >= 2:
        Dsigma += lmbda * div(tr(Du) * I)
    return Dsigma
def _momentum_equation(u, v, p, f, rho, mu, my_dx):
    """Weak form of the momentum equation."""
    # rho and mu are Constant() functions
    assert rho.values()[0] > 0.0
    assert mu.values()[0] > 0.0

    # Skew-symmetric formulation.
    # Don't include the boundary term
    #
    #   - mu * inner(r*grad(u2)*n, v2) * 2*pi*ds.
    #
    # This effectively means that at all boundaries where no sufficient
    # Dirichlet-conditions are posed, we assume grad(u)*n to vanish.
    #
    # The original term
    #     u2[0]/(r*r) * v2[0]
    # doesn't explode iff u2[0]~r and v2[0]~r at r=0. Hence, we need to enforce
    # homogeneous Dirichlet-conditions for n.u at r=0. This corresponds to no
    # flow in normal direction through the symmetry axis -- makes sense.
    # When using the 2*pi*r weighting, we can even be a bit lax on the
    # enforcement of u2[0].
    #
    # For this to be well defined, u[0]/r and u[2]/r must be bounded for r=0,
    # so u[0]~u[2]~r must hold. This either needs to be enforced in the
    # boundary conditions (homogeneous Dirichlet for u[0], u[2] at r=0) or must
    # follow from the dynamics of the system.
    #
    # TODO some more explanation for the following lines of code
    mesh = v.function_space().mesh()
    r = SpatialCoordinate(mesh)[0]
    F = (
        rho * 0.5 * (dot(grad(u) * u, v) - dot(grad(v) * u, u)) * 2 * pi * r * my_dx
        + mu * inner(r * grad(u), grad(v)) * 2 * pi * my_dx
        + mu * u[0] / r * v[0] * 2 * pi * my_dx
        - dot(f, v) * 2 * pi * r * my_dx
    )
    if p:
        F += (p.dx(0) * v[0] + p.dx(1) * v[1]) * 2 * pi * r * my_dx
    if len(u) == 3:
        F += rho * (-u[2] * u[2] * v[0] + u[0] * u[2] * v[2]) * 2 * pi * my_dx
        F += mu * u[2] / r * v[2] * 2 * pi * my_dx
    return F
def get_L_form(self):
    try:
        return self.L_form
    except AttributeError:
        pass
    # Set up equivalent magnetic current forms
    M_r = -cross(self.n, self.E_r)
    M_i = -cross(self.n, self.E_i)
    #------------------------------
    # Set up form for far field potential L
    theta_hat = self.theta_hat
    phi_hat = self.phi_hat
    phase = self.phase
    L_r = M_r*dolfin.cos(phase) - M_i*dolfin.sin(phase)
    L_i = M_r*dolfin.sin(phase) + M_i*dolfin.cos(phase)
    self.L_form = dict(
        r_theta=dot(theta_hat, L_r)*ds,
        r_phi=dot(phi_hat, L_r)*ds,
        i_theta=dot(theta_hat, L_i)*ds,
        i_phi=dot(phi_hat, L_i)*ds)
    return self.L_form
def compute(self, get):
    u = get("Velocity")
    mu = get("DynamicViscosity")
    if isinstance(mu, (float, int)):
        mu = Constant(mu)

    n = self._n
    T = -mu*dot((grad(u) + grad(u).T), n)
    Tn = dot(T, n)
    Tt = T - Tn*n

    tau_form = dot(self.v, Tt)*ds()
    assemble(tau_form, tensor=self.tau.vector())

    #self.b[self._keys] = self.tau.vector()[self._values]
    # FIXME: This is not safe!!!
    get_set_vector(self.b, self._keys, self.tau.vector(), self._values,
                   self._temp_array)

    # Ensure proper scaling
    self.solver.solve(self.tau_boundary.vector(), self.b)

    return self.tau_boundary
def _get_forms(self):
    if self.dirty:
        E_r, E_i, g_r, g_i = self.E_r, self.E_i, self.g_r, self.g_i
        k0 = self.k0
        eps_r = self._get_epsr_function()
        mu_r = self._get_mur_function()
        form_r = (dot(curl(E_r)/mu_r, curl(g_r)) - dot(curl(E_i)/mu_r, curl(g_i))
                  - k0**2*(dot(eps_r*E_r, g_r) - dot(eps_r*E_i, g_i)))*self.dx
        form_i = (dot(curl(E_r)/mu_r, curl(g_i)) + dot(curl(E_i)/mu_r, curl(g_r))
                  - k0**2*(dot(eps_r*E_r, g_i) + dot(eps_r*E_i, g_r)))*self.dx
        self.form_r, self.form_i = form_r, form_i
        self.dirty = False
    return self.form_r, self.form_i
def neumann_residual(self, coeff, v, nu, mesh, homogeneous=False):
    """Neumann boundary residual."""
    form = []
    a = coeff
    boundaries = self.neumann_boundary
    g = self.g
    if boundaries is not None:
        if homogeneous:
            g = zero_function(v.function_space())
        for g_j, ds_j in self.weak_form.neumann_form_list(boundaries, g, mesh):
            r_j = g_j - a * dot(self.weak_form.flux(v, coeff), nu)
            form.append((r_j, ds_j))
    return form
def neumann_residual(self, coeff, v, nu, mesh, homogeneous=False):
    # The coefficient field does not influence the Neumann boundary for Navier-Lame!
    """Neumann boundary residual."""
    form = []
    boundaries = self.neumann_boundary
    g = self.g
    if boundaries is not None:
        if homogeneous:
            g = zero_function(v.function_space())
        for g_j, ds_j in self.weak_form.neumann_form_list(boundaries, g, mesh):
            r_j = g_j - dot(self.weak_form.flux(v, coeff), nu)
            form.append((r_j, ds_j))
    return form
def _residual_strong(dx, v, phi, mu, sigma, omega, conv, voltages):
    '''Get the residual in strong form, projected onto V.
    '''
    r = Expression('x[0]', degree=1, cell=triangle)
    R = [zero() * dx(0),
         zero() * dx(0)]
    subdomain_indices = mu.keys()
    for i in subdomain_indices:
        # diffusion, reaction
        R_r = - div(1 / (mu[i] * r) * grad(r * phi[0])) \
            - sigma[i] * omega * phi[1]
        R_i = - div(1 / (mu[i] * r) * grad(r * phi[1])) \
            + sigma[i] * omega * phi[0]
        # convection
        if i in conv:
            R_r += dot(conv[i], 1 / r * grad(r * phi[0]))
            R_i += dot(conv[i], 1 / r * grad(r * phi[1]))
        # right-hand side
        if i in voltages:
            R_r -= sigma[i] * voltages[i].real / (2 * pi * r)
            R_i -= sigma[i] * voltages[i].imag / (2 * pi * r)
        R[0] += R_r * v * dx(i)
        R[1] += R_i * v * dx(i)
    return R
def run_and_calculate_error(N, dt, tmax, polydeg_rho, last=False): """ Run Ocellaris and return L2 & H1 errors in the last time step """ say(N, dt, tmax, polydeg_rho) # Setup and run simulation sim = Simulation() sim.input.read_yaml('transport.inp') mesh_type = sim.input.get_value('mesh/type') if mesh_type == 'XML': # Create unstructured mesh with gmsh cmd1 = [ 'gmsh', '-string', 'lc = %f;' % (3.14 / N), '-o', 'disc_%d.msh' % N, '-2', '../convergence-variable-density-disk/disc.geo' ] cmd2 = ['dolfin-convert', 'disc_%d.msh' % N, 'disc.xml'] with open('/dev/null', 'w') as devnull: for cmd in (cmd1, cmd2): say(' '.join(cmd)) if ISROOT: subprocess.call(cmd, stdout=devnull, stderr=devnull) elif mesh_type == 'UnitDisc': sim.input.set_value('mesh/N', N // 2) else: sim.input.set_value('mesh/Nx', N) sim.input.set_value('mesh/Ny', N) sim.input.set_value('time/dt', dt) sim.input.set_value('time/tmax', tmax) sim.input.set_value('multiphase_solver/polynomial_degree_rho', polydeg_rho) sim.input.set_value('output/stdout_enabled', False) say('Running with multiphase solver %s ...' % (sim.input.get_value('multiphase_solver/type'))) t1 = time.time() setup_simulation(sim) run_simulation(sim) duration = time.time() - t1 say('DONE') # Interpolate the analytical solution to the same function space Vu = sim.data['Vu'] Vp = sim.data['Vp'] Vr = sim.data['Vrho'] polydeg_r = Vr.ufl_element().degree() vals = dict(t=sim.time, dt=sim.dt) rho_e = dolfin.Expression( sim.input.get_value('initial_conditions/rho_p/cpp_code'), degree=polydeg_r, **vals) rho_a = dolfin.project(rho_e, Vr) rho_e.t = 0 rho_0 = dolfin.project(rho_e, Vr) # Calculate L2 errors err_rho = calc_err(sim.data['rho'], rho_a) # Calculate H1 errors err_rho_H1 = calc_err(sim.data['rho'], rho_a, 'H1') mesh = sim.data['mesh'] n = dolfin.FacetNormal(mesh) reports = sim.reporting.timestep_xy_reports say('Num time steps:', sim.timestep) say('Num cells:', mesh.num_cells()) say('Co_max:', numpy.max(reports['Co'])) say('rho_min went from %r to %r' % (reports['min(rho)'][0], reports['min(rho)'][-1])) say('rho_max went from %r to %r' % (reports['max(rho)'][0], reports['max(rho)'][-1])) m0, m1 = reports['mass'][0], reports['mass'][-1] say('mass error %.3e (%.3e)' % (m1 - m0, (m1 - m0) / m0)) say('vel compat error %.3e' % dolfin.assemble(dolfin.dot(sim.data['u'], n) * dolfin.ds)) int_p = dolfin.assemble(sim.data['p'] * dolfin.dx) say('p*dx', int_p) div_u_Vp = abs( dolfin.project(dolfin.div(sim.data['u']), Vp).vector().get_local()).max() say('div(u)|Vp', div_u_Vp) div_u_Vu = abs( dolfin.project(dolfin.div(sim.data['u']), Vu).vector().get_local()).max() say('div(u)|Vu', div_u_Vu) Vdg0 = dolfin.FunctionSpace(mesh, "DG", 0) div_u_DG0 = abs( dolfin.project(dolfin.div(sim.data['u']), Vdg0).vector().get_local()).max() say('div(u)|DG0', div_u_DG0) Vdg1 = dolfin.FunctionSpace(mesh, "DG", 1) div_u_DG1 = abs( dolfin.project(dolfin.div(sim.data['u']), Vdg1).vector().get_local()).max() say('div(u)|DG1', div_u_DG1) isoparam = mesh.ufl_coordinate_element().degree() > 1 if last and (not isoparam or sim.input.get_value('mesh/type') == 'UnitDisc'): # Plot the results for fa, name in ((rho_a, 'rho'), ): fh = sim.data[name] if isoparam: # Bug in matplotlib plotting for isoparametric elements mesh2 = dolfin.UnitDiscMesh(dolfin.MPI.comm_world, N // 2, 1, 2) ue = fa.function_space().ufl_element() V2 = dolfin.FunctionSpace(mesh2, ue.family(), ue.degree()) fa2, fh2 = dolfin.Function(V2), dolfin.Function(V2) fa2.vector().set_local(fa.vector().get_local()) 
fh2.vector().set_local(fh.vector().get_local()) fa, fh = fa2, fh2 plot(fh - fa, name + ' diff', '%g_%g_%s_diff' % (N, dt, name)) plot(fa, name + ' analytical', '%g_%g_%s_analytical' % (N, dt, name)) plot(fh, name + ' numerical', '%g_%g_%s_numerical' % (N, dt, name)) plot(rho_0, name + ' initial', '%g_%g_%s_initial' % (N, dt, name)) hmin = mesh.hmin() return err_rho, err_rho_H1, hmin, dt, duration
displacements_function = df.Function(displacements_function_space)
v = df.TestFunction(displacements_function_space)
residual_form = get_residual_form(
    displacements_function,
    v,
    density_function,
    density_function_space,
    tractionBC,
    f,
    1)

pde_problem.add_state('displacements', displacements_function, residual_form, 'density')

# Add output-avg_density to the PDE problem:
volume = df.assemble(df.Constant(1.) * df.dx(domain=mesh))
avg_density_form = density_function / (df.Constant(1. * volume)) * df.dx(domain=mesh)
pde_problem.add_scalar_output('avg_density', avg_density_form, 'density')

# Add output-compliance to the PDE problem:
compliance_form = df.dot(f, displacements_function) * dss(6)
pde_problem.add_scalar_output('compliance', compliance_form, 'displacements')

# Add boundary conditions to the PDE problem:
pde_problem.add_bc(
    df.DirichletBC(displacements_function_space,
                   df.Constant((0.0, 0.0)),
                   '(abs(x[0]-0.) < DOLFIN_EPS)'))

# Define the OpenMDAO problem and model
prob = om.Problem()

num_dof_density = pde_problem.inputs_dict['density']['function'].function_space().dim()

comp = om.IndepVarComp()
comp.add_output(
def test_moving_mesh(): t = 0. dt = 0.025 num_steps = 20 xmin, ymin = 0., 0. xmax, ymax = 2., 2. xc, yc = 1., 1. nx, ny = 20, 20 pres = 150 k = 1 mesh = RectangleMesh(Point(xmin, ymin), Point(xmax, ymax), nx, ny) n = FacetNormal(mesh) # Class for mesh motion dU = PeriodicVelocity(xmin, xmax, dt, t, degree=1) Qcg = VectorFunctionSpace(mesh, 'CG', 1) boundaries = MeshFunction("size_t", mesh, mesh.topology().dim()-1) boundaries.set_all(0) leftbound = Left(xmin) leftbound.mark(boundaries, 99) ds = Measure('ds', domain=mesh, subdomain_data=boundaries) # Create function spaces Q_E_Rho = FiniteElement("DG", mesh.ufl_cell(), k) T_1 = FunctionSpace(mesh, 'DG', 0) Qbar_E = FiniteElement("DGT", mesh.ufl_cell(), k) Q_Rho = FunctionSpace(mesh, Q_E_Rho) Qbar = FunctionSpace(mesh, Qbar_E) phih, phih0 = Function(Q_Rho), Function(Q_Rho) phibar = Function(Qbar) # Advective velocity uh = Function(Qcg) uh.assign(Constant((0., 0.))) # Mesh velocity umesh = Function(Qcg) # Total velocity uadvect = uh-umesh # Now throw in the particles x = RandomRectangle(Point(xmin, ymin), Point(xmax, ymax)).generate([pres, pres]) s = assign_particle_values(x, GaussianPulse(center=(xc, yc), sigma=float(0.25), U=[0, 0], time=0., height=1., degree=3)) x = comm.bcast(x, root=0) s = comm.bcast(s, root=0) p = particles(x, [s], mesh) # Define projections problem FuncSpace_adv = {'FuncSpace_local': Q_Rho, 'FuncSpace_lambda': T_1, 'FuncSpace_bar': Qbar} FormsPDE = FormsPDEMap(mesh, FuncSpace_adv, ds=ds) forms_pde = FormsPDE.forms_theta_linear(phih0, uadvect, dt, Constant(1.0), zeta=Constant(0.)) pde_projection = PDEStaticCondensation(mesh, p, forms_pde['N_a'], forms_pde['G_a'], forms_pde['L_a'], forms_pde['H_a'], forms_pde['B_a'], forms_pde['Q_a'], forms_pde['R_a'], forms_pde['S_a'], [], 1) # Initialize the initial condition at mesh by an l2 projection lstsq_rho = l2projection(p, Q_Rho, 1) lstsq_rho.project(phih0.cpp_object()) for step in range(num_steps): # Compute old area at old configuration old_area = assemble(phih0*dx) # Pre-assemble rhs pde_projection.assemble_state_rhs() # Move mesh dU.compute_ubc() umesh.assign(project(dU, Qcg)) ALE.move(mesh, project(dU * dt, Qcg)) dU.update() # Relocate particles as a result of mesh motion # NOTE: if particles were advected themselve, # we had to run update_facets_info() here as well p.relocate() # Assemble left-hand side on new config, but not the right-hand side pde_projection.assemble(True, False) pde_projection.solve_problem(phibar.cpp_object(), phih.cpp_object(), 'mumps', 'none') # Needed to compute conservation, note that there # is an outgoing flux at left boundary new_area = assemble(phih*dx) gamma = conditional(ge(dot(uadvect, n), 0), 0, 1) bflux = assemble((1-gamma) * dot(uadvect, n) * phih * ds) # Update solution assign(phih0, phih) # Put assertion on (global) mass balance, local mass balance is # too time consuming but should pass also assert new_area - old_area + bflux * dt < 1e-12 # Assert that max value of phih stays close to 2 and # min value close to 0. This typically will fail if # we do not do a correct relocate of particles assert np.amin(phih.vector().get_local()) > -0.015 assert np.amax(phih.vector().get_local()) < 1.04
def __init__(self, centers, J, n): self.centers = centers self.J = J self.target = 0.002 self.J /= self.target # dir_path = os.path.dirname(os.path.realpath(__file__)) # with open(os.path.join(dir_path, '../colorio/data/gamut_triangulation.yaml')) as f: # data = yaml.safe_load(f) # self.points = np.column_stack([ # data['points'], np.zeros(len(data['points'])) # ]) # self.cells = np.array(data['cells']) # self.points, self.cells = colorio.xy_gamut_mesh(0.15) self.points, self.cells = meshzoo.triangle( n, corners=np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]) ) # https://bitbucket.org/fenics-project/dolfin/issues/845/initialize-mesh-from-vertices editor = MeshEditor() mesh = Mesh() editor.open(mesh, "triangle", 2, 2) editor.init_vertices(self.points.shape[0]) editor.init_cells(self.cells.shape[0]) for k, point in enumerate(self.points): editor.add_vertex(k, point) for k, cell in enumerate(self.cells): editor.add_cell(k, cell) editor.close() self.V = FunctionSpace(mesh, "CG", 1) self.Vgrad = VectorFunctionSpace(mesh, "DG", 0) # self.ux0 = Function(self.V) # self.uy0 = Function(self.V) # 0 starting guess # ax = np.zeros(self.V.dim()) # ay = np.zeros(self.V.dim()) # Use F(x, y) = (x, y) as starting guess self.ux0 = project(Expression("x[0]", degree=1), self.V) self.uy0 = project(Expression("x[1]", degree=1), self.V) ax = self.ux0.vector().get_local() ay = self.uy0.vector().get_local() # Note that alpha doesn't contain the values in the order that one might expect, # see # <https://www.allanswered.com/post/awevg/projectexpressionx0-v-vector-get_local-not-in-order/>. self.alpha = np.concatenate([ax, ay]) self.num_f_eval = 0 # Build L as scipy.csr_matrix u = TrialFunction(self.V) v = TestFunction(self.V) L = assemble(dot(grad(u), grad(v)) * dx) Lmat = as_backend_type(L).mat() indptr, indices, data = Lmat.getValuesCSR() size = Lmat.getSize() self.L = sparse.csr_matrix((data, indices, indptr), shape=size) self.LT = self.L.getH() self.dx, self.dy = build_grad_matrices(self.V, centers) self.dxT = self.dx.getH() self.dyT = self.dy.getH() return
tol = 1E-14


def boundary_D(x, on_boundary):
    return on_boundary and (near(x[0], 0, tol) or near(x[0], 1.0, tol))


bc = DirichletBC(V, u_D, boundary_D)

u = TrialFunction(V)
v = TestFunction(V)
f = Expression("10 * exp( - (pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2) \
    + pow(x[2] - 0.5, 2)) / 1)", degree=6)
g = Expression("sin(5.0*x[0])*sin(5.0*x[1])", degree=6)
a = dot(grad(u), grad(v)) * dx
L = f * v * dx + g * v * ds

A = PETScMatrix()
b = PETScVector()
assemble_system(a, L, bc, A_tensor=A, b_tensor=b)
A = A.mat()
b = b.vec()

# =========================================================================
# Construct the Alist for systems on levels from fine to coarse
# construct the transfer operators first
ruse = [None] * (nl - 1)
Alist = [None] * (nl)
def Cost(xp): global x_old, rho_old comm = nMPI.COMM_WORLD mpi_rank = comm.Get_rank() x1, x2 = xp rs = 8.0 # radiation boundary radius l = x1 # Patch length w = 4.5 # Patch width s1 = x2 * x1 / 2.0 # Feed offset h = 1.0 # Patch height t = 0.05 # Metal thickness lc = 1.0 # Coax length rc = 0.25 # Coax shield radius cc = 0.107 #Coax center conductor 50 ohm air diel eps = 1.0e-4 tol = 1.0e-6 eta = 377.0 eps_c = 1.0 k0 = 2.45 * 2.0 * np.pi / 30.0 # Frequency in GHz ls = 0.025 lm = 0.8 lw = 0.06 lp = 0.3 if mpi_rank == 0: print("x[0] = {0:<f}, x[1] = {1:<f} ".format(xp[0], xp[1])) print("length = {0:<f}, width = {1:<f}, feed offset = {2:<f}".format(l, w, s1)) gmsh.initialize() gmsh.option.setNumber('General.Terminal', 1) gmsh.model.add("SimplePatchOpt") # Radiation sphere gmsh.model.occ.addSphere(0.0, 0.0, 0.0, rs, 1) gmsh.model.occ.addBox(0.0, -rs, 0.0, rs, 2*rs, rs, 2) gmsh.model.occ.intersect([(3,1)],[(3,2)], 3, removeObject=True, removeTool=True) # Patch gmsh.model.occ.addBox(0.0, -l/2, h, w/2, l, t, 4) # coax center gmsh.model.occ.addCylinder(0.0, s1, -lc, 0.0, 0.0, lc+h, cc, 5, 2.0*np.pi) # patch center gnd via #gmsh.model.occ.addCylinder(0.0, 0.0, 0.0, 0.0, 0.0, h, cc, 6, 2.0*np.pi) # coax shield gmsh.model.occ.addCylinder(0.0, s1, -lc, 0.0, 0.0, lc, rc, 7) gmsh.model.occ.addBox(0.0, s1-rc, -lc, rc, 2.0*rc, lc, 8) gmsh.model.occ.intersect([(3,7)], [(3,8)], 9, removeObject=True, removeTool=True) gmsh.model.occ.fuse([(3,3)], [(3,9)], 10, removeObject=True, removeTool=True) # cutout internal boundaries #gmsh.model.occ.cut([(3,10)], [(3,4),(3,5),(3,6)], 11, removeObject=True, removeTool=True) gmsh.model.occ.cut([(3,10)], [(3,4),(3,5)], 11, removeObject=True, removeTool=True) gmsh.option.setNumber('Mesh.MeshSizeMin', ls) gmsh.option.setNumber('Mesh.MeshSizeMax', lm) gmsh.option.setNumber('Mesh.Algorithm', 6) gmsh.option.setNumber('Mesh.Algorithm3D', 1) # gmsh.option.setNumber('Mesh.MshFileVersion', 2.2) gmsh.option.setNumber('Mesh.MshFileVersion', 4.1) gmsh.option.setNumber('Mesh.Format', 1) gmsh.option.setNumber('Mesh.MinimumCirclePoints', 36) gmsh.option.setNumber('Mesh.CharacteristicLengthFromCurvature', 1) gmsh.model.occ.synchronize() pts = gmsh.model.getEntities(0) gmsh.model.mesh.setSize(pts, lm) #Set background mesh density pts = gmsh.model.getEntitiesInBoundingBox(-eps, -l/2-eps, h-eps, w/2+eps, l/2+eps, h+t+eps) gmsh.model.mesh.setSize(pts, ls) pts = gmsh.model.getEntitiesInBoundingBox(-eps, s1-rc-eps, -lc-eps, rc+eps, s1+rc+eps, h+eps) gmsh.model.mesh.setSize(pts, lw) pts = gmsh.model.getEntitiesInBoundingBox(-eps, -rc-eps, -eps, rc+eps, rc+eps, eps) gmsh.model.mesh.setSize(pts, lw) # Embed points to reduce mesh density on patch faces fce1 = gmsh.model.getEntitiesInBoundingBox(-eps, -l/2-eps, h+t-eps, w/2+eps, l/2+eps, h+t+eps, 2) gmsh.model.occ.synchronize() gmsh.model.geo.addPoint(w/4, -l/4, h+t, lp, 1000) gmsh.model.geo.addPoint(w/4, 0.0, h+t, lp, 1001) gmsh.model.geo.addPoint(w/4, l/4, h+t, lp, 1002) gmsh.model.geo.synchronize() # sf1 = gmsh.model.occ.fragment(fce1, [(0,1000),(0,1001), (0,1002)], -1, True, True) gmsh.model.occ.synchronize() print(fce1) fce2 = gmsh.model.getEntitiesInBoundingBox(-eps, -l/2-eps, h-eps, w/2+eps, l/2+eps, h+eps, 2) gmsh.model.geo.addPoint(w/4, -9*l/32, h, lp, 1003) gmsh.model.geo.addPoint(w/4, 0.0, h, lp, 1004) gmsh.model.geo.addPoint(w/4, 9*l/32, h, lp, 1005) gmsh.model.geo.synchronize() for tt in fce1: gmsh.model.mesh.embed(0, [1000, 1001, 1002], 2, tt[1]) for tt in fce2: gmsh.model.mesh.embed(0, [1003, 1004, 1005], 2, tt[1]) # sf2 = 
gmsh.model.occ.fragment(fce2, [(0,1003),(0,1004), (0,1005)], -1, True, True) print(fce2) gmsh.model.occ.remove(fce1) gmsh.model.occ.remove(fce2) gmsh.model.occ.synchronize() gmsh.model.addPhysicalGroup(3, [11], 1) gmsh.model.setPhysicalName(3, 1, "Air") gmsh.model.mesh.optimize("Relocate3D", niter=5) gmsh.model.mesh.generate(3) gmsh.write("SimplePatch.msh") # gmsh.fltk.run() gmsh.finalize() msh = meshio.read("SimplePatch.msh") for cell in msh.cells: if cell.type == "tetra": tetra_cells = cell.data for key in msh.cell_data_dict["gmsh:physical"].keys(): if key == "tetra": tetra_data = msh.cell_data_dict["gmsh:physical"][key] tetra_mesh = meshio.Mesh(points=msh.points, cells={"tetra": tetra_cells}, cell_data={"VolumeRegions":[tetra_data]}) meshio.write("mesh.xdmf", tetra_mesh) mesh = dolfin.Mesh() with dolfin.XDMFFile("mesh.xdmf") as infile: infile.read(mesh) mvc = dolfin.MeshValueCollection("size_t", mesh, 3) with dolfin.XDMFFile("mesh.xdmf") as infile: infile.read(mvc, "VolumeRegions") cf = dolfin.cpp.mesh.MeshFunctionSizet(mesh, mvc) class PEC(dolfin.SubDomain): def inside(self, x, on_boundary): return on_boundary class InputBC(dolfin.SubDomain): def inside(self, x, on_boundary): return on_boundary and dolfin.near(x[2], -lc, tol) class OutputBC(dolfin.SubDomain): def inside(self, x, on_boundary): rr = np.sqrt(x[0]*x[0]+x[1]*x[1]+x[2]*x[2]) return on_boundary and dolfin.near(rr, 8.0, 1.0e-1) class PMC(dolfin.SubDomain): def inside(self, x, on_boundary): return on_boundary and dolfin.near(x[0], 0.0, tol) # Volume domains dolfin.File("VolSubDomains.pvd").write(cf) dolfin.File("Mesh.pvd").write(mesh) # Mark boundaries sub_domains = dolfin.MeshFunction("size_t", mesh, mesh.topology().dim() - 1) sub_domains.set_all(4) pec = PEC() pec.mark(sub_domains, 0) in_port = InputBC() in_port.mark(sub_domains, 1) out_port = OutputBC() out_port.mark(sub_domains, 2) pmc = PMC() pmc.mark(sub_domains, 3) dolfin.File("BoxSubDomains.pvd").write(sub_domains) # Set up function spaces cell = dolfin.tetrahedron ele_type = dolfin.FiniteElement('N1curl', cell, 2, variant="integral") # H(curl) element for EM V2 = dolfin.FunctionSpace(mesh, ele_type * ele_type) V = dolfin.FunctionSpace(mesh, ele_type) (u_r, u_i) = dolfin.TrialFunctions(V2) (v_r, v_i) = dolfin.TestFunctions(V2) dolfin.info(mesh) #surface integral definitions from boundaries ds = dolfin.Measure('ds', domain = mesh, subdomain_data = sub_domains) #volume regions dx_air = dolfin.Measure('dx', domain = mesh, subdomain_data = cf, subdomain_id = 1) dx_subst = dolfin.Measure('dx', domain = mesh, subdomain_data = cf, subdomain_id = 2) # with source and sink terms u0 = dolfin.Constant((0.0, 0.0, 0.0)) #PEC definition h_src = dolfin.Expression(('-(x[1] - s) / (2.0 * pi * (pow(x[0], 2.0) + pow(x[1] - s,2.0)))', 'x[0] / (2.0 * pi *(pow(x[0],2.0) + pow(x[1] - s,2.0)))', 0.0), degree = 2, s = s1) e_src = dolfin.Expression(('x[0] / (2.0 * pi * (pow(x[0], 2.0) + pow(x[1] - s,2.0)))', 'x[1] / (2.0 * pi *(pow(x[0],2.0) + pow(x[1] - s,2.0)))', 0.0), degree = 2, s = s1) Rrad = dolfin.Expression(('sqrt(x[0] * x[0] + x[1] * x[1] + x[2] * x[2])'), degree = 2) #Boundary condition dictionary boundary_conditions = {0: {'PEC' : u0}, 1: {'InputBC': (h_src)}, 2: {'OutputBC': Rrad}} n = dolfin.FacetNormal(mesh) #Build PEC boundary conditions for real and imaginary parts bcs = [] for i in boundary_conditions: if 'PEC' in boundary_conditions[i]: bc = dolfin.DirichletBC(V2.sub(0), boundary_conditions[i]['PEC'], sub_domains, i) bcs.append(bc) bc = dolfin.DirichletBC(V2.sub(1), 
boundary_conditions[i]['PEC'], sub_domains, i) bcs.append(bc) # Build input BC source term and loading term integral_source = [] integrals_load =[] for i in boundary_conditions: if 'InputBC' in boundary_conditions[i]: r = boundary_conditions[i]['InputBC'] bb1 = 2.0 * (k0 * eta) * dolfin.inner(v_i, dolfin.cross(n, r)) * ds(i) #Factor of two from field equivalence principle integral_source.append(bb1) bb2 = dolfin.inner(dolfin.cross(n, v_i), dolfin.cross(n, u_r)) * k0 * np.sqrt(eps_c) * ds(i) integrals_load.append(bb2) bb2 = dolfin.inner(-dolfin.cross(n, v_r), dolfin.cross(n, u_i)) * k0 * np.sqrt(eps_c) * ds(i) integrals_load.append(bb2) for i in boundary_conditions: if 'OutputBC' in boundary_conditions[i]: r = boundary_conditions[i]['OutputBC'] bb2 = (dolfin.inner(dolfin.cross(n, v_i), dolfin.cross(n, u_r)) * k0 + 1.0 * dolfin.inner(dolfin.cross(n, v_i), dolfin.cross(n, u_i)) / r)* ds(i) integrals_load.append(bb2) bb2 = (dolfin.inner(-dolfin.cross(n, v_r), dolfin.cross(n, u_i)) * k0 + 1.0 * dolfin.inner(dolfin.cross(n, v_r), dolfin.cross(n, u_r)) / r)* ds(i) integrals_load.append(bb2) # for PMC, do nothing. Natural BC. a = (dolfin.inner(dolfin.curl(v_r), dolfin.curl(u_r)) + dolfin.inner(dolfin.curl(v_i), dolfin.curl(u_i)) - eps_c * k0 * k0 * (dolfin.inner(v_r, u_r) + dolfin.inner(v_i, u_i))) * dx_subst + (dolfin.inner(dolfin.curl(v_r), dolfin.curl(u_r)) + dolfin.inner(dolfin.curl(v_i), dolfin.curl(u_i)) - k0 * k0 * (dolfin.inner(v_r, u_r) + dolfin.inner(v_i, u_i))) * dx_air + sum(integrals_load) L = sum(integral_source) u1 = dolfin.Function(V2) vdim = u1.vector().size() print("Solution vector size =", vdim) dolfin.solve(a == L, u1, bcs, solver_parameters = {'linear_solver' : 'mumps'}) u1_r, u1_i = u1.split(True) fp = dolfin.File("EField_r.pvd") fp << u1_r fp = dolfin.File("EField_i.pvd") fp << u1_i #fp = File('WaveFile.pvd') #ut = u1_r.copy(deepcopy=True) #for i in range(50): # ut.vector().zero() # ut.vector().axpy(cos(pi * i / 25.0 + pi / 2.0), u1_i.vector()) # ut.vector().axpy(cos(pi * i / 25.0), u1_r.vector()) # fp << (ut, i) H = dolfin.interpolate(h_src, V) # Get input field P = dolfin.assemble((-dolfin.dot(u1_r,dolfin.cross(dolfin.curl(u1_i),n))+dolfin.dot(u1_i,dolfin.cross(dolfin.curl(u1_r),n))) * ds(2)) P_refl = dolfin.assemble((-dolfin.dot(u1_i,dolfin.cross(dolfin.curl(u1_r), n)) + dolfin.dot(u1_r, dolfin.cross(dolfin.curl(u1_i), n))) * ds(1)) P_inc = dolfin.assemble((dolfin.dot(H, H) * eta / (2.0 * np.sqrt(eps_c))) * ds(1)) print("Integrated power on port 2:", P/(2.0 * k0 * eta)) print("Incident power at port 1:", P_inc) print("Integrated reflected power on port 1:", P_inc - P_refl / (2.0 * k0 * eta)) rho_old = (P_inc - P_refl / (2.0 * k0 * eta)) / P_inc #Fraction of incident power reflected as objective function return rho_old
def j(self, t, u):
    return Problem_Basic.j(self, t,
                           dol.assemble(dol.dot(u, self.v)*dol.dx).get_local())
def k_plus(k, n):
    return dot(dot(n('+'), k('+')), n('+'))
cell = mesh.ufl_cell()
displacement_fe = df.VectorElement("CG", cell, 1)
temperature_fe = df.FiniteElement("CG", cell, 1)

mixed_fs = df.FunctionSpace(mesh, df.MixedElement([displacement_fe, temperature_fe]))
mixed_fs.sub(1).dofmap().dofs()
mixed_function = df.Function(mixed_fs)
displacements_function, temperature_function = df.split(mixed_function)

v, T_hat = df.TestFunctions(mixed_fs)

residual_form = get_residual_form(
    displacements_function,
    v,
    density_function,
    temperature_function,
    T_hat,
    KAPPA,
    K,
    ALPHA)

residual_form -= (df.dot(f_r, v) * dss(10) + df.dot(f_t, v) * dss(14) +
                  q * T_hat * dss(5) + q_half * T_hat * dss(6) + q_quart * T_hat * dss(7))
print("get residual_form-------")

pde_problem.add_state('mixed_states', mixed_function, residual_form, 'density')

'''
4. 3. Add outputs
'''
# Add output-avg_density to the PDE problem:
volume = df.assemble(df.Constant(1.) * df.dx(domain=mesh))
avg_density_form = density_function / (df.Constant(1. * volume)) * df.dx(domain=mesh)
pde_problem.add_scalar_output('avg_density', avg_density_form, 'density')
print("Add output-avg_density-------")

# Add output-compliance to the PDE problem:
def ns_IPCS_convection_supg(VQL, mesh, bcs, dt, parameter): mu, rho, nu, D = parameter V, Q, L = VQL vu, vp, vr = TestFunction(V), TestFunction(Q), TestFunction( L) # for integration u_, p_, r_ = Function(V), Function(Q), Function(L) # for the solution u_1, p_1, r_1 = Function(V), Function(Q), Function( L) # for the prev. solution u, p, r = TrialFunction(V), TrialFunction(Q), TrialFunction(L) # unknown! tau_SUPG = Function(L) bcu = [bcs[0], bcs[1]] # note: no-slip at cylinder wall is no longer used! bcp = [bcs[2]] bcr = [DirichletBC(L, rho, inlet)] # print(h) # print(h.min(), h.max(), h.mean()) # # C++ version (does not work) # ue_code = ''' # //https://fenicsproject.discourse.group/t/pass-function-to-c-expression/1081 # #include <pybind11/pybind11.h> # #include <pybind11/eigen.h> # #include <dolfin/mesh/Vertex.h> # namespace py = pybind11; # #include <dolfin/function/Expression.h> # #include <dolfin/function/Function.h> # #include <dolfin/mesh/Mesh.h> # class SUPG : public dolfin::Expression # { # public: # double D; # // std::shared_ptr<Mesh> mesh; // doesn work # std::shared_ptr<dolfin::Function> velocity; # SUPG(std::shared_ptr<dolfin::Function> u_) : dolfin::Expression(2) # { # velocity = u_; # } # void eval(Eigen::Ref<Eigen::VectorXd> values, # Eigen::Ref<const Eigen::VectorXd> x, # const ufc::cell& cell) const override # { # //Cell cell(*mesh, c.index); # velocity->eval(values, x); // values now holds the velocity? # double tau = 0.0; # //double h = cell.h(); # //double h = cell.diameter(); # double h = 0.015; # double magnitude = 0.0; // pythagoras # for (uint i=0; i<x.size(); ++i) # { # magnitude += values[i] * values[i]; # } # magnitude = sqrt(magnitude); # double Pe = magnitude * h / (2.0 * D); # if (Pe > DOLFIN_EPS) # { # tau = h / (2.0*magnitude) * (1.0/tanh(Pe) - 1.0/Pe); # } # values[0] = tau; # }; # }; # PYBIND11_MODULE(SIGNATURE, m) # { # py::class_<SUPG, std::shared_ptr<SUPG>, dolfin::Expression> # (m, "SUPG") # .def(py::init<std::shared_ptr<dolfin::Function>>()) # .def_readwrite("D", &SUPG::D) # .def_readwrite("velocity", &SUPG::velocity); # } # ''' # compiled = compile_cpp_code(ue_code) # expr = CompiledExpression(compiled.SUPG(u_.cpp_object()), D=D, degree=1) # tau_supg_from_expr = project(expr, mesh=mesh).vector().vec().array # print(tau_supg_from_expr) # class _rho_(UserExpression): # def eval(self, values, x): # if (x[0]-.2)*(x[0]-.2) + (x[1]-.2)*(x[1]-.2) < 0.0025: # values[0] = rho*5 # else: # values[0] = rho # f0 = _rho_(degree=2, element=L.ufl_element()) # r_1.assign(project(f0, mesh=mesh)) x, y = np.split(L.tabulate_dof_coordinates(), 2, 1) x, y = x.ravel(), y.ravel() ll = (x - .2) * (x - .2) + (y - .2) * (y - .2) < 0.0025 # logic list r_1.vector().vec().array = rho r_1.vector().vec().array[ll] = rho * 5 # cells_mapped = np.empty((mesh.num_cells(), 3), dtype=np.int32) # for i in range(mesh.num_cells()): # len(mesh.cells()) # # print(cells_mapped[i], L.dofmap().cell_dofs(i)) # cells_mapped[i] = L.dofmap().cell_dofs(i) # data_mapped = r_1.vector().vec().array # fig, ax = plt.subplots() # ax.tricontourf(x, y, cells_mapped, data_mapped, levels=15) # ax.set_aspect("equal") # plt.suptitle("initial density") # plt.show() n = FacetNormal(mesh) u_mid = (u + u_1) / 2.0 F1 = r_1*dot((u - u_1) / dt, vu)*dx \ + r_1*dot(dot(u_1, nabla_grad(u_1)), vu)*dx \ + inner(sigma(u_mid, p_1, mu), epsilon(vu))*dx \ + dot(p_1*n, vu)*ds - dot(mu*nabla_grad(u_mid)*n, vu)*ds a1 = lhs(F1) L1 = rhs(F1) # Define variational problem for step 2 a2 = dot(nabla_grad(p), nabla_grad(vp)) * dx L2 = 
dot(nabla_grad(p_1), nabla_grad(vp)) * dx - (1 / dt) * div(u_) * vp * dx # Define variational problem for step 3 a3 = dot(u, vu) * dx L3 = dot(u_, vu) * dx - dt * dot(nabla_grad(p_ - p_1), vu) * dx # Step 4: Transport of rho / Convection-diffusion and SUPG vr = vr + tau_SUPG * inner(u_, grad(vr)) # SUPG stabilization r_mid = (r + r_1) / 2.0 F4 = dot((r - r_1) / dt, vr) * dx \ + dot(dot(u_, grad(r_mid)), vr) * dx if D > 0.0: F4 += dot(D * grad(r_mid), grad(vr)) * dx # F4 += beta * dot(dot(u_, grad(r_mid)), dot(u_, grad(vr))) * dx a4 = lhs(F4) L4 = rhs(F4) # Assemble matrices A1 = assemble(a1) A2 = assemble(a2) A3 = assemble(a3) # A4 = assemble(a4) # Apply boundary conditions to matrices [bc.apply(A1) for bc in bcu] [bc.apply(A2) for bc in bcp] return (u_, p_, r_, u_1, p_1, r_1, tau_SUPG, D, L1, a1, L2, A2, L3, A3, L4, a4, bcu, bcp, bcr)
def k_normal(k, n):
    return dot(dot(np.transpose(n), k), n)
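# Hedged usage sketch for the facet helpers above: integrate the normal-normal
# component of an (assumed) anisotropic tensor k over the interior facets of a
# small mesh. k_plus is the helper defined above; the tensor values are made up.
from dolfin import UnitSquareMesh, FacetNormal, Constant, assemble, dS, dot

mesh = UnitSquareMesh(8, 8)
n = FacetNormal(mesh)
k = Constant(((1.0, 0.0), (0.0, 2.0)))   # hypothetical permeability tensor

total = assemble(k_plus(k, n) * dS)      # sum of n.k.n over all interior facets
print(total)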
Vh = dlfn.FunctionSpace(mesh, P1Curl * P1)
# rhs function
jumpM = dlfn.Constant((0.0, 0.0, -1.0))
# trial functions
A, phi = dlfn.TrialFunctions(Vh)
# test functions
delA, psi = dlfn.TestFunctions(Vh)
# geometric objects
n = dlfn.FacetNormal(mesh)
P = dlfn.Identity(dim) - dlfn.outer(n, n)
# Dirichlet boundary condition on exterior surface
bcA = dlfn.DirichletBC(Vh.sub(0), dlfn.Constant((0.0, 0.0, 0.0)),
                       facetIds, bndryId)
bcPhi = dlfn.DirichletBC(Vh.sub(1), dlfn.Constant(0.0),
                         facetIds, bndryId)
# bilinear form
a = dlfn.dot(dlfn.curl(A), dlfn.curl(delA)) * dV() \
    + dlfn.dot(A, dlfn.grad(psi)) * dV() \
    + dlfn.dot(dlfn.grad(phi), delA) * dV()
# rhs form
l = dlfn.dot(dlfn.cross(n("+"), jumpM("+")), delA("+")) * dA(intrfcId)
# compute solution
sol = dlfn.Function(Vh)
lin_problem = dlfn.LinearVariationalProblem(a, l, sol, bcs=[bcA, bcPhi])
lin_solver = dlfn.LinearVariationalSolver(lin_problem)
lin_solver_parameters = lin_solver.parameters
lin_solver.solve()
# sub solutions
solA = sol.sub(0)
solPhi = sol.sub(1)
# output to pvd
pvd_A = dlfn.File("solution-A.pvd")
np.array([1, 2], dtype=np.uintp), theta_p, step) if step == 2: theta_L.assign(theta_next) # Probably can be combined into one file? xdmf_u.write(Uh.sub(0), t) xdmf_p.write(Uh.sub(1), t) del (t1) timer.stop() # Compute errors u_exact.t = t p_exact.t = t u_error = sqrt(assemble(dot(Uh.sub(0) - u_exact, Uh.sub(0) - u_exact) * dx)) p_error = sqrt(assemble(dot(Uh.sub(1) - p_exact, Uh.sub(1) - p_exact) * dx)) udiv = sqrt(assemble(div(Uh.sub(0)) * div(Uh.sub(0)) * dx)) momentum = assemble((dot(Uh.sub(0), ex) + dot(Uh.sub(0), ey)) * dx) if comm.Get_rank() == 0: print("Velocity error " + str(u_error)) print("Pressure error " + str(p_error)) print("Momentum " + str(momentum)) print("Divergence " + str(udiv)) print('Elapsed time ' + str(timer.elapsed()[0])) list_timings(TimingClear.keep, [TimingType.wall])
def F( u, v, kappa, rho, cp, convection, source, r, neumann_bcs, robin_bcs, my_dx, my_ds, stabilization, ): """ Compute .. math:: F(u) = \\int_\\Omega \\kappa r \\langle\\nabla u, \\nabla \\frac{v}{\\rho c_p}\\rangle \\, 2\\pi \\, \\text{d}x + \\int_\\Omega \\langle c, \\nabla u\\rangle v \\, 2\\pi r\\,\\text{d}x - \\int_\\Omega \\frac{1}{\\rho c_p} f v \\, 2\\pi r \\,\\text{d}x\\\\ - \\int_\\Gamma r \\kappa \\langle n, \\nabla T\\rangle v \\frac{1}{\\rho c_p} 2\\pi \\,\\text{d}s - \\int_\\Gamma r \\kappa \\alpha (u - u_0) v \\frac{1}{\\rho c_p} \\, 2\\pi \\,\\text{d}s, used for time-stepping .. math:: u' = F(u). """ rho_cp = rho * cp F0 = kappa * r * dot(grad(u), grad(v / rho_cp)) * 2 * pi * my_dx # F -= dot(b, grad(u)) * v * 2*pi*r * dx_workpiece(0) if convection is not None: c = as_vector([convection[0], convection[1]]) F0 += dot(c, grad(u)) * v * 2 * pi * r * my_dx # Joule heat F0 -= source * v / rho_cp * 2 * pi * r * my_dx # Neumann boundary conditions for k, n_grad_T in neumann_bcs.items(): F0 -= r * kappa * n_grad_T * v / rho_cp * 2 * pi * my_ds(k) # Robin boundary conditions for k, value in robin_bcs.items(): alpha, u0 = value F0 -= r * kappa * alpha * (u - u0) * v / rho_cp * 2 * pi * my_ds(k) if stabilization == "supg": # Add SUPG stabilization. assert convection is not None # TODO u_t? R = (-div(kappa * r * grad(u)) / rho_cp * 2 * pi + dot(c, grad(u)) * 2 * pi * r - source / rho_cp * 2 * pi * r) mesh = v.function_space().mesh() element_degree = v.ufl_element().degree() tau = stab.supg(mesh, convection, kappa, element_degree) F0 += R * tau * dot(convection, grad(v)) * my_dx else: assert stabilization is None return F0
editor.add_cell(k, cell) editor.close() return mesh mesh = UnitSquareMesh(200, 200) # mesh = create_dolfin_mesh(*meshzoo.triangle(1500, corners=[[0, 0], [1, 0], [0, 1]])) V = FunctionSpace(mesh, "CG", 1) u = TrialFunction(V) v = TestFunction(V) n = FacetNormal(mesh) # A = assemble(dot(grad(u), grad(v)) * dx - dot(n, grad(u)) * v * ds) A = assemble(dot(grad(u), grad(v)) * dx - dot(n, grad(u)) * v * ds) M = assemble(u * v * dx) f = Expression("sin(pi * x[0]) * sin(pi * x[1])", element=V.ufl_element()) x = project(f, V) Ax = A * x.vector() Minv_Ax = Function(V).vector() solve(M, Minv_Ax, Ax) val = Ax.inner(Minv_Ax) print(val) # Exact value x = sympy.Symbol("x") y = sympy.Symbol("y")
def __div__(self, other):
    # We use Claas Abert's 'point measure hack' for the vertex-wise operation.
    a = self.coerce_scalar_field(other)
    w = df.TestFunction(self.functionspace)
    v_res = df.assemble(df.dot(self.f / a.f, w) * df.dP)
    return Field(self.functionspace, value=v_res)
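# Hedged sketch of the 'point measure' (dP) trick used above: for a CG1 space,
# assembling dot(f, w)*dP with a test function w returns a vector holding the
# vertex-wise values of f rather than integrals. Mesh and f are assumptions.
import dolfin as df

mesh = df.UnitSquareMesh(4, 4)
V = df.FunctionSpace(mesh, "CG", 1)
f = df.interpolate(df.Expression("x[0] + x[1]", degree=1), V)
w = df.TestFunction(V)

v_res = df.assemble(df.dot(f, w) * df.dP)
print(v_res.get_local()[:5])   # nodal values of f, not weighted integrals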
psi_h.cpp_object(), 'gmres', 'hypre_amg') # Update old solution assign(psi0_h, psi_h) # Store # if step % store_step is 0 or step is 1: output_field << psi_h timer.stop() # Compute error (we should accurately recover initial condition) l2_error = sqrt( abs( assemble( dot(psi_h - psi0_expression, psi_h - psi0_expression) * dx))) # The global mass conservation error should be zero area_end = assemble(psi_h * dx) if comm.Get_rank() == 0: print("l2 error " + str(l2_error)) # Store in error error table num_cells_t = mesh.num_entities_global(2) num_particles = len(x) area_error_end = np.float64((area_end - area_0)) with open(output_table, "a") as write_file: write_file.write( "%-12.5g %-15d %-20d %-10.2e %-20.3g %-20.3g \n" %
def h_u(M, u, reg_u=dl.Constant(dl.DOLFIN_SQRT_EPS)):
    """ Compute the mesh size in the direction u. """
    return dl.sqrt(dl.dot(u, u) / (dl.dot(dl.dot(u, M), u) + reg_u))
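# Hedged sanity check of h_u (all values below are made up): with the metric
# M = diag(1/hx^2, 1/hy^2) and the unit direction u = (1, 0), the directional
# mesh size reduces to hx. Here hx = 0.1, hy = 0.2.
import dolfin as dl

M = dl.Constant(((1.0 / 0.1**2, 0.0), (0.0, 1.0 / 0.2**2)))
u = dl.Constant((1.0, 0.0))
mesh = dl.UnitSquareMesh(1, 1)   # any mesh; only needed to evaluate the UFL expression

val = dl.project(h_u(M, u), dl.FunctionSpace(mesh, "DG", 0))
print(val.vector().get_local()[0])   # approximately 0.1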
def get_spherical(self): """ Transform magnetisation coordinates to spherical coordinates theta = arctan(m_r / m_z) ; m_r = sqrt(m_x ^ 2 + m_y ^ 2) phi = arctan(m_y / m_x) The theta and phi generalised coordinates are stored in self.theta and self.phi respectively. When this function is called, the two dolfin functions are returned """ # Create an scalar Function Space to compute the cylindrical radius (x^2 + y^2) # and the angles phi and theta S1 = df.FunctionSpace(self.functionspace.mesh(), 'CG', 1) # Create a dolfin function from the FS m_r = df.Function(S1) # Compute the radius using the assemble method with dolfin dP # (like a dirac delta to get values on every node of the mesh) # This returns a dolfin vector cyl_vector = df.assemble( df.dot(df.sqrt(self.f[0] * self.f[0] + self.f[1] * self.f[1]), df.TestFunction(S1)) * df.dP, ) # Set the vector values to the dolfin function m_r.vector().set_local(cyl_vector.get_local()) # Now we compute the theta and phi angles to describe the magnetisation # and save them to the coresponding variables self.theta = df.Function(S1) self.phi = df.Function(S1) # We will use the same vector variable than the one used to # compute m_r, in order to save memory # Theta = arctan(m_r / m_z) cyl_vector = df.assemble( df.dot(df.atan_2(m_r, self.f[2]), df.TestFunction(S1)) * df.dP, tensor=cyl_vector) # Instead of: # self.theta.vector().set_local(cyl_vector.get_local()) # We will use: self.theta.vector().axpy(1, cyl_vector) # which adds: 1 * cyl_vector # to self.theta.vector() and is much faster # (we assume self.theta.vector() is empty, i.e. only made of zeros) # See: Fenics Book, page 44 # Phi = arctan(m_y / m_x) cyl_vector = df.assemble( df.dot(df.atan_2(self.f[1], self.f[0]), df.TestFunction(S1)) * df.dP, tensor=cyl_vector) # We will save this line just in case: # self.phi.vector().set_local(cyl_vector.get_local()) self.phi.vector().axpy(1, cyl_vector) return self.theta, self.phi
def h_linear(integrator_type, mesh, subdomains, boundaries, t_start, dt, T, solution0, \ alpha_0, K_0, mu_l_0, lmbda_l_0, Ks_0, \ alpha_1, K_1, mu_l_1, lmbda_l_1, Ks_1, \ alpha, K, mu_l, lmbda_l, Ks, \ cf_0, phi_0, rho_0, mu_0, k_0,\ cf_1, phi_1, rho_1, mu_1, k_1,\ cf, phi, rho, mu, k, \ sigma_v_freeze, dphi_c_dt): # Create mesh and define function space parameters["ghost_mode"] = "shared_facet" # required by dS dx = Measure('dx', domain=mesh, subdomain_data=subdomains) ds = Measure('ds', domain=mesh, subdomain_data=boundaries) dS = Measure('dS', domain=mesh, subdomain_data=boundaries) BDM = FiniteElement("BDM", mesh.ufl_cell(), 1) PDG = FiniteElement("DG", mesh.ufl_cell(), 0) BDM_F = FunctionSpace(mesh, BDM) PDG_F = FunctionSpace(mesh, PDG) W = BlockFunctionSpace([BDM_F, PDG_F], restrict=[None, None]) TM = TensorFunctionSpace(mesh, 'DG', 0) PM = FunctionSpace(mesh, 'DG', 0) n = FacetNormal(mesh) vc = CellVolume(mesh) fc = FacetArea(mesh) h = vc / fc h_avg = (vc('+') + vc('-')) / (2 * avg(fc)) I = Identity(mesh.topology().dim()) monitor_dt = dt p_outlet = 0.1e6 p_inlet = 1000.0 M_inv = phi_0 * cf + (alpha - phi_0) / Ks # Define variational problem trial = BlockTrialFunction(W) dv, dp = block_split(trial) trial_dot = BlockTrialFunction(W) dv_dot, dp_dot = block_split(trial_dot) test = BlockTestFunction(W) psiv, psip = block_split(test) block_w = BlockFunction(W) v, p = block_split(block_w) block_w_dot = BlockFunction(W) v_dot, p_dot = block_split(block_w_dot) a_time = Constant(0.0) * inner(v_dot, psiv) * dx #quasi static # k is a function of phi #k = perm_update_rutqvist_newton(p,p0,phi0,phi,coeff) lhs_a = inner(dot(v, mu * inv(k)), psiv) * dx - p * div( psiv ) * dx #+ 6.0*inner(psiv,n)*ds(2) # - inner(gravity*(rho-rho0), psiv)*dx b_time = (M_inv + pow(alpha, 2.) 
/ K) * p_dot * psip * dx lhs_b = div(v) * psip * dx #div(rho*v)*psip*dx #TODO rho rhs_v = -p_outlet * inner(psiv, n) * ds(3) rhs_p = -alpha / K * sigma_v_freeze * psip * dx - dphi_c_dt * psip * dx r_u = [lhs_a, lhs_b] j_u = block_derivative(r_u, block_w, trial) r_u_dot = [a_time, b_time] j_u_dot = block_derivative(r_u_dot, block_w_dot, trial_dot) r = [r_u_dot[0] + r_u[0] - rhs_v, \ r_u_dot[1] + r_u[1] - rhs_p] def bc(t): #bc_v = [DirichletBC(W.sub(0), (.0, .0), boundaries, 4)] v1 = DirichletBC(W.sub(0), (1.e-4 * 2.0, 0.0), boundaries, 1) v2 = DirichletBC(W.sub(0), (0.0, 0.0), boundaries, 2) v4 = DirichletBC(W.sub(0), (0.0, 0.0), boundaries, 4) bc_v = [v1, v2, v4] return BlockDirichletBC([bc_v, None]) # Define problem wrapper class ProblemWrapper(object): def set_time(self, t): pass #g.t = t # Residual and jacobian functions def residual_eval(self, t, solution, solution_dot): #print(as_backend_type(assemble(p_time - p_time_error)).vec().norm()) #print("gravity effect", as_backend_type(assemble(inner(gravity*(rho-rho0), psiv)*dx)).vec().norm()) return r def jacobian_eval(self, t, solution, solution_dot, solution_dot_coefficient): return [[Constant(solution_dot_coefficient)*j_u_dot[0, 0] + j_u[0, 0], \ Constant(solution_dot_coefficient)*j_u_dot[0, 1] + j_u[0, 1]], \ [Constant(solution_dot_coefficient)*j_u_dot[1, 0] + j_u[1, 0], \ Constant(solution_dot_coefficient)*j_u_dot[1, 1] + j_u[1, 1]]] # Define boundary condition def bc_eval(self, t): return bc(t) # Define initial condition def ic_eval(self): return solution0 # Define custom monitor to plot the solution def monitor(self, t, solution, solution_dot): pass # Solve the time dependent problem problem_wrapper = ProblemWrapper() (solution, solution_dot) = (block_w, block_w_dot) solver = TimeStepping(problem_wrapper, solution, solution_dot) solver.set_parameters({ "initial_time": t_start, "time_step_size": dt, "monitor": { "time_step_size": monitor_dt, }, "final_time": T, "exact_final_time": "stepover", "integrator_type": integrator_type, "problem_type": "linear", "linear_solver": "mumps", "report": True }) export_solution = solver.solve() return export_solution, T
def equilibrium_EC(w_, x_, test_functions, solutes, permittivity, mesh, dx, ds, normal, dirichlet_bcs, neumann_bcs, boundary_to_mark, use_iterative_solvers, c_lagrange, V_lagrange, **namespace): """ Electrochemistry equilibrium solver. Nonlinear! """ num_solutes = len(solutes) cV = df.split(w_["EC"]) c, V = cV[:num_solutes], cV[num_solutes] if c_lagrange: c0, V0 = cV[num_solutes + 1:2 * num_solutes + 1], cV[2 * num_solutes + 1] if V_lagrange: V0 = cV[-1] b = test_functions["EC"][:num_solutes] U = test_functions["EC"][num_solutes] if c_lagrange: b0, U0 = cV[num_solutes + 1:2 * num_solutes + 1], cV[2 * num_solutes + 1] if V_lagrange: U0 = test_functions["EC"][-1] phi = x_["phi"] q = [] sum_zx = sum([solute[1] * xj for solute, xj in zip(solutes, composition)]) for solute, xj in zip(solutes, composition): q.append(-xj * Q / (area * sum_zx)) z = [] # Charge z[species] K = [] # Diffusivity K[species] beta = [] for solute in solutes: z.append(solute[1]) K.append(ramp(phi, solute[2:4])) beta.append(ramp(phi, solute[4:6])) rho_e = sum([c_e * z_e for c_e, z_e in zip(c, z)]) veps = ramp(phi, permittivity) F_c = [] for ci, bi, c0i, b0i, solute, qi, betai, Ki in zip(c, b, c0, b0, solutes, q, beta, K): zi = solute[1] F_ci = Ki * (df.dot( df.grad(bi), df.grad(ci) + df.grad(betai) + zi * ci * df.grad(V))) * dx if c_lagrange: F_ci += b0i * (ci - df.Constant(qi)) * dx + c0i * bi * dx F_V = veps * df.dot(df.grad(U), df.grad(V)) * dx for boundary_name, sigma_e in neumann_bcs["V"].iteritems(): F_V += -sigma_e * U * ds(boundary_to_mark[boundary_name]) if rho_e != 0: F_V += -rho_e * U * dx if V_lagrange: F_V += V0 * U * dx + V * U0 * dx F = sum(F_c) + F_V J = df.derivative(F, w_["EC"]) problem = df.NonlinearVariationalProblem(F, w_["EC"], dirichlet_bcs["EC"], J) solver = df.NonlinearVariationalSolver(problem) solver.parameters["newton_solver"]["relative_tolerance"] = 1e-7 if use_iterative_solvers: solver.parameters["newton_solver"]["linear_solver"] = "bicgstab" if not V_lagrange: solver.parameters["newton_solver"]["preconditioner"] = "hypre_amg" solver.solve()
def setup_NS(w_NS, u, p, v, q, p0, q0,
             dx, ds, normal,
             dirichlet_bcs, neumann_bcs, boundary_to_mark,
             u_1, phi_, rho_, rho_1, g_, M_, mu_, rho_e_,
             c_, V_, c_1, V_1, dbeta, solutes,
             per_tau, drho, sigma_bar, eps, dveps, grav,
             enable_PF, enable_EC,
             use_iterative_solvers, use_pressure_stabilization,
             p_lagrange, q_rhs):
    """ Set up the Navier-Stokes subproblem. """
    # F = (
    #     per_tau * rho_ * df.dot(u - u_1, v)*dx
    #     + rho_*df.inner(df.grad(u), df.outer(u_1, v))*dx
    #     + 2*mu_*df.inner(df.sym(df.grad(u)), df.grad(v))*dx
    #     - p * df.div(v)*dx
    #     + df.div(u)*q*dx
    #     - df.dot(rho_*grav, v)*dx
    # )
    mom_1 = rho_1 * u_1
    if enable_PF:
        mom_1 += -M_ * drho * df.nabla_grad(g_)

    F = (per_tau * rho_1 * df.dot(u - u_1, v) * dx
         + 2 * mu_ * df.inner(df.sym(df.nabla_grad(u)),
                              df.sym(df.nabla_grad(v))) * dx
         - p * df.div(v) * dx
         - q * df.div(u) * dx
         + df.inner(df.nabla_grad(u), df.outer(mom_1, v)) * dx
         + 0.5 * (per_tau * (rho_ - rho_1) * df.dot(u, v)
                  - df.dot(mom_1, df.nabla_grad(df.dot(u, v)))) * dx
         - rho_ * df.dot(grav, v) * dx)

    for boundary_name, slip_length in neumann_bcs["u"].items():
        F += 1. / slip_length * \
            df.dot(u, v) * ds(boundary_to_mark[boundary_name])

    for boundary_name, pressure in neumann_bcs["p"].items():
        F += pressure * df.inner(normal, v) * ds(
            boundary_to_mark[boundary_name])

    if enable_PF:
        F += phi_ * df.dot(df.grad(g_), v) * dx

    if enable_EC:
        for ci_, ci_1, dbetai, solute in zip(c_, c_1, dbeta, solutes):
            zi = solute[1]
            F += df.dot(df.grad(ci_), v) * dx \
                + zi * ci_1 * df.dot(df.grad(V_), v) * dx
            if enable_PF:
                F += ci_ * dbetai * df.dot(df.grad(phi_), v) * dx

    if p_lagrange:
        F += (p * q0 + q * p0) * dx

    if "u" in q_rhs:
        F += -df.dot(q_rhs["u"], v) * dx

    a, L = df.lhs(F), df.rhs(F)

    problem = df.LinearVariationalProblem(a, L, w_NS, dirichlet_bcs)
    solver = df.LinearVariationalSolver(problem)

    if use_iterative_solvers and use_pressure_stabilization:
        solver.parameters["linear_solver"] = "gmres"
        # solver.parameters["preconditioner"] = "ilu"

    return solver
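# setup_NS wraps its weak form in a dolfin LinearVariationalProblem/Solver
# pair via df.lhs/df.rhs and only switches to an iterative Krylov solver in
# the stabilized case. A small, self-contained sketch of that same pattern on
# a Poisson problem (illustrative only; not part of the original solver):
import dolfin as df


def _linear_solver_pattern_demo():
    mesh = df.UnitSquareMesh(16, 16)
    V = df.FunctionSpace(mesh, "CG", 1)
    u, v = df.TrialFunction(V), df.TestFunction(V)
    F = df.dot(df.grad(u), df.grad(v)) * df.dx - df.Constant(1.0) * v * df.dx
    a, L = df.lhs(F), df.rhs(F)          # split into bilinear/linear parts
    w = df.Function(V)
    bc = df.DirichletBC(V, df.Constant(0.0), "on_boundary")
    problem = df.LinearVariationalProblem(a, L, w, [bc])
    solver = df.LinearVariationalSolver(problem)
    solver.parameters["linear_solver"] = "gmres"   # same switch as above
    solver.solve()
    return w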
def run_and_calculate_error(N, dt, tmax, polydeg_u, polydeg_p, nu, last=False): """ Run Ocellaris and return L2 & H1 errors in the last time step """ say(N, dt, tmax, polydeg_u, polydeg_p) # Setup and run simulation timingtypes = [ dolfin.TimingType.user, dolfin.TimingType.system, dolfin.TimingType.wall ] dolfin.timings(dolfin.TimingClear_clear, timingtypes) sim = Simulation() sim.input.read_yaml('disc.inp') mesh_type = sim.input.get_value('mesh/type') if mesh_type == 'XML': # Create unstructured mesh with gmsh cmd1 = [ 'gmsh', '-string', 'lc = %f;' % (3.14 / N), '-o', 'disc_%d.msh' % N, '-2', 'disc.geo' ] cmd2 = ['dolfin-convert', 'disc_%d.msh' % N, 'disc.xml'] with open('/dev/null', 'w') as devnull: for cmd in (cmd1, cmd2): say(' '.join(cmd)) subprocess.call(cmd, stdout=devnull, stderr=devnull) elif mesh_type == 'UnitDisc': sim.input.set_value('mesh/N', N // 2) else: sim.input.set_value('mesh/Nx', N) sim.input.set_value('mesh/Ny', N) sim.input.set_value('time/dt', dt) sim.input.set_value('time/tmax', tmax) sim.input.set_value('solver/polynomial_degree_velocity', polydeg_u) sim.input.set_value('solver/polynomial_degree_pressure', polydeg_p) sim.input.set_value('physical_properties/nu', nu) sim.input.set_value('output/stdout_enabled', False) say('Running with %s %s solver ...' % (sim.input.get_value('solver/type'), sim.input.get_value('solver/function_space_velocity'))) t1 = time.time() setup_simulation(sim) run_simulation(sim) duration = time.time() - t1 say('DONE') # Interpolate the analytical solution to the same function space Vu = sim.data['Vu'] Vp = sim.data['Vp'] Vr = sim.data['Vrho'] polydeg_r = Vr.ufl_element().degree() vals = dict(t=sim.time, dt=sim.dt, Q=sim.input.get_value('user_code/constants/Q')) rho_e = dolfin.Expression( sim.input.get_value('initial_conditions/rho_p/cpp_code'), degree=polydeg_r, **vals) u0e = dolfin.Expression( sim.input.get_value('initial_conditions/up0/cpp_code'), degree=polydeg_u, **vals) u1e = dolfin.Expression( sim.input.get_value('initial_conditions/up1/cpp_code'), degree=polydeg_u, **vals) pe = dolfin.Expression( sim.input.get_value('initial_conditions/p/cpp_code'), degree=polydeg_p, **vals) rho_a = dolfin.project(rho_e, Vr) u0a = dolfin.project(u0e, Vu) u1a = dolfin.project(u1e, Vu) pa = dolfin.project(pe, Vp) mesh = sim.data['mesh'] n = dolfin.FacetNormal(mesh) # Correct for possible non-zero average p int_p = dolfin.assemble(sim.data['p'] * dolfin.dx) int_pa = dolfin.assemble(pa * dolfin.dx) vol = dolfin.assemble(dolfin.Constant(1.0) * dolfin.dx(domain=mesh)) pa_avg = int_pa / vol sim.data['p'].vector()[:] += pa_avg # Calculate L2 errors err_rho = calc_err(sim.data['rho'], rho_a) err_u0 = calc_err(sim.data['u0'], u0a) err_u1 = calc_err(sim.data['u1'], u1a) err_p = calc_err(sim.data['p'], pa) # Calculate H1 errors err_rho_H1 = calc_err(sim.data['rho'], rho_a, 'H1') err_u0_H1 = calc_err(sim.data['u0'], u0a, 'H1') err_u1_H1 = calc_err(sim.data['u1'], u1a, 'H1') err_p_H1 = calc_err(sim.data['p'], pa, 'H1') reports = sim.reporting.timestep_xy_reports say('Num time steps:', sim.timestep) say('Num cells:', mesh.num_cells()) Co_max, Pe_max = numpy.max(reports['Co']), numpy.max(reports['Pe']) say('Co_max:', Co_max) say('Pe_max:', Pe_max) say('rho_min went from %r to %r' % (reports['min(rho)'][0], reports['min(rho)'][-1])) say('rho_max went from %r to %r' % (reports['max(rho)'][0], reports['max(rho)'][-1])) m0, m1 = reports['mass'][0], reports['mass'][-1] say('mass error %.3e (%.3e)' % (m1 - m0, (m1 - m0) / m0)) say('vel repr error %.3e' % 
dolfin.assemble(dolfin.dot(sim.data['u'], n) * dolfin.ds)) say('p*dx', int_p) div_u_Vp = abs( dolfin.project(dolfin.div(sim.data['u']), Vp).vector().get_local()).max() say('div(u)|Vp', div_u_Vp) div_u_Vu = abs( dolfin.project(dolfin.div(sim.data['u']), Vu).vector().get_local()).max() say('div(u)|Vu', div_u_Vu) Vdg0 = dolfin.FunctionSpace(mesh, "DG", 0) div_u_DG0 = abs( dolfin.project(dolfin.div(sim.data['u']), Vdg0).vector().get_local()).max() say('div(u)|DG0', div_u_DG0) Vdg1 = dolfin.FunctionSpace(mesh, "DG", 1) div_u_DG1 = abs( dolfin.project(dolfin.div(sim.data['u']), Vdg1).vector().get_local()).max() say('div(u)|DG1', div_u_DG1) isoparam = mesh.ufl_coordinate_element().degree() > 1 allways_plot = True if (last or allways_plot) and ( not isoparam or sim.input.get_value('mesh/type') == 'UnitDisc'): # Plot the results for fa, name in ((u0a, 'u0'), (u1a, 'u1'), (pa, 'p'), (rho_a, 'rho')): fh = sim.data[name] if isoparam: # Bug in matplotlib plotting for isoparametric elements mesh2 = dolfin.UnitDiscMesh(dolfin.MPI.comm_world, N // 2, 1, 2) ue = fa.function_space().ufl_element() V2 = dolfin.FunctionSpace(mesh2, ue.family(), ue.degree()) fa2, fh2 = dolfin.Function(V2), dolfin.Function(V2) fa2.vector().set_local(fa.vector().get_local()) fh2.vector().set_local(fh.vector().get_local()) fa, fh = fa2, fh2 discr = '' # '%g_%g_' % (N, dt) plot(fa, name + ' analytical', '%s%s_1analytical' % (discr, name)) plot(fh, name + ' numerical', '%s%s_2numerical' % (discr, name)) plot(fh - fa, name + ' diff', '%s%s_3diff' % (discr, name)) hmin = mesh.hmin() return err_rho, err_u0, err_u1, err_p, err_rho_H1, err_u0_H1, err_u1_H1, err_p_H1, hmin, dt, Co_max, Pe_max, duration
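# run_and_calculate_error calls a helper calc_err that is not shown in this
# snippet. A minimal sketch of a plausible implementation (an assumption, not
# the original Ocellaris helper): compare the numerical field with the
# projected analytical field in the requested norm.
import dolfin


def calc_err(f_num, f_ana, normtype='L2'):
    """Error of the numerical field against the analytical field in the
    given norm ('L2' or 'H1')."""
    return dolfin.errornorm(f_ana, f_num, normtype)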
def __init__(self, V, viscosity=1e-2, penalty=1e5):
    '''
    Parameters
    ----------
    V : dolfin.FunctionSpace
        Function space for the distance function.
    viscosity : float or dolfin.Constant
        Stabilization for the unique solution of the distance problem.
    penalty : float or dolfin.Constant
        Penalty for weakly enforcing the zero-distance boundary conditions.
    '''

    if not isinstance(V, dolfin.FunctionSpace):
        raise TypeError('Parameter `V` must be a `dolfin.FunctionSpace`')

    if not isinstance(viscosity, Constant):
        if not isinstance(viscosity, (float, int)):
            raise TypeError('Parameter `viscosity` must be a number '
                            'or a `dolfin.Constant`')
        viscosity = Constant(viscosity)

    if not isinstance(penalty, Constant):
        if not isinstance(penalty, (float, int)):
            raise TypeError('Parameter `penalty` must be a number '
                            'or a `dolfin.Constant`')
        penalty = Constant(penalty)

    self._viscosity = viscosity
    self._penalty = penalty

    mesh = V.mesh()
    xs = mesh.coordinates()
    l0 = (xs.max(0) - xs.min(0)).min()

    self._d = d = Function(V)
    self._Q = dolfin.FunctionSpace(mesh, 'DG', 0)

    self._mf = dolfin.MeshFunction('size_t', mesh,
                                   mesh.geometric_dimension())
    self._dx_penalty = dx(subdomain_id=1,
                          subdomain_data=self._mf,
                          domain=mesh)

    v0 = dolfin.TestFunction(V)
    v1 = dolfin.TrialFunction(V)

    target_gradient = Constant(1.0)
    scaled_penalty = penalty / mesh.hmax()

    # Linear "initdist" problem: a smooth first guess for the distance field
    lhs_F0 = l0 * dot(grad(v0), grad(v1)) * dx \
        + scaled_penalty * v0 * v1 * self._dx_penalty
    rhs_F0 = v0 * target_gradient * dx

    problem = dolfin.LinearVariationalProblem(lhs_F0, rhs_F0, d)
    self._linear_solver = dolfin.LinearVariationalSolver(problem)
    self._linear_solver.parameters["symmetric"] = True

    # Viscosity-stabilized eikonal problem for the actual distance field
    F = v0 * (grad(d)**2 - target_gradient) * dx \
        + viscosity * l0 * dot(grad(v0), grad(d)) * dx \
        + scaled_penalty * v0 * d * self._dx_penalty

    J = dolfin.derivative(F, d, v1)

    problem = dolfin.NonlinearVariationalProblem(F, d, bcs=None, J=J)
    self._nonlinear_solver = dolfin.NonlinearVariationalSolver(problem)
    self._nonlinear_solver.parameters['nonlinear_solver'] = 'newton'
    self._nonlinear_solver.parameters['symmetric'] = False

    self._solve_initdist_problem = self._linear_solver.solve
    self._solve_distance_problem = self._nonlinear_solver.solve
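# The two solves configured above form a classic distance-function recipe: a
# linear Poisson-like warm start followed by a viscosity-stabilized eikonal
# solve. A compact, self-contained variant of the same idea with a strong
# Dirichlet condition d = 0 on the whole boundary instead of the penalty term
# (illustrative sketch only; the solver settings may need tuning):
import dolfin as df


def _distance_demo(n=32):
    mesh = df.UnitSquareMesh(n, n)
    V = df.FunctionSpace(mesh, "CG", 1)
    d = df.Function(V)
    v = df.TestFunction(V)
    bc = df.DirichletBC(V, df.Constant(0.0), "on_boundary")

    # Warm start: smooth solution of -div(grad(d)) = 1
    u = df.TrialFunction(V)
    df.solve(df.dot(df.grad(u), df.grad(v)) * df.dx ==
             df.Constant(1.0) * v * df.dx, d, bc)

    # Regularized eikonal equation |grad(d)| = 1; the small constant keeps
    # the square root differentiable where grad(d) vanishes
    grad_norm = df.sqrt(df.dot(df.grad(d), df.grad(d)) + df.Constant(1e-12))
    F = (grad_norm - df.Constant(1.0)) * v * df.dx \
        + df.Constant(1e-2) * df.dot(df.grad(d), df.grad(v)) * df.dx
    df.solve(F == 0, d, bc)
    return d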
@function.expression.numba_eval
def ui_eval(values, x, cell_idx):
    values[:, 0] = np.exp(1.0j * k0 * (np.cos(theta) * x[:, 0]
                                       + np.sin(theta) * x[:, 1]))


# Test and trial function space
V = FunctionSpace(mesh, ("Lagrange", deg))

# Prepare Expression as FE function
ui = interpolate(Expression(ui_eval), V)

# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
g = dot(grad(ui), n) + 1j * k0 * ui
a = inner(grad(u), grad(v)) * dx \
    - k0**2 * inner(u, v) * dx \
    + 1j * k0 * inner(u, v) * ds
L = inner(g, v) * ds

# Compute solution
u = Function(V)
solve(a == L, u, [])

# Save solution in XDMF format (to be viewed in Paraview, for example)
with XDMFFile(MPI.comm_world, "plane_wave.xdmf",
              encoding=XDMFFile.Encoding.HDF5) as file:
    file.write(u)

"""Calculate L2 and H1 errors of FEM solution and best approximation.
This demonstrates the error bounds given in Ihlenburg. Pollution errors
def update_charge(self, phi):
    bnd_id = self.domain_args[1]
    projection = df.dot(df.grad(phi), self.n) * self.dss(bnd_id)
    self.charge = df.assemble(projection)
    return self.charge
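# update_charge integrates the normal component of grad(phi) over a single
# marked boundary. A self-contained legacy-dolfin sketch of the same assembly
# on a unit square, collecting the flux through the x = 0 side (all names are
# local to this example):
import dolfin as df


def _boundary_charge_demo():
    mesh = df.UnitSquareMesh(8, 8)
    V = df.FunctionSpace(mesh, "CG", 1)
    phi = df.interpolate(df.Expression("x[0]", degree=1), V)
    n = df.FacetNormal(mesh)

    facets = df.MeshFunction("size_t", mesh, mesh.topology().dim() - 1, 0)
    df.CompiledSubDomain("near(x[0], 0.0) && on_boundary").mark(facets, 1)
    dss = df.Measure("ds", domain=mesh, subdomain_data=facets)

    # grad(phi) = (1, 0) and n = (-1, 0) on x = 0, so this evaluates to -1
    return df.assemble(df.dot(df.grad(phi), n) * dss(1))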
def setup_scalar_equation(self):
    sim = self.simulation
    V = sim.data['Vphi']
    mesh = V.mesh()
    P = V.ufl_element().degree()

    # Source term
    source_cpp = sim.input.get_value('solver/source', '0', 'string')
    f = dolfin.Expression(source_cpp, degree=P)

    # Create the solution function
    sim.data['phi'] = dolfin.Function(V)

    # DG elliptic penalty
    penalty = define_penalty(mesh, P, k_min=1.0, k_max=1.0)
    penalty_dS = dolfin.Constant(penalty)
    penalty_ds = dolfin.Constant(penalty * 2)
    yh = dolfin.Constant(1 / (penalty * 2))

    # Define weak form
    u, v = dolfin.TrialFunction(V), dolfin.TestFunction(V)
    a = dot(grad(u), grad(v)) * dx
    L = f * v * dx

    # Symmetric Interior Penalty method for -∇⋅∇φ
    n = dolfin.FacetNormal(mesh)
    a -= dot(n('+'), avg(grad(u))) * jump(v) * dS
    a -= dot(n('+'), avg(grad(v))) * jump(u) * dS

    # Symmetric Interior Penalty coercivity term
    a += penalty_dS * jump(u) * jump(v) * dS

    # Dirichlet boundary conditions
    # Nitsche's (1971) method, see e.g. Epshteyn and Rivière (2007)
    dirichlet_bcs = sim.data['dirichlet_bcs'].get('phi', [])
    for dbc in dirichlet_bcs:
        bcval, dds = dbc.func(), dbc.ds()

        # SIPG for -∇⋅∇φ
        a -= dot(n, grad(u)) * v * dds
        a -= dot(n, grad(v)) * u * dds
        L -= dot(n, grad(v)) * bcval * dds

        # Weak Dirichlet
        a += penalty_ds * u * v * dds
        L += penalty_ds * bcval * v * dds

    # Neumann boundary conditions
    neumann_bcs = sim.data['neumann_bcs'].get('phi', [])
    for nbc in neumann_bcs:
        L += nbc.func() * v * nbc.ds()

    # Robin boundary conditions
    # See Juntunen and Stenberg (2009)
    # n⋅∇φ = (φ0 - φ)/b + g
    robin_bcs = sim.data['robin_bcs'].get('phi', [])
    for rbc in robin_bcs:
        b, rds = rbc.blend(), rbc.ds()
        dval, nval = rbc.dfunc(), rbc.nfunc()

        # From IBP of the main equation
        a -= dot(n, grad(u)) * v * rds

        # Test functions for the Robin BC
        z1 = 1 / (b + yh) * v
        z2 = -yh / (b + yh) * dot(n, grad(v))

        # Robin BC added twice with different test functions
        for z in [z1, z2]:
            a += b * dot(n, grad(u)) * z * rds
            a += u * z * rds
            L += dval * z * rds
            L += b * nval * z * rds

    # Does the system have a null-space?
    self.has_null_space = len(dirichlet_bcs) + len(robin_bcs) == 0

    self.form_lhs = a
    self.form_rhs = L
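# setup_scalar_equation calls a helper define_penalty that is not shown in
# this snippet. A hedged sketch of a typical SIPG penalty of this kind (an
# assumption for illustration, not necessarily the original Ocellaris
# implementation): scale with the polynomial degree and the worst facet-area
# to cell-volume ratio in the mesh.
import dolfin


def define_penalty(mesh, P, k_min, k_max, boost_factor=3):
    """Penalty parameter for the symmetric interior penalty (SIPG) method."""
    ndim = mesh.geometry().dim()
    geom_fac = 0.0
    for cell in dolfin.cells(mesh):
        area = sum(cell.facet_area(i) for i in range(ndim + 1))
        geom_fac = max(geom_fac, area / cell.volume())
    return boost_factor * k_max**2 / k_min * (P + 1) * (P + ndim) / ndim * geom_fac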
def __init__(self, Vh, covariance, mean=None):
    """
    Constructor

    Inputs:

    - :code:`Vh`: Finite element space on which the prior is defined.
      Must be the Real space with one global degree of freedom
    - :code:`covariance`: The covariance of the prior. Must be a
      :code:`numpy.ndarray` of appropriate size
    - :code:`mean` (optional): Mean of the prior distribution. Must be of
      type `dolfin.Vector()`
    """
    self.Vh = Vh

    if Vh.dim() != covariance.shape[0] or Vh.dim() != covariance.shape[1]:
        raise ValueError(
            "Covariance incompatible with Finite Element space")

    if not np.issubdtype(covariance.dtype, np.floating):
        raise TypeError("Covariance matrix must be a float array")

    self.covariance = covariance

    # np.linalg.cholesky automatically provides more error checking,
    # so use those
    self.chol = np.linalg.cholesky(self.covariance)

    self.chol_inv = scila.solve_triangular(self.chol,
                                           np.identity(Vh.dim()),
                                           lower=True)
    self.precision = np.dot(self.chol_inv.T, self.chol_inv)

    trial = dl.TrialFunction(Vh)
    test = dl.TestFunction(Vh)

    domain_measure_inv = dl.Constant(
        1.0 / dl.assemble(dl.Constant(1.) * dl.dx(Vh.mesh())))

    # Identity mass matrix
    self.M = dl.assemble(domain_measure_inv * dl.inner(trial, test) * dl.dx)
    self.Msolver = Operator2Solver(self.M)

    if mean:
        self.mean = mean
    else:
        tmp = dl.Vector()
        self.M.init_vector(tmp, 0)
        tmp.zero()
        self.mean = tmp

    if Vh.dim() == 1:
        trial = dl.as_matrix([[trial]])
        test = dl.as_matrix([[test]])

    # Create form matrices
    covariance_op = dl.as_matrix(list(map(list, self.covariance)))
    precision_op = dl.as_matrix(list(map(list, self.precision)))
    chol_op = dl.as_matrix(list(map(list, self.chol)))
    chol_inv_op = dl.as_matrix(list(map(list, self.chol_inv)))

    # Variational form for the regularization operator (the precision matrix)
    var_form_R = domain_measure_inv \
        * dl.inner(test, dl.dot(precision_op, trial)) * dl.dx

    # Variational form for the inverse regularization operator (the
    # covariance matrix)
    var_form_Rinv = domain_measure_inv \
        * dl.inner(test, dl.dot(covariance_op, trial)) * dl.dx

    # Variational form for the square root of the regularization operator
    var_form_R_sqrt = domain_measure_inv \
        * dl.inner(test, dl.dot(chol_inv_op.T, trial)) * dl.dx

    # Variational form for the square root of the inverse regularization
    # operator
    var_form_Rinv_sqrt = domain_measure_inv \
        * dl.inner(test, dl.dot(chol_op, trial)) * dl.dx

    self.R = dl.assemble(var_form_R)
    self.RSolverOp = dl.assemble(var_form_Rinv)
    self.Rsolver = Operator2Solver(self.RSolverOp)
    self.sqrtR = dl.assemble(var_form_R_sqrt)
    self.sqrtRinv = dl.assemble(var_form_Rinv_sqrt)
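# A hedged usage sketch for the finite-dimensional Gaussian prior above. The
# class name GaussianRealPrior is an assumption for illustration (the snippet
# only shows its constructor, which also relies on the Operator2Solver helper
# defined elsewhere); the checks only require that the dimension of the Real
# function space matches the covariance matrix.
import numpy as np
import dolfin as dl

mesh = dl.UnitSquareMesh(4, 4)
Vh = dl.VectorFunctionSpace(mesh, "R", 0, dim=2)   # two global dofs
covariance = np.array([[2.0, 0.5],
                       [0.5, 1.0]])
prior = GaussianRealPrior(Vh, covariance)          # hypothetical class name

# The assembled operators are ordinary dolfin matrices of this (tiny) size
x = dl.Vector()
prior.R.init_vector(x, 0)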
def end_hook(x_, enable_NS, dx, **namespace):
    u_norm = 0.
    if enable_NS:
        # Note: this assembles the squared L2 norm of the velocity field.
        u_norm = df.assemble(df.dot(x_["u"], x_["u"]) * dx)
    info("Velocity norm = {:e}".format(u_norm))
def assemble_laplacian(self):
    print("Assembling Laplacian")
    # Standard stiffness-matrix discretisation of the Laplacian
    self.laplacian = assemble(dot(grad(self.w), grad(self.u)) * dx)
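# The matrix assembled above is the standard stiffness matrix. A small
# self-contained sketch of assembling it and applying it to a coefficient
# vector (names are local to this example):
from dolfin import (UnitIntervalMesh, FunctionSpace, TrialFunction,
                    TestFunction, Function, assemble, dot, grad, dx)

mesh = UnitIntervalMesh(10)
V = FunctionSpace(mesh, "CG", 1)
u, w = TrialFunction(V), TestFunction(V)
laplacian = assemble(dot(grad(w), grad(u)) * dx)   # sparse stiffness matrix

f = Function(V)
f.vector()[:] = 1.0
y = laplacian * f.vector()   # matrix-vector product; ~0 for a constant field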