from LagrangianParticles import LagrangianParticles
from particle_generators import RandomCircle
import matplotlib.pyplot as plt
from dolfin import VectorFunctionSpace, interpolate, RectangleMesh, Expression, Point
from mpi4py import MPI as pyMPI

comm = pyMPI.COMM_WORLD

mesh = RectangleMesh(Point(0, 0), Point(1, 1), 10, 10)
particle_positions = RandomCircle([0.5, 0.75], 0.15).generate([100, 100])

V = VectorFunctionSpace(mesh, 'CG', 1)
u = interpolate(
    Expression(("-2*sin(pi*x[1])*cos(pi*x[1])*pow(sin(pi*x[0]),2)",
                "2*sin(pi*x[0])*cos(pi*x[0])*pow(sin(pi*x[1]),2)")), V)

lp = LagrangianParticles(V)
lp.add_particles(particle_positions)

fig = plt.figure()
lp.scatter(fig)
fig.suptitle('Initial')

if comm.Get_rank() == 0:
    fig.show()
plt.ion()

save = False
dt = 0.01
for step in range(500):
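    # Hedged sketch, not part of the original snippet: the time loop above was
    # cut off, so this body is an assumption based on the usual
    # LagrangianParticles API (lp.step advects the particles through the frozen
    # velocity field u, lp.scatter redraws them).
    lp.step(u, dt=dt)
    if step % 50 == 0:
        fig.clf()
        lp.scatter(fig)
        fig.suptitle('Step %d' % step)
        if comm.Get_rank() == 0:
            fig.canvas.draw()
            if save:
                fig.savefig('step_%04d.png' % step)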
def unstructured_mesh_2d():
    domain = Rectangle(Point(0., 0.), Point(1., 1.))
    return generate_mesh(domain, 5)
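# Usage sketch (assumption: Rectangle and generate_mesh come from mshr and Point
# from dolfin, which the surrounding module is expected to import):
if __name__ == "__main__":
    from dolfin import Point
    from mshr import Rectangle, generate_mesh

    mesh = unstructured_mesh_2d()
    print("unstructured unit-square mesh with", mesh.num_cells(), "cells")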
def test_eim_approximation_17(expression_type, basis_generation): """ This test is similar to test 15. However, in contrast to test 15, the solution is not split at all. * EIM: unsplit solution is used in the definition of the parametrized expression, similarly to test 11. * DEIM: unsplit solution is used in the definition of the parametrized tensor. This results in a single coefficient of type Function, which however is stored internally by UFL as an Indexed of Function and a mute index. This test requires the FEniCS backend to properly differentiate between Indexed objects with a fixed index (such as a component of the solution as in test 13) and Indexed objects with a mute index, which should be treated has if the entire solution was required. """ @StoreMapFromProblemNameToProblem @StoreMapFromProblemToTrainingStatus @StoreMapFromSolutionToProblem class MockProblem(ParametrizedProblem): def __init__(self, V, **kwargs): # Call parent ParametrizedProblem.__init__( self, os.path.join("test_eim_approximation_17_tempdir", expression_type, basis_generation, "mock_problem")) # Minimal subset of a ParametrizedDifferentialProblem self.V = V self._solution = Function(V) self.components = ["u", "s", "p"] # Parametrized function to be interpolated x = SpatialCoordinate(V.mesh()) mu = SymbolicParameters(self, V, (-1., -1.)) self.f00 = 1. / sqrt( pow(x[0] - mu[0], 2) + pow(x[1] - mu[1], 2) + 0.01) self.f01 = 1. / sqrt( pow(x[0] - mu[0], 4) + pow(x[1] - mu[1], 4) + 0.01) # Inner product f = TrialFunction(self.V) g = TestFunction(self.V) self.inner_product = assemble(inner(f, g) * dx) # Collapsed vector and space self.V0 = V.sub(0).collapse() self.V00 = V.sub(0).sub(0).collapse() self.V1 = V.sub(1).collapse() def name(self): return "MockProblem_17_" + expression_type + "_" + basis_generation def init(self): pass def solve(self): assert not hasattr(self, "_is_solving") self._is_solving = True f00 = project(self.f00, self.V00) f01 = project(self.f01, self.V00) assign(self._solution.sub(0).sub(0), f00) assign(self._solution.sub(0).sub(1), f01) delattr(self, "_is_solving") return self._solution @StoreMapFromProblemToReductionMethod @UpdateMapFromProblemToTrainingStatus class MockReductionMethod(ReductionMethod): def __init__(self, truth_problem, **kwargs): # Call parent ReductionMethod.__init__( self, os.path.join("test_eim_approximation_17_tempdir", expression_type, basis_generation, "mock_problem")) # Minimal subset of a DifferentialProblemReductionMethod self.truth_problem = truth_problem self.reduced_problem = None # I/O self.folder["basis"] = os.path.join( self.truth_problem.folder_prefix, "basis") # Gram Schmidt self.GS = GramSchmidt(self.truth_problem.inner_product) def initialize_training_set(self, ntrain, enable_import=True, sampling=None, **kwargs): return ReductionMethod.initialize_training_set( self, self.truth_problem.mu_range, ntrain, enable_import, sampling, **kwargs) def initialize_testing_set(self, ntest, enable_import=False, sampling=None, **kwargs): return ReductionMethod.initialize_testing_set( self, self.truth_problem.mu_range, ntest, enable_import, sampling, **kwargs) def offline(self): self.reduced_problem = MockReducedProblem(self.truth_problem) if self.folder["basis"].create( ): # basis folder was not available yet for (index, mu) in enumerate(self.training_set): self.truth_problem.set_mu(mu) print("solving mock problem at mu =", self.truth_problem.mu) f = self.truth_problem.solve() self.update_basis_matrix((index, f)) self.reduced_problem.basis_functions.save( 
self.folder["basis"], "basis") else: self.reduced_problem.basis_functions.load( self.folder["basis"], "basis") self._finalize_offline() return self.reduced_problem def update_basis_matrix(self, index_and_snapshot): (index, snapshot) = index_and_snapshot component = "u" if index % 2 == 0 else "s" self.reduced_problem.basis_functions.enrich(snapshot, component) self.GS.apply(self.reduced_problem.basis_functions[component], 0) def error_analysis(self, N=None, **kwargs): pass def speedup_analysis(self, N=None, **kwargs): pass @StoreMapFromProblemToReducedProblem class MockReducedProblem(ParametrizedProblem): @sync_setters("truth_problem", "set_mu", "mu") @sync_setters("truth_problem", "set_mu_range", "mu_range") def __init__(self, truth_problem, **kwargs): # Call parent ParametrizedProblem.__init__( self, os.path.join("test_eim_approximation_17_tempdir", expression_type, basis_generation, "mock_problem")) # Minimal subset of a ParametrizedReducedDifferentialProblem self.truth_problem = truth_problem self.basis_functions = BasisFunctionsMatrix(self.truth_problem.V) self.basis_functions.init(self.truth_problem.components) self._solution = None def solve(self): print("solving mock reduced problem at mu =", self.mu) assert not hasattr(self, "_is_solving") self._is_solving = True f = self.truth_problem.solve() f_N = transpose( self.basis_functions) * self.truth_problem.inner_product * f # Return the reduced solution self._solution = OnlineFunction(f_N) delattr(self, "_is_solving") return self._solution class ParametrizedFunctionApproximation(EIMApproximation): def __init__(self, truth_problem, expression_type, basis_generation): self.V = truth_problem.V # folder_prefix = os.path.join("test_eim_approximation_17_tempdir", expression_type, basis_generation) assert expression_type in ("Function", "Vector", "Matrix") if expression_type == "Function": # Call Parent constructor EIMApproximation.__init__( self, truth_problem, ParametrizedExpressionFactory(truth_problem._solution), folder_prefix, basis_generation) elif expression_type == "Vector": v = TestFunction(self.V) form = inner(truth_problem._solution, v) * dx # Call Parent constructor EIMApproximation.__init__(self, truth_problem, ParametrizedTensorFactory(form), folder_prefix, basis_generation) elif expression_type == "Matrix": u = TrialFunction(self.V) v = TestFunction(self.V) form = inner(truth_problem._solution, u) * v[0] * dx # Call Parent constructor EIMApproximation.__init__(self, truth_problem, ParametrizedTensorFactory(form), folder_prefix, basis_generation) else: # impossible to arrive here anyway thanks to the assert raise AssertionError("Invalid expression_type") # 1. Create the mesh for this test mesh = RectangleMesh(Point(0.1, 0.1), Point(0.9, 0.9), 20, 20) # 2. Create Finite Element space (Lagrange P1) element_0 = VectorElement("Lagrange", mesh.ufl_cell(), 2) element_1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1) element = MixedElement(element_0, element_1) V = FunctionSpace(mesh, element, components=[["u", "s"], "p"]) # 3. Create a parametrized problem problem = MockProblem(V) mu_range = [(-1., -0.01), (-1., -0.01)] problem.set_mu_range(mu_range) # 4. Create a reduction method and run the offline phase to generate the corresponding # reduced problem reduction_method = MockReductionMethod(problem) reduction_method.initialize_training_set(16, sampling=EquispacedDistribution()) reduction_method.offline() # 5. 
Allocate an object of the ParametrizedFunctionApproximation class parametrized_function_approximation = ParametrizedFunctionApproximation( problem, expression_type, basis_generation) parametrized_function_approximation.set_mu_range(mu_range) # 6. Prepare reduction with EIM parametrized_function_reduction_method = EIMApproximationReductionMethod( parametrized_function_approximation) parametrized_function_reduction_method.set_Nmax(16) parametrized_function_reduction_method.set_tolerance(0.) # 7. Perform EIM offline phase parametrized_function_reduction_method.initialize_training_set( 64, sampling=EquispacedDistribution()) reduced_parametrized_function_approximation = parametrized_function_reduction_method.offline( ) # 8. Perform EIM online solve online_mu = (-1., -1.) reduced_parametrized_function_approximation.set_mu(online_mu) reduced_parametrized_function_approximation.solve() # 9. Perform EIM error analysis parametrized_function_reduction_method.initialize_testing_set(100) parametrized_function_reduction_method.error_analysis()
def average_matrix(V, TV, shape):
    '''
    Averaging matrix for reduction of g in V to TV by integration over shape.
    '''
    # We build a matrix representation of u in V -> Pi(u) in TV where
    #
    #   Pi(u)(s) = |L(s)|^-1 * \int_{L(s)} u dx(s)
    #
    # Here L is the shape over which u is integrated for reduction.
    # Its measure is |L(s)|.
    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations: done naively, a vector dof would have to
    # evaluate at the same point once per component.
    value_size = TV.ufl_element().value_size()

    mesh = V.mesh()
    # Evaluation at points will require a search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    line_mesh = TV.mesh()

    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non-scalar spaces we build the components by shifting the scalar rows
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()
    basis_values = np.zeros(V.element().space_dimension() * value_size)

    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # Get the tangent (normal of the plane which cuts the virtual
            # surface to yield the boundary curve)
            v0, v1 = mesh_x[line_cell.entities(0)]
            n = v0 - v1

            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # The avg point here has the role of the 'height' coordinate
                quadrature = shape.quadrature(avg_point, n)
                integration_points = quadrature.points
                wq = quadrature.weights

                curve_measure = sum(wq)

                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    if c >= limit:
                        continue

                    cs = tree.compute_entity_collisions(Point(*ip))

                    for c in cs[:1]:
                        Vcell = Cell(mesh, c)
                        vertex_coordinates = Vcell.get_vertex_coordinates()
                        cell_orientation = Vcell.orientation()
                        basis_values[:] = Vel.evaluate_basis_all(
                            ip, vertex_coordinates, cell_orientation)

                        cols_ip = V_dm.cell_dofs(c)
                        values_ip = basis_values * wq[index]
                        # Add
                        for col, value in zip(
                                cols_ip, values_ip.reshape((-1, value_size))):
                            if col in data:
                                data[col] += value / curve_measure
                            else:
                                data[col] = value / curve_measure

                # With the data collected we can now assign to several
                # rows of the matrix
                column_indices = np.array(list(data.keys()), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array(
                        [data[col][shift] for col in column_indices])
                    mat.setValues([row], column_indices, column_values,
                                  PETSc.InsertMode.INSERT_VALUES)
            # On to the next avg point
        # On to the next cell
    return mat
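# Hedged illustration, not part of the library above: `shape` only needs to
# provide quadrature(avg_point, n) returning an object with `points` and
# `weights` attributes, where the weights sum to the measure |L| of the
# averaging curve. A hypothetical circle of fixed radius R in the plane with
# normal n could be written as follows.
from collections import namedtuple

import numpy as np
from numpy.polynomial.legendre import leggauss

Quadrature = namedtuple('Quadrature', ('points', 'weights'))


class CircleShape(object):
    def __init__(self, radius, degree=10):
        self.radius = radius
        self.xq, self.wq = leggauss(degree)

    def quadrature(self, avg_point, n):
        # Two unit vectors spanning the plane orthogonal to the edge tangent n
        n = n / np.linalg.norm(n)
        t1 = np.array([n[1] - n[2], n[2] - n[0], n[0] - n[1]])
        t1 /= np.linalg.norm(t1)
        t2 = np.cross(n, t1)

        R = self.radius
        # L(pi*s) = p + R*t1*sin(pi*s) + R*t2*cos(pi*s), s in (-1, 1)
        points = (avg_point
                  + R * np.outer(np.sin(np.pi * self.xq), t1)
                  + R * np.outer(np.cos(np.pi * self.xq), t2))
        # dL = pi*R*ds, so these weights sum to the circumference 2*pi*R
        weights = np.pi * R * self.wq
        return Quadrature(points, weights)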
def test_advect_periodic(advection_scheme):
    # FIXME: this unit test is sensitive to the ordering of the particle
    # array, i.e. xp0_root and xpE_root may contain exactly the same entries
    # but in a different order, in which case the check below currently fails.
    xmin, xmax = 0.0, 1.0
    ymin, ymax = 0.0, 1.0
    pres = 3

    mesh = RectangleMesh(Point(xmin, ymin), Point(xmax, ymax), 10, 10)

    lims = np.array([
        [xmin, xmin, ymin, ymax],
        [xmax, xmax, ymin, ymax],
        [xmin, xmax, ymin, ymin],
        [xmin, xmax, ymax, ymax],
    ])

    vexpr = Constant((1.0, 1.0))
    V = VectorFunctionSpace(mesh, "CG", 1)

    x = RandomRectangle(Point(0.05, 0.05), Point(0.15, 0.15)).generate([pres, pres])
    x = comm.bcast(x, root=0)
    dt = 0.05

    v = Function(V)
    v.assign(vexpr)

    p = particles(x, [x * 0, x**2], mesh)

    if advection_scheme == "euler":
        ap = advect_particles(p, V, v, "periodic", lims.flatten())
    elif advection_scheme == "rk2":
        ap = advect_rk2(p, V, v, "periodic", lims.flatten())
    elif advection_scheme == "rk3":
        ap = advect_rk3(p, V, v, "periodic", lims.flatten())
    else:
        assert False

    xp0 = p.positions()
    t = 0.0
    while t < 1.0 - 1e-12:
        ap.do_step(dt)
        t += dt

    xpE = p.positions()

    # Check whether the final positions are correct
    xp0_root = comm.gather(xp0, root=0)
    xpE_root = comm.gather(xpE, root=0)

    num_particles = p.number_of_particles()

    if comm.Get_rank() == 0:
        xp0_root = np.float32(np.vstack(xp0_root))
        xpE_root = np.float32(np.vstack(xpE_root))

        # Sort on x positions
        xp0_root = xp0_root[xp0_root[:, 0].argsort(), :]
        xpE_root = xpE_root[xpE_root[:, 0].argsort(), :]

        error = np.linalg.norm(xp0_root - xpE_root)
        assert error < 1e-10
        assert num_particles - pres**2 == 0
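# Standalone sanity check of the periodic-wrap expectation above (plain numpy,
# no FEniCS): with constant velocity (1, 1) on the unit box with periodic
# boundaries, every particle returns to its initial position after t = 1.
if __name__ == "__main__":
    import numpy as np

    x0 = np.random.RandomState(0).uniform(0.05, 0.15, size=(9, 2))
    x, t, dt = x0.copy(), 0.0, 0.05
    while t < 1.0 - 1e-12:
        x = (x + dt * np.array([1.0, 1.0])) % 1.0  # explicit Euler step + periodic wrap
        t += dt
    assert np.allclose(x, x0, atol=1e-10)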
def test_nullspace_orthogonal(mesh, degree):
    """Test null space orthogonalisation"""
    V = VectorFunctionSpace(mesh, ('Lagrange', degree))
    null_space = build_elastic_nullspace(V)
    assert not null_space.is_orthogonal()
    assert not null_space.is_orthonormal()

    null_space.orthonormalize()

    assert null_space.is_orthogonal()
    assert null_space.is_orthonormal()


@pytest.mark.parametrize("mesh", [
    UnitSquareMesh(MPI.comm_world, 12, 13),
    BoxMesh.create(
        MPI.comm_world,
        [Point(0.8, -0.2, 1.2)._cpp_object,
         Point(3.0, 11.0, -5.0)._cpp_object], [12, 18, 25],
        cell_type=CellType.Type.tetrahedron,
        ghost_mode=GhostMode.none),
])
@pytest.mark.parametrize("degree", [1, 2])
def test_nullspace_check(mesh, degree):
    V = VectorFunctionSpace(mesh, ('Lagrange', degree))
    u, v = TrialFunction(V), TestFunction(V)

    mesh.geometry.coord_mapping = fem.create_coordinate_map(mesh)

    E, nu = 2.0e2, 0.3
    mu = E / (2.0 * (1.0 + nu))
    lmbda = E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu))
    basis = VectorSpaceBasis(nullspace_basis)
    basis.orthonormalize()

    _x = [basis[i] for i in range(6)]
    nsp = PETSc.NullSpace()
    nsp.create(_x)
    return nsp


# Load mesh from file
# mesh = Mesh(MPI.comm_world)
# XDMFFile(MPI.comm_world, "../pulley.xdmf").read(mesh)

# mesh = UnitCubeMesh(2, 2, 2)

mesh = BoxMesh(
    MPI.comm_world,
    [Point(0, 0, 0)._cpp_object, Point(2, 1, 1)._cpp_object], [12, 12, 12],
    CellType.Type.tetrahedron, dolfin.cpp.mesh.GhostMode.none)

cmap = dolfin.fem.create_coordinate_map(mesh.ufl_domain())
mesh.geometry.coord_mapping = cmap


# Function to mark inner surface of pulley
# def inner_surface(x, on_boundary):
#     r = 3.75 - x[2]*0.17
#     return (x[0]*x[0] + x[1]*x[1]) < r*r and on_boundary


def boundary(x, on_boundary):
    return np.logical_or(x[:, 0] < 10.0 * np.finfo(float).eps,
                         x[:, 0] > 1.0 - 10.0 * np.finfo(float).eps)
def run_with_params(Tb, mu_value, k_s, path): run_time_init = clock() mesh = BoxMesh(Point(0.0, 0.0, 0.0), Point(mesh_width, mesh_width, mesh_height), nx, ny, nz) pbc = PeriodicBoundary() WE = VectorElement('CG', mesh.ufl_cell(), 2) SE = FiniteElement('CG', mesh.ufl_cell(), 1) WSSS = FunctionSpace(mesh, MixedElement(WE, SE, SE, SE), constrained_domain=pbc) # W = FunctionSpace(mesh, WE, constrained_domain=pbc) # S = FunctionSpace(mesh, SE, constrained_domain=pbc) W = WSSS.sub(0).collapse() S = WSSS.sub(1).collapse() temperature_vals = [27.0 + 273, Tb + 273, 1300.0 + 273, 1305.0 + 273] temp_prof = TemperatureProfile(temperature_vals, element=S.ufl_element()) mu_a = mu_value # this was taken from the Blankenbach paper, can change Ep = b / temp_prof.delta mu_bot = exp(-Ep * (temp_prof.bottom * temp_prof.delta - 1573.0) + cc) * mu_a # TODO: verify exponentiation Ra = rho_0 * alpha * g * temp_prof.delta * h**3 / (kappa_0 * mu_a) w0 = rho_0 * alpha * g * temp_prof.delta * h**2 / mu_a tau = h / w0 p0 = mu_a * w0 / h log(mu_a, mu_bot, Ra, w0, p0) slip_vx = 1.6E-09 / w0 # Non-dimensional slip_velocity = Constant((slip_vx, 0.0, 0.0)) zero_slip = Constant((0.0, 0.0, 0.0)) time_step = 3.0E11 / tau * 2 dt = Constant(time_step) t_end = 3.0E15 / tau / 5.0 # Non-dimensional times u = Function(WSSS) # Instead of TrialFunctions, we use split(u) for our non-linear problem v, p, T, Tf = split(u) v_t, p_t, T_t, Tf_t = TestFunctions(WSSS) T0 = interpolate(temp_prof, S) mu_exp = Expression( 'exp(-Ep * (T_val * dTemp - 1573.0) + cc * x[2] / mesh_height)', Ep=Ep, dTemp=temp_prof.delta, cc=cc, mesh_height=mesh_height, T_val=T0, element=S.ufl_element()) Tf0 = interpolate(temp_prof, S) mu = Function(S) v0 = Function(W) v_theta = (1.0 - theta) * v0 + theta * v T_theta = (1.0 - theta) * T0 + theta * T Tf_theta = (1.0 - theta) * Tf0 + theta * Tf # TODO: Verify forms r_v = (inner(sym(grad(v_t)), 2.0 * mu * sym(grad(v))) - div(v_t) * p - T * v_t[2]) * dx r_p = p_t * div(v) * dx heat_transfer = Constant(k_s) * (Tf_theta - T_theta) * dt r_T = ( T_t * ((T - T0) + dt * inner(v_theta, grad(T_theta))) # TODO: Inner vs dot + (dt / Ra) * inner(grad(T_t), grad(T_theta)) - T_t * heat_transfer) * dx v_melt = Function(W) z_hat = Constant((0.0, 0.0, 1.0)) # TODO: inner -> dot, take out Tf_t r_Tf = (Tf_t * ((Tf - Tf0) + dt * inner(v_melt, grad(Tf_theta))) + Tf_t * heat_transfer) * dx r = r_v + r_p + r_T + r_Tf bcv0 = DirichletBC(WSSS.sub(0), zero_slip, top) bcv1 = DirichletBC(WSSS.sub(0), slip_velocity, bottom) bcv2 = DirichletBC(WSSS.sub(0).sub(1), Constant(0.0), back) bcv3 = DirichletBC(WSSS.sub(0).sub(1), Constant(0.0), front) bcp0 = DirichletBC(WSSS.sub(1), Constant(0.0), bottom) bct0 = DirichletBC(WSSS.sub(2), Constant(temp_prof.surface), top) bct1 = DirichletBC(WSSS.sub(2), Constant(temp_prof.bottom), bottom) bctf1 = DirichletBC(WSSS.sub(3), Constant(temp_prof.bottom), bottom) bcs = [bcv0, bcv1, bcv2, bcv3, bcp0, bct0, bct1, bctf1] t = 0 count = 0 files = DefaultDictByKey(partial(create_xdmf, path)) while t < t_end: mu.interpolate(mu_exp) rhosolid = rho_0 * (1.0 - alpha * (T0 * temp_prof.delta - 1573.0)) deltarho = rhosolid - rho_melt # TODO: project (accuracy) vs interpolate assign( v_melt, project( v0 - darcy * (grad(p) * p0 / h - deltarho * z_hat * g) / w0, W)) # TODO: Written out one step later? # v_melt.assign(v0 - darcy * (grad(p) * p0 / h - deltarho * yvec * g) / w0) # TODO: use nP after to avoid projection? solve(r == 0, u, bcs) nV, nP, nT, nTf = u.split() # TODO: write with Tf, ... 
etc if count % output_every == 0: time_left(count, t_end / time_step, run_time_init) # TODO: timestep vs dt # TODO: Make sure all writes are to the same function for each time step files['T_fluid'].write(nTf, t) files['p'].write(nP, t) files['v_solid'].write(nV, t) files['T_solid'].write(nT, t) files['mu'].write(mu, t) files['v_melt'].write(v_melt, t) files['gradp'].write(project(grad(nP), W), t) files['rho'].write(project(rhosolid, S), t) files['Tf_grad'].write(project(grad(Tf), W), t) files['advect'].write(project(dt * dot(v_melt, grad(nTf))), t) files['ht'].write(project(heat_transfer, S), t) assign(T0, nT) assign(v0, nV) assign(Tf0, nTf) t += time_step count += 1 log('Case mu={}, Tb={}, k={} complete. Run time = {:.2f} minutes'.format( mu_a, Tb, k_s, (clock() - run_time_init) / 60.0))
def list2point(self, list_in):
    """
    Turn a list of coordinates into a FEniCS Point.

    Inputs:
        list_in = list containing the coordinates of the Point
    """
    dim = np.size(list_in)
    return Point(dim, np.array(list_in, dtype=float))
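# Hedged usage note (illustrative only; `obj` stands for whatever instance
# defines list2point, and Point/np are assumed to be imported in this module):
#
#   p = obj.list2point([0.5, 0.25])
#   # p is a dolfin Point built from the two coordinates, usable wherever the
#   # FEniCS API expects a Point (e.g. PointSource, mesh queries).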
def test_eim_approximation_02(expression_type, basis_generation): """ This is a second basic test for EIM/DEIM, based on the test case of section 3.3.2 of S. Chaturantabut and D. C. Sorensen Nonlinear Model Reduction via Discrete Empirical Interpolation SIAM Journal on Scientific Computing 2010 32:5, 2737-2764 * EIM: test interpolation of a scalar function * DEIM: test interpolation of form with scalar integrand on a scalar space """ class MockProblem(ParametrizedProblem): def __init__(self, V, **kwargs): ParametrizedProblem.__init__(self, "") self.V = V def name(self): return "MockProblem_02_" + expression_type + "_" + basis_generation class ParametrizedFunctionApproximation(EIMApproximation): def __init__(self, V, expression_type, basis_generation): self.V = V # Parametrized function to be interpolated mock_problem = MockProblem(V) f = ParametrizedExpression( mock_problem, "1/sqrt(pow(x[0]-mu[0], 2) + pow(x[1]-mu[1], 2) + 0.01)", mu=(-1., -1.), element=V.ufl_element()) # folder_prefix = os.path.join("test_eim_approximation_02_tempdir", expression_type, basis_generation) assert expression_type in ("Function", "Vector", "Matrix") if expression_type == "Function": # Call Parent constructor EIMApproximation.__init__(self, mock_problem, ParametrizedExpressionFactory(f), folder_prefix, basis_generation) elif expression_type == "Vector": v = TestFunction(V) form = f * v * dx # Call Parent constructor EIMApproximation.__init__(self, mock_problem, ParametrizedTensorFactory(form), folder_prefix, basis_generation) elif expression_type == "Matrix": u = TrialFunction(V) v = TestFunction(V) form = f * u * v * dx # Call Parent constructor EIMApproximation.__init__(self, mock_problem, ParametrizedTensorFactory(form), folder_prefix, basis_generation) else: # impossible to arrive here anyway thanks to the assert raise AssertionError("Invalid expression_type") # 1. Create the mesh for this test mesh = RectangleMesh(Point(0.1, 0.1), Point(0.9, 0.9), 20, 20) # 2. Create Finite Element space (Lagrange P1) V = FunctionSpace(mesh, "Lagrange", 1) # 3. Allocate an object of the ParametrizedFunctionApproximation class parametrized_function_approximation = ParametrizedFunctionApproximation( V, expression_type, basis_generation) mu_range = [(-1., -0.01), (-1., -0.01)] parametrized_function_approximation.set_mu_range(mu_range) # 4. Prepare reduction with EIM parametrized_function_reduction_method = EIMApproximationReductionMethod( parametrized_function_approximation) parametrized_function_reduction_method.set_Nmax(50) parametrized_function_reduction_method.set_tolerance(0.) # 5. Perform the offline phase parametrized_function_reduction_method.initialize_training_set( 225, sampling=EquispacedDistribution()) reduced_parametrized_function_approximation = parametrized_function_reduction_method.offline( ) # 6. Perform an online solve online_mu = (-1., -1.) reduced_parametrized_function_approximation.set_mu(online_mu) reduced_parametrized_function_approximation.solve() # 7. Perform an error analysis parametrized_function_reduction_method.initialize_testing_set(225) parametrized_function_reduction_method.error_analysis()
def average_matrix(V, TV, radius, quad_degree):
    '''Averaging matrix'''
    mesh = V.mesh()
    line_mesh = TV.mesh()

    # We are going to perform the integration with Gauss quadrature at
    # the end (PI u)(x):
    # A cell of mesh (an edge) defines a normal vector. Let P be the plane
    # that is defined by the normal vector n and some point x on Gamma. Let L
    # be the circle that is the intersection of P and S. The value of q (in Q)
    # at x is defined as
    #
    #   q(x) = (1/|L|)*\int_{L}g(x)*dL
    #
    # which simplifies to g(x) = (1/(2*pi*R))*\int_{-pi}^{pi}u(L)*R*d(theta)
    # or = (1/2) * \int_{-1}^{1} u(L(pi*s)) * ds.
    # This can be integrated no problemo once we figure out L. To this end, let
    # t_1 and t_2 be two unit mutually orthogonal vectors that are orthogonal
    # to n. Then L(pi*s) = p + R*t_1*cos(pi*s) + R*t_2*sin(pi*s) can be seen to
    # be such that i) |x-p| = R and ii) x.n = 0 [i.e. this is the suitable
    # parametrization].
    # Clearly we can scale the weights as well as precompute
    # the cos and sin terms.
    xq, wq = leggauss(quad_degree)
    wq *= 0.5
    cos_xq = np.cos(np.pi * xq).reshape((-1, 1))
    sin_xq = np.sin(np.pi * xq).reshape((-1, 1))

    if is_number(radius):
        radius = lambda x, radius=radius: radius

    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations: done naively, a vector dof would have to
    # evaluate at the same point once per component.
    value_size = TV.ufl_element().value_size()

    # Evaluation at points will require a search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non-scalar spaces we build the components by shifting the scalar rows
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()
    basis_values = np.zeros(V.element().space_dimension() * value_size)

    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # Get the tangent => orthogonal tangent vectors
            v0, v1 = mesh_x[line_cell.entities(0)]
            n = v0 - v1

            t1 = np.array([n[1] - n[2], n[2] - n[0], n[0] - n[1]])
            t2 = np.cross(n, t1)

            t1 /= np.linalg.norm(t1)
            t2 = t2 / np.linalg.norm(t2)

            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Get radius and integration points
                rad = radius(avg_point)

                integration_points = avg_point + rad * t1 * sin_xq + rad * t2 * cos_xq

                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    if c >= limit:
                        continue

                    Vcell = Cell(mesh, c)
                    vertex_coordinates = Vcell.get_vertex_coordinates()
                    cell_orientation = Vcell.orientation()
                    Vel.evaluate_basis_all(basis_values, ip,
                                           vertex_coordinates, cell_orientation)

                    cols_ip = V_dm.cell_dofs(c)
                    values_ip = basis_values * wq[index]
                    # Add
                    for col, value in zip(cols_ip,
                                          values_ip.reshape((-1, value_size))):
                        if col in data:
                            data[col] += value
                        else:
                            data[col] = value

                # With the data collected we can now assign to several
                # rows of the matrix
                column_indices = np.array(list(data.keys()), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array(
                        [data[col][shift] for col in column_indices])
                    mat.setValues([row], column_indices, column_values,
                                  PETSc.InsertMode.INSERT_VALUES)
            # On to the next avg point
        # On to the next cell
    return PETScMatrix(mat)
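# Hedged usage sketch (assumes a scalar Function u in V living on a 3d mesh and
# TV built on an embedded line mesh, as in the rest of this module; the calls
# below are illustrative, not taken from the original file):
#
#   A = average_matrix(V, TV, radius=0.1, quad_degree=10)
#   Pi_u = Function(TV, A * u.vector())   # circle-averages of u along the curve
#
# i.e. the returned PETScMatrix is the algebraic form of the reduction operator
# Pi mapping V onto TV.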
def test_eim_approximation_06(expression_type, basis_generation): """ The aim of this test is to check the interpolation of vector valued functions. * EIM: test interpolation of a scalar function * DEIM: test interpolation of form with integrand given by the inner product of a vector valued function and some derivative of a test/trial functions of a scalar space """ class MockProblem(ParametrizedProblem): def __init__(self, V, **kwargs): ParametrizedProblem.__init__(self, "") self.V = V def name(self): return "MockProblem_06_" + expression_type + "_" + basis_generation class ParametrizedFunctionApproximation(EIMApproximation): def __init__(self, V, expression_type, basis_generation): self.V = V # Parametrized function to be interpolated mock_problem = MockProblem(V) f_expression = ( "1/sqrt(pow(x[0]-mu[0], 2) + pow(x[1]-mu[1], 2) + 0.01)", "10.*(1-x[0])*cos(3*pi*(pi+mu[1])*(1+x[1]))*exp(-(pi+mu[0])*(1+x[0]))" ) vector_element = VectorElement(V.ufl_element()) f = ParametrizedExpression(mock_problem, f_expression, mu=(-1., -1.), element=vector_element) # folder_prefix = os.path.join("test_eim_approximation_06_tempdir", expression_type, basis_generation) assert expression_type in ("Function", "Vector", "Matrix") if expression_type == "Function": # Call Parent constructor EIMApproximation.__init__(self, mock_problem, ParametrizedExpressionFactory(f), folder_prefix, basis_generation) elif expression_type == "Vector": v = TestFunction(V) form = inner(f, grad(v)) * dx # Call Parent constructor EIMApproximation.__init__(self, mock_problem, ParametrizedTensorFactory(form), folder_prefix, basis_generation) elif expression_type == "Matrix": u = TrialFunction(V) v = TestFunction(V) form = inner(f, grad(u)) * v * dx # Call Parent constructor EIMApproximation.__init__(self, mock_problem, ParametrizedTensorFactory(form), folder_prefix, basis_generation) else: # impossible to arrive here anyway thanks to the assert raise AssertionError("Invalid expression_type") # 1. Create the mesh for this test mesh = RectangleMesh(Point(0.1, 0.1), Point(0.9, 0.9), 20, 20) # 2. Create Finite Element space (Lagrange P1) V = FunctionSpace(mesh, "Lagrange", 1) # 3. Allocate an object of the ParametrizedFunctionApproximation class parametrized_function_approximation = ParametrizedFunctionApproximation( V, expression_type, basis_generation) mu_range = [(-1., -0.01), (-1., -0.01)] parametrized_function_approximation.set_mu_range(mu_range) # 4. Prepare reduction with EIM parametrized_function_reduction_method = EIMApproximationReductionMethod( parametrized_function_approximation) parametrized_function_reduction_method.set_Nmax(50) parametrized_function_reduction_method.set_tolerance(0.) # 5. Perform the offline phase parametrized_function_reduction_method.initialize_training_set( 225, sampling=EquispacedDistribution()) reduced_parametrized_function_approximation = parametrized_function_reduction_method.offline( ) # 6. Perform an online solve online_mu = (-1., -1.) reduced_parametrized_function_approximation.set_mu(online_mu) reduced_parametrized_function_approximation.solve() # 7. Perform an error analysis parametrized_function_reduction_method.initialize_testing_set(225) parametrized_function_reduction_method.error_analysis()
def Regular(n, size=50):
    """Build mesh for a regular polygon with n sides."""
    points = [(np.cos(2 * np.pi * i / n), np.sin(2 * np.pi * i / n))
              for i in range(n)]
    polygon = Polygon([Point(*p) for p in points])
    return generate_mesh(polygon, size)
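# Usage sketch (relies on the module-level imports used above: numpy as np, and
# Point, Polygon, generate_mesh from dolfin/mshr):
if __name__ == "__main__":
    hexagon = Regular(6, size=20)
    print("hexagon mesh:", hexagon.num_cells(), "cells,",
          hexagon.num_vertices(), "vertices")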
    degree=7, U=float(1.0), nu=float(nu), t=float(0.0), mode=mode)
P_exact = "-rho*0.25*exp(-4*nu*pow(pi,2)*t)*(cos(2*pi*(x[0])) + cos(2*pi*(x[1]))-2.0)"

p_exact = Expression(P_exact, degree=3, rho=1.0, nu=float(nu), t=float(0.0))
f = Constant((0.0, 0.0))

# Create mesh
xmin, ymin = geometry["xmin"], geometry["ymin"]
xmax, ymax = geometry["xmax"], geometry["ymax"]

mesh = RectangleMesh(MPI.comm_world, Point(xmin, ymin), Point(xmax, ymax), nx, ny)

pbc = PeriodicBoundary(geometry)

# xdmf output
xdmf_u = XDMFFile(mesh.mpi_comm(), outdir_base + "u.xdmf")
xdmf_p = XDMFFile(mesh.mpi_comm(), outdir_base + "p.xdmf")

# Required elements
W_E_2 = VectorElement("DG", mesh.ufl_cell(), k)
T_E_2 = VectorElement("DG", mesh.ufl_cell(), 0)
Wbar_E_2 = VectorElement("DGT", mesh.ufl_cell(), kbar)
Wbar_E_2_H12 = VectorElement("CG", mesh.ufl_cell(), kbar)["facet"]

Q_E = FiniteElement("DG", mesh.ufl_cell(), k - 1)
Qbar_E = FiniteElement("DGT", mesh.ufl_cell(), k)
def test_eim_approximation_18(expression_type, basis_generation): """ This test is the version of test 17 where high fidelity solution is used in place of reduced order one. """ @StoreMapFromProblemNameToProblem @StoreMapFromProblemToTrainingStatus @StoreMapFromSolutionToProblem class MockProblem(ParametrizedProblem): def __init__(self, V, **kwargs): # Call parent ParametrizedProblem.__init__( self, os.path.join("test_eim_approximation_18_tempdir", expression_type, basis_generation, "mock_problem")) # Minimal subset of a ParametrizedDifferentialProblem self.V = V self._solution = Function(V) self.components = ["u", "s", "p"] # Parametrized function to be interpolated x = SpatialCoordinate(V.mesh()) mu = SymbolicParameters(self, V, (-1., -1.)) self.f00 = 1. / sqrt( pow(x[0] - mu[0], 2) + pow(x[1] - mu[1], 2) + 0.01) self.f01 = 1. / sqrt( pow(x[0] - mu[0], 4) + pow(x[1] - mu[1], 4) + 0.01) # Inner product f = TrialFunction(self.V) g = TestFunction(self.V) self.inner_product = assemble(inner(f, g) * dx) # Collapsed vector and space self.V0 = V.sub(0).collapse() self.V00 = V.sub(0).sub(0).collapse() self.V1 = V.sub(1).collapse() def name(self): return "MockProblem_18_" + expression_type + "_" + basis_generation def init(self): pass def solve(self): print("solving mock problem at mu =", self.mu) assert not hasattr(self, "_is_solving") self._is_solving = True f00 = project(self.f00, self.V00) f01 = project(self.f01, self.V00) assign(self._solution.sub(0).sub(0), f00) assign(self._solution.sub(0).sub(1), f01) delattr(self, "_is_solving") return self._solution @StoreMapFromProblemToReductionMethod class MockReductionMethod(ReductionMethod): def __init__(self, truth_problem, **kwargs): # Call parent ReductionMethod.__init__( self, os.path.join("test_eim_approximation_18_tempdir", expression_type, basis_generation, "mock_problem")) # Minimal subset of a DifferentialProblemReductionMethod self.truth_problem = truth_problem self.reduced_problem = None def initialize_training_set(self, ntrain, enable_import=True, sampling=None, **kwargs): return ReductionMethod.initialize_training_set( self, self.truth_problem.mu_range, ntrain, enable_import, sampling, **kwargs) def initialize_testing_set(self, ntest, enable_import=False, sampling=None, **kwargs): return ReductionMethod.initialize_testing_set( self, self.truth_problem.mu_range, ntest, enable_import, sampling, **kwargs) def offline(self): pass def update_basis_matrix(self, snapshot): pass def error_analysis(self, N=None, **kwargs): pass def speedup_analysis(self, N=None, **kwargs): pass class ParametrizedFunctionApproximation(EIMApproximation): def __init__(self, truth_problem, expression_type, basis_generation): self.V = truth_problem.V # folder_prefix = os.path.join("test_eim_approximation_18_tempdir", expression_type, basis_generation) assert expression_type in ("Function", "Vector", "Matrix") if expression_type == "Function": # Call Parent constructor EIMApproximation.__init__( self, truth_problem, ParametrizedExpressionFactory(truth_problem._solution), folder_prefix, basis_generation) elif expression_type == "Vector": v = TestFunction(self.V) form = inner(truth_problem._solution, v) * dx # Call Parent constructor EIMApproximation.__init__(self, truth_problem, ParametrizedTensorFactory(form), folder_prefix, basis_generation) elif expression_type == "Matrix": u = TrialFunction(self.V) v = TestFunction(self.V) form = inner(truth_problem._solution, u) * v[0] * dx # Call Parent constructor EIMApproximation.__init__(self, truth_problem, 
ParametrizedTensorFactory(form), folder_prefix, basis_generation) else: # impossible to arrive here anyway thanks to the assert raise AssertionError("Invalid expression_type") # 1. Create the mesh for this test mesh = RectangleMesh(Point(0.1, 0.1), Point(0.9, 0.9), 20, 20) # 2. Create Finite Element space (Lagrange P1) element_0 = VectorElement("Lagrange", mesh.ufl_cell(), 2) element_1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1) element = MixedElement(element_0, element_1) V = FunctionSpace(mesh, element, components=[["u", "s"], "p"]) # 3. Create a parametrized problem problem = MockProblem(V) mu_range = [(-1., -0.01), (-1., -0.01)] problem.set_mu_range(mu_range) # 4. Create a reduction method, but postpone generation of the reduced problem MockReductionMethod(problem) # 5. Allocate an object of the ParametrizedFunctionApproximation class parametrized_function_approximation = ParametrizedFunctionApproximation( problem, expression_type, basis_generation) parametrized_function_approximation.set_mu_range(mu_range) # 6. Prepare reduction with EIM parametrized_function_reduction_method = EIMApproximationReductionMethod( parametrized_function_approximation) parametrized_function_reduction_method.set_Nmax(16) parametrized_function_reduction_method.set_tolerance(0.) # 7. Perform EIM offline phase parametrized_function_reduction_method.initialize_training_set( 64, sampling=EquispacedDistribution()) reduced_parametrized_function_approximation = parametrized_function_reduction_method.offline( ) # 8. Perform EIM online solve online_mu = (-1., -1.) reduced_parametrized_function_approximation.set_mu(online_mu) reduced_parametrized_function_approximation.solve() # 9. Perform EIM error analysis parametrized_function_reduction_method.initialize_testing_set(100) parametrized_function_reduction_method.error_analysis()
def initMesh(self, n):
    # Set mesh to the square [-pi, pi]^2
    self.mesh = RectangleMesh(Point(-np.pi, -np.pi), Point(np.pi, np.pi), n, n)
# Load meshes and mesh-functions used in the MultiMesh from file
multimesh = MultiMesh()
mfs = []
meshes = []
for i in range(2):
    mesh_i = Mesh()
    with XDMFFile("meshes/multimesh_%d.xdmf" % i) as infile:
        infile.read(mesh_i)
    mvc = MeshValueCollection("size_t", mesh_i, 1)
    with XDMFFile("meshes/mf_%d.xdmf" % i) as infile:
        infile.read(mvc, "name_to_read")
    mfs.append(cpp.mesh.MeshFunctionSizet(mesh_i, mvc))
    meshes.append(mesh_i)
    multimesh.add(mesh_i)
multimesh.build()
multimesh.auto_cover(0, Point(1.25, 0.875))


def deformation_vector():
    # n1 = VolumeNormal(multimesh.part(1))
    x1 = SpatialCoordinate(multimesh.part(1))
    n1 = as_vector((x1[1], x1[0]))
    S_sm = VectorFunctionSpace(multimesh.part(1), "CG", 1)
    # bcs = [DirichletBC(S_sm, Constant((0, 0)), mfs[1], 2),
    bcs = [DirichletBC(S_sm, n1, mfs[1], 1)]
    u, v = TrialFunction(S_sm), TestFunction(S_sm)
    a = inner(grad(u), grad(v)) * dx
    l = inner(Constant((0., 0.)), v) * dx
    n = Function(S_sm)
    solve(a == l, n, bcs=bcs)
D_an = Constant(1.0E-7)      # m^2/s
D_ca = Constant(1.0E-7)      # m^2/s
mu_an = Constant(3.9607E-6)  # m^2/(s*V)
mu_ca = Constant(3.9607E-6)  # m^2/(s*V)
z_an = Constant(-1.0)
z_ca = Constant(1.0)
z_fc = Constant(-1.0)
Farad = Constant(9.6487E4)   # C/mol
eps0 = Constant(8.854E-12)   # As/Vm
epsR = Constant(100.0)
Temp = Constant(293.0)
R = Constant(8.3143)

################################## mesh part ##################################
mesh = RectangleMesh(Point(0.0, 0.0), Point(15.0E-3, 15.0E-3), 150, 150)
# File('mesh_orig.pvd') << mesh

refinement_cycles = 5
for _ in range(refinement_cycles):
    refine_cells = MeshFunction("bool", mesh, 2)
    refine_cells.set_all(False)
    for cell in cells(mesh):
        mp = cell.midpoint()
        if mp.x() > 4.8e-3 and mp.x() < 10.2e-3:
            if abs(mp.y() - 10.0e-3) < 0.2e-3:
                refine_cells[cell] = True
            elif abs(mp.y() - 5.0e-3) < 0.2e-3:
                refine_cells[cell] = True
        if mp.y() > 4.8e-3 and mp.y() < 10.2e-3:
    outdir + "rhop_nx" + str(nx) + ".pickle",
]
property_list = [0, 1]

conservation_data = outdir + "conservation_nx" + str(nx) + ".csv"
if comm.rank == 0:
    with open(conservation_data, "w") as write_file:
        writer = csv.writer(write_file)
        writer.writerow(["Time", "Total mass", "Mass conservation"])

# Compute num steps till completion
num_steps = np.rint(Tend / float(dt))

# Generate mesh
mesh = RectangleMesh.create([Point(xmin, ymin), Point(xmax, ymax)],
                            [nx, nx], CellType.Type.triangle)

output_field = XDMFFile(mesh.mpi_comm(), outdir + "psi_h" + "_nx" + str(nx) + ".xdmf")

# Velocity and initial condition
V = VectorFunctionSpace(mesh, "CG", 1)
uh = Function(V)
uh.assign(Expression((ux, vy), degree=1))

psi0_expression = SineHump(center=[0.5, 0.5], U=[float(ux), float(vy)],
                           time=0.0, degree=6)

# Generate particles
    outdir + "psi_h_nx" + str(nx) + ".xdmf")

# Velocity and initial condition
V = VectorFunctionSpace(mesh, 'DG', 3)
uh = Function(V)
uh.assign(Expression(('-Uh*x[1]', 'Uh*x[0]'), Uh=Uh, degree=3))

psi0_expression = GaussianPulse(center=(xc, yc), sigma=float(sigma),
                                U=[Uh, Uh], time=0., height=1., degree=3)

# Generate particles
x = RandomCircle(Point(x0, y0), r).generate([pres, pres])
s = np.zeros((len(x), 1), dtype=np.float_)

# Initialize particles with position x and scalar property s on the mesh
p = particles(x, [s], mesh)
property_idx = 1  # Scalar quantity is stored at slot 1

# Initialize advection class, use RK3 scheme
ap = advect_rk3(p, V, uh, 'open')

# Define the variational (projection) problem
W_e = FiniteElement("DG", mesh.ufl_cell(), k)
T_e = FiniteElement("DG", mesh.ufl_cell(), 0)
Wbar_e = FiniteElement("DGT", mesh.ufl_cell(), k)

W = FunctionSpace(mesh, W_e)
    RectangleMesh, SpatialCoordinate, TestFunction, TrialFunction, solve)
from dolfin.io import XDMFFile
from ufl import ds, dx, grad, inner

# We begin by defining a mesh of the domain and a finite element
# function space :math:`V` relative to this mesh. Since the domain is the
# unit square, we can use a built-in mesh. In order to create a mesh
# consisting of 32 x 32 squares with each square divided into two
# triangles, we do as follows ::

# Create mesh and define function space
mesh = RectangleMesh.create(
    MPI.comm_world,
    [Point(0, 0)._cpp_object, Point(1, 1)._cpp_object], [32, 32],
    CellType.Type.triangle, dolfin.cpp.mesh.GhostMode.none)
V = FunctionSpace(mesh, ("Lagrange", 1))

cmap = dolfin.fem.create_coordinate_map(mesh.ufl_domain())
mesh.geometry.coord_mapping = cmap

# The second argument to :py:class:`FunctionSpace
# <dolfin.functions.functionspace.FunctionSpace>` specifies the finite
# element family and the polynomial degree. Thus, in this case, our space
# ``V`` consists of first-order, continuous Lagrange finite element
# functions (or in other words, continuous piecewise linear polynomials).
#
# Next, we want to consider the Dirichlet boundary condition. A simple
# Python function, returning a boolean, can be used to define the
def initMesh(self, n):
    self.mesh = RectangleMesh(Point(self.pmin, self.Imin),
                              Point(self.pmax, self.Imax), n, n)
    basis = VectorSpaceBasis(nullspace_basis)
    basis.orthonormalize()

    _x = [basis[i] for i in range(6)]
    nsp = PETSc.NullSpace()
    nsp.create(_x)
    return nsp


# Load mesh from file
# mesh = Mesh(MPI.comm_world)
# XDMFFile(MPI.comm_world, "../pulley.xdmf").read(mesh)

# mesh = UnitCubeMesh(2, 2, 2)

mesh = BoxMesh.create(
    MPI.comm_world,
    [Point(0, 0, 0)._cpp_object, Point(2, 1, 1)._cpp_object], [12, 12, 12],
    CellType.Type.tetrahedron, dolfin.cpp.mesh.GhostMode.none)

cmap = dolfin.fem.create_coordinate_map(mesh.ufl_domain())
mesh.geometry.coord_mapping = cmap


# Function to mark inner surface of pulley
# def inner_surface(x, on_boundary):
#     r = 3.75 - x[2]*0.17
#     return (x[0]*x[0] + x[1]*x[1]) < r*r and on_boundary


def boundary(x, on_boundary):
    return np.logical_or(x[:, 0] < np.finfo(float).eps,
                         x[:, 0] > 1.0 - np.finfo(float).eps)
def spherical_shell(dim, radii, boundary_ids, n_refinements=0):
    assert isinstance(dim, int)
    assert dim == 2 or dim == 3

    assert isinstance(radii, (list, tuple)) and len(radii) == 2
    ri, ro = radii
    assert isinstance(ri, float) and ri > 0.
    assert isinstance(ro, float) and ro > 0.
    assert ri < ro

    assert isinstance(boundary_ids, (list, tuple)) and len(boundary_ids) == 2
    inner_boundary_id, outer_boundary_id = boundary_ids
    assert isinstance(inner_boundary_id, int) and inner_boundary_id >= 0
    assert isinstance(outer_boundary_id, int) and outer_boundary_id >= 0
    assert inner_boundary_id != outer_boundary_id

    assert isinstance(n_refinements, int) and n_refinements >= 0

    # mesh generation
    from dolfin import Point
    if dim == 2:
        center = Point(0., 0.)
    elif dim == 3:
        center = Point(0., 0., 0.)

    from mshr import Sphere, Circle, generate_mesh
    if dim == 2:
        domain = Circle(center, ro) - Circle(center, ri)
        mesh = generate_mesh(domain, 75)
    elif dim == 3:
        domain = Sphere(center, ro) - Sphere(center, ri)
        mesh = generate_mesh(domain, 15)

    # mesh refinement
    from dolfin import refine
    for i in range(n_refinements):
        mesh = refine(mesh)

    # subdomains for boundaries
    from dolfin import MeshFunctionSizet
    facet_marker = MeshFunctionSizet(mesh, mesh.topology().dim() - 1)
    facet_marker.set_all(0)

    # size of the smallest element
    hmin = mesh.hmin()

    # inner circle boundary
    class InnerCircle(SubDomain):
        def inside(self, x, on_boundary):
            # tolerance: half the length of the smallest element
            tol = hmin / 2.
            from math import sqrt
            if dim == 2:
                result = abs(sqrt(x[0]**2 + x[1]**2) - ri) < tol
            elif dim == 3:
                result = abs(sqrt(x[0]**2 + x[1]**2 + x[2]**2) - ri) < tol
            return result and on_boundary

    # outer circle boundary
    class OuterCircle(SubDomain):
        def inside(self, x, on_boundary):
            # tolerance: half the length of the smallest element
            tol = hmin / 2.
            from math import sqrt
            if dim == 2:
                result = abs(sqrt(x[0]**2 + x[1]**2) - ro) < tol
            elif dim == 3:
                result = abs(sqrt(x[0]**2 + x[1]**2 + x[2]**2) - ro) < tol
            return result and on_boundary

    # mark boundaries
    gamma_inner = InnerCircle()
    gamma_inner.mark(facet_marker, inner_boundary_id)
    gamma_outer = OuterCircle()
    gamma_outer.mark(facet_marker, outer_boundary_id)

    return mesh, facet_marker
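# Usage sketch: a 2d annulus with inner radius 0.5 and outer radius 1.0; the
# marker ids 1 and 2 are arbitrary example values, not prescribed by the helper.
if __name__ == "__main__":
    mesh, facet_marker = spherical_shell(2, (0.5, 1.0), (1, 2), n_refinements=0)
    print("annulus mesh:", mesh.num_cells(), "cells")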
pres = 2200
res = 'high'
dt = Constant(5.e-4)
store_step = 200

mu = 1e-2
theta_p = .5
theta_L = Constant(1.0)

probe_radius = 0.01
probe1_y = 0.003
probe2_y = 0.015
probe3_y = 0.03
probe4_y = 0.08

probe1_loc = Point(xmax - 1e-10, probe1_y)
probe2_loc = Point(xmax - 1e-10, probe2_y)
probe3_loc = Point(xmax - 1e-10, probe3_y)
probe4_loc = Point(xmax - 1e-10, probe4_y)

# Specify body force
f = Constant((0, -9.81))

geometry = {
    'xmin': xmin_rho1,
    'xmax': xmax_rho1,
    'ymin': ymin_rho1,
    'ymax': ymax_rho1
}

rho1 = Constant(1000.)
def run_with_params(Tb, mu_value, k_s, path): run_time_init = clock() temperature_vals = [27.0 + 273, Tb + 273, 1300.0 + 273, 1305.0 + 273] temp_prof = TemperatureProfile(temperature_vals) mu_a = mu_value # this was taken from the Blankenbach paper, can change Ep = b / temp_prof.delta mu_bot = exp(-Ep * (temp_prof.bottom * temp_prof.delta - 1573.0) + cc) * mu_a Ra = rho_0 * alpha * g * temp_prof.delta * h**3 / (kappa_0 * mu_a) w0 = rho_0 * alpha * g * temp_prof.delta * h**2 / mu_a tau = h / w0 p0 = mu_a * w0 / h log(mu_a, mu_bot, Ra, w0, p0) vslipx = 1.6e-09 / w0 vslip = Constant((vslipx, 0.0)) # Non-dimensional noslip = Constant((0.0, 0.0)) time_step = 3.0E11 / tau / 10.0 dt = Constant(time_step) tEnd = 3.0E15 / tau / 5.0 # Non-dimensionalising times mesh = RectangleMesh(Point(0.0, 0.0), Point(mesh_width, mesh_height), nx, ny) pbc = PeriodicBoundary() W = VectorFunctionSpace(mesh, 'CG', 2, constrained_domain=pbc) S = FunctionSpace(mesh, 'CG', 1, constrained_domain=pbc) WSSS = MixedFunctionSpace([W, S, S, S]) # WSSS -> W u = Function(WSSS) # Instead of TrialFunctions, we use split(u) for our non-linear problem v, p, T, Tf = split(u) v_t, p_t, T_t, Tf_t = TestFunctions(WSSS) T0 = interpolate(temp_prof, S) FluidTemp = Expression('max(T0, 1.031)', T0=T0) muExp = Expression( 'exp(-Ep * (T_val * dTemp - 1573.0) + cc * x[1] / mesh_height)', Ep=Ep, dTemp=temp_prof.delta, cc=cc, mesh_height=mesh_height, T_val=T0) Tf0 = interpolate(temp_prof, S) mu = Function(S) v0 = Function(W) v_theta = (1.0 - theta) * v0 + theta * v T_theta = (1.0 - theta) * T0 + theta * T Tf_theta = (1.0 - theta) * Tf0 + theta * Tf r_v = (inner(sym(grad(v_t)), 2.0 * mu * sym(grad(v))) - div(v_t) * p - T * v_t[1]) * dx r_p = p_t * div(v) * dx heat_transfer = k_s * (Tf_theta - T_theta) * dt r_T = (T_t * ((T - T0) + dt * inner(v_theta, grad(T_theta))) + (dt / Ra) * inner(grad(T_t), grad(T_theta)) - T_t * heat_transfer) * dx # yvec = Constant((0.0, 1.0)) # rhosolid = rho_0 * (1.0 - alpha * (T_theta * temp_prof.delta - 1573.0)) # deltarho = rhosolid - rhomelt # v_f = v_theta - darcy * (grad(p) * p0 / h - deltarho * yvec * g) / w0 v_melt = Function(W) yvec = Constant((0.0, 1.0)) # TODO: inner -> dot, take out Tf_t r_Tf = (Tf_t * ((Tf - Tf0) + dt * dot(v_melt, grad(Tf_theta))) + Tf_t * heat_transfer) * dx r = r_v + r_p + r_T + r_Tf bcv0 = DirichletBC(WSSS.sub(0), noslip, top) bcv1 = DirichletBC(WSSS.sub(0), vslip, bottom) bcp0 = DirichletBC(WSSS.sub(1), Constant(0.0), bottom) bct0 = DirichletBC(WSSS.sub(2), Constant(temp_prof.surface), top) bct1 = DirichletBC(WSSS.sub(2), Constant(temp_prof.bottom), bottom) bctf1 = DirichletBC(WSSS.sub(3), Constant(temp_prof.bottom), bottom) bcs = [bcv0, bcv1, bcp0, bct0, bct1, bctf1] t = 0 count = 0 files = DefaultDictByKey(partial(create_xdmf, path)) while t < tEnd: mu.interpolate(muExp) rhosolid = rho_0 * (1.0 - alpha * (T0 * temp_prof.delta - 1573.0)) deltarho = rhosolid - rhomelt assign( v_melt, project(v0 - darcy * (grad(p) * p0 / h - deltarho * yvec * g) / w0, W)) # use nP after to avoid projection? 
# pdb.set_trace() solve(r == 0, u, bcs) nV, nP, nT, nTf = u.split() if count % output_every == 0: time_left(count, tEnd / time_step, run_time_init) # TODO: Make sure all writes are to the same function for each time step files['T_fluid'].write(nTf) files['p'].write(nP) files['v_solid'].write(nV) files['T_solid'].write(nT) files['mu'].write(mu) files['v_melt'].write(v_melt) files['gradp'].write(project(grad(nP), W)) files['rho'].write(project(rhosolid, S)) files['Tf_grad'].write(project(grad(Tf), W)) files['advect'].write(project(dt * dot(v_melt, grad(nTf)))) files['ht'].write(project(heat_transfer, S)) assign(T0, nT) assign(v0, nV) assign(Tf0, nTf) t += time_step count += 1 log('Case mu=%g, Tb=%g complete. Run time = %g s' % (mu_a, Tb, clock() - run_time_init))
def test_eim_approximation_08(expression_type, basis_generation): """ The aim of this script is to test that DEIM correctly handles collapsed subspaces. This is a prototype of the restricted operators required by the right-hand side of a supremizer solve. * EIM: not applicable. * DEIM: define a test function on a collapsed subspace (while, in case of rank 2 forms, the trial is defined on the full space), and integrate. """ class MockProblem(ParametrizedProblem): def __init__(self, V, **kwargs): ParametrizedProblem.__init__(self, "") self.V = V def name(self): return "MockProblem_08_" + expression_type + "_" + basis_generation class ParametrizedFunctionApproximation(EIMApproximation): def __init__(self, V, expression_type, basis_generation): self.V = V # Parametrized function to be interpolated mock_problem = MockProblem(V) f1 = ParametrizedExpression( mock_problem, "1/sqrt(pow(x[0]-mu[0], 2) + pow(x[1]-mu[1], 2) + 0.01)", mu=(-1., -1.), element=V.sub(1).ufl_element()) # folder_prefix = os.path.join("test_eim_approximation_08_tempdir", expression_type, basis_generation) assert expression_type in ("Vector", "Matrix") if expression_type == "Vector": q = TestFunction(V.sub(1).collapse()) form = f1 * q * dx # Call Parent constructor EIMApproximation.__init__( self, mock_problem, ParametrizedTensorFactory(form), folder_prefix, basis_generation) elif expression_type == "Matrix": up = TrialFunction(V) q = TestFunction(V.sub(1).collapse()) (u, p) = split(up) form = f1 * q * div(u) * dx # Call Parent constructor EIMApproximation.__init__( self, mock_problem, ParametrizedTensorFactory(form), folder_prefix, basis_generation) else: # impossible to arrive here anyway thanks to the assert raise AssertionError("Invalid expression_type") # 1. Create the mesh for this test mesh = RectangleMesh(Point(0.1, 0.1), Point(0.9, 0.9), 20, 20) # 2. Create Finite Element space (Lagrange P1) element_0 = VectorElement("Lagrange", mesh.ufl_cell(), 2) element_1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1) element = MixedElement(element_0, element_1) V = FunctionSpace(mesh, element) # 3. Allocate an object of the ParametrizedFunctionApproximation class parametrized_function_approximation = ParametrizedFunctionApproximation(V, expression_type, basis_generation) mu_range = [(-1., -0.01), (-1., -0.01)] parametrized_function_approximation.set_mu_range(mu_range) # 4. Prepare reduction with EIM parametrized_function_reduction_method = EIMApproximationReductionMethod(parametrized_function_approximation) parametrized_function_reduction_method.set_Nmax(20) parametrized_function_reduction_method.set_tolerance(0.) # 5. Perform the offline phase parametrized_function_reduction_method.initialize_training_set(100, sampling=EquispacedDistribution()) reduced_parametrized_function_approximation = parametrized_function_reduction_method.offline() # 6. Perform an online solve online_mu = (-1., -1.) reduced_parametrized_function_approximation.set_mu(online_mu) reduced_parametrized_function_approximation.solve() # 7. Perform an error analysis parametrized_function_reduction_method.initialize_testing_set(100) parametrized_function_reduction_method.error_analysis()
def test_eim_approximation_05(expression_type, basis_generation): """ This test is combination of tests 01-03. The aim of this script is to test that integration correctly handles several parametrized expressions. * EIM: not applicable. * DEIM: several parametrized expressions are combined when defining forms. """ class MockProblem(ParametrizedProblem): def __init__(self, V, **kwargs): ParametrizedProblem.__init__(self, "") self.V = V def name(self): return "MockProblem_05_" + expression_type + "_" + basis_generation class ParametrizedFunctionApproximation(EIMApproximation): def __init__(self, V, expression_type, basis_generation): self.V = V # Parametrized function to be interpolated mock_problem = MockProblem(V) f1 = ParametrizedExpression( mock_problem, "1/sqrt(pow(x[0]-mu[0], 2) + pow(x[1]-mu[1], 2) + 0.01)", mu=(-1., -1.), element=V.sub(1).ufl_element()) f2 = ParametrizedExpression( mock_problem, "exp( - 2*pow(x[0]-mu[0], 2) - 2*pow(x[1]-mu[1], 2) )", mu=(-1., -1.), element=V.sub(1).ufl_element()) f3 = ParametrizedExpression( mock_problem, "(1-x[0])*cos(3*pi*(pi+mu[1])*(1+x[1]))*exp(-(pi+mu[0])*(1+x[0]))", mu=(-1., -1.), element=V.sub(1).ufl_element()) # folder_prefix = os.path.join("test_eim_approximation_05_tempdir", expression_type, basis_generation) assert expression_type in ("Vector", "Matrix") if expression_type == "Vector": vq = TestFunction(V) (v, q) = split(vq) form = f1 * v[0] * dx + f2 * v[1] * dx + f3 * q * dx # Call Parent constructor EIMApproximation.__init__(self, mock_problem, ParametrizedTensorFactory(form), folder_prefix, basis_generation) elif expression_type == "Matrix": up = TrialFunction(V) vq = TestFunction(V) (u, p) = split(up) (v, q) = split(vq) form = f1 * inner(grad(u), grad(v)) * dx + f2 * p * div( v) * dx + f3 * q * div(u) * dx # Call Parent constructor EIMApproximation.__init__(self, mock_problem, ParametrizedTensorFactory(form), folder_prefix, basis_generation) else: # impossible to arrive here anyway thanks to the assert raise AssertionError("Invalid expression_type") # 1. Create the mesh for this test mesh = RectangleMesh(Point(0.1, 0.1), Point(0.9, 0.9), 20, 20) # 2. Create Finite Element space (Lagrange P1) element_0 = VectorElement("Lagrange", mesh.ufl_cell(), 2) element_1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1) element = MixedElement(element_0, element_1) V = FunctionSpace(mesh, element) # 3. Allocate an object of the ParametrizedFunctionApproximation class parametrized_function_approximation = ParametrizedFunctionApproximation( V, expression_type, basis_generation) mu_range = [(-1., -0.01), (-1., -0.01)] parametrized_function_approximation.set_mu_range(mu_range) # 4. Prepare reduction with EIM parametrized_function_reduction_method = EIMApproximationReductionMethod( parametrized_function_approximation) parametrized_function_reduction_method.set_Nmax(20) parametrized_function_reduction_method.set_tolerance(0.) # 5. Perform the offline phase parametrized_function_reduction_method.initialize_training_set( 100, sampling=EquispacedDistribution()) reduced_parametrized_function_approximation = parametrized_function_reduction_method.offline( ) # 6. Perform an online solve online_mu = (-1., -1.) reduced_parametrized_function_approximation.set_mu(online_mu) reduced_parametrized_function_approximation.solve() # 7. Perform an error analysis parametrized_function_reduction_method.initialize_testing_set(100) parametrized_function_reduction_method.error_analysis()
from dolfin import UnitSquareMesh, FunctionSpace, TestFunction, TrialFunction,\
    Constant, Expression, assemble, dx, Point, PointSource, plot, interactive,\
    inner, nabla_grad, Function, solve, MPI, mpi_comm_world
import numpy as np
from fenicstools.sourceterms import PointSources

mycomm = mpi_comm_world()
myrank = MPI.rank(mycomm)

mesh = UnitSquareMesh(2, 2)
V = FunctionSpace(mesh, 'Lagrange', 1)
trial = TrialFunction(V)
test = TestFunction(V)

f0 = Constant('0')
L0 = f0 * test * dx
b = assemble(L0)
P = Point(0.1, 0.5)
delta = PointSource(V, P, 1.0)
delta.apply(b)

myown = PointSources(V, [[0.1, 0.5], [0.9, 0.5]])

print('p{}: max(PointSource)={}, max(PointSources[0])={}, max(PointSources[1])={}'.format(
    myrank, max(abs(b.array())), max(abs(myown[0].array())), max(abs(myown[1].array()))))
def get(self):
    """Built-in mesh."""
    x = self.pad(self.values)
    return BoxMesh(Point(-x[0] / 2.0, -x[1] / 2.0, -x[2] / 2.0),
                   Point(x[0] / 2.0, x[1] / 2.0, x[2] / 2.0),
                   x[3], x[4], x[5])