def _assemble(self):
    # Collect vertex coordinates as a structured array so we can sort
    # lexicographically by (y, x).
    coordinates = np.zeros(self.mesh.num_vertices(),
                           dtype=[('x', float), ('y', float)])
    for i, vertex in enumerate(df.vertices(self.mesh)):
        coordinates['x'][i] = vertex.x(0)
        coordinates['y'][i] = vertex.x(1)

    self._Ny = len(np.unique(coordinates['y']))
    self._Nx = len(np.unique(coordinates['x']))

    coordinates = np.sort(coordinates, order=['y', 'x'])
    X = coordinates['x'].reshape(self._Ny, self._Nx)
    Y = coordinates['y'].reshape(self._Ny, self._Nx)
    self._X = np.flipud(X)
    self._Y = np.flipud(Y)

    # Check that the mesh is uniform in each direction.
    T = np.diff(X, axis=1)
    self._dx = T[0, 0]
    assert np.all(np.abs(T - self._dx) < 1e-12)
    T = np.diff(Y, axis=0)
    self._dy = T[0, 0]
    assert np.all(np.abs(T - self._dy) < 1e-12)

    # Each pixel of the (Ny-1) x (Nx-1) image averages the two triangular
    # cells whose midpoints fall inside it (assumes one DOF per cell).
    Interpolator = np.zeros(((self._Ny - 1) * (self._Nx - 1), self.V.dim()))
    for i, cell in enumerate(df.cells(self.mesh)):
        x = cell.midpoint().x()
        y = cell.midpoint().y()
        cx = int(x // self._dx)
        cy = int(y // self._dy)
        cy = (self._Ny - 2) - cy  # flip row index to image convention
        # Row-major pixel index; the row stride is the number of pixel
        # columns, Nx - 1 (not Ny - 1, which only works for square meshes).
        pixel_id = cy * (self._Nx - 1) + cx
        Interpolator[pixel_id, i] = 0.5

    # Scatter each pixel value back to the two cells it was averaged from.
    ReverseInterpolator = np.zeros(
        (self.V.dim(), (self._Ny - 1) * (self._Nx - 1)))
    for i, row in enumerate(Interpolator):
        ind = np.where(row)[0]
        ReverseInterpolator[ind[0], i] = 1
        ReverseInterpolator[ind[1], i] = 1

    self.Interpolator = Interpolator
    self.ReverseInterpolator = ReverseInterpolator

    a, b = np.where(self.Interpolator != 0)
    self._DofToPixelPermutator = torch.tensor(b, dtype=torch.long)
    a, b = self.ReverseInterpolator.nonzero()
    self._PixelToDofPermutator = torch.tensor(b, dtype=torch.long)

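# Usage sketch for the assembled operators. Assumed context (none of these
# names appear above): `fem` is an instance of the owning class after
# _assemble has run, and `u` is a dolfin Function on fem.V with one DOF per
# cell (a DG0 space), matching the cell-index-as-column convention above.
u_pixels = fem.Interpolator @ u.vector().get_local()  # cell DOFs -> pixels
image = u_pixels.reshape(fem._Ny - 1, fem._Nx - 1)    # 2D pixel image
u_cells = fem.ReverseInterpolator @ u_pixels          # pixels -> cell DOFs
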
def refine_near_left_boundary(mesh, cycles):
    """ Refine the mesh near the left boundary.

    The usual approach of using SubDomain and EdgeFunction doesn't appear to
    work in 1D, so instead loop through the cells of the mesh and set the
    markers manually.
    """
    for _ in range(cycles):
        cell_markers = fenics.CellFunction("bool", mesh)
        cell_markers.set_all(False)
        for cell in fenics.cells(mesh):
            found_left_boundary = False
            for vertex in fenics.vertices(cell):
                if fenics.near(vertex.x(0), 0.):
                    found_left_boundary = True
            if found_left_boundary:
                cell_markers[cell] = True
                break  # There should only be one such cell per cycle.
        mesh = fenics.refine(mesh, cell_markers)
    return mesh

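# Example (a sketch): three cycles of refinement near x = 0 on a unit
# interval, roughly halving the leftmost cell each cycle.
coarse = fenics.UnitIntervalMesh(10)
refined = refine_near_left_boundary(coarse, cycles=3)
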
def refine_initial_mesh(self):
    """ Locally refine near the hot boundary """
    for _ in range(self.initial_hot_boundary_refinement_cycles):
        cell_markers = fenics.MeshFunction("bool", self.mesh,
                                           self.mesh.topology().dim(), False)
        cell_markers.set_all(False)
        for cell in fenics.cells(self.mesh):
            found_left_boundary = False
            for vertex in fenics.vertices(cell):
                if fenics.near(vertex.x(0), 0.):
                    found_left_boundary = True
                    break
            if found_left_boundary:
                cell_markers[cell] = True
                break  # There should only be one such cell in 1D.

        # fenics.refine returns a new Mesh, so this rebinds self.mesh;
        # anything still holding the old mesh refers to the unrefined one.
        self.mesh = fenics.refine(self.mesh, cell_markers)

def initial_mesh(self):
    self.initial_hot_wall_refinement_cycles = 8
    mesh = self.coarse_mesh()
    for _ in range(self.initial_hot_wall_refinement_cycles):
        cell_markers = fenics.MeshFunction("bool", mesh,
                                           mesh.topology().dim(), False)
        cell_markers.set_all(False)
        for cell in fenics.cells(mesh):
            found_left_boundary = False
            for vertex in fenics.vertices(cell):
                if fenics.near(vertex.x(0), 0.):
                    found_left_boundary = True
                    break
            if found_left_boundary:
                cell_markers[cell] = True
                break  # There should only be one such cell in 1D.
        mesh = fenics.refine(mesh, cell_markers)
    return mesh

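# Sketch of the expected call pattern. `sim` is an assumed instance of the
# owning class, whose coarse_mesh() might return e.g. a small interval mesh.
sim_mesh = sim.initial_mesh()  # eight refinement cycles toward x = 0
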
def refine_initial_mesh(self):
    """ Replace 2D refinement method with 3D method.

    Perhaps one could make an n-dimensional method.
    """
    for _ in range(self.initial_hot_wall_refinement_cycles):
        cell_markers = fenics.MeshFunction("bool", self.mesh,
                                           self.mesh.topology().dim(), False)
        for cell in fenics.cells(self.mesh):
            found_left_boundary = False
            for vertex in fenics.vertices(cell):
                if fenics.near(vertex.x(0), 0.):
                    found_left_boundary = True
                    break
            if found_left_boundary:
                cell_markers[cell] = True
        self.mesh = fenics.refine(self.mesh, cell_markers)

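# Sketch for the 3D variant. Assumed setup (names not defined above): the
# owning object carries a box mesh and a cycle count before refinement runs.
sim3d.mesh = fenics.BoxMesh(fenics.Point(0., 0., 0.),
                            fenics.Point(1., 1., 1.), 4, 4, 4)
sim3d.initial_hot_wall_refinement_cycles = 2
sim3d.refine_initial_mesh()  # marks every cell touching the x = 0 wall
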
def integrate_field(self, fn_spec, specific, fn_main, r=20, val=0.0):
    """
    Assimilate a field with filename <fn_spec> from DataInput object
    <specific> into this DataInput's field with filename <fn_main>. The
    parameter <val> should be set to the specific dataset's value for
    undefined regions; the default is 0.0. <r> is a parameter used to
    eliminate border artifacts from interpolation; increase this value to
    eliminate edge noise.
    """
    print("::: integrating %s field from %s :::" % (fn_spec, specific.name))

    # Get the dofmap to map from mesh vertex indices to function indices:
    df = self.func_space.dofmap()
    dfmap = df.vertex_to_dof_map(self.mesh)

    unew = self.get_projection(fn_main)       # existing dataset projection
    uocom = unew.compute_vertex_values()      # mesh-indexed main vertex values

    uspec = specific.get_projection(fn_spec)  # specific dataset projection
    uscom = uspec.compute_vertex_values()     # mesh-indexed spec vertex values

    d = float64(specific.data[fn_spec])       # original matlab spec dataset

    # Get arrays of x- and y-values for the specific domain:
    xs = specific.x
    ys = specific.y
    nx = specific.nx
    ny = specific.ny

    for v in vertices(self.mesh):
        # Mesh vertex x,y coordinates:
        i = v.index()
        p = v.point()
        x = p.x()
        y = p.y()

        # Indices of the closest datapoint in the specific dataset's x and y
        # domains:
        idx = abs(xs - x).argmin()
        idy = abs(ys - y).argmin()

        # Data value of the closest point, and the square of side 2r around
        # the point in question:
        dv = d[idy, idx]
        db = d[max(0, idy - r): min(ny, idy + r),
               max(0, idx - r): min(nx, idx + r)]

        # If the vertex is in the domain of the specific dataset, and the
        # value of the dataset at this point is above <val>, set the array
        # value of the main field to this new specific region's value.
        if dv > val:
            # If the point is not near an edge of the valid region, use the
            # specific region's projected value; otherwise use the raw
            # nearest-neighbour value:
            if all(db > val):
                uocom[i] = uscom[i]
            else:
                uocom[i] = dv

    # Set the values of the projected original dataset equal to the
    # assimilated dataset:
    unew.vector().set_local(uocom[dfmap])
    return unew

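# Hypothetical call (all names illustrative): assimilate a 'thickness' field
# from a regional DataInput `d_spec` into this object's 'H' field, treating
# values <= 0.0 as undefined and using a 20-point border guard.
H_new = d_main.integrate_field('thickness', d_spec, 'H', r=20, val=0.0)
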
def meshReader(dictMesh):
    '''
    Converts a JSON mesh to a fenics mesh and applies the boundary sets.

    input: dict:: dictMesh, the mesh from the database in the form of a
        python dictionary
    output: dolfin.cpp.mesh:: feMesh, mesh defined as a fenics object
    '''
    # Convert the dict to an object with attribute access.
    mesh = namedtuple("mesh", dictMesh.keys())(*dictMesh.values())
    nodes = np.array(mesh.nodes)
    cells = np.array(mesh.connectivity, dtype=np.uintp)

    feMesh = fn.Mesh()
    editor = fn.MeshEditor()
    # Cell type, topological, and geometrical dimensions, i.e. 2, 3 for a 3D
    # surface mesh. Cell types available: point, interval, triangle,
    # quadrilateral, tetrahedron, hexahedron.
    editor.open(feMesh, "triangle", 2, 3)
    editor.init_vertices(len(mesh.nodes))
    editor.init_cells(len(mesh.connectivity))
    for i, n in enumerate(nodes):
        editor.add_vertex(i, n)
    for i, n in enumerate(cells):
        editor.add_cell(i, n - 1)  # connectivity is 1-based in the database
    editor.close()

    # Construct face sets.
    faceSets = fn.MeshFunction('size_t', feMesh, 2)
    for face, faceCells in mesh.faces.items():
        for index in faceCells:
            faceSets.set_value(index, int(face), feMesh)

    # Construct edge sets by matching each mesh edge's vertex pair against
    # the named edge lists from the database.
    edgeSets = fn.MeshFunction('size_t', feMesh, 1)
    meshEdges = {}
    for edge in fn.edges(feMesh):
        meshEdges[edge.index()] = [vert.index() for vert in fn.vertices(edge)]
    for edge, nodes in meshEdges.items():
        for edgeName, edgeList in mesh.edges.items():
            if nodes in edgeList:
                edgeSets.set_value(edge, int(edgeName), feMesh)

    # Construct point sets.
    pointSets = fn.MeshFunction('size_t', feMesh, 0)
    for pointName, point in mesh.points.items():
        pointSets.set_value(point, int(pointName), feMesh)

    return dict(feMesh=feMesh, faceSets=faceSets, edgeSets=edgeSets,
                pointSets=pointSets)

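# Sketch: the smallest dict meshReader accepts, a single triangle embedded in
# 3D (connectivity is 1-based, matching the `n - 1` shift above; the set
# names and indices here are illustrative).
tiny = {
    'nodes': [[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]],
    'connectivity': [[1, 2, 3]],
    'faces': {'1': [0]},   # face set "1" holds cell 0
    'edges': {},           # no named edge sets
    'points': {'1': 0},    # point set "1" at vertex 0
}
out = meshReader(tiny)
assert out['feMesh'].num_cells() == 1
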
def solve_poisson_with_fem(lightweight=False):
    # Create mesh and define function space
    mesh = fs.UnitSquareMesh(8, 8)
    V = fs.FunctionSpace(mesh, 'P', 1)

    # Define boundary condition
    u_code = 'x[0] + 2*x[1] + 1'
    u_D = fs.Expression(u_code, degree=2)

    def boundary(x, on_boundary):
        return on_boundary

    bc = fs.DirichletBC(V, u_D, boundary)

    # Define variational problem
    u = fs.Function(V)  # Note: not TrialFunction, since the problem is nonlinear!
    v = fs.TestFunction(V)
    f_code = '-10*x[0] - 20*x[1] - 10'
    f = fs.Expression(f_code, degree=2)
    # q(u) is the nonlinear coefficient, assumed defined at module level.
    F = q(u) * fs.dot(fs.grad(u), fs.grad(v)) * fs.dx - f * v * fs.dx

    # Compute solution
    fs.solve(F == 0, u, bc)

    # Restore a flat numpy array, one value per mesh vertex.
    image1d = np.empty((mesh.num_vertices(),), dtype=float)
    for vert in fs.vertices(mesh):
        image1d[vert.index()] = u(*mesh.coordinates()[vert.index()])

    if not lightweight:
        # Compute maximum error at vertices. This computation illustrates
        # an alternative to using compute_vertex_values as in poisson.py.
        u_e = fs.interpolate(u_D, V)
        error_max = np.abs(u_e.vector().get_local()
                           - u.vector().get_local()).max()
        print('error_max = ', error_max)

        # Plot solution
        fs.plot(u)
        plt.show()
        save_contour(image1d, 1.0, 1.0, 'poisson')

    return image1d

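# solve_poisson_with_fem assumes a module-level nonlinear coefficient q(u)
# (the name is resolved at call time, so defining it here is fine). With the
# manufactured solution u = 1 + x + 2y and f = -10*(1 + x + 2y) above, a
# consistent choice is the classic FEniCS-tutorial coefficient:
def q(u):
    """Nonlinear coefficient in -div(q(u) grad u) = f."""
    return 1 + u**2
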
def get_nearest(self, fn):
    """
    Returns a dolfin Function object with values given by interpolated
    nearest-neighbor data <fn>.
    """
    # FIXME: get this to work with a change of projection.
    # Get the dofmap to map from mesh vertex indices to function indices:
    df = self.func_space.dofmap()
    dfmap = df.vertex_to_dof_map(self.mesh)

    unew = Function(self.func_space)  # empty function to be filled in
    uocom = unew.vector().array()     # mesh-indexed vertex values

    d = float64(self.data[fn])        # original matlab spec dataset

    # Get arrays of x- and y-values for the domain:
    xs = self.x
    ys = self.y

    for v in vertices(self.mesh):
        # Mesh vertex x,y coordinates:
        i = v.index()
        p = v.point()
        x = p.x()
        y = p.y()

        # Indices of the closest datapoint in the dataset's x and y domains:
        idx = abs(xs - x).argmin()
        idy = abs(ys - y).argmin()

        # Data value of the closest point, thresholded to 1.0 if positive:
        dv = d[idy, idx]
        if dv > 0:
            dv = 1.0
        uocom[i] = dv

    # Set the values of the empty function's vertices to the data values:
    unew.vector().set_local(uocom[dfmap])
    return unew

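# Hypothetical usage (`din` stands for a DataInput-like object exposing
# func_space, mesh, data, x, and y, as the method assumes):
mask = din.get_nearest('mask')  # dolfin Function: 1.0 where the nearest datum is positive
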
def sd_nodenode(mesh, V, u_n, De, nexp):
    """ SD node-to-node

    Flow routing from node-to-node based on the steepest route of descent.

    :param mesh: mesh object generated using mshr (fenics)
    :param V: finite element function space
    :param u_n: solution (trial function) for water flux
    :param De: dimensionless diffusion coefficient
    :param nexp: water flux exponent
    :return: q, the diffusion coefficient as a Function on V
    """
    # Get the global DOF coordinates.
    gdim = mesh.geometry().dim()
    if dolfin.dolfin_version() == '1.6.0':
        dofmap = V.dofmap()
        gc = dofmap.tabulate_all_coordinates(mesh).reshape((-1, gdim))
    else:
        gc = V.tabulate_dof_coordinates().reshape((-1, gdim))
    vtd = vertex_to_dof_map(V)

    # First get the elevation of each vertex.
    elevation = u_n.compute_vertex_values(mesh)

    # Loop to get the local flux: initialize each vertex's flux with the
    # distance from its neighbors.
    mesh.init(0, 1)
    flux = np.zeros(len(gc))
    neighbors = []
    for v in vertices(mesh):
        idx = v.index()

        # Get the local neighbourhood, removing the vertex's own index.
        neighborhood = [Edge(mesh, i).entities(0) for i in v.entities(1)]
        neighborhood = np.array(neighborhood).flatten()
        neighborhood = neighborhood[np.where(neighborhood != idx)[0]]
        neighbors.append(neighborhood)

        # Get this vertex's location.
        xh = v.x(0)
        yh = v.x(1)

        # Get the distance to each neighboring vertex.
        length = np.zeros(len(neighborhood))
        i = 0
        for vert in neighborhood:
            nidx = vtd[vert]
            xn = gc[nidx, 0]
            yn = gc[nidx, 1]
            length[i] = np.sqrt((xh - xn) * (xh - xn) + (yh - yn) * (yh - yn))
            flux[vert] = length[i]
            i += 1

    # Sort the vertices from top to bottom.
    sortedidx = np.argsort(-elevation)

    # Accumulate fluxes from top to bottom, passing each vertex's flux
    # entirely to its steepest downhill neighbor.
    for idx in sortedidx:
        neighborhood = neighbors[idx]
        weight = np.zeros(len(neighborhood))
        i = 0
        for vert in neighborhood:
            weight[i] = elevation[idx] - elevation[vert]
            # Downhill only.
            if weight[i] < 0:
                weight[i] = 0
            i += 1

        # Find the steepest slope.
        steepest = len(neighborhood) + 2
        if max(weight) > 0:
            steepest = np.argmax(weight)
        else:
            weight[:] = 0

        i = 0
        for vert in neighborhood:
            if i == steepest:
                weight[i] = 1
            else:
                weight[i] = 0
            flux[vert] = flux[vert] + flux[idx] * weight[i]
            i += 1

    # Calculate the diffusion coefficient.
    q0 = 1 + De * pow(flux, nexp)
    q = Function(V)
    q.vector()[:] = q0[dof_to_vertex_map(V)]
    return q

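# Runnable sketch (assumes this module's dolfin imports are in scope): route
# flow over a synthetic tilted surface so every interior vertex has a
# downhill neighbor, then recover the diffusion coefficient on V.
mesh = dolfin.UnitSquareMesh(16, 16)
V = dolfin.FunctionSpace(mesh, 'CG', 1)
u_n = dolfin.interpolate(dolfin.Expression('1.0 - x[0]', degree=1), V)
q_diff = sd_nodenode(mesh, V, u_n, De=1.0, nexp=2.0)
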
def solve_heat_with_fem(lightweight=False):
    T = 2.0             # final time
    num_steps = 100     # number of time steps
    dt = T / num_steps  # time step size
    alpha = 3           # parameter alpha
    beta = 1.2          # parameter beta

    # Create mesh and define function space
    nx = ny = 8
    mesh = fs.UnitSquareMesh(nx, ny)
    V = fs.FunctionSpace(mesh, 'P', 1)

    # Define boundary condition
    u_D = fs.Expression('1 + x[0]*x[0] + alpha*x[1]*x[1] + beta*t',
                        degree=2, alpha=alpha, beta=beta, t=0)

    def boundary(x, on_boundary):
        return on_boundary

    bc = fs.DirichletBC(V, u_D, boundary)

    # Define initial value
    u_n = fs.interpolate(u_D, V)
    # u_n = fs.project(u_D, V)

    # Define variational problem
    u = fs.TrialFunction(V)
    v = fs.TestFunction(V)
    f = fs.Constant(beta - 2 - 2 * alpha)
    F = u * v * fs.dx + dt * fs.dot(fs.grad(u), fs.grad(v)) * fs.dx \
        - (u_n + dt * f) * v * fs.dx
    a, L = fs.lhs(F), fs.rhs(F)

    # Time-stepping
    u = fs.Function(V)
    t = 0
    images1d = []
    for n in range(num_steps):
        # Update current time
        t += dt
        u_D.t = t

        # Compute solution
        fs.solve(a == L, u, bc)

        # Restore a flat numpy array, one value per mesh vertex.
        image1d = np.empty((mesh.num_vertices(),), dtype=float)
        for vert in fs.vertices(mesh):
            image1d[vert.index()] = u(*mesh.coordinates()[vert.index()])
        images1d.append(image1d)

        if not lightweight:
            # Compute error at vertices
            u_e = fs.interpolate(u_D, V)
            error = np.abs(u_e.vector().get_local()
                           - u.vector().get_local()).max()
            print('t = %.2f: error = %.3g' % (t, error))

        # Update previous solution
        u_n.assign(u)

    # Plotting
    if not lightweight:
        fs.plot(u)
        plt.show()
        save_dynamic_contours(images1d, 1.0, 1.0, 'heat2d')

    return images1d

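# Minimal driver sketch: run the time loop without plotting or file output
# and inspect the captured frames.
frames = solve_heat_with_fem(lightweight=True)
print(len(frames), 'frames of shape', frames[0].shape)  # 100 frames of (81,)
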