def build_mapping(self, S3, V3):
    """
    S3 is the vector function space of the 2d mesh;
    V3 is the vector function space of the corresponding 3d mesh.
    """
    vert_to_dof2 = df.vertex_to_dof_map(S3)
    dof_to_vert2 = df.dof_to_vertex_map(S3)
    vert_to_dof3 = df.vertex_to_dof_map(V3)
    dof_to_vert3 = df.dof_to_vertex_map(V3)

    map_2d_to_3d = np.zeros(V3.dim(), dtype=np.int32)

    n2d = S3.dim()
    for i in range(n2d):
        map_2d_to_3d[i] = vert_to_dof3[dof_to_vert2[i]]
        map_2d_to_3d[i + n2d] = vert_to_dof3[dof_to_vert2[i] + n2d]

    self.map_2d_to_3d = map_2d_to_3d

    map_3d_to_2d = np.zeros(V3.dim(), dtype=np.int32)
    for i in range(V3.dim()):
        map_3d_to_2d[i] = vert_to_dof2[dof_to_vert3[i] % n2d]
    self.map_3d_to_2d = map_3d_to_2d
def data_reordering(V):
    '''Reshaping/reordering data read from files'''
    # HDF5/VTK store 3d vectors and 3d tensors, so we need to chop the data;
    # we also reorder, as in 2017.2.0 only(?) vertex values are dumped.
    if V.ufl_element().value_shape() == ():
        dof2v = dof_to_vertex_map(V)
        reorder = lambda a: a[dof2v]
        return reorder

    Vi = V.sub(0).collapse()
    dof2v = dof_to_vertex_map(Vi)
    gdim = V.mesh().geometry().dim()
    # WARNING: below there are assumptions on component ordering
    # Vector
    if len(V.ufl_element().value_shape()) == 1:
        # Eliminate Z for vectors in 2d
        keep = [0, 1] if gdim == 2 else range(gdim)
        reorder = lambda a, keep=keep, dof2v=dof2v: (np.column_stack(
            [row[dof2v] for row in (a[:, keep]).T]).flatten())
        return reorder

    # And tensor
    if len(V.ufl_element().value_shape()) == 2:
        # Eliminate Z
        keep = [0, 1, 3, 4] if gdim == 2 else range(gdim**2)
        reorder = lambda a, keep=keep, dof2v=dof2v: (np.column_stack(
            [row[dof2v] for row in (a[:, keep]).T]).flatten())
        return reorder
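A minimal usage sketch for the scalar branch of data_reordering (assumes a serial run with legacy dolfin; the vertex-ordered array here stands in for data read from a file dump):

import numpy as np
from dolfin import UnitSquareMesh, FunctionSpace, Function, dof_to_vertex_map

mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, 'CG', 1)
reorder = data_reordering(V)  # scalar branch: a -> a[dof_to_vertex_map(V)]

vertex_values = mesh.coordinates()[:, 0]  # stand-in for file data: f = x at vertices
f = Function(V)
f.vector().set_local(reorder(vertex_values))  # now in dof order
assert abs(f(0.5, 0.5) - 0.5) < 1e-12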
def _assemble(self, dtype, device):
    if self._mode.lower() == 'manualinterpolation':
        Vf, Vc = self._physics['fom'].V, self._physics['rom'].V
        free_dofs = self._physics['fom'].free_dofs

        # Coordinates of the fine-space dofs, in dof order
        coords = Vf.mesh().coordinates()
        dvmap = df.dof_to_vertex_map(Vf)
        points = np.zeros((Vf.dim(), self._physics['fom'].tdim))
        for i, mapped_dof in enumerate(dvmap):
            points[i, :] = coords[mapped_dof, :]
        if self._only_free_dofs:
            points = points[free_dofs, :]

        W = AssembleBasisFunctionMatrix(Vc, points, ReturnType='scipy')
        self._W = Convert_ScipySparse_PyTorchSparse(W.T, dtype=dtype, device=device)
        # ugly fix: cast to dense tensor
        self._W = self._W.to_dense()
    else:
        raise ValueError('Interpolation mode unknown')
def transfer_vertex_function(mesh_fine, mesh_foo_coarse, output=VertexFunction):
    '''
    Assuming that mesh_fine is created by meshing around the mesh underlying
    mesh_foo_coarse, this function interpolates the data from mesh_foo_coarse.
    '''
    assert isinstance(mesh_fine, EmbeddedMesh)
    mesh_fine = mesh_fine.mesh  # FIXME: remove when EmbeddedMesh <: Mesh
    assert mesh_fine.topology().dim() == 1 and mesh_fine.geometry().dim() > 1

    mesh = mesh_foo_coarse.mesh()
    assert mesh.topology().dim() == 1 and mesh.geometry().dim() > 1

    # The strategy here is to interpolate into a CG1 function on mesh_fine
    # and then turn it into a vertex function. NOTE: consider CG1 as function
    # for it is easier to get e.g. DG0 (midpoint values) out of it.
    Vf = FunctionSpace(mesh_fine, 'CG', 1)

    assert mesh_foo_coarse.cpp_value_type() == 'double'
    assert mesh_foo_coarse.dim() == 0
    Vc = FunctionSpace(mesh, 'CG', 1)
    fc = Function(Vc)
    # Fill the data
    fc.vector().set_local(mesh_foo_coarse.array()[dof_to_vertex_map(Vc)])
    fc.vector().apply('insert')

    ff = interpolate(fc, Vf)
    if output == Function:
        return ff

    # Come back to vertex function
    vertex_foo = VertexFunction('double', mesh_fine, 0.0)
    vertex_foo.set_values(ff.vector().array()[vertex_to_dof_map(Vf)])

    return vertex_foo
def __init__(self, V, geo, name, f):
    self.V = V

    # Get dofs lying in subdomain
    dofmap = V.dofmap()
    tup = geo.physicaldomain(name)
    sub = geo.subdomains
    mesh = geo.mesh

    subdofs = set()
    for i, cell in enumerate(dolfin.cells(mesh)):
        if sub[cell] in tup:
            celldofs = dofmap.cell_dofs(i)
            subdofs.update(celldofs)

    subdofs = np.array(list(subdofs), dtype="intc")
    d2v = dolfin.dof_to_vertex_map(V)
    co = mesh.coordinates()

    # Create function with desired values
    # (could also be implemented with Expression.eval_cell like pwconst)
    bc_f = dolfin.Function(V)
    for dof in subdofs:
        x = co[d2v[dof]]
        bc_f.vector()[dof] = f(x)

    self.bc_f = bc_f
    self.dof_set = subdofs
def test_invariance():
    """Asserts invariance of cost functional w.r.t. translation and rotation"""
    centers, J = _get_macadam()
    n = 1
    problem = PiecewiseEllipse(centers, J.copy(), n)

    alpha = problem.alpha.copy()
    c0 = problem.cost_min(alpha)

    # translate
    alpha += 1.23
    c1 = problem.cost_min(alpha)
    assert abs(c0 - c1) < 1.0e-12 * c0

    d2v = dof_to_vertex_map(problem.V)
    v2d = vertex_to_dof_map(problem.V)

    alpha = problem.alpha.copy()
    alpha = alpha.reshape(2, -1).T
    coords = alpha[v2d]

    # rotate
    theta = 0.35 * numpy.pi
    sin = numpy.sin(theta)
    cos = numpy.cos(theta)
    R = numpy.array([[cos, -sin], [sin, cos]])
    rcoords = numpy.dot(R, coords.T)

    # map back to alpha
    alpha = numpy.concatenate(rcoords[:, d2v])
    c2 = problem.cost_min(alpha)
    assert abs(c0 - c2) < 1.0e-12 * c0
    return
def img2funvec(img: np.array) -> np.array:
    """Takes a 2D array and returns an array suited to assign to piecewise
    linear approximation on a triangle grid.

    Each pixel corresponds to one vertex of a triangle mesh.

    Args:
        img (np.array): The input array of shape (m, n).

    Returns:
        np.array: A vector of shape (m * n,).
    """
    m, n = img.shape

    # Create mesh.
    mesh = UnitSquareMesh(m - 1, n - 1)
    xm = mesh.coordinates().reshape((-1, 2))

    # Create function space.
    V = create_function_space(mesh, 'default')

    # Evaluate function at vertices.
    hx, hy = 1 / (m - 1), 1 / (n - 1)
    x = np.array(np.round(xm[:, 0] / hx), dtype=int)
    y = np.array(np.round(xm[:, 1] / hy), dtype=int)
    fv = img[x, y]

    # Map pixel values to vertices.
    d2v = dof_to_vertex_map(V)
    return fv[d2v]
def imgseq2funvec(img: np.array) -> np.array:
    """Takes a 3D array and returns an array suited to assign to piecewise
    linear approximation on a triangle grid.

    Each pixel corresponds to one vertex of a triangle mesh.

    Args:
        img (np.array): The input array.

    Returns:
        np.array: A vector.
    """
    # Create mesh.
    [m, n, o] = img.shape
    mesh = UnitCubeMesh(m - 1, n - 1, o - 1)
    mc = mesh.coordinates().reshape((-1, 3))

    # Evaluate function at vertices.
    hx, hy, hz = 1. / (m - 1), 1. / (n - 1), 1. / (o - 1)
    x = np.array(np.round(mc[:, 0] / hx), dtype=int)
    y = np.array(np.round(mc[:, 1] / hy), dtype=int)
    z = np.array(np.round(mc[:, 2] / hz), dtype=int)
    fv = img[x, y, z]

    # Create function space.
    V = FunctionSpace(mesh, 'CG', 1)

    # Map pixel values to vertices.
    d2v = dof_to_vertex_map(V)
    return fv[d2v]
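A small round-trip sketch of the idiom shared by img2funvec/imgseq2funvec. It assumes a serial run with legacy dolfin and that create_function_space above returns a plain CG1 space, so an externally built CG1 space on an identical mesh has the same dof layout:

import numpy as np
from dolfin import UnitSquareMesh, FunctionSpace, Function

img = np.arange(12.0).reshape(4, 3)  # toy 4x3 image
v = img2funvec(img)                  # pixel values, reordered to dof order

mesh = UnitSquareMesh(3, 2)          # the same (m-1) x (n-1) grid as inside img2funvec
V = FunctionSpace(mesh, 'CG', 1)
f = Function(V)
f.vector()[:] = v                    # f is now the P1 interpolant of img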
def p1_trace(fenics_space):
    """
    Return the P1 trace operator.

    This function returns a pair (space, trace_matrix), where space is a
    BEM++ space object and trace_matrix is the corresponding matrix that
    maps the coefficients of a FEniCS function to its boundary trace
    coefficients in the corresponding BEM++ space.
    """
    import dolfin  # pylint: disable=import-error
    from bempp.api.fenics_interface.coupling import fenics_space_info
    from bempp.api import function_space, grid_from_element_data
    import numpy as np

    family, degree = fenics_space_info(fenics_space)
    if not (family == 'Lagrange' and degree == 1):
        raise ValueError("fenics_space must be a p1 Lagrange space")

    mesh = fenics_space.mesh()

    boundary_mesh = dolfin.BoundaryMesh(mesh, "exterior", False)
    bm_nodes = boundary_mesh.entity_map(0).array().astype(np.int64)
    bm_coords = boundary_mesh.coordinates()
    bm_cells = boundary_mesh.cells()
    bempp_boundary_grid = grid_from_element_data(bm_coords.transpose(),
                                                 bm_cells.transpose())

    # First get trace space
    space = function_space(bempp_boundary_grid, "P", 1)

    # Now compute the mapping from FEniCS dofs to BEM++ dofs.
    # First the BEM++ dofs from the boundary vertices
    from scipy.sparse import coo_matrix
    bempp_dofs_from_b_vertices = p1_dof_to_vertex_matrix(space).transpose()

    # Now FEniCS vertices to boundary dofs
    b_vertices_from_vertices = coo_matrix(
        (np.ones(len(bm_nodes)), (np.arange(len(bm_nodes)), bm_nodes)),
        shape=(len(bm_nodes), mesh.num_vertices()),
        dtype='float64').tocsc()

    # Finally FEniCS dofs to vertices.
    vertices_from_fenics_dofs = coo_matrix(
        (np.ones(mesh.num_vertices()),
         (dolfin.dof_to_vertex_map(fenics_space), np.arange(mesh.num_vertices()))),
        shape=(mesh.num_vertices(), mesh.num_vertices()),
        dtype='float64').tocsc()

    # Get trace matrix by multiplication
    trace_matrix = bempp_dofs_from_b_vertices * \
        b_vertices_from_vertices * vertices_from_fenics_dofs

    # Now return everything
    return space, trace_matrix
def p1_trace(fenics_space):
    """
    Return the P1 trace operator.

    This function returns a pair (space, trace_matrix), where space is a
    Bempp space object and trace_matrix is the corresponding matrix that
    maps the coefficients of a FEniCS function to its boundary trace
    coefficients in the corresponding Bempp space.
    """
    import dolfin
    import bempp.api
    from scipy.sparse import coo_matrix
    import numpy as np

    family, degree = fenics_space_info(fenics_space)
    if not (family == "Lagrange" and degree == 1):
        raise ValueError("fenics_space must be a p1 Lagrange space")

    mesh = fenics_space.mesh()

    boundary_mesh = dolfin.BoundaryMesh(mesh, "exterior", False)
    bm_nodes = boundary_mesh.entity_map(0).array().astype(np.int64)
    bm_coords = boundary_mesh.coordinates()
    bm_cells = boundary_mesh.cells()
    bempp_boundary_grid = bempp.api.Grid(bm_coords.transpose(),
                                         bm_cells.transpose())

    # First get trace space
    space = bempp.api.function_space(bempp_boundary_grid, "P", 1)

    # Now FEniCS vertices to boundary dofs
    b_vertices_from_vertices = coo_matrix(
        (np.ones(len(bm_nodes)), (np.arange(len(bm_nodes)), bm_nodes)),
        shape=(len(bm_nodes), mesh.num_vertices()),
        dtype="float64",
    ).tocsc()

    # Finally FEniCS dofs to vertices.
    vertices_from_fenics_dofs = coo_matrix(
        (np.ones(mesh.num_vertices()),
         (dolfin.dof_to_vertex_map(fenics_space), np.arange(mesh.num_vertices()))),
        shape=(mesh.num_vertices(), mesh.num_vertices()),
        dtype="float64",
    ).tocsc()

    # Get trace matrix by multiplication
    trace_matrix = b_vertices_from_vertices @ vertices_from_fenics_dofs

    # Now return everything
    return space, trace_matrix
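A hedged usage sketch for p1_trace (assumes Bempp is installed alongside FEniCS; bempp.api.GridFunction is the usual way to wrap a coefficient vector on a Bempp space):

import dolfin
import bempp.api

mesh = dolfin.UnitCubeMesh(4, 4, 4)
fenics_space = dolfin.FunctionSpace(mesh, 'Lagrange', 1)
trace_space, trace_matrix = p1_trace(fenics_space)

u = dolfin.interpolate(dolfin.Expression('x[0]', degree=1), fenics_space)
# boundary trace of u as a Bempp grid function
u_trace = bempp.api.GridFunction(
    trace_space, coefficients=trace_matrix @ u.vector().get_local())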
def to_P1_function(f):
    '''Vertex function -> P1'''
    assert f.dim() == 0, f.dim()

    V = df.FunctionSpace(f.mesh(), 'CG', 1)
    g = df.Function(V)
    g_values = g.vector().get_local()
    g_values[:] = f.array()[df.dof_to_vertex_map(V)]
    g.vector().set_local(g_values)

    return g
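A quick check of to_P1_function (legacy dolfin; a vertex-dimension MeshFunction plays the role of f):

import dolfin as df

mesh = df.UnitSquareMesh(4, 4)
f = df.MeshFunction('double', mesh, 0)    # one value per vertex
f.array()[:] = mesh.coordinates()[:, 1]   # g(x, y) = y at the vertices
g = to_P1_function(f)
assert abs(g(0.25, 0.75) - 0.75) < 1e-12  # the P1 interpolant reproduces it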
def vertices(self):
    """
    Returns the vertices of the surface of the object.

    This information might be useful for calculating the current density
    into the object surface.
    """
    coords = self.V.mesh().coordinates()
    d2v = df.dof_to_vertex_map(self.V)
    vertex_indices = list(set(d2v[self.dofs]))
    return coords[vertex_indices]
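The same idiom as a standalone helper, for contexts without the surrounding class (dofs_to_vertex_coords is a hypothetical name; the DirichletBC is only used here to harvest boundary dofs for the demo):

import dolfin as df
import numpy as np

def dofs_to_vertex_coords(V, dofs):
    '''Unique vertex coordinates behind a set of CG1 dofs.'''
    coords = V.mesh().coordinates()
    d2v = df.dof_to_vertex_map(V)
    return coords[list(set(d2v[dofs]))]

mesh = df.UnitSquareMesh(3, 3)
V = df.FunctionSpace(mesh, 'CG', 1)
bdry_dofs = np.array(list(df.DirichletBC(V, 0.0, 'on_boundary')
                          .get_boundary_values().keys()), dtype=np.intc)
print(dofs_to_vertex_coords(V, bdry_dofs))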
def p1_trace(fenics_space):
    import dolfin
    from .coupling import fenics_space_info
    from bempp import function_space, grid_from_element_data
    import numpy as np

    family, degree = fenics_space_info(fenics_space)
    if not (family == 'Lagrange' and degree == 1):
        raise ValueError("fenics_space must be a p1 Lagrange space")

    mesh = fenics_space.mesh()

    bm = dolfin.BoundaryMesh(mesh, "exterior", False)
    bm_nodes = bm.entity_map(0).array().astype(np.int64)
    bm_coords = bm.coordinates()
    bm_cells = bm.cells()
    bempp_boundary_grid = grid_from_element_data(bm_coords.transpose(),
                                                 bm_cells.transpose())

    # First get trace space
    space = function_space(bempp_boundary_grid, "P", 1)

    # Now compute the mapping from BEM++ dofs to FEniCS dofs.
    # First the BEM++ dofs to the boundary vertices
    from ._lagrange_coupling import p1_vertex_map
    from scipy.sparse import coo_matrix
    vertex_to_dof_map = p1_vertex_map(space)
    vertex_indices = np.arange(space.global_dof_count)
    data = np.ones(space.global_dof_count)
    bempp_dofs_from_b_vertices = coo_matrix(
        (data, (vertex_to_dof_map, vertex_indices)), dtype='float64').tocsr()

    # Now the boundary vertices to all the vertices
    b_vertices_from_vertices = coo_matrix(
        (np.ones(len(bm_nodes)), (np.arange(len(bm_nodes)), bm_nodes)),
        shape=(len(bm_nodes), mesh.num_vertices()),
        dtype='float64').tocsr()

    # Finally the vertices to FEniCS dofs
    vertices_from_fenics_dofs = coo_matrix(
        (np.ones(mesh.num_vertices()),
         (dolfin.dof_to_vertex_map(fenics_space), np.arange(mesh.num_vertices()))),
        shape=(mesh.num_vertices(), mesh.num_vertices()),
        dtype='float64').tocsr()

    # Get trace matrix by multiplication
    trace_matrix = bempp_dofs_from_b_vertices * \
        b_vertices_from_vertices * vertices_from_fenics_dofs

    # Now return everything
    return (space, trace_matrix)
def submesh_dof_to_vertex(Vsubmesh, species_index, index=None):
    num_species = Vsubmesh.num_sub_spaces()
    if num_species == 0:
        num_species = 1
    num_dofs = int(len(Vsubmesh.dofmap().dofs()) / num_species)
    if index is None:
        index = range(num_dofs)

    mapping = d.dof_to_vertex_map(Vsubmesh)
    # keep only the dofs of the requested species, then recover vertex indices
    mapping = mapping[range(species_index, len(mapping), num_species)] / num_species
    mapping = [int(x) for x in mapping]

    return [mapping[x] for x in index]
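A toy call (legacy dolfin, serial; a two-component CG1 vector space stands in for a two-species submesh space):

import dolfin as d

mesh = d.UnitSquareMesh(2, 2)
Vsub = d.VectorFunctionSpace(mesh, 'CG', 1, dim=2)  # "two species"
# vertex indices behind the first three dofs of species 1:
print(submesh_dof_to_vertex(Vsub, 1, index=[0, 1, 2]))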
def plot_deltas_2D(vertex_vector, mesh_path, save_dir):
    mesh = Mesh()
    with XDMFFile(mesh_path) as f:
        f.read(mesh)
    V = FunctionSpace(mesh, 'CG', 1)
    value = Function(V)
    value.vector()[:] = vertex_vector[dof_to_vertex_map(V)]

    delta_S = project(value.dx(0), V)
    delta_file = XDMFFile(save_dir)
    delta_file.write_checkpoint(delta_S, 'delta_S', 0,
                                XDMFFile.Encoding.HDF5, True)

    delta_v = project(value.dx(1), V)
    delta_file.write_checkpoint(delta_v, 'delta_v', 0,
                                XDMFFile.Encoding.HDF5, True)
def normalise_dofmap(self):
    """Overwrite own field values with normalised ones."""
    dofmap = df.vertex_to_dof_map(self.functionspace)
    reordered = self.f.vector().array()[dofmap]  # [x1, y1, z1, ..., xn, yn, zn]
    vectors = reordered.reshape((3, -1))  # [[x1, y1, z1], ..., [xn, yn, zn]]
    lengths = np.sqrt(np.add.reduce(vectors * vectors, axis=1))
    normalised = np.dot(vectors.T, np.diag(1 / lengths)).T.ravel()
    vertexmap = df.dof_to_vertex_map(self.functionspace)
    normalised_original_order = normalised[vertexmap]
    self.from_array(normalised_original_order)
def vertex_to_DG0_foo(data):
    '''Convert vertex function to DG0 function on the same mesh'''
    mesh = data.mesh()
    # Build CG1 representation
    P1 = FunctionSpace(mesh, 'CG', 1)
    f = Function(P1)
    f.vector().set_local(
        np.array(data.array()[dof_to_vertex_map(P1)], dtype=float))
    f.vector().apply('insert')
    # Then get midpoint values by interpolation
    DG0 = FunctionSpace(mesh, 'DG', 0)
    f = interpolate(f, DG0)

    return f
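Example use of vertex_to_DG0_foo (legacy dolfin): vertex data becomes cell-midpoint data.

from dolfin import UnitSquareMesh, MeshFunction

mesh = UnitSquareMesh(4, 4)
data = MeshFunction('double', mesh, 0)       # one value per vertex
data.array()[:] = mesh.coordinates()[:, 0]   # f = x at the vertices
f0 = vertex_to_DG0_foo(data)                 # DG0: value of x at cell midpoints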
def read_function_group(self, key):
    group = self.file[key]
    if key in self.scalar_groups:
        space = dolfin.FunctionSpace(self.mesh, 'CG', 1)
    elif key in self.vector_groups:
        space = dolfin.VectorFunctionSpace(self.mesh, 'CG', 1)
    index_map = dolfin.dof_to_vertex_map(space)

    functions = []
    ii = 0
    key = f'{ii}'
    while key in group:
        dset = group[key]
        functions.append(dolfin.Function(space, name='u'))
        functions[-1].vector().set_local(dset[:][index_map])
        ii += 1
        key = f'{ii}'
    return functions
def time_deriv(img: np.array) -> Function:
    # NOTE: mesh, V and the dimensions t, m, n are assumed to be defined at
    # module scope (a UnitCubeMesh(t-2, m-1, n-1) with a CG1 space V on it).
    # Evaluate function at vertices.
    mc = mesh.coordinates().reshape((-1, 3))
    hx, hy, hz = 1. / (t - 2), 1. / (m - 1), 1. / (n - 1)
    x = np.array(mc[:, 0] / hx, dtype=int)
    y = np.array(mc[:, 1] / hy, dtype=int)
    z = np.array(mc[:, 2] / hz, dtype=int)

    # Map pixel values to vertices.
    d2v = dof_to_vertex_map(V)

    # Compute derivative wrt. time.
    imgt = img[1:] - img[0:-1]
    ftv = imgt[x, y, z]

    # Create function.
    ft = Function(V)
    ft.vector()[:] = ftv[d2v]
    return ft
def retrieve_ensemble(bip, dir_name, f_name, ensbl_sz, max_iter,
                      img_out=False, whiten=False):
    f = df.HDF5File(bip.pde.mpi_comm, os.path.join(dir_name, f_name), "r")
    ensbl_f = df.Function(bip.prior.V)
    num_ensbls = max_iter * ensbl_sz
    eldeg = bip.prior.V.ufl_element().degree()
    if img_out:
        gdim = bip.prior.V.mesh().geometry().dim()
        imsz = bip.meshsz if hasattr(bip, 'meshsz') else (np.floor(
            (bip.prior.V.dim() / eldeg**2)**(1. / gdim)).astype('int'),) * gdim
        out_shape = (num_ensbls,) + imsz
    else:
        out_shape = (num_ensbls, bip.mesh.num_vertices())
    out = np.zeros(out_shape)
    prog = np.ceil(num_ensbls * (.1 + np.arange(0, 1, .1)))
    V_P1 = df.FunctionSpace(bip.mesh, 'Lagrange', 1)
    d2v = df.dof_to_vertex_map(V_P1)
    for n in range(max_iter):
        for j in range(ensbl_sz):
            # TRAIN is a module-level flag ('XY' or 'X')
            f.read(ensbl_f, 'iter{0}_ensbl{1}'.format(n + ('Y' not in TRAIN), j))
            s = n * ensbl_sz + j
            ensbl_v = ensbl_f.vector()
            if whiten:
                ensbl_v = bip.prior.u2v(ensbl_v)
            if img_out:
                out[s] = bip.vec2img(ensbl_v)  # convert to images
            else:
                # convert to P1 space (keep dof order) if necessary
                out[s] = ensbl_f.compute_vertex_values(
                    bip.mesh)[d2v] if eldeg > 1 else ensbl_v.get_local()
            if s + 1 in prog:
                print('{0:.0f}% ensembles have been retrieved.'.format(
                    float(s + 1) / num_ensbls * 100))
    f.close()
    return out
def dolfin_function2BoxField(dolfin_function, dolfin_mesh,
                             division=None, uniform_mesh=True):
    """
    Turn a DOLFIN P1 finite element field over a structured mesh into
    a BoxField object. (Mostly for ease of plotting with scitools.)
    Standard DOLFIN numbering numbers the nodes along the x[0] axis,
    then the x[1] axis, and so on.

    If the DOLFIN function employs elements of degree > 1, one should
    project or interpolate the field onto a field with elements of
    degree=1.
    """
    if dolfin_function.ufl_element().degree() != 1:
        raise TypeError("""\
The dolfin_function2BoxField function works with degree=1 elements
only. The DOLFIN function (dolfin_function) has finite elements of type
%s
i.e., the degree=%d != 1. Project or interpolate this function onto
a space of P1 elements, i.e.,

V2 = FunctionSpace(mesh, 'CG', 1)
u2 = project(u, V2)
# or
u2 = interpolate(u, V2)

""" % (str(dolfin_function.ufl_element()),
       dolfin_function.ufl_element().degree()))

    if dolfin.__version__[:3] == "1.0":
        nodal_values = dolfin_function.vector().array().copy()
    else:
        d2v = dolfin.dof_to_vertex_map(dolfin_function.function_space())
        nodal_values = dolfin_function.vector().array().copy()
        nodal_values[d2v] = dolfin_function.vector().array().copy()

    if uniform_mesh:
        grid = dolfin_mesh2UniformBoxGrid(dolfin_mesh, division)
    else:
        grid = dolfin_mesh2BoxGrid(dolfin_mesh, division)

    if nodal_values.size > grid.npoints:
        # vector field, treat each component separately
        ncomponents = int(nodal_values.size / grid.npoints)
        try:
            nodal_values.shape = (ncomponents, grid.npoints)
        except ValueError:
            raise ValueError(
                'Vector field (nodal_values) has length %d, there are %d '
                'grid points, and this does not match with %d components'
                % (nodal_values.size, grid.npoints, ncomponents))
        vector_field = [_rank12rankd_mesh(nodal_values[i, :].copy(), grid.shape)
                        for i in range(ncomponents)]
        nodal_values = array(vector_field)
        bf = BoxField(grid, name=dolfin_function.name(),
                      vector=ncomponents, values=nodal_values)
    else:
        try:
            nodal_values = _rank12rankd_mesh(nodal_values, grid.shape)
        except ValueError:
            raise ValueError(
                'DOLFIN function has vector of size %s while the provided '
                'mesh has %d points and shape %s'
                % (nodal_values.size, grid.npoints, grid.shape))
        bf = BoxField(grid, name=dolfin_function.name(),
                      vector=0, values=nodal_values)
    return bf
def solve_tr_dir__const_rheo(mesh_name, hol_cyl, deg_choice, T_in_expr,
                             T_inf_expr, HTC, T_old_v,
                             k_mesh, cp_mesh, rho_mesh,
                             k_mesh_old, cp_mesh_old, rho_mesh_old,
                             dt, time_v, theta, bool_plot, bool_solv,
                             savings_do, logger_f):
    '''
    mesh_name: a proper XML file.
    bool_plot: plots if bool_plot = 1.

    Solves a direct, transient heat conduction problem, and
    returns A_np, b_np, D_np, T_np, bool_ex, bool_in.

    A_np: stiffness matrix, ordered by vertices.
    b_np: integrated volumetric heat sources and surface heat fluxes,
        ordered by vertices. The surface heat fluxes come from the solution
        to the direct problem; hence, these terms will not be there
        in a real IHCP.
    D_np: integrated Laplacian of T, ordered by vertices. Option 2:
        the Laplacian of q is properly assembled from D_np.
        If do.dx(domain = hol_cyl) -> do.Measure('ds')[boundary_faces] and
        do.Measure('ds')[boundary_faces] -> something representative of Gamma,
        I would get option 1.
    T_np: solution to the direct heat conduction problem, ordered by vertices.
    bool_ex: boolean array declaring which vertices lie on the outer boundary.
    bool_in: boolean array indicating which vertices lie on the inner boundary.
    T_sol: solution to the direct heat conduction problem.
    deg_choice: degree in FunctionSpace.
    hol_cyl: mesh.
    '''
    V = do.FunctionSpace(hol_cyl, 'CG', deg_choice)

    if 'hollow' in mesh_name and 'cyl' in mesh_name:
        from hollow_cyl_inv_mesh import geo_fun as geo_fun_hollow_cyl
        geo_params_d = geo_fun_hollow_cyl()[1]
        # x_c and y_c are scalars here
    elif 'four' in mesh_name and 'cyl' in mesh_name:
        from four_hole_cyl_inv_mesh import geo_fun as geo_fun_four_hole_cyl
        geo_params_d = geo_fun_four_hole_cyl()[1]
        # x_c and y_c are arrays here
        x_c_l = [geo_params_d['x_0_{}'.format(itera)] for itera in range(4)]
        y_c_l = [geo_params_d['y_0_{}'.format(itera)] for itera in range(4)]
    elif 'one_hole_cir' in mesh_name:
        from one_hole_cir_adj_mesh import geo_fun as geo_fun_one_hole_cir
        geo_params_d = geo_fun_one_hole_cir()[1]
        x_c_l = [geo_params_d['x_0']]
        y_c_l = [geo_params_d['y_0']]
    elif 'reinh_cir' in mesh_name:
        from reinh_cir_adj_mesh import geo_fun as geo_fun_one_hole_cir
        geo_params_d = geo_fun_one_hole_cir()[1]
        x_c_l = [geo_params_d['x_0']]
        y_c_l = [geo_params_d['y_0']]
    elif 'small_circle' in mesh_name:
        from four_hole_small_cir_adj_mesh import geo_fun as geo_fun_four_hole_cir
        geo_params_d = geo_fun_four_hole_cir()[1]
        x_c_l = [geo_params_d['x_0_{}'.format(itera)] for itera in range(4)]
        y_c_l = [geo_params_d['y_0_{}'.format(itera)] for itera in range(4)]

    # center of the cylinder base
    x_c = geo_params_d['x_0']
    y_c = geo_params_d['y_0']
    R_in = geo_params_d['R_in']
    R_ex = geo_params_d['R_ex']

    # define variational problem
    T = do.TrialFunction(V)
    g = do.Function(V)
    v = do.TestFunction(V)
    T_old = do.Function(V)
    T_inf = do.Function(V)

    T_old.vector()[:] = T_old_v
    T_inf.vector()[:] = T_inf_expr

    # solution
    T_sol = do.Function(V)
    T_sol.vector()[:] = T_old_v

    # create boundary markers
    mark_all = 3
    mark_in = 4
    mark_ex = 5

    g_in = g_in_mesh(mesh_name, x_c_l, y_c_l, R_in)
    g_ex = g_ex_mesh(mesh_name, x_c, y_c, R_ex)

    in_boundary = do.AutoSubDomain(g_in)
    ex_boundary = do.AutoSubDomain(g_ex)

    # normal
    unitNormal = do.FacetNormal(hol_cyl)
    boundary_faces = do.MeshFunction('size_t', hol_cyl,
                                     hol_cyl.topology().dim() - 1)
    boundary_faces.set_all(mark_all)
    in_boundary.mark(boundary_faces, mark_in)
    ex_boundary.mark(boundary_faces, mark_ex)

    bc_in = do.DirichletBC(V, T_in_expr, boundary_faces, mark_in)
    # bc_ex = do.DirichletBC(V, T_ex_expr, boundary_faces, mark_ex)
    bcs = [bc_in]

    # Crank-Nicolson forms
    A = dt / 2. * k_mesh * do.dot(do.grad(T), do.grad(v)) * do.dx(domain=hol_cyl) + \
        rho_mesh * cp_mesh * T * v * do.dx(domain=hol_cyl)
    A_full = A + dt / 2. * HTC * T * v * do.ds(
        mark_ex, domain=hol_cyl, subdomain_data=boundary_faces)

    L = -dt / 2. * k_mesh_old * do.dot(do.grad(T_old), do.grad(v)) * \
        do.dx(domain=hol_cyl) + \
        rho_mesh_old * cp_mesh_old * T_old * v * do.dx(domain=hol_cyl) - \
        dt / 2. * HTC * T_old * v * do.ds(mark_ex, domain=hol_cyl,
                                          subdomain_data=boundary_faces) + \
        dt * HTC * T_inf * v * do.ds(mark_ex, domain=hol_cyl,
                                     subdomain_data=boundary_faces)

    # solver parameters: linear solvers from list_linear_solver_methods(),
    # preconditioners from do.list_krylov_solver_preconditioners()
    solver = do.KrylovSolver('gmres', 'ilu')
    do.info(solver.parameters, True)  # prints default values
    solver.parameters['relative_tolerance'] = 1e-16
    solver.parameters['maximum_iterations'] = 20000000
    solver.parameters['monitor_convergence'] = True  # on the screen
    # http://fenicsproject.org/qa/1124/is-there-a-way-to-set-the-inital-guess-in-the-krylov-solver
    # solver.parameters['nonzero_initial_guess'] = True
    solver.parameters['absolute_tolerance'] = 1e-15

    do.File(os.path.join(savings_do, '{}__markers.pvd'.format(
        mesh_name.split('.')[0]))) << boundary_faces

    if bool_plot:
        do.plot(boundary_faces, '3D mesh', title='boundary markers')

    # storage
    T_sol_d = {}
    g_d = {}

    if bool_solv == 1:
        xdmf_DHCP_T = do.File(os.path.join(savings_do, 'DHCP', 'T.pvd'))
        xdmf_DHCP_q = do.File(os.path.join(savings_do, 'DHCP', 'q.pvd'))

        for count_t_i, t_i in enumerate(time_v[1:]):
            # T_in_expr.ts = t_i
            # T_ex_expr.ts = t_i

            # storage
            T_sol_d[count_t_i] = do.Function(V)
            T_sol_d[count_t_i].vector()[:] = T_sol.vector().array()

            do.solve(A_full == L, T_sol, bcs)
            # TO BE UPDATED: rheology is not updated

            # updates L
            T_old.assign(T_sol)
            T_sol.rename('DHCP_T', 'temperature from DHCP')

            # write solution to file (ParaView format)
            xdmf_DHCP_T << (T_sol, t_i)

            # plot solution
            if bool_plot:
                do.plot(T_sol, title='T')  # , interactive = True)

            logger_f.warning('len(T) = {}'.format(len(T_sol.vector().array())))
            print('T: count_t = {}, min(T_DHCP) = {}'.format(
                count_t_i, min(T_sol_d[count_t_i].vector().array())))
            print('T: count_t = {}, max(T_DHCP) = {}'.format(
                count_t_i, max(T_sol_d[count_t_i].vector().array())), '\n')

            # save flux - required for solving IHCP
            # same result if do.ds(mark_ex, subdomain_data = boundary_faces)
            # instead of do.Measure('ds')[boundary_faces]
            # Langtangen, p. 37: either do.dot(do.nabla_grad(T), unitNormal)
            # or do.dot(unitNormal, do.grad(T))
            fluxT = do.project(
                -k_mesh * do.grad(T_sol),
                do.VectorFunctionSpace(hol_cyl, 'CG', deg_choice, dim=2))
            if bool_plot:
                do.plot(fluxT, title='flux at iteration = {}'.format(count_t_i))
            fluxT.rename('DHCP_flux', 'flux from DHCP')
            xdmf_DHCP_q << (fluxT, t_i)
            print('DHCP: iteration = {}'.format(count_t_i))

        count_t_i += 1
        # storage
        T_sol_d[count_t_i] = do.Function(V)
        T_sol_d[count_t_i].vector()[:] = T_sol.vector().array()

    for count_t_i, t_i in enumerate(time_v):
        # storage
        g_d[count_t_i] = do.Function(V)
        g_d[count_t_i].vector()[:] = g.vector().array()

    gdim = hol_cyl.geometry().dim()
    dofmap = V.dofmap()
    dofs = dofmap.dofs()

    # get coordinates as len(dofs) x gdim array
    dofs_x = V.tabulate_dof_coordinates().reshape((-1, gdim))

    # booleans corresponding to the outer boundary -> ints since they are
    # sent to root = 0
    bool_ex = 1. * np.array([g_ex(dof_x) for dof_x in dofs_x])
    # booleans corresponding to the inner boundary -> ints since they are
    # sent to root = 0
    bool_in = 1. * np.array([g_in(dof_x) for dof_x in dofs_x])

    T_np_ex = []
    T_np_in = []
    for i_coor, coor in enumerate(dofs_x):
        if g_ex(coor):
            T_np_ex += [T_sol.vector().array()[i_coor]]
        if g_in(coor):
            T_np_in += [T_sol.vector().array()[i_coor]]

    print('CHECK: mean(T) on the outer boundary = ', np.mean(np.array(T_np_ex)))
    print('CHECK: mean(T) on the inner boundary = ', np.mean(np.array(T_np_in)))
    print('CHECK: mean(HTC) = ', np.mean(do.project(HTC, V).vector().array()))

    # v2d = do.vertex_to_dof_map(V) orders by hol_cyl.coordinates()
    if deg_choice == 1:
        print('len(dof_to_vertex_map) = ', len(do.dof_to_vertex_map(V)))
    print('min(dofs) = ', min(dofs), ', max(dofs) = ', max(dofs))
    print('len(bool ex) = ', len(bool_ex))
    print('len(bool in) = ', len(bool_in))
    print('bool ex[:10] = ', repr(bool_ex[:10]))
    print('type(T) = ', type(T_sol.vector().array()))

    # first global results, then local results
    return A, L, g_d, \
        V, v, k_mesh, \
        mark_in, mark_ex, \
        boundary_faces, bool_ex, \
        R_in, R_ex, T_sol_d, deg_choice, hol_cyl, \
        unitNormal, dofs_x
def long_run_compare(self):
    mesh = UnitIntervalMesh(5)

    # FIXME: We need to make this run in parallel.
    if MPI.size(mesh.mpi_comm()) > 1:
        return

    Model = Tentusscher_2004_mcell
    tstop = 10
    ind_V = 0
    dt_ref = 0.1
    time_ref = np.linspace(0, tstop, int(tstop / dt_ref) + 1)
    dir_path = os.path.dirname(__file__)
    Vm_reference = np.fromfile(os.path.join(dir_path, "Vm_reference.npy"))
    params = Model.default_parameters()

    time = Constant(0.0)
    stim = Expression("(time >= stim_start) && (time < stim_start + stim_duration)"
                      " ? stim_amplitude : 0.0 ",
                      time=time, stim_amplitude=52.0,
                      stim_start=1.0, stim_duration=1.0, degree=1)

    # Initiate solver, with model and Scheme
    # (Scheme and dt_org are assumed to come from the enclosing test
    # parameterization.)
    if dolfin_adjoint:
        adj_reset()

    solver = self._setup_solver(Model, Scheme, mesh, time, stim, params)
    solver._pi_solver.parameters["newton_solver"]["relative_tolerance"] = 1e-8
    solver._pi_solver.parameters["newton_solver"]["maximum_iterations"] = 30
    solver._pi_solver.parameters["newton_solver"]["report"] = False

    scheme = solver._scheme
    (vs_, vs) = solver.solution_fields()
    vs.assign(vs_)

    dof_to_vertex_map_values = dof_to_vertex_map(vs.function_space())
    scheme.t().assign(0.0)

    vs_array = np.zeros(mesh.num_vertices() *
                        vs.function_space().dofmap().num_entity_dofs(0))
    vs_array[dof_to_vertex_map_values] = vs.vector().get_local()
    output = [vs_array[ind_V]]
    time_output = [0.0]
    dt = dt_org

    # Time step
    next_dt = max(min(tstop - float(scheme.t()), dt), 0.0)
    t0 = 0.0

    while next_dt > 0.0:
        # Step solver
        solver.step((t0, t0 + next_dt))
        vs_.assign(vs)

        # Collect output data
        vs_array[dof_to_vertex_map_values] = vs.vector().get_local()
        output.append(vs_array[ind_V])
        time_output.append(float(scheme.t()))

        # Next time step
        t0 += next_dt
        next_dt = max(min(tstop - float(scheme.t()), dt), 0.0)

    # Compare with solution from a CellML run using opencell
    assert_almost_equal(output[-1], Vm_reference[-1], abs_tol)

    output = np.array(output)
    time_output = np.array(time_output)

    output = np.interp(time_ref, time_output, output)

    value = np.sqrt(np.sum(
        ((Vm_reference - output) / Vm_reference)**2)) / len(Vm_reference)
    assert_almost_equal(value, 0.0, rel_tol)
def fun(mesh3d, npoints):
    '''A random curve starting close to (-1, -1, -1) and continuing inside'''
    import networkx as nx
    import random

    edge_f = df.MeshFunction('size_t', mesh3d, 1, 0)
    mesh3d.init(1, 0)

    # Init the graph
    G = nx.Graph()
    edge_indices = {tuple(sorted(e.entities(0).tolist())): e_index
                    for e_index, e in enumerate(df.edges(mesh3d))}
    G.add_edges_from(iter(edge_indices.keys()))

    # Let's find boundary vertices
    V = df.FunctionSpace(mesh3d, 'CG', 1)
    bdry = df.CompiledSubDomain(
        'near(std::max(std::max(std::abs(x[0]), std::abs(x[1])), std::abs(x[2])), 1, tol)',
        tol=TOL)
    bc = df.DirichletBC(V, df.Constant(0), bdry, 'pointwise')

    bc_vertices = set(
        df.dof_to_vertex_map(V)[list(bc.get_boundary_values().keys())])
    # Start at the boundary at (-1, -1, -1)
    X = mesh3d.coordinates()
    start = np.argmin(np.sum((X - np.array([-1, -1, -1]))**2, axis=1))

    # All vertices
    vertices = list(range(mesh3d.num_vertices()))
    first = None
    while npoints:
        # Pick the next vertex, inside
        while True:
            stop = random.choice(vertices)
            if start != stop and stop not in bc_vertices:
                break

        # The path is a shortest path between vertices
        path = nx.shortest_path(G, source=start, target=stop)

        # Here it can happen that the path will have surface points
        if first is None:
            # So we walk back (guaranteed to be in) until we hit the surface
            clean_path = []
            for p in reversed(path):
                clean_path.append(p)
                if p in bc_vertices:
                    print('Shifted start to', X[p])
                    first = p
                    break
            path = clean_path
            start = first
        # Start in, must end in and stay in
        else:
            if set(path) & bc_vertices:
                continue

        for v0, v1 in zip(path[:-1], path[1:]):
            edge = (v0, v1) if v0 < v1 else (v1, v0)
            edge_f[edge_indices[edge]] = 1

        start = stop
        npoints -= 1

    # df.File('x.pvd') << edge_f
    return edge_f, X[first]
def initialize_variables(self):
    r"""
    Initialize the model variables to default values. The variables
    defined here are:

    Various things :

    * ``self.element``    -- the finite element
    * ``self.top_dim``    -- the topological dimension
    * ``self.dofmap``     -- :class:`~dolfin.cpp.fem.DofMap` for converting
                             between vertex and nodal indices
    * ``self.h``          -- cell diameter formed by calling
                             :func:`~dolfin.functions.specialfunctions.CellDiameter`
                             with ``self.mesh``

    Grid velocity vector :math:`\mathbf{u}_i = [u\ v\ w]^{\intercal}`:

    * ``self.U_mag``      -- velocity vector magnitude
    * ``self.U3``         -- velocity vector
    * ``self.u``          -- :math:`x`-component of velocity vector
    * ``self.v``          -- :math:`y`-component of velocity vector
    * ``self.w``          -- :math:`z`-component of velocity vector

    Grid acceleration vector :math:`\mathbf{a}_i = [a_x\ a_y\ a_z]^{\intercal}`:

    * ``self.a_mag``      -- acceleration vector magnitude
    * ``self.a3``         -- acceleration vector
    * ``self.a_x``        -- :math:`x`-component of acceleration vector
    * ``self.a_y``        -- :math:`y`-component of acceleration vector
    * ``self.a_z``        -- :math:`z`-component of acceleration vector

    Grid internal force vector :math:`\mathbf{f}_i^{\mathrm{int}} = [f_x^{\mathrm{int}}\ f_y^{\mathrm{int}}\ f_z^{\mathrm{int}}]^{\intercal}`:

    * ``self.f_int_mag``  -- internal force vector magnitude
    * ``self.f_int``      -- internal force vector
    * ``self.f_int_x``    -- :math:`x`-component of internal force vector
    * ``self.f_int_y``    -- :math:`y`-component of internal force vector
    * ``self.f_int_z``    -- :math:`z`-component of internal force vector

    Grid mass :math:`m_i`:

    * ``self.m``          -- mass :math:`m_i`
    * ``self.m0``         -- initial mass :math:`m_i^0`
    """
    s = "::: initializing grid variables :::"
    print_text(s, cls=self.this)

    # the finite element used :
    self.element = self.Q.element()

    # topological dimension :
    self.top_dim = self.element.geometric_dimension()

    # map from vertices to nodes :
    self.dofmap = self.Q.dofmap()

    # for finding vertices sitting on boundary :
    self.d2v = dl.dof_to_vertex_map(self.Q)

    # list of arrays of vertices and bcs set by self.set_boundary_conditions() :
    self.bc_vrt = None
    self.bc_val = None

    # cell diameter :
    self.h = dl.project(dl.CellDiameter(self.mesh), self.Q)

    # cell volume :
    self.Ve = dl.project(dl.CellVolume(self.mesh), self.Q)

    # grid velocity :
    self.U_mag = dl.Function(self.Q, name='U_mag')
    self.U3 = dl.Function(self.Q3, name='U3')
    u, v, w = self.U3.split()
    u.rename('u', '')
    v.rename('v', '')
    w.rename('w', '')
    self.u = u
    self.v = v
    self.w = w

    # grid acceleration :
    self.a_mag = dl.Function(self.Q, name='a_mag')
    self.a3 = dl.Function(self.Q3, name='a3')
    a_x, a_y, a_z = self.a3.split()
    a_x.rename('a_x', '')
    a_y.rename('a_y', '')
    a_z.rename('a_z', '')
    self.a_x = a_x
    self.a_y = a_y
    self.a_z = a_z

    # grid internal force vector :
    self.f_int_mag = dl.Function(self.Q, name='f_int_mag')
    self.f_int = dl.Function(self.Q3, name='f_int')
    f_int_x, f_int_y, f_int_z = self.f_int.split()
    f_int_x.rename('f_int_x', '')
    f_int_y.rename('f_int_y', '')
    f_int_z.rename('f_int_z', '')
    self.f_int_x = f_int_x
    self.f_int_y = f_int_y
    self.f_int_z = f_int_z

    # grid mass :
    self.m = dl.Function(self.Q, name='m')
    self.m0 = dl.Function(self.Q, name='m0')

    # function assigners speed up assigning :
    self.assu = dl.FunctionAssigner(self.u.function_space(), self.Q)
    self.assv = dl.FunctionAssigner(self.v.function_space(), self.Q)
    self.assw = dl.FunctionAssigner(self.w.function_space(), self.Q)
    self.assa_x = dl.FunctionAssigner(self.a_x.function_space(), self.Q)
    self.assa_y = dl.FunctionAssigner(self.a_y.function_space(), self.Q)
    self.assa_z = dl.FunctionAssigner(self.a_z.function_space(), self.Q)
    self.assf_int_x = dl.FunctionAssigner(self.f_int_x.function_space(), self.Q)
    self.assf_int_y = dl.FunctionAssigner(self.f_int_y.function_space(), self.Q)
    self.assf_int_z = dl.FunctionAssigner(self.f_int_z.function_space(), self.Q)
    self.assm = dl.FunctionAssigner(self.m.function_space(), self.Q)

    # save the number of degrees of freedom :
    self.dofs = self.m.vector().size()
def scalar_laplacians(mesh):
    """
    Calculate the laplacians needed by fiberrule algorithms

    Arguments
    ---------
    mesh : dolfin.Mesh
        A dolfin mesh with marked boundaries:
        base = 10, rv = 20, lv = 30, epi = 40
        The base is assumed placed at x=0
    """
    if not isinstance(mesh, d.Mesh):
        raise TypeError("Expected a dolfin.Mesh as the mesh argument.")

    # Init connectivities
    mesh.init(2)
    facet_markers = d.MeshFunction("size_t", mesh, 2, mesh.domains())

    # Boundary markers, solutions and cases
    markers = dict(base=10, rv=20, lv=30, epi=40, apex=50)

    # Solver parameters
    solver_param = dict(solver_parameters=dict(
        preconditioner="ml_amg"
        if d.has_krylov_solver_preconditioner("ml_amg") else "default",
        linear_solver="gmres"))

    cases = ["rv", "lv", "epi"]
    boundaries = cases + ["base"]

    # Check that all boundary faces are marked
    num_boundary_facets = d.BoundaryMesh(mesh, "exterior").num_cells()
    if num_boundary_facets != sum(
            np.sum(facet_markers.array() == markers[boundary])
            for boundary in boundaries):
        d.error("Not all boundary faces are marked correctly. Make sure all "
                "boundary facets are marked as: base = 10, rv = 20, lv = 30, "
                "epi = 40.")

    # Coords and cells
    coords = mesh.coordinates()
    cells_info = mesh.cells()

    # Find apex by solving a laplacian with base solution = 0
    # Create Base variational problem
    V = d.FunctionSpace(mesh, "CG", 1)
    u = d.TrialFunction(V)
    v = d.TestFunction(V)
    a = d.dot(d.grad(u), d.grad(v)) * d.dx
    L = v * d.Constant(1) * d.dx

    DBC_10 = d.DirichletBC(V, 1, facet_markers, markers["base"], "topological")

    # Create solutions
    solutions = dict((what, d.Function(V)) for what in markers if what != "base")

    d.solve(a == L, solutions["apex"], DBC_10,
            solver_parameters={"linear_solver": "gmres"})

    apex_values = solutions["apex"].vector().array()
    apex_values[d.dof_to_vertex_map(V)] = solutions["apex"].vector().array()
    ind_apex_max = apex_values.argmax()
    apex_coord = coords[ind_apex_max, :]

    # Update rhs
    L = v * d.Constant(0) * d.dx

    d.info("  Apex coord: ({0}, {1}, {2})".format(*apex_coord))
    d.info("  Num coords: {0}".format(len(coords)))
    d.info("  Num cells: {0}".format(len(cells_info)))

    # Calculate volume
    volume = 0.0
    for cell in d.cells(mesh):
        volume += cell.volume()
    d.info("  Volume: {0}".format(volume))
    d.info("")

    # Cases
    # =====
    # 1) base: 1, apex: 0
    # 2) lv: 1, rv, epi: 0
    # 3) rv: 1, lv, epi: 0
    # 4) epi: 1, rv, lv: 0

    class ApexDomain(d.SubDomain):
        def inside(self, x, on_boundary):
            return d.near(x[0], apex_coord[0]) and \
                d.near(x[1], apex_coord[1]) and \
                d.near(x[2], apex_coord[2])

    apex_domain = ApexDomain()

    # Case 1: Poisson = 1
    DBC_11 = d.DirichletBC(V, 0, apex_domain, "pointwise")

    # Using Poisson (Poisson is assumed to be a module-level flag)
    if Poisson:
        d.solve(a == L, solutions["apex"], [DBC_10, DBC_11],
                solver_parameters={"linear_solver": "gmres"})
    # Using Eikonal equation
    else:
        Le = v * d.Constant(1) * d.dx
        d.solve(a == Le, solutions["apex"], DBC_11,
                solver_parameters={"linear_solver": "gmres"})

        # Create Eikonal problem
        eps = d.Constant(mesh.hmax() / 25)
        y = solutions["apex"]
        F = d.sqrt(d.inner(d.grad(y), d.grad(y))) * v * d.dx - \
            d.Constant(1) * v * d.dx + eps * d.inner(d.grad(y), d.grad(v)) * d.dx
        d.solve(F == 0, y, DBC_11,
                solver_parameters={"linear_solver": "lu",
                                   "newton_solver":
                                       {"relative_tolerance": 1e-5}})

    # Check that solutions of the three last cases all sum to 1.
    sol = solutions["apex"].vector().copy()
    sol[:] = 0.0

    # Iterate over the three different cases
    for case in cases:
        # Solve linear system
        bcs = [d.DirichletBC(V, 1 if what == case else 0,
                             facet_markers, markers[what], "topological")
               for what in cases]
def dolfin_fiberrules(mesh,
                      fiber_space=None,
                      fiber_rotation_epi=50,   # 50
                      fiber_rotation_endo=40,  # 40
                      sheet_rotation_epi=65,   # 65
                      sheet_rotation_endo=25,  # 25
                      alpha_noise=0.0,
                      beta_noise=0.0):
    """
    Create fiber, cross fibers and sheet directions

    Arguments
    ---------
    mesh : dolfin.Mesh
        A dolfin mesh with marked boundaries:
        base = 10, rv = 20, lv = 30, epi = 40
        The base is assumed placed at x=0
    fiber_space : dolfin.FunctionSpace (optional)
        Determines for what space the fibers should be calculated for.
    fiber_rotation_epi : float (optional)
        Fiber rotation angle on the epicardial surfaces.
    fiber_rotation_endo : float (optional)
        Fiber rotation angle on the endocardial surfaces.
    sheet_rotation_epi : float (optional)
        Sheet rotation angle on the epicardial surfaces.
    sheet_rotation_endo : float (optional)
        Sheet rotation angle on the endocardial surfaces.
    """
    if not isinstance(mesh, d.Mesh):
        raise TypeError("Expected a dolfin.Mesh as the mesh argument.")

    # Default fiber space is P1
    fiber_space = fiber_space or d.FunctionSpace(mesh, "P", 1)

    # Create scalar laplacian solutions
    d.info("Calculating scalar fields")
    scalar_solutions = scalar_laplacians(mesh)

    # Create gradients
    d.info("\nCalculating gradients")
    data = project_gradients(mesh, fiber_space, scalar_solutions)

    # Assign the fiber and sheet rotations
    data.fiber_rotation_epi = fiber_rotation_epi
    data.fiber_rotation_endo = fiber_rotation_endo
    data.sheet_rotation_epi = sheet_rotation_epi
    data.sheet_rotation_endo = sheet_rotation_endo

    # Check noise
    if np.isscalar(alpha_noise):
        alpha_noise = np.zeros(mesh.num_vertices(), dtype=float)
    if np.isscalar(beta_noise):
        beta_noise = np.zeros(mesh.num_vertices(), dtype=float)

    # Call the fiber sheet generation
    cpp.computeFiberSheetSystem(data,
                                alpha_noise[d.dof_to_vertex_map(fiber_space)],
                                beta_noise[d.dof_to_vertex_map(fiber_space)])

    # Create output Functions
    Vv = d.VectorFunctionSpace(mesh, fiber_space.ufl_element().family(),
                               fiber_space.ufl_element().degree())
    V = fiber_space

    fiber_sheet_tensor = data.fiber_sheet_tensor
    fiber_components = []
    scalar_size = fiber_sheet_tensor.size // 9
    indices = np.zeros(scalar_size * 3, dtype="L")
    indices[0::3] = np.arange(scalar_size, dtype="L") * 9      # x
    indices[1::3] = np.arange(scalar_size, dtype="L") * 9 + 3  # y
    indices[2::3] = np.arange(scalar_size, dtype="L") * 9 + 6  # z

    for ind, name in enumerate(["f0", "n0", "s0"]):
        component = d.Function(Vv, name=name)
        # Sort the fibers and sheets in dolfin degrees of freedom (dofs)
        component.vector()[:] = fiber_sheet_tensor[indices]
        fiber_components.append(component)
        indices += 1

    # Make sheet the last component
    fiber_components = [fiber_components[0]] + fiber_components[-1:0:-1]

    return fiber_components
fig, axes = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True,
                         figsize=(12, 6), facecolor='white')
plt.ion()
# plt.show(block=True)

n_dif = 100
dif = np.zeros((n_dif, 2))
loaded = np.load(file=os.path.join(
    folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) + '_training_XY.npz'))
prng = np.random.RandomState(2020)
sel4eval = prng.choice(num_samp, size=n_dif, replace=False)
X = loaded['X'][sel4eval]
Y = loaded['Y'][sel4eval]
sel4print = prng.choice(n_dif, size=10, replace=False)
prog = np.ceil(n_dif * (.1 + np.arange(0, 1, .1)))

u_f = df.Function(adif.prior.V)
eldeg = adif.prior.V.ufl_element().degree()
if eldeg > 1:
    V_P1 = df.FunctionSpace(adif.mesh, 'Lagrange', 1)
    d2v = df.dof_to_vertex_map(V_P1)
    u_f1 = df.Function(V_P1)
else:
    u_f1 = u_f

for n in range(n_dif):
    u = X[n]
    # calculate gradient
    t_start = timeit.default_timer()
    u_f1.vector().set_local(u)
    u_v = u_f1.vector()  # u already in dof order
    if eldeg > 1:
        u_f.interpolate(u_f1)
        u_v = u_f.vector()
    # u_f = img2fun(u, adif.prior.V); u_v = u_f.vector()  # for u in vertex order
    ll_xact, dll_xact = adif.get_geom(u_v, [0, 1])[:2]
    t_used[0] += timeit.default_timer() - t_start
    # emulate gradient
# closest_point_vectorized() took 157871 microseconds
#   nquery = 100000, dt_closest_SM = 0.2701895236968994
# and
# closest_point_vectorized() took 1249172 microseconds
#   nquery = 1000000, dt_closest_SM = 3.3071250915527344

# EVALUATE FUNCTION AT POINT
mesh = circle_mesh(np.array([0.0, 0.0]), 1.0, 1e-2)
V = dl.FunctionSpace(mesh, 'CG', 1)

# check that I'm using the dof to vertex mapping right
mesh_coords = mesh.coordinates()
dof_coords = V.tabulate_dof_coordinates()
dof2vertex = dl.dof_to_vertex_map(V)
vertex2dof = dl.vertex_to_dof_map(V)

u = dl.Function(V)
u.vector()[:] = dof_coords[:, 0]**2 + 2 * dof_coords[:, 1]**2
u.set_allow_extrapolation(True)

uu = u.vector()[vertex2dof]
# uu = u.vector()[dof2vertex]
uu_true = np.zeros(V.dim())
for ii in range(V.dim()):
    uu_true[ii] = u(mesh_coords[ii, :])

err_uu = np.linalg.norm(uu - uu_true)
print('err_uu=', err_uu)
def geom(unknown_lat, bip_lat, bip, autoencoder, geom_ord=[0],
         whitened=False, **kwargs):
    loglik = None
    gradlik = None
    metact = None
    rtmetact = None
    eigs = None

    # un-whiten if necessary
    if whitened == 'latent':
        unknown_lat = bip_lat.prior.v2u(unknown_lat)

    eldeg = bip.prior.V.ufl_element().degree()
    if 'Conv' in type(autoencoder).__name__:
        u_latin = bip_lat.vec2img(unknown_lat)
        width = tuple(np.mod(i, 2) for i in u_latin.shape)
        u_latin = chop(u_latin, width)[None, :, :, None] \
            if autoencoder.activations['latent'] is None \
            else u_latin.flatten()[None, :]
        unknown = bip.img2vec(pad(np.squeeze(autoencoder.decode(u_latin)), width),
                              bip.prior.V if eldeg > 1 else None)
    else:
        u_latin = unknown_lat.get_local()[None, :]
        u_decoded = autoencoder.decode(u_latin).flatten()
        unknown = bip.prior.gen_vector(u_decoded) if eldeg == 1 \
            else vinPn(u_decoded, bip.prior.V)

    emul_geom = kwargs.pop('emul_geom', None)
    full_geom = kwargs.pop('full_geom', None)
    try:
        if len(kwargs) == 0:
            loglik, gradlik, metact_, rtmetact_ = emul_geom(
                unknown, geom_ord, whitened == 'emulated', inP1=True)
        else:
            loglik, gradlik, metact_, eigs_ = emul_geom(
                unknown, geom_ord, whitened == 'emulated', inP1=True, **kwargs)
    except Exception:
        try:
            if len(kwargs) == 0:
                loglik, gradlik, metact_, rtmetact_ = full_geom(
                    unknown, geom_ord, whitened == 'original')
            else:
                loglik, gradlik, metact_, eigs_ = full_geom(
                    unknown, geom_ord, whitened == 'original', **kwargs)
        except Exception:
            raise RuntimeError('No geometry in the original space available!')

    if any(s >= 1 for s in geom_ord):
        if whitened == 'latent':
            gradlik = bip.prior.C_act(gradlik, .5)
        # pull the gradient back through the decoder
        if 'Conv' in type(autoencoder).__name__:
            jac = autoencoder.jacobian(u_latin, 'decode')
            jac = pad(jac, width * 2 if autoencoder.activations['latent'] is None
                      else width + (0,))
            jac = jac.reshape((np.prod(jac.shape[:2]), np.prod(jac.shape[2:])))
            jac = jac[np.ix_(df.dof_to_vertex_map(bip.prior.V),
                             df.dof_to_vertex_map(bip_lat.prior.V))]
            gradlik_ = jac.T.dot(gradlik.get_local())
        else:
            gradlik_ = autoencoder.jacvec(u_latin, gradlik.get_local()[None, :])
        gradlik = bip_lat.prior.gen_vector(gradlik_)

    if any(s >= 1.5 for s in geom_ord):
        def _get_metact_misfit(u_actedon):
            if type(u_actedon) is df.Vector:
                u_actedon = u_actedon.get_local()
            tmp = df.Vector(unknown)
            tmp.zero()
            jac.reduce(tmp, u_actedon)
            v = df.Vector(unknown_lat)
            v.set_local(jac.dot(metact_(tmp)))
            return v

        def _get_rtmetact_misfit(u_actedon):
            if type(u_actedon) is not df.Vector:
                u_ = df.Vector(unknown)
                u_.set_local(u_actedon)
                u_actedon = u_
            v = df.Vector(unknown_lat)
            v.set_local(jac.dot(rtmetact_(u_actedon)))
            return v

        metact = _get_metact_misfit
        rtmetact = _get_rtmetact_misfit

    if any(s > 1 for s in geom_ord) and len(kwargs) != 0:
        if bip_lat is None:
            raise ValueError('No latent inverse problem defined!')
        # compute eigen-decomposition using randomized algorithms
        if whitened == 'latent':
            # generalized eigen-decomposition (_C^(1/2) F _C^(1/2), M),
            # i.e. _C^(1/2) F _C^(1/2) = M V D V', V' M V = I
            def invM(a):
                a = bip_lat.prior.gen_vector(a)
                invMa = bip_lat.prior.gen_vector()
                bip_lat.prior.Msolver.solve(invMa, a)
                return invMa
            eigs = geigen_RA(metact, lambda u: bip_lat.prior.M * u, invM,
                             dim=bip_lat.prior.V.dim(), **kwargs)
        else:
            # generalized eigen-decomposition (F, _C^(-1)),
            # i.e. F = _C^(-1) U D U^(-1), U' _C^(-1) U = I, V = _C^(-1/2) U
            eigs = geigen_RA(metact,
                             lambda u: bip_lat.prior.C_act(u, -1),
                             lambda u: bip_lat.prior.C_act(u),
                             dim=bip_lat.prior.V.dim(), **kwargs)
        if any(s > 1.5 for s in geom_ord):
            # adjust the gradient:
            # update low-rank approximate Gaussian posterior
            bip_lat.post_Ga = Gaussian_apx_posterior(bip_lat.prior, eigs=eigs)

    if len(kwargs) == 0:
        return loglik, gradlik, metact, rtmetact
    else:
        return loglik, gradlik, metact, eigs