def update_cpp(self, dt, velocity):
    """
    Update the HRIC blending function, delegating to the compiled
    C++ implementation
    """
    alpha = get_local(self.alpha_function)
    beta = get_local(self.blending_function)
    gradient = [get_local(gi) for gi in self.gradient_reconstructor.gradient]
    velocity = [get_local(vi) for vi in velocity]
    g_vecs = numpy.array(gradient, dtype=float)
    v_vecs = numpy.array(velocity, dtype=float)
    assert g_vecs.shape[0] == v_vecs.shape[0] == self.simulation.ndim

    # Select the compiled kernel matching the spatial dimension
    hric_funcs = {2: self.cpp_mod.hric_2D, 3: self.cpp_mod.hric_3D}
    hric_func = hric_funcs[self.simulation.ndim]
    Co_max = hric_func(
        self.cpp_inp, self.mesh, alpha, g_vecs, v_vecs, beta, dt, self.variant
    )
    set_local(self.blending_function, beta, apply='insert')
    return Co_max
def crossing_points_and_cells(simulation, field, value, preprocessed):
    """
    Find cells that contain the value iso surface. This is done by
    connecting cell midpoints across facets and seeing if the level
    set crosses this line. If it does, the point where it crosses,
    the cell containing the free surface crossing point and the
    vector from the low value cell to the high value cell is stored.
    The direction vector is made into a unit vector and then scaled
    by the value difference between the two cells.

    The vectors are computed in this way so that they can be averaged
    (for a cell with multiple crossing points) to get an approximate
    direction of increasing value (typically increasing density,
    meaning they point into the fluid in a water/air simulation).
    This is used such that the high value and the low value sides of
    the field can be approximately determined.

    The field is assumed to be piecewise constant (DG0)
    """
    facet_data, cell_dofs, is_ghost_cell = preprocessed
    all_values = get_local(field)

    # We define the acronym LCCM: line connecting cell midpoints
    # - We restrict ourselves to LCCMs that cross only ONE facet
    # - We number LCCMs by the index of the crossed facet

    # Find the crossing points where the contour crosses a LCCM
    crossing_points = {}
    for fid, fdata in facet_data.items():
        # Get preprocessed data
        cid0, cid1, coords0, coords1, uvec = fdata

        # Check for level crossing
        v0 = all_values[cell_dofs[cid0]]
        v1 = all_values[cell_dofs[cid1]]
        b1, b2 = v0 < value, v1 < value
        if (b1 and b2) or not (b1 or b2):
            # LCCM not crossed by the contour
            continue

        # Find the location where the contour line crosses the LCCM
        fac = (v0 - value) / (v0 - v1)
        crossing_point = tuple((1 - fac) * coords0 + fac * coords1)

        # Scaled direction vector
        direction = uvec * (v0 - v1)

        # Find the cell containing the contour line
        surf_cid = cid0 if fac <= 0.5 else cid1
        is_ghost = is_ghost_cell[cid0] if fac <= 0.5 else is_ghost_cell[cid1]

        if not is_ghost:
            # Store the point and direction towards the high value cell
            crossing_points.setdefault(surf_cid, []).append(
                (crossing_point, direction)
            )

    return crossing_points
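
# A minimal standalone sketch of the linear interpolation used above (the
# _demo name is ours; v0, v1, coords0, coords1 mirror the loop variables and
# the numbers are made up for illustration): with cell values v0=0.2 and
# v1=0.8 and iso value 0.5, fac = (0.2 - 0.5) / (0.2 - 0.8) = 0.5, so the
# crossing point lies at the midpoint of the line connecting the two cell
# midpoints.
def _demo_lccm_crossing():
    import numpy
    v0, v1, value = 0.2, 0.8, 0.5
    coords0 = numpy.array([0.0, 0.0])
    coords1 = numpy.array([1.0, 0.0])
    fac = (v0 - value) / (v0 - v1)
    crossing_point = (1 - fac) * coords0 + fac * coords1
    assert numpy.allclose(crossing_point, [0.5, 0.0])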
def _reconstruct_gradient(
    alpha_function,
    num_neighbours,
    neighbours,
    lstsq_matrices,
    lstsq_inv_matrices,
    gradient,
):
    """
    Reconstruct the gradient, Python version of the code

    This function used to have a more Pythonic implementation that
    was most likely also faster. See old commits for that code.
    This code is here to verify the C++ version that is much faster
    than this (and the old Pythonic version)
    """
    a_cell_vec = get_local(alpha_function)
    mesh = alpha_function.function_space().mesh()

    V = alpha_function.function_space()
    assert V == gradient[0].function_space()
    cell_dofs = cell_dofmap(V)
    np_gradient = [gi.vector().get_local() for gi in gradient]

    # Reshape arrays. The C++ version needs flat arrays (a limitation
    # in Instant/Dolfin) and we have the same interface for both
    # versions of the code
    ncells = len(num_neighbours)
    ndim = mesh.topology().dim()
    num_cells_owned, num_neighbours_max = neighbours.shape
    assert ncells == num_cells_owned
    lstsq_matrices = lstsq_matrices.reshape((ncells, ndim, num_neighbours_max))
    lstsq_inv_matrices = lstsq_inv_matrices.reshape((ncells, ndim, ndim))

    for icell in range(num_cells_owned):
        cdof = cell_dofs[icell]
        Nnbs = num_neighbours[icell]
        nbs = neighbours[icell, :Nnbs]

        # Get the matrices
        AT = lstsq_matrices[icell, :, :Nnbs]
        ATAI = lstsq_inv_matrices[icell]
        a0 = a_cell_vec[cdof]
        b = [(a_cell_vec[cell_dofs[ni]] - a0) for ni in nbs]
        b = numpy.array(b, float)

        # Calculate and store the gradient
        g = numpy.dot(ATAI, numpy.dot(AT, b))
        for d in range(ndim):
            np_gradient[d][cdof] = g[d]

    for i, np_grad in enumerate(np_gradient):
        set_local(gradient[i], np_grad, apply='insert')
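
# A minimal sketch of the least squares reconstruction that the precomputed
# matrices above implement (the _demo name and the numbers are ours): the
# rows of A are the vectors from the cell midpoint to each neighbour
# midpoint, b holds the corresponding value differences, and the gradient
# solves the normal equations g = (A^T A)^{-1} A^T b.
def _demo_lstsq_gradient():
    import numpy
    # Cell at the origin with value 0 and three neighbours sampling the
    # linear field f(x, y) = 2x + 3y; the gradient should be (2, 3)
    A = numpy.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    b = numpy.array([2.0, 3.0, 5.0])
    AT = A.T
    ATAI = numpy.linalg.inv(AT @ A)
    g = ATAI @ (AT @ b)
    assert numpy.allclose(g, [2.0, 3.0])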
def update_level_set_view(simulation, level_set_view, crossings, cache):
    """
    Create a level set CG1 scalar function where the value is 0 at
    the given crossing point locations and elsewhere approximately
    the distance to the nearest crossing point, computed by
    following the edges of the mesh away from the crossing points
    and keeping track of the closest such point
    """
    dofs_x, dof_dist, cell_dofs, dof_cells = cache
    values = level_set_view.vector().get_local()
    values[:] = 1e100
    Nlocal = len(values)

    # For MPI ranks that contain the free surface we first fill out the
    # distance values starting at the free surface
    if crossings:
        # Mark distances in cells with a free surface
        for cid, cross in crossings.items():
            for crossing_point, direction in cross:
                for dof in cell_dofs[cid]:
                    if dof < Nlocal:
                        dpos = dofs_x[dof]
                        vec = dpos - crossing_point
                        dist = (vec[0] ** 2 + vec[1] ** 2 + vec[2] ** 2) ** 0.5

                        # Make the level set function a signed distance function
                        # if vec.dot(direction) < 0:
                        #     dist = -dist

                        if abs(dist) < abs(values[dof]):
                            values[dof] = dist

        # Breadth-first search to populate all of the local domain
        queue = deque()
        for cid in crossings:
            queue.extend([d for d in cell_dofs[cid] if d < Nlocal])
        bfs(queue, values, cell_dofs, dof_cells, dof_dist)

    # Update ghost cell values
    level_set_view.vector().set_local(values)
    level_set_view.vector().apply('insert')

    # Breadth first search from ghost dofs
    values2 = get_local(level_set_view)
    queue = deque(range(Nlocal, len(values2)))
    bfs(queue, values2, cell_dofs, dof_cells, dof_dist)

    level_set_view.vector().set_local(values2[:Nlocal])
    level_set_view.vector().apply('insert')
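
# The bfs() helper used above is not defined in this section. Below is a
# minimal sketch of such a breadth-first distance propagation (hypothetical
# signature and data layout, not the verified Ocellaris implementation):
# pop a dof, try to improve every dof that shares a cell with it by adding
# the precomputed dof-to-dof distance, and re-queue any dof whose value
# improved.
from collections import deque

def _demo_bfs(queue, values, cell_dofs, dof_cells, dof_dist):
    while queue:
        dof = queue.popleft()
        for cid in dof_cells[dof]:
            for nb in cell_dofs[cid]:
                cand = abs(values[dof]) + dof_dist[(dof, nb)]
                if cand < abs(values[nb]):
                    values[nb] = cand
                    queue.append(nb)

def _demo_bfs_usage():
    # Toy mesh: three dofs on a line, dof 0 seeded with distance 0
    values = [0.0, 1e100, 1e100]
    cell_dofs = {0: [0, 1], 1: [1, 2]}  # two "cells" sharing dof 1
    dof_cells = {0: [0], 1: [0, 1], 2: [1]}
    dof_dist = {(i, j): abs(i - j) for i in range(3) for j in range(3)}
    _demo_bfs(deque([0]), values, cell_dofs, dof_cells, dof_dist)
    assert values == [0.0, 1.0, 2.0]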
def test_get_set_local_with_ghosts():
    comm = dolfin.MPI.comm_world
    rank = dolfin.MPI.rank(comm)
    dolfin.parameters['ghost_mode'] = 'shared_vertex'
    mesh = dolfin.UnitSquareMesh(comm, 4, 4)
    V = dolfin.FunctionSpace(mesh, 'DG', 0)
    u = dolfin.Function(V)
    dm = V.dofmap()
    im = dm.index_map()

    # Write the global dof number into the array for all owned dofs
    start, end = u.vector().local_range()
    arr = u.vector().get_local()
    arr[:] = numpy.arange(start, end)
    u.vector().set_local(arr)
    u.vector().apply('insert')

    # Get local + ghost values
    arr2 = get_local(u.vector(), V)

    # Get ghost global indices and some sizes
    global_ghost_dofs = im.local_to_global_unowned()
    Nall = im.size(im.MapSize.ALL)
    Nghost = global_ghost_dofs.size
    Nown = im.size(im.MapSize.OWNED)
    assert Nown + Nghost == Nall

    # Produce the expected answer
    dofs = numpy.arange(start, end + Nghost)
    dofs[Nown:] = global_ghost_dofs

    # Check the results
    assert dofs.shape == arr2.shape
    diff = abs(dofs - arr2).max()
    print()
    print(rank, start, end, global_ghost_dofs)
    print(rank, numpy.array(arr2, dtype=numpy.intc), '\n ', dofs, diff)
    assert diff == 0
def run(self, use_weak_bcs=None):
    """
    Perform slope limiting of DG Lagrange functions
    """
    # No limiter is needed for piecewise constant functions
    if self.degree == 0:
        return

    timer = df.Timer('Ocellaris HierarchalTaylorSlopeLimiter')

    # Update the Taylor function with the current Lagrange values
    lagrange_to_taylor(self.phi, self.taylor)
    taylor_arr = get_local(self.taylor)
    alpha_arrs = [alpha.vector().get_local() for alpha in self.alpha_funcs]

    # Get global bounds, see SlopeLimiterBase.set_initial_field()
    global_min, global_max = self.global_bounds

    # Update the Taylor functions with the previous field values
    if self.phi_old is not None:
        lagrange_to_taylor(self.phi_old, self.taylor_old)
        taylor_arr_old = get_local(self.taylor_old)
    else:
        taylor_arr_old = taylor_arr

    # Get updated boundary conditions
    weak_vals = None
    use_weak_bcs = self.use_weak_bcs if use_weak_bcs is None else use_weak_bcs
    if use_weak_bcs:
        weak_vals = self.phi.vector().get_local()
    boundary_dof_type, boundary_dof_value = self.boundary_conditions.get_bcs(weak_vals)

    # Run the limiter implementation
    if self.use_cpp:
        self._run_cpp(
            taylor_arr,
            taylor_arr_old,
            alpha_arrs,
            global_min,
            global_max,
            boundary_dof_type,
            boundary_dof_value,
        )
    elif self.degree == 1 and self.ndim == 2:
        self._run_dg1(
            taylor_arr,
            taylor_arr_old,
            alpha_arrs[0],
            global_min,
            global_max,
            boundary_dof_type,
            boundary_dof_value,
        )
    elif self.degree == 2 and self.ndim == 2:
        self._run_dg2(
            taylor_arr,
            taylor_arr_old,
            alpha_arrs[0],
            alpha_arrs[1],
            global_min,
            global_max,
            boundary_dof_type,
            boundary_dof_value,
        )
    else:
        raise OcellarisError(
            'Unsupported dimension for Python version of the HierarchalTaylor limiter',
            'Only 2D is supported',
        )

    # Update the Lagrange function with the limited Taylor values
    set_local(self.taylor, taylor_arr, apply='insert')
    taylor_to_lagrange(self.taylor, self.phi)

    # Enforce boundary conditions
    if self.enforce_boundary_conditions:
        has_dbc = boundary_dof_type == self.boundary_conditions.BC_TYPE_DIRICHLET
        vals = self.phi.vector().get_local()
        vals[has_dbc] = boundary_dof_value[has_dbc]
        self.phi.vector().set_local(vals)
        self.phi.vector().apply('insert')

    # Update the secondary output arrays, the alphas
    for alpha, alpha_arr in zip(self.alpha_funcs, alpha_arrs):
        alpha.vector().set_local(alpha_arr)
        alpha.vector().apply('insert')

    timer.stop()
def _run_dg1(
    self,
    taylor_arr,
    taylor_arr_old,
    alpha_arr,
    global_min,
    global_max,
    boundary_dof_type,
    boundary_dof_value,
):
    """
    Perform slope limiting of a DG1 function
    """
    inp = self.input
    lagrange_arr = get_local(self.phi)

    for icell in range(self.num_cells_owned):
        dofs = inp.cell_dofs_V[icell]
        center_value = taylor_arr[dofs[0]]
        skip_this_cell = self.limit_cell[icell] == 0

        # Find the minimum slope limiter coefficient alpha
        alpha = 1.0
        if not skip_this_cell:
            for i in range(3):
                dof = dofs[i]
                nn = inp.num_neighbours[dof]
                if nn == 0:
                    skip_this_cell = True
                    break

                # Find the minimum and maximum values among the vertex neighbours
                minval = maxval = center_value
                for nb in inp.neighbours[dof, :nn]:
                    nb_center_val_dof = inp.cell_dofs_V[nb][0]
                    nb_val = taylor_arr[nb_center_val_dof]
                    minval = min(minval, nb_val)
                    maxval = max(maxval, nb_val)
                    nb_val = taylor_arr_old[nb_center_val_dof]
                    minval = min(minval, nb_val)
                    maxval = max(maxval, nb_val)

                # Modify the local bounds to incorporate the global bounds
                minval = max(minval, global_min)
                maxval = min(maxval, global_max)
                center_value = max(center_value, global_min)
                center_value = min(center_value, global_max)

                vertex_value = lagrange_arr[dof]
                if vertex_value > center_value:
                    alpha = min(alpha, (maxval - center_value) / (vertex_value - center_value))
                elif vertex_value < center_value:
                    alpha = min(alpha, (minval - center_value) / (vertex_value - center_value))

        if skip_this_cell:
            alpha = 1.0

        alpha_arr[inp.cell_dofs_V0[icell]] = alpha
        taylor_arr[dofs[0]] = center_value
        taylor_arr[dofs[1]] *= alpha
        taylor_arr[dofs[2]] *= alpha
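
# A minimal worked example of the limiter coefficient computed above (the
# _demo name and the numbers are ours): with a cell average of 0.5, a
# vertex value of 0.9 and a neighbourhood maximum of 0.7, the slope must
# be scaled by alpha = (maxval - center) / (vertex - center)
# = (0.7 - 0.5) / (0.9 - 0.5) = 0.5 so that the limited vertex value
# 0.5 + 0.5 * (0.9 - 0.5) = 0.7 stays within the local bounds.
def _demo_limiter_alpha():
    center_value, vertex_value, maxval = 0.5, 0.9, 0.7
    alpha = min(1.0, (maxval - center_value) / (vertex_value - center_value))
    limited = center_value + alpha * (vertex_value - center_value)
    assert abs(alpha - 0.5) < 1e-12 and abs(limited - maxval) < 1e-12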
def gather_vtk_info(mesh, funcs):
    """
    Gather the necessary information to write a legacy VTK output
    file on the root process
    """
    # The code below assumes that the first function is DG2 (u0)
    assert funcs[0].function_space().ufl_element().family() in (
        'Discontinuous Lagrange',
        'Lagrange',
    )
    assert funcs[0].function_space().ufl_element().degree() == 2

    gdim = mesh.geometry().dim()
    dofs_x = funcs[0].function_space().tabulate_dof_coordinates().reshape((-1, gdim))
    assert gdim in (2, 3), 'VTK output currently only supported for 2D and 3D meshes'

    # Collect information about the functions
    func_names = []
    all_vals = []
    dofmaps = []
    for u in funcs:
        func_names.append(u.name())
        all_vals.append(get_local(u))
        dofmaps.append(u.function_space().dofmap())

    # Collect local data
    local_res = _collect_3D(mesh, gdim, dofs_x, func_names, all_vals, dofmaps)

    # MPI communication to get all data on the root process
    comm = mesh.mpi_comm()
    all_results = comm.gather(local_res)
    if all_results is None:
        return None

    # Assemble the information from all processes
    coords, connectivity, cell_types = [], [], []
    func_vals = {n: [] for n in func_names}
    for coords_i, connectivity_i, cell_types_i, func_vals_i in all_results:
        K = len(coords)
        coords.extend(coords_i)
        for conn in connectivity_i:
            connectivity.append(conn[:1] + [c + K for c in conn[1:]])
        cell_types.extend(cell_types_i)
        for n in func_names:
            func_vals[n].extend(func_vals_i[n])

    # Convert to numpy arrays
    coords = numpy.array(coords, dtype=numpy.float32)
    connectivity = numpy.array(connectivity, dtype=numpy.intc)
    cell_types = numpy.array(cell_types, dtype=numpy.intc)
    for n in func_names:
        func_vals[n] = numpy.array(func_vals[n], dtype=numpy.float32)

    # Check that the data is appropriate for output
    Nverts = len(coords)
    Ncells = len(connectivity)
    assert coords.shape == (Nverts, 3)
    assert connectivity.shape == (Ncells, 11 if gdim == 3 else 7)
    assert cell_types.shape == (Ncells,)
    for n in func_names:
        assert func_vals[n].shape == (Nverts,)

    return coords, connectivity, cell_types, func_vals
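
# A minimal sketch of how the gathered arrays could be written as a legacy
# ASCII VTK file on the root process (write_vtk is a hypothetical helper,
# not part of this module). Each connectivity row already starts with the
# point count (11 = 1 + 10 nodes of a quadratic tetrahedron, 7 = 1 + 6
# nodes of a quadratic triangle), matching the legacy CELLS record.
def write_vtk(file_name, coords, connectivity, cell_types, func_vals):
    with open(file_name, 'w') as out:
        out.write('# vtk DataFile Version 3.0\n')
        out.write('Ocellaris data\nASCII\nDATASET UNSTRUCTURED_GRID\n')
        out.write('POINTS %d float\n' % len(coords))
        for x, y, z in coords:
            out.write('%g %g %g\n' % (x, y, z))
        Ncells = len(connectivity)
        out.write('CELLS %d %d\n' % (Ncells, connectivity.size))
        for conn in connectivity:
            out.write(' '.join(str(c) for c in conn) + '\n')
        out.write('CELL_TYPES %d\n' % Ncells)
        for ct in cell_types:
            out.write('%d\n' % ct)
        out.write('POINT_DATA %d\n' % len(coords))
        for name, vals in func_vals.items():
            out.write('SCALARS %s float 1\nLOOKUP_TABLE default\n' % name)
            for v in vals:
                out.write('%g\n' % v)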
def update_python(self, dt, velocity):
    """
    Update the HRIC blending function, pure Python version of the code
    """
    alpha_arr = get_local(self.alpha_function)
    beta_arr = get_local(self.blending_function)

    cell_dofs = self.cpp_inp.cell_dofmap
    facet_dofs = self.cpp_inp.facet_dofmap

    polydeg = self.alpha_function.ufl_element().degree()
    conFC = self.simulation.data['connectivity_FC']
    facet_info = self.simulation.data['facet_info']
    cell_info = self.simulation.data['cell_info']

    # Get the numpy arrays of the input functions
    gradient = self.gradient_reconstructor.gradient
    gradient_arrs = [get_local(gi) for gi in gradient]
    velocity_arrs = [get_local(vi) for vi in velocity]

    EPS = 1e-6
    Co_max = 0
    for facet in dolfin.facets(self.mesh, 'regular'):
        fidx = facet.index()
        fdof = facet_dofs[fidx]
        finfo = facet_info[fidx]

        # Find the local cells (the two cells sharing this facet)
        connected_cells = conFC(fidx)
        if len(connected_cells) != 2:
            # This should be an exterior facet (on ds)
            assert facet.exterior()
            beta_arr[fdof] = 0.0
            continue

        # Indices of the two local cells
        ic0, ic1 = connected_cells

        # Velocity at the facet
        ump = [vi[fdof] for vi in velocity_arrs]

        # Midpoints of the local cells
        cell0_mp = cell_info[ic0].midpoint
        cell1_mp = cell_info[ic1].midpoint
        mp_dist = cell1_mp - cell0_mp

        # Normal pointing out of cell 0
        normal = finfo.normal

        # Find indices of the downstream ("D") cell and the central ("C") cell
        uf = numpy.dot(normal, ump)
        if uf > 0:
            iaC = ic0
            iaD = ic1
            vec_to_downstream = mp_dist
        else:
            iaC = ic1
            iaD = ic0
            vec_to_downstream = -mp_dist

        # Find alpha in the D and C cells
        if polydeg == 0:
            aD = alpha_arr[cell_dofs[iaD]]
            aC = alpha_arr[cell_dofs[iaC]]
        elif polydeg == 1:
            aD, aC = numpy.zeros(1), numpy.zeros(1)
            self.alpha_function.eval(aD, cell_info[iaD].midpoint)
            self.alpha_function.eval(aC, cell_info[iaC].midpoint)
            aD, aC = aD[0], aC[0]

        if abs(aC - aD) < EPS:
            # No change in this area, use the upstream value
            beta_arr[fdof] = 0.0
            continue

        # Gradient of alpha in the central cell
        gC = [gi[cell_dofs[iaC]] for gi in gradient_arrs]
        len_gC2 = numpy.dot(gC, gC)
        if len_gC2 == 0:
            # No change in this area, use the upstream value
            beta_arr[fdof] = 0.0
            continue

        # Upstream value
        # See Ubbink's PhD (1997) equations 4.21 and 4.22
        aU = aD - 2 * numpy.dot(gC, vec_to_downstream)
        aU = min(max(aU, 0.0), 1.0)

        # Calculate the facet Courant number
        Co = abs(uf) * dt * finfo.area / cell_info[iaC].volume
        Co_max = max(Co_max, Co)

        if abs(aU - aD) < EPS:
            # No change in this area, use the upstream value
            beta_arr[fdof] = 0.0
            continue

        # Angle between the facet normal and the surface normal
        len_normal2 = numpy.dot(normal, normal)
        cos_theta = numpy.dot(normal, gC) / (len_normal2 * len_gC2) ** 0.5

        # Introduce the normalized variables
        tilde_aC = (aC - aU) / (aD - aU)
        if tilde_aC <= 0 or tilde_aC >= 1:
            # Only upwind is stable
            beta_arr[fdof] = 0.0
            continue

        if self.variant == 'HRIC':
            # Compressive scheme
            tilde_aF = 2 * tilde_aC if 0 <= tilde_aC <= 0.5 else 1

            # Correct tilde_aF to avoid alignment with interfaces
            t = abs(cos_theta) ** 0.5
            tilde_aF_star = tilde_aF * t + tilde_aC * (1 - t)

            # Correct tilde_aF_star for high Courant numbers
            if Co < 0.4:
                tilde_aF_final = tilde_aF_star
            elif Co < 0.75:
                tilde_aF_final = tilde_aC + (tilde_aF_star - tilde_aC) * (0.75 - Co) / (0.75 - 0.4)
            else:
                tilde_aF_final = tilde_aC

        elif self.variant == 'MHRIC':
            # Compressive scheme
            tilde_aF = 2 * tilde_aC if 0 <= tilde_aC <= 0.5 else 1

            # Less compressive scheme
            tilde_aF_ultimate_quickest = min((6 * tilde_aC + 3) / 8, tilde_aF)

            # Correct tilde_aF to avoid alignment with interfaces
            t = abs(cos_theta) ** 0.5
            tilde_aF_final = tilde_aF * t + tilde_aF_ultimate_quickest * (1 - t)

        elif self.variant == 'RHRIC':
            # Compressive scheme
            tilde_aF_hyperc = min(tilde_aC / Co, 1)

            # Less compressive scheme
            tilde_aF_hric = min(tilde_aC * Co + 2 * tilde_aC * (1 - Co), tilde_aF_hyperc)

            # Correct tilde_aF to avoid alignment with interfaces
            t = cos_theta ** 4
            tilde_aF_final = tilde_aF_hyperc * t + tilde_aF_hric * (1 - t)

        # Avoid tilde_aF being slightly lower than tilde_aC due to
        # floating point errors; it must be greater than or equal
        if tilde_aC - EPS < tilde_aF_final < tilde_aC:
            tilde_aF_final = tilde_aC

        # Calculate the downstream blending factor (0=upstream, 1=downstream)
        tilde_beta = (tilde_aF_final - tilde_aC) / (1 - tilde_aC)

        if not (0.0 <= tilde_beta <= 1.0):
            print('ERROR, tilde_beta %r is out of range [0, 1]' % tilde_beta)
            print('  face normal: %r' % normal)
            print('  surface gradient: %r' % gC)
            print('  cos(theta): %r' % cos_theta)
            print('  sqrt(abs(cos(theta))) %r' % t)
            print('  tilde_aF_final %r' % tilde_aF_final)
            print('  tilde_aC %r' % tilde_aC)
            print('  aU %r, aC %r, aD %r' % (aU, aC, aD))
        assert 0.0 <= tilde_beta <= 1.0
        beta_arr[fdof] = tilde_beta

    set_local(self.blending_function, beta_arr, apply='insert')
    return Co_max
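
# A minimal worked example of the normalized variable quantities used above
# (the _demo name and the numbers are ours): aU=0.0, aC=0.3, aD=1.0 gives
# tilde_aC = 0.3 and the compressive HRIC candidate tilde_aF = 0.6.
# Ignoring the interface-angle and Courant corrections (t = 1, Co < 0.4),
# the downstream blending factor is
# tilde_beta = (0.6 - 0.3) / (1 - 0.3) = 3/7 ~ 0.43.
def _demo_hric_blending():
    aU, aC, aD = 0.0, 0.3, 1.0
    tilde_aC = (aC - aU) / (aD - aU)
    tilde_aF = 2 * tilde_aC if 0 <= tilde_aC <= 0.5 else 1
    tilde_beta = (tilde_aF - tilde_aC) / (1 - tilde_aC)
    assert abs(tilde_beta - 0.3 / 0.7) < 1e-12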