def _reconstruct_gradient(
    alpha_function, num_neighbours, neighbours, lstsq_matrices, lstsq_inv_matrices, gradient
):
    """
    Reconstruct the gradient, Python version of the code

    This function used to have a more Pythonic implementation that was
    most likely also faster. See old commits for that code. This code
    is here to verify the C++ version that is much faster than this
    (and the old Pythonic version).
    """
    a_cell_vec = get_local(alpha_function)
    mesh = alpha_function.function_space().mesh()

    V = alpha_function.function_space()
    assert V == gradient[0].function_space()
    cell_dofs = cell_dofmap(V)
    np_gradient = [gi.vector().get_local() for gi in gradient]

    # Reshape arrays. The C++ version needs flat arrays
    # (limitation in Instant/Dolfin) and we have the same
    # interface for both versions of the code
    ncells = len(num_neighbours)
    ndim = mesh.topology().dim()
    num_cells_owned, num_neighbours_max = neighbours.shape
    assert ncells == num_cells_owned
    lstsq_matrices = lstsq_matrices.reshape((ncells, ndim, num_neighbours_max))
    lstsq_inv_matrices = lstsq_inv_matrices.reshape((ncells, ndim, ndim))

    for icell in range(num_cells_owned):
        cdof = cell_dofs[icell]
        Nnbs = num_neighbours[icell]
        nbs = neighbours[icell, :Nnbs]

        # Get the matrices
        AT = lstsq_matrices[icell, :, :Nnbs]
        ATAI = lstsq_inv_matrices[icell]
        a0 = a_cell_vec[cdof]
        b = [(a_cell_vec[cell_dofs[ni]] - a0) for ni in nbs]
        b = numpy.array(b, float)

        # Calculate and store the gradient
        g = numpy.dot(ATAI, numpy.dot(AT, b))
        for d in range(ndim):
            np_gradient[d][cdof] = g[d]

    for i, np_grad in enumerate(np_gradient):
        set_local(gradient[i], np_grad, apply='insert')
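
# Illustrative sketch (an assumption, not part of the Ocellaris API): the loop
# above solves, for each cell, the normal equations of a least-squares problem
# A g ~ b, where row i of A is the offset from the cell midpoint to neighbour
# i's midpoint and b_i is the corresponding difference in alpha. The
# precomputed arrays hold AT = A^T and ATAI = (A^T A)^-1. The helper below and
# its arguments are hypothetical names used only to show the idea for one cell.
def _example_lstsq_gradient(cell_mp, neighbour_mps, a0, a_neighbours):
    """Least-squares gradient of alpha in one cell (standalone sketch)

    cell_mp: midpoint of the cell, shape (ndim,)
    neighbour_mps: midpoints of the neighbour cells, shape (nnbs, ndim)
    a0: value of alpha in this cell
    a_neighbours: values of alpha in the neighbour cells, shape (nnbs,)
    """
    A = numpy.asarray(neighbour_mps, dtype=float) - numpy.asarray(cell_mp, dtype=float)
    b = numpy.asarray(a_neighbours, dtype=float) - a0
    AT = A.T
    ATAI = numpy.linalg.inv(AT.dot(A))
    return ATAI.dot(AT.dot(b))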
def update_cpp(self, dt, velocity):
    alpha = get_local(self.alpha_function)
    beta = get_local(self.blending_function)
    gradient = [get_local(gi) for gi in self.gradient_reconstructor.gradient]
    velocity = [get_local(vi) for vi in velocity]
    g_vecs = numpy.array(gradient, dtype=float)
    v_vecs = numpy.array(velocity, dtype=float)
    assert g_vecs.shape[0] == v_vecs.shape[0] == self.simulation.ndim

    hric_funcs = {2: self.cpp_mod.hric_2D, 3: self.cpp_mod.hric_3D}
    hric_func = hric_funcs[self.simulation.ndim]
    Co_max = hric_func(
        self.cpp_inp, self.mesh, alpha, g_vecs, v_vecs, beta, dt, self.variant
    )

    set_local(self.blending_function, beta, apply='insert')
    return Co_max
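
# Illustrative sketch (an assumption, not Ocellaris code): since update_cpp()
# and update_python() below share the same signature and compute the same
# blending factors, the compiled kernel can be cross-checked against the pure
# Python loop. The helper name and the `scheme` argument are hypothetical;
# `scheme` stands for an instance of this convection scheme class.
def _example_verify_hric(scheme, dt, velocity):
    """Run both implementations and check that they agree (standalone sketch)"""
    beta_backup = get_local(scheme.blending_function)
    co_cpp = scheme.update_cpp(dt, velocity)
    beta_cpp = get_local(scheme.blending_function)

    # Restore the blending function before running the Python version
    set_local(scheme.blending_function, beta_backup, apply='insert')
    co_py = scheme.update_python(dt, velocity)
    beta_py = get_local(scheme.blending_function)

    assert abs(co_cpp - co_py) < 1e-12
    assert numpy.allclose(beta_cpp, beta_py)
    return co_cpp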
def run(self, use_weak_bcs=None):
    """
    Perform slope limiting of DG Lagrange functions
    """
    # No limiter needed for piecewise constant functions
    if self.degree == 0:
        return

    timer = df.Timer('Ocellaris HierarchalTaylorSlopeLimiter')

    # Update the Taylor function with the current Lagrange values
    lagrange_to_taylor(self.phi, self.taylor)
    taylor_arr = get_local(self.taylor)
    alpha_arrs = [alpha.vector().get_local() for alpha in self.alpha_funcs]

    # Get global bounds, see SlopeLimiterBase.set_initial_field()
    global_min, global_max = self.global_bounds

    # Update the Taylor function holding the previous field values
    if self.phi_old is not None:
        lagrange_to_taylor(self.phi_old, self.taylor_old)
        taylor_arr_old = get_local(self.taylor_old)
    else:
        taylor_arr_old = taylor_arr

    # Get updated boundary conditions
    weak_vals = None
    use_weak_bcs = self.use_weak_bcs if use_weak_bcs is None else use_weak_bcs
    if use_weak_bcs:
        weak_vals = self.phi.vector().get_local()
    boundary_dof_type, boundary_dof_value = self.boundary_conditions.get_bcs(weak_vals)

    # Run the limiter implementation
    if self.use_cpp:
        self._run_cpp(
            taylor_arr,
            taylor_arr_old,
            alpha_arrs,
            global_min,
            global_max,
            boundary_dof_type,
            boundary_dof_value,
        )
    elif self.degree == 1 and self.ndim == 2:
        self._run_dg1(
            taylor_arr,
            taylor_arr_old,
            alpha_arrs[0],
            global_min,
            global_max,
            boundary_dof_type,
            boundary_dof_value,
        )
    elif self.degree == 2 and self.ndim == 2:
        self._run_dg2(
            taylor_arr,
            taylor_arr_old,
            alpha_arrs[0],
            alpha_arrs[1],
            global_min,
            global_max,
            boundary_dof_type,
            boundary_dof_value,
        )
    else:
        raise OcellarisError(
            'Unsupported dimension for Python version of the HierarchalTaylor limiter',
            'Only 2D is supported',
        )

    # Update the Lagrange function with the limited Taylor values
    set_local(self.taylor, taylor_arr, apply='insert')
    taylor_to_lagrange(self.taylor, self.phi)

    # Enforce boundary conditions
    if self.enforce_boundary_conditions:
        has_dbc = boundary_dof_type == self.boundary_conditions.BC_TYPE_DIRICHLET
        vals = self.phi.vector().get_local()
        vals[has_dbc] = boundary_dof_value[has_dbc]
        self.phi.vector().set_local(vals)
        self.phi.vector().apply('insert')

    # Update the secondary output arrays, the alphas
    for alpha, alpha_arr in zip(self.alpha_funcs, alpha_arrs):
        alpha.vector().set_local(alpha_arr)
        alpha.vector().apply('insert')

    timer.stop()
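
# Illustrative sketch (an assumption, not the Ocellaris implementation): the
# hierarchical Taylor limiter keeps the cell-average Taylor dof and scales the
# derivative dofs by a factor alpha in [0, 1] chosen so that the reconstructed
# solution stays inside the allowed bounds. The helper below shows the idea for
# a single 1D DG1 cell; the function name and arguments are hypothetical.
def _example_limit_dg1_cell(avg, slope, x_vertices, x_centre, lo, hi):
    """Return a limited slope so that avg + slope*(x - x_centre) stays in [lo, hi]"""
    alpha = 1.0
    for x in x_vertices:
        v = avg + slope * (x - x_centre)
        if v > hi and v > avg:
            alpha = min(alpha, (hi - avg) / (v - avg))
        elif v < lo and v < avg:
            alpha = min(alpha, (lo - avg) / (v - avg))
    return alpha * slope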
def update_python(self, dt, velocity):
    alpha_arr = get_local(self.alpha_function)
    beta_arr = get_local(self.blending_function)

    cell_dofs = self.cpp_inp.cell_dofmap
    facet_dofs = self.cpp_inp.facet_dofmap

    polydeg = self.alpha_function.ufl_element().degree()
    conFC = self.simulation.data['connectivity_FC']
    facet_info = self.simulation.data['facet_info']
    cell_info = self.simulation.data['cell_info']

    # Get the numpy arrays of the input functions
    gradient = self.gradient_reconstructor.gradient
    gradient_arrs = [get_local(gi) for gi in gradient]
    velocity_arrs = [get_local(vi) for vi in velocity]

    EPS = 1e-6
    Co_max = 0
    for facet in dolfin.facets(self.mesh, 'regular'):
        fidx = facet.index()
        fdof = facet_dofs[fidx]
        finfo = facet_info[fidx]

        # Find the local cells (the two cells sharing this facet)
        connected_cells = conFC(fidx)
        if len(connected_cells) != 2:
            # This should be an exterior facet (on ds)
            assert facet.exterior()
            beta_arr[fdof] = 0.0
            continue

        # Indices of the two local cells
        ic0, ic1 = connected_cells

        # Velocity at the facet
        ump = [vi[fdof] for vi in velocity_arrs]

        # Midpoints of the local cells
        cell0_mp = cell_info[ic0].midpoint
        cell1_mp = cell_info[ic1].midpoint
        mp_dist = cell1_mp - cell0_mp

        # Normal pointing out of cell 0
        normal = finfo.normal

        # Find indices of downstream ("D") cell and central ("C") cell
        uf = numpy.dot(normal, ump)
        if uf > 0:
            iaC = ic0
            iaD = ic1
            vec_to_downstream = mp_dist
            # nminC, nmaxC = nmin0, nmax0
        else:
            iaC = ic1
            iaD = ic0
            vec_to_downstream = -mp_dist
            # nminC, nmaxC = nmin1, nmax1

        # Find alpha in the D and C cells
        if polydeg == 0:
            aD = alpha_arr[cell_dofs[iaD]]
            aC = alpha_arr[cell_dofs[iaC]]
        elif polydeg == 1:
            aD, aC = numpy.zeros(1), numpy.zeros(1)
            self.alpha_function.eval(aD, cell_info[iaD].midpoint)
            self.alpha_function.eval(aC, cell_info[iaC].midpoint)
            aD, aC = aD[0], aC[0]

        if abs(aC - aD) < EPS:
            # No change in this area, use upstream value
            beta_arr[fdof] = 0.0
            continue

        # Gradient of alpha in the central cell
        gC = [gi[cell_dofs[iaC]] for gi in gradient_arrs]
        len_gC2 = numpy.dot(gC, gC)

        if len_gC2 == 0:
            # No change in this area, use upstream value
            beta_arr[fdof] = 0.0
            continue

        # Upstream value
        # See Ubbink's PhD (1997) equations 4.21 and 4.22
        aU = aD - 2 * numpy.dot(gC, vec_to_downstream)
        aU = min(max(aU, 0.0), 1.0)

        # Calculate the facet Courant number
        Co = abs(uf) * dt * finfo.area / cell_info[iaC].volume
        Co_max = max(Co_max, Co)

        if abs(aU - aD) < EPS:
            # No change in this area, use upstream value
            beta_arr[fdof] = 0.0
            continue

        # Angle between the facet normal and the surface normal
        len_normal2 = numpy.dot(normal, normal)
        cos_theta = numpy.dot(normal, gC) / (len_normal2 * len_gC2) ** 0.5

        # Introduce normalized variables
        tilde_aC = (aC - aU) / (aD - aU)

        if tilde_aC <= 0 or tilde_aC >= 1:
            # Only upwind is stable
            beta_arr[fdof] = 0.0
            continue

        if self.variant == 'HRIC':
            # Compressive scheme
            tilde_aF = 2 * tilde_aC if 0 <= tilde_aC <= 0.5 else 1

            # Correct tilde_aF to avoid aligning with interfaces
            t = abs(cos_theta) ** 0.5
            tilde_aF_star = tilde_aF * t + tilde_aC * (1 - t)

            # Correct tilde_aF_star for high Courant numbers
            if Co < 0.4:
                tilde_aF_final = tilde_aF_star
            elif Co < 0.75:
                tilde_aF_final = tilde_aC + (tilde_aF_star - tilde_aC) * (0.75 - Co) / (0.75 - 0.4)
            else:
                tilde_aF_final = tilde_aC

        elif self.variant == 'MHRIC':
            # Compressive scheme
            tilde_aF = 2 * tilde_aC if 0 <= tilde_aC <= 0.5 else 1

            # Less compressive scheme
            tilde_aF_ultimate_quickest = min((6 * tilde_aC + 3) / 8, tilde_aF)

            # Correct tilde_aF to avoid aligning with interfaces
            t = abs(cos_theta) ** 0.5
            tilde_aF_final = tilde_aF * t + tilde_aF_ultimate_quickest * (1 - t)

        elif self.variant == 'RHRIC':
            # Compressive scheme
            tilde_aF_hyperc = min(tilde_aC / Co, 1)

            # Less compressive scheme
            tilde_aF_hric = min(tilde_aC * Co + 2 * tilde_aC * (1 - Co), tilde_aF_hyperc)

            # Correct tilde_aF to avoid aligning with interfaces
            t = cos_theta ** 4
            tilde_aF_final = tilde_aF_hyperc * t + tilde_aF_hric * (1 - t)

        # Avoid tilde_aF being slightly lower than tilde_aC due to
        # floating point errors; it must be greater than or equal
        if tilde_aC - EPS < tilde_aF_final < tilde_aC:
            tilde_aF_final = tilde_aC

        # Calculate the downstream blending factor (0=upstream, 1=downstream)
        tilde_beta = (tilde_aF_final - tilde_aC) / (1 - tilde_aC)

        if not (0.0 <= tilde_beta <= 1.0):
            print('ERROR, tilde_beta %r is out of range [0, 1]' % tilde_beta)
            print(' face normal: %r' % normal)
            print(' surface gradient: %r' % gC)
            print(' cos(theta): %r' % cos_theta)
            print(' sqrt(abs(cos(theta))) %r' % t)
            print(' tilde_aF_final %r' % tilde_aF_final)
            print(' tilde_aC %r' % tilde_aC)
            print(' aU %r, aC %r, aD %r' % (aU, aC, aD))

        assert 0.0 <= tilde_beta <= 1.0
        beta_arr[fdof] = tilde_beta

    set_local(self.blending_function, beta_arr, apply='insert')
    return Co_max
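
# Illustrative sketch (an assumption, not Ocellaris code): the core of the
# plain 'HRIC' variant above, collected into one standalone function on
# normalised variables. It mirrors the branch structure of update_python()
# for a single facet; the function name is hypothetical.
def _example_hric_facet(tilde_aC, cos_theta, Co):
    """Return the normalised face value tilde_aF for the plain HRIC scheme"""
    if tilde_aC <= 0 or tilde_aC >= 1:
        # Only upwind is stable; tilde_aF == tilde_aC gives a zero blending factor
        return tilde_aC

    # Compressive (downwind biased) scheme
    tilde_aF = 2 * tilde_aC if tilde_aC <= 0.5 else 1.0

    # Correct tilde_aF to avoid aligning with interfaces
    t = abs(cos_theta) ** 0.5
    tilde_aF_star = tilde_aF * t + tilde_aC * (1 - t)

    # Reduce downwinding for large facet Courant numbers
    if Co < 0.4:
        return tilde_aF_star
    elif Co < 0.75:
        return tilde_aC + (tilde_aF_star - tilde_aC) * (0.75 - Co) / (0.75 - 0.4)
    return tilde_aC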