def __init__(self, u, locations, t0=0.0, record=''):
    '''
    Probe of a scalar function u at the given physical points.

    u         : dolfin Function, scalar valued (value_rank 0, value_size 1)
    locations : iterable of point coordinates to probe
    t0        : time stamp for the initial record
    record    : tag stored on self.record (semantics defined by the class;
                not visible from this method alone)
    '''
    # The idea here is that u(x) means: search for cell containing x,
    # evaluate the basis functions of that element at x, restrict
    # the coef vector of u to the cell. Of these 3 steps the first
    # two don't change between calls. So we cache them.

    # Check the scalar assumption
    assert u.value_rank() == 0 and u.value_size() == 1

    # Locate the cell containing each point
    mesh = u.function_space().mesh()
    # NOTE(review): global entity count used as the validity limit for a
    # local collision index — presumably fine in serial; confirm for MPI runs.
    limit = mesh.num_entities_global(mesh.topology().dim())
    bbox_tree = mesh.bounding_box_tree()

    cells_for_x = [None]*len(locations)
    for i, x in enumerate(locations):
        cell = bbox_tree.compute_first_entity_collision(Point(*x))
        # Out-of-mesh points come back as a huge (unsigned-wrapped) index
        if -1 < cell < limit:
            cells_for_x[i] = cell

    # Ignore the points that are not in the mesh. Note that we don't
    # care if a point is found in several cells
    # (Python 2 tuple-parameter lambda)
    xs_cells = filter(lambda (xi, c): c is not None, zip(locations, cells_for_x))

    V = u.function_space()
    element = V.dolfin_element()

    # One shared scratch buffer: each probe call overwrites it via restrict
    # and immediately consumes it in the dot product below.
    coefficients = np.zeros(element.space_dimension())
    # Build a series of closures bound to the right variables that,
    # when called, compute the value at x
    evals = []
    locations = []  # rebound: now collects only the points actually found
    for x, ci in xs_cells:
        basis_matrix = np.zeros(element.space_dimension())

        cell = Cell(mesh, ci)
        vertex_coords, orientation = cell.get_vertex_coordinates(), cell.orientation()
        # Eval the basis once; only the coefficient restriction is per-call
        element.evaluate_basis_all(basis_matrix, x, vertex_coords, orientation)

        # Default arguments bind the loop variables per iteration
        # (avoids the late-binding closure pitfall)
        def foo(A=basis_matrix, cell=cell, vc=vertex_coords):
            # Restrict for each call using the bound cell, vc
            u.restrict(coefficients, element, cell, vc, cell)
            # A here is bound to the right basis_matrix
            return np.dot(A, coefficients)

        evals.append(foo)
        locations.append(x)

    self.probes = evals
    self.locations = locations
    self.rank = MPI.rank(mesh.mpi_comm())
    self.data = []
    self.record = record
    # Make the initial record
    self.probe(t=t0)
def point_trace_matrix(V, TV, x0):
    '''
    Let u in V; u = ck phi_k then u(x0) \in TV = ck phi_k(x0). So this
    is a 1 by dim(V) matrix where the column values are phi_k(x0).
    '''
    mesh = V.mesh()
    tree = mesh.bounding_box_tree()
    cell = tree.compute_first_entity_collision(Point(*x0))
    # No-collision comes back as a huge (unsigned-wrapped) index,
    # so this also guards against x0 lying outside the mesh
    assert cell < mesh.num_cells()

    # Cell for restriction
    Vcell = Cell(mesh, cell)
    vertex_coordinates = Vcell.get_vertex_coordinates()
    cell_orientation = Vcell.orientation()
    x0 = np.fromiter(x0, dtype=float)

    # Columns - get all components at once
    all_dofs = V.dofmap().cell_dofs(cell).tolist()
    Vel = V.element()
    value_size = V.ufl_element().value_size()
    basis_values = np.zeros(V.element().space_dimension()*value_size)
    Vel.evaluate_basis_all(basis_values, x0, vertex_coordinates, cell_orientation)

    with petsc_serial_matrix(TV, V) as mat:
        # Scalar gets all dofs of the cell
        if value_size == 1:
            component_dofs = lambda component: V.dofmap().cell_dofs(cell)
        # Vector/tensor: slice out the component's dofs
        else:
            component_dofs = lambda component: V.sub(component).dofmap().cell_dofs(cell)

        for row in map(int, TV.dofmap().cell_dofs(cell)):  # R^n components
            sub_dofs = component_dofs(row)
            # Positions of the component dofs within the cell-local vector
            sub_dofs_local = [all_dofs.index(dof) for dof in sub_dofs]
            # (debug print removed)
            mat.setValues([row], sub_dofs, basis_values[sub_dofs_local],
                          PETSc.InsertMode.INSERT_VALUES)
    return mat
def point_trace_matrix(V, TV, x0):
    '''
    Let u in V; u = ck phi_k then u(x0) \in TV = ck phi_k(x0). So this
    is a 1 by dim(V) matrix where the column values are phi_k(x0).

    NOTE(review): this definition is a verbatim duplicate of an earlier
    one in the file and shadows it at import time — consider deleting one.
    '''
    mesh = V.mesh()
    tree = mesh.bounding_box_tree()
    cell = tree.compute_first_entity_collision(Point(*x0))
    # No-collision comes back as a huge (unsigned-wrapped) index,
    # so this also guards against x0 lying outside the mesh
    assert cell < mesh.num_cells()

    # Cell for restriction
    Vcell = Cell(mesh, cell)
    vertex_coordinates = Vcell.get_vertex_coordinates()
    cell_orientation = Vcell.orientation()
    x0 = np.fromiter(x0, dtype=float)

    # Columns - get all components at once
    all_dofs = V.dofmap().cell_dofs(cell).tolist()
    Vel = V.element()
    value_size = V.ufl_element().value_size()
    basis_values = np.zeros(V.element().space_dimension()*value_size)
    Vel.evaluate_basis_all(basis_values, x0, vertex_coordinates, cell_orientation)

    with petsc_serial_matrix(TV, V) as mat:
        # Scalar gets all dofs of the cell
        if value_size == 1:
            component_dofs = lambda component: V.dofmap().cell_dofs(cell)
        # Vector/tensor: slice out the component's dofs
        else:
            component_dofs = lambda component: V.sub(component).dofmap().cell_dofs(cell)

        for row in map(int, TV.dofmap().cell_dofs(cell)):  # R^n components
            sub_dofs = component_dofs(row)
            # Positions of the component dofs within the cell-local vector
            sub_dofs_local = [all_dofs.index(dof) for dof in sub_dofs]
            # (debug print removed)
            mat.setValues([row], sub_dofs, basis_values[sub_dofs_local],
                          PETSc.InsertMode.INSERT_VALUES)
    return mat
def cylinder_average_matrix(V, TV, radius, quad_degree):
    '''
    Averaging matrix: reduce u in V (3d) to TV (on a line mesh) by
    averaging over circles of the given radius centered on the line.

    radius      : number, or callable point -> number
    quad_degree : Gauss-Legendre degree for the circle integral
    '''
    mesh = V.mesh()
    line_mesh = TV.mesh()
    # We are going to perform the integration with Gauss quadrature at
    # the end (PI u)(x):
    # A cell of mesh (an edge) defines a normal vector. Let P be the plane
    # that is defined by the normal vector n and some point x on Gamma. Let L
    # be the circle that is the intersect of P and S. The value of q (in Q) at x
    # is defined as
    #
    #                 q(x) = (1/|L|)*\int_{L}g(x)*dL
    #
    # which simplifies to g(x) = (1/(2*pi*R))*\int_{-pi}^{pi}u(L)*R*d(theta) and
    # or                       = (1/2) * \int_{-1}^{1} u(L(pi*s)) * ds
    # This can be integrated no problemo once we figure out L. To this end, let
    # t_1 and t_2 be two unit mutually orthogonal vectors that are orthogonal to
    # n. Then L(pi*s) = p + R*t_1*cos(pi*s) + R*t_2*sin(pi*s) can be seen to be
    # such that i) |x-p| = R and ii) x.n = 0 [i.e. this is the suitable
    # parametrization]

    # Clearly we can scale the weights as well as precompute
    # cos and sin terms
    xq, wq = leggauss(quad_degree)
    wq *= 0.5
    cos_xq = np.cos(np.pi*xq).reshape((-1, 1))
    sin_xq = np.sin(np.pi*xq).reshape((-1, 1))

    # Normalize constant radius to a callable
    if is_number(radius):
        radius = lambda x, radius=radius: radius

    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations. A vector dof, if done naively, would
    # have to evaluate at the same x once per component.
    value_size = TV.ufl_element().value_size()

    # Eval at points will require search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non scalar we plan to make components by shift:
    # use the scalar sub-dofmap and offset rows by the component index
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()
    basis_values = np.zeros(V.element().space_dimension()*value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # Get the tangent => orthogonal tangent vectors
            v0, v1 = mesh_x[line_cell.entities(0)]
            n = v0 - v1

            # NOTE(review): t1 is the zero vector when n is parallel to
            # (1, 1, 1); the norm division below then fails — confirm the
            # line meshes used avoid edges along that direction.
            t1 = np.array([n[1]-n[2], n[2]-n[0], n[0]-n[1]])
            t2 = np.cross(n, t1)
            t1 /= np.linalg.norm(t1)
            t2 = t2/np.linalg.norm(t2)

            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Get radius and integration points on the circle
                rad = radius(avg_point)

                integration_points = avg_point + rad*t1*sin_xq + rad*t2*cos_xq

                # Accumulate column contributions over quadrature points
                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    # Skip quadrature points falling outside the mesh
                    if c >= limit: continue

                    Vcell = Cell(mesh, c)
                    vertex_coordinates = Vcell.get_vertex_coordinates()
                    cell_orientation = Vcell.orientation()
                    Vel.evaluate_basis_all(basis_values, ip,
                                           vertex_coordinates, cell_orientation)

                    cols_ip = V_dm.cell_dofs(c)
                    values_ip = basis_values*wq[index]
                    # Add (a dof can receive contributions from several points)
                    for col, value in zip(cols_ip,
                                          values_ip.reshape((-1, value_size))):
                        if col in data:
                            data[col] += value
                        else:
                            data[col] = value

                # The thing now is that with data we can assign to several
                # rows of the matrix (one per component, shifted)
                # NOTE: dict.keys() passed to np.array is Python-2 only
                column_indices = np.array(data.keys(), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array([data[col][shift]
                                              for col in column_indices])
                    mat.setValues([row], column_indices, column_values,
                                  PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)
def sphere_average_matrix(V, TV, radius, quad_degree):
    '''
    Averaging matrix over the sphere: reduce u in V (3d) to TV (on a
    line mesh) by averaging over spheres of the given radius centered
    at the TV dof coordinates.

    radius      : number, or callable point -> number
    quad_degree : Lebedev quadrature degree (bumped to odd if even)
    '''
    mesh = V.mesh()
    line_mesh = TV.mesh()
    # Lebedev below needs odd degrees
    if quad_degree % 2 == 0: quad_degree += 1
    # NOTE: this is a third-party dependency (quadpy), imported lazily
    # so the rest of the module works without it
    from quadpy.sphere import Lebedev

    integrator = Lebedev(quad_degree)
    xq = integrator.points    # unit-sphere quadrature points
    wq = integrator.weights

    # Normalize constant radius to a callable
    if is_number(radius):
        radius = lambda x, radius=radius: radius

    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations. A vector dof, if done naively, would
    # have to evaluate at the same x once per component.
    value_size = TV.ufl_element().value_size()

    # Eval at points will require search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non scalar we plan to make components by shift:
    # use the scalar sub-dofmap and offset rows by the component index
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()
    basis_values = np.zeros(V.element().space_dimension()*value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Get radius and integration points
                rad = radius(avg_point)
                # Scale and shift the unit sphere to the point
                integration_points = xq*rad + avg_point

                # Accumulate column contributions over quadrature points
                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    # Skip quadrature points falling outside the mesh
                    if c >= limit: continue

                    Vcell = Cell(mesh, c)
                    vertex_coordinates = Vcell.get_vertex_coordinates()
                    cell_orientation = Vcell.orientation()
                    Vel.evaluate_basis_all(basis_values, ip,
                                           vertex_coordinates, cell_orientation)

                    cols_ip = V_dm.cell_dofs(c)
                    values_ip = basis_values*wq[index]
                    # Add (a dof can receive contributions from several points)
                    for col, value in zip(cols_ip,
                                          values_ip.reshape((-1, value_size))):
                        if col in data:
                            data[col] += value
                        else:
                            data[col] = value

                # The thing now is that with data we can assign to several
                # rows of the matrix (one per component, shifted)
                # NOTE: dict.keys() passed to np.array is Python-2 only
                column_indices = np.array(data.keys(), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array([data[col][shift]
                                              for col in column_indices])
                    mat.setValues([row], column_indices, column_values,
                                  PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)
def cylinder_average_matrix(V, TV, radius, quad_degree):
    '''
    Averaging matrix: reduce u in V (3d) to TV (on a line mesh) by
    averaging over circles of the given radius centered on the line.

    radius      : number, or callable point -> number
    quad_degree : Gauss-Legendre degree for the circle integral

    NOTE(review): this definition is a duplicate of an earlier one in the
    file (identical token for token) and shadows it — consider removing one.
    '''
    mesh = V.mesh()
    line_mesh = TV.mesh()
    # We are going to perform the integration with Gauss quadrature at
    # the end (PI u)(x):
    # A cell of mesh (an edge) defines a normal vector. Let P be the plane
    # that is defined by the normal vector n and some point x on Gamma. Let L
    # be the circle that is the intersect of P and S. The value of q (in Q) at x
    # is defined as
    #
    #                 q(x) = (1/|L|)*\int_{L}g(x)*dL
    #
    # which simplifies to g(x) = (1/(2*pi*R))*\int_{-pi}^{pi}u(L)*R*d(theta) and
    # or                       = (1/2) * \int_{-1}^{1} u(L(pi*s)) * ds
    # This can be integrated no problemo once we figure out L. To this end, let
    # t_1 and t_2 be two unit mutually orthogonal vectors that are orthogonal to
    # n. Then L(pi*s) = p + R*t_1*cos(pi*s) + R*t_2*sin(pi*s) can be seen to be
    # such that i) |x-p| = R and ii) x.n = 0 [i.e. this is the suitable
    # parametrization]

    # Clearly we can scale the weights as well as precompute
    # cos and sin terms
    xq, wq = leggauss(quad_degree)
    wq *= 0.5
    cos_xq = np.cos(np.pi * xq).reshape((-1, 1))
    sin_xq = np.sin(np.pi * xq).reshape((-1, 1))

    # Normalize constant radius to a callable
    if is_number(radius):
        radius = lambda x, radius=radius: radius

    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations. A vector dof, if done naively, would
    # have to evaluate at the same x once per component.
    value_size = TV.ufl_element().value_size()

    # Eval at points will require search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non scalar we plan to make components by shift:
    # use the scalar sub-dofmap and offset rows by the component index
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()
    basis_values = np.zeros(V.element().space_dimension() * value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # Get the tangent => orthogonal tangent vectors
            v0, v1 = mesh_x[line_cell.entities(0)]
            n = v0 - v1

            # NOTE(review): t1 is the zero vector when n is parallel to
            # (1, 1, 1); the norm division below then fails — confirm the
            # line meshes used avoid edges along that direction.
            t1 = np.array([n[1] - n[2], n[2] - n[0], n[0] - n[1]])
            t2 = np.cross(n, t1)
            t1 /= np.linalg.norm(t1)
            t2 = t2 / np.linalg.norm(t2)

            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Get radius and integration points on the circle
                rad = radius(avg_point)

                integration_points = avg_point + rad * t1 * sin_xq + rad * t2 * cos_xq

                # Accumulate column contributions over quadrature points
                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    # Skip quadrature points falling outside the mesh
                    if c >= limit: continue

                    Vcell = Cell(mesh, c)
                    vertex_coordinates = Vcell.get_vertex_coordinates()
                    cell_orientation = Vcell.orientation()
                    Vel.evaluate_basis_all(basis_values, ip,
                                           vertex_coordinates, cell_orientation)

                    cols_ip = V_dm.cell_dofs(c)
                    values_ip = basis_values * wq[index]
                    # Add (a dof can receive contributions from several points)
                    for col, value in zip(cols_ip,
                                          values_ip.reshape((-1, value_size))):
                        if col in data:
                            data[col] += value
                        else:
                            data[col] = value

                # The thing now is that with data we can assign to several
                # rows of the matrix (one per component, shifted)
                # NOTE: dict.keys() passed to np.array is Python-2 only
                column_indices = np.array(data.keys(), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array(
                        [data[col][shift] for col in column_indices])
                    mat.setValues([row], column_indices, column_values,
                                  PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)
def sphere_average_matrix(V, TV, radius, quad_degree):
    '''
    Averaging matrix over the sphere: reduce u in V (3d) to TV (on a
    line mesh) by averaging over spheres of the given radius centered
    at the TV dof coordinates.

    radius      : number, or callable point -> number
    quad_degree : Lebedev quadrature degree (bumped to odd if even)

    NOTE(review): this definition duplicates an earlier one in the file
    and shadows it — consider removing one.
    '''
    mesh = V.mesh()
    line_mesh = TV.mesh()
    # Lebedev below needs odd degrees
    if quad_degree % 2 == 0: quad_degree += 1
    # NOTE: this is a third-party dependency (quadpy), imported lazily
    # so the rest of the module works without it
    from quadpy.sphere import Lebedev

    integrator = Lebedev(quad_degree)
    xq = integrator.points    # unit-sphere quadrature points
    wq = integrator.weights

    # Normalize constant radius to a callable
    if is_number(radius):
        radius = lambda x, radius=radius: radius

    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations. A vector dof, if done naively, would
    # have to evaluate at the same x once per component.
    value_size = TV.ufl_element().value_size()

    # Eval at points will require search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non scalar we plan to make components by shift:
    # use the scalar sub-dofmap and offset rows by the component index
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()
    basis_values = np.zeros(V.element().space_dimension() * value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Get radius and integration points
                rad = radius(avg_point)
                # Scale and shift the unit sphere to the point
                integration_points = xq * rad + avg_point

                # Accumulate column contributions over quadrature points
                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    # Skip quadrature points falling outside the mesh
                    if c >= limit: continue

                    Vcell = Cell(mesh, c)
                    vertex_coordinates = Vcell.get_vertex_coordinates()
                    cell_orientation = Vcell.orientation()
                    Vel.evaluate_basis_all(basis_values, ip,
                                           vertex_coordinates, cell_orientation)

                    cols_ip = V_dm.cell_dofs(c)
                    values_ip = basis_values * wq[index]
                    # Add (a dof can receive contributions from several points)
                    for col, value in zip(cols_ip,
                                          values_ip.reshape((-1, value_size))):
                        if col in data:
                            data[col] += value
                        else:
                            data[col] = value

                # The thing now is that with data we can assign to several
                # rows of the matrix (one per component, shifted)
                # NOTE: dict.keys() passed to np.array is Python-2 only
                column_indices = np.array(data.keys(), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array(
                        [data[col][shift] for col in column_indices])
                    mat.setValues([row], column_indices, column_values,
                                  PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)
def average_matrix(V, TV, shape):
    '''
    Averaging matrix for reduction of g in V to TV by integration over shape.

    shape : object providing quadrature(avg_point, n) -> (points, weights);
            the quadrature describes the curve L(s) over which u is averaged.
    '''
    # We build a matrix representation of u in V -> Pi(u) in TV where
    #
    # Pi(u)(s) = |L(s)|^-1*\int_{L(s)}u(t) dx(s)
    #
    # Here L is the shape over which u is integrated for reduction.
    # Its measure is |L(s)|.
    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations. A vector dof, if done naively, would
    # have to evaluate at the same x once per component.
    value_size = TV.ufl_element().value_size()

    mesh = V.mesh()
    # Eval at points will require search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    line_mesh = TV.mesh()
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non scalar we plan to make components by shift:
    # use the scalar sub-dofmap and offset rows by the component index
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()
    basis_values = np.zeros(V.element().space_dimension() * value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # Get the tangent (normal of the plane which cuts the virtual
            # surface to yield the bdry curve)
            v0, v1 = mesh_x[line_cell.entities(0)]
            n = v0 - v1

            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Avg point here has the role of 'height' coordinate
                quadrature = shape.quadrature(avg_point, n)
                integration_points = quadrature.points
                wq = quadrature.weights

                # The averaging normalization |L(s)| ~ sum of the weights
                curve_measure = sum(wq)

                # Accumulate column contributions over quadrature points
                data = {}
                for index, ip in enumerate(integration_points):
                    # NOTE(review): this first-collision query only acts as an
                    # outside-the-mesh guard; the cell actually used comes from
                    # compute_entity_collisions below. Looks like debug residue
                    # (see the commented-out assert) — verify before simplifying.
                    c = tree.compute_first_entity_collision(Point(*ip))
                    if c >= limit: continue

                    cs = tree.compute_entity_collisions(Point(*ip))
                    # assert False
                    # Only the first colliding cell contributes
                    for c in cs[:1]:
                        Vcell = Cell(mesh, c)
                        vertex_coordinates = Vcell.get_vertex_coordinates()
                        cell_orientation = Vcell.orientation()
                        # Newer dolfin API: evaluate_basis_all returns the values
                        basis_values[:] = Vel.evaluate_basis_all(
                            ip, vertex_coordinates, cell_orientation)

                        cols_ip = V_dm.cell_dofs(c)
                        values_ip = basis_values * wq[index]
                        # Add (a dof can receive contributions from several points)
                        for col, value in zip(
                                cols_ip, values_ip.reshape((-1, value_size))):
                            if col in data:
                                data[col] += value / curve_measure
                            else:
                                data[col] = value / curve_measure

                # The thing now is that with data we can assign to several
                # rows of the matrix (one per component, shifted)
                column_indices = np.array(list(data.keys()), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array(
                        [data[col][shift] for col in column_indices])
                    mat.setValues([row], column_indices, column_values,
                                  PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    # NOTE(review): returns the raw PETSc Mat; sibling functions in this file
    # wrap it as PETScMatrix(mat) — confirm callers expect the raw handle.
    return mat
def surface_average_matrix(V, TV, bdry_curve):
    '''
    Averaging matrix: reduce u in V to TV by averaging over the boundary
    curve at each 'height'.

    bdry_curve : object providing fixed quadrature weights (.weights) and,
                 per plane normal n, point/length generators
                 (.points(n) -> callable, .length(n) -> callable).
    '''
    mesh = V.mesh()
    line_mesh = TV.mesh()
    # We build a matrix representation of u in V -> Pi(u) in TV where
    #
    # Pi(u)(s) = |L(s)|^-1*\int_{L(s)}u(t) dL(s)
    #
    # Here L represents a curve bounding the surface at 'height' s.
    #
    # We do this numerically as |L(s)|^-1*\sum_q u(x_q)*w_q

    # Weights remain fixed over all cells/heights
    wq = bdry_curve.weights

    mesh_x = TV.mesh().coordinates()
    # The idea for point evaluation/computing dofs of TV is to minimize
    # the number of evaluations. A vector dof, if done naively, would
    # have to evaluate at the same x once per component.
    value_size = TV.ufl_element().value_size()

    # Eval at points will require search
    tree = mesh.bounding_box_tree()
    limit = mesh.num_cells()

    TV_coordinates = TV.tabulate_dof_coordinates().reshape((TV.dim(), -1))
    TV_dm = TV.dofmap()
    V_dm = V.dofmap()
    # For non scalar we plan to make components by shift:
    # use the scalar sub-dofmap and offset rows by the component index
    if value_size > 1:
        TV_dm = TV.sub(0).dofmap()

    Vel = V.element()
    basis_values = np.zeros(V.element().space_dimension() * value_size)
    with petsc_serial_matrix(TV, V) as mat:

        for line_cell in cells(line_mesh):
            # Get the tangent (normal of the plane which cuts the virtual
            # surface to yield the bdry curve)
            v0, v1 = mesh_x[line_cell.entities(0)]
            n = v0 - v1

            # We can specialize quadrature points; we can have several
            # height points with same normal
            pts_at_n = bdry_curve.points(n)
            len_at_n = bdry_curve.length(n)

            # The idea is now to minimize the point evaluation
            scalar_dofs = TV_dm.cell_dofs(line_cell.index())
            scalar_dofs_x = TV_coordinates[scalar_dofs]
            for scalar_row, avg_point in zip(scalar_dofs, scalar_dofs_x):
                # Avg point here has the role of 'height' coordinate
                integration_points = pts_at_n(avg_point)
                len_bdry_curve = len_at_n(avg_point)

                # Accumulate column contributions over quadrature points
                data = {}
                for index, ip in enumerate(integration_points):
                    c = tree.compute_first_entity_collision(Point(*ip))
                    # Skip quadrature points falling outside the mesh
                    if c >= limit: continue

                    Vcell = Cell(mesh, c)
                    vertex_coordinates = Vcell.get_vertex_coordinates()
                    cell_orientation = Vcell.orientation()
                    Vel.evaluate_basis_all(basis_values, ip,
                                           vertex_coordinates, cell_orientation)

                    cols_ip = V_dm.cell_dofs(c)
                    values_ip = basis_values * wq[index]
                    # Add (a dof can receive contributions from several points)
                    for col, value in zip(cols_ip,
                                          values_ip.reshape((-1, value_size))):
                        if col in data:
                            data[col] += value / len_bdry_curve
                        else:
                            data[col] = value / len_bdry_curve

                # The thing now is that with data we can assign to several
                # rows of the matrix (one per component, shifted)
                # NOTE: dict.keys() passed to np.array is Python-2 only
                column_indices = np.array(data.keys(), dtype='int32')
                for shift in range(value_size):
                    row = scalar_row + shift
                    column_values = np.array(
                        [data[col][shift] for col in column_indices])
                    mat.setValues([row], column_indices, column_values,
                                  PETSc.InsertMode.INSERT_VALUES)
            # On to next avg point
        # On to next cell
    return PETScMatrix(mat)