def test_grad(self):
    g = igl.grad(self.v, self.f)
    h = igl.grad(self.v, self.f, uniform=True)
    self.assertTrue(g.shape == (self.f.shape[0] * self.v.shape[1], self.v.shape[0]))
    self.assertTrue(h.shape == (self.f.shape[0] * self.v.shape[1], self.v.shape[0]))
    self.assertTrue(type(g) == type(h) == csc.csc_matrix)
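
# A minimal sketch (not from the test suite above) of the (#F*dim, #V) layout
# those assertions check: igl.grad stacks all x-rows, then y-rows, then z-rows,
# so a column-major reshape recovers one gradient vector per face.
# Assumes the scipy-based igl Python bindings and numpy are available.
import numpy as np
import igl

v = np.array([[0.0, 0.0, 0.0],
              [1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0]])
f = np.array([[0, 1, 2]])

g = igl.grad(v, f)                               # sparse, shape (3 * 1, 3)
u = v[:, 0]                                      # scalar field = x-coordinate
gu = (g @ u).reshape(f.shape[0], 3, order='F')   # one gradient vector per face
print(gu)                                        # approximately [[1., 0., 0.]]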
def tri_grad(t, v, f, grad_matrix=None):
    """
    Calculate gradient of function defined on vertices of triangular mesh.

    Assumes a time-dependent field; the time-independent case can be handled
    by passing an array with only one time point. If a tensor is passed, each
    tensor component is interpreted as an individual scalar function.

    Parameters
    ----------
    t : np.array of shape (#timepoints, #vertices, ...)
        Scalar function or tensor.
    v : np.array of shape (#vertices, dim)
        Vertices.
    f : np.array of shape (#faces, 3)
        Faces.
    grad_matrix : scipy.sparse, optional
        Gradient operator. The default is None (calculate it from v, f).

    Returns
    -------
    np.array of shape (dim, #timepoints, #vertices, ...)
        Gradient of scalar function/tensor defined on vertices.
    """
    # fix these dimension variables before reshaping
    n_t = t.shape[0]
    n_ind = t.shape[2:]
    if grad_matrix is None:
        # calculate gradient operator
        grad_matrix = igl.grad(v, f)
    # swap axes and reshape so that the sparse gradient op can be applied
    t = t.swapaxes(0, 1)
    t = t.reshape((t.shape[0], np.prod(t.shape[1:])))
    # calculate the gradient of t by matrix multiplication, then reshape
    grad_t = grad_matrix.dot(t)
    # the column-major ('F') reshape matches libigl's stacked #F*dim gradient layout
    grad_t = grad_t.reshape((f.shape[0], v.shape[1], grad_t.shape[1]), order='F')
    # now, average onto vertices. Need to iterate over all other axes
    grad_t = np.stack([
        igl.average_onto_vertices(v, f, grad_t[:, :, i])
        for i in range(grad_t.shape[2])
    ], axis=2)
    # finally, reshape into original shape
    grad_t = grad_t.reshape((v.shape[0], v.shape[1], n_t) + n_ind)
    # shape is now (#vertices, dim, #timepoints, ...)
    grad_t = grad_t.swapaxes(0, 1).swapaxes(1, 2)
    return grad_t
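
# Hedged usage sketch for tri_grad above (assumes tri_grad, numpy, and igl are
# in scope; the mesh and field are illustrative only): a two-triangle square,
# a scalar field sampled at two time points, and the resulting gradient of
# shape (dim, #timepoints, #vertices).
import numpy as np

v = np.array([[0.0, 0.0, 0.0],
              [1.0, 0.0, 0.0],
              [1.0, 1.0, 0.0],
              [0.0, 1.0, 0.0]])
f = np.array([[0, 1, 2], [0, 2, 3]])

t = np.stack([v[:, 0], 2 * v[:, 0]])   # shape (2 timepoints, 4 vertices)
grad_t = tri_grad(t, v, f)             # shape (3, 2, 4)
print(grad_t.shape)
print(grad_t[0])                       # x-component: ~1 at t=0, ~2 at t=1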
def construct_matrices(self):
    """
    Construct FEM matrices
    """
    V = self.vertices
    F = self.faces
    # Compute gradient operator: #F*3 by #V
    G = igl.grad(V, F).tocoo()
    L = igl.cotmatrix(V, F).tocoo()
    N = igl.per_face_normals(V, F, np.array([0., 0., 0.]))
    A = igl.doublearea(V, F)
    A = A[:, np.newaxis]
    M = igl.massmatrix(V, F, igl.MASSMATRIX_TYPE_VORONOI).tocoo()
    M = M.data
    # Compute latitude and longitude directional vector fields
    NS = np.reshape(G.dot(self.lat), [self.nf, 3], order='F')
    EW = np.cross(NS, N)
    # Compute F2V matrix (weigh by area)
    # adjacency
    i = self.faces.ravel()
    j = np.arange(self.nf).repeat(3)
    one = np.ones(self.nf * 3)
    adj = sparse.csc_matrix((one, (i, j)), shape=(self.nv, self.nf))
    tot_area = adj.dot(A)
    norm_area = A.ravel().repeat(3) / np.squeeze(tot_area[i])
    F2V = sparse.csc_matrix((norm_area, (i, j)), shape=(self.nv, self.nf))
    # Compute interpolation matrix
    if self.level > 0:
        intp = self.intp[self.nv_prev:]
        i = np.concatenate(
            (np.arange(self.nv), np.arange(self.nv_prev, self.nv)))
        j = np.concatenate((np.arange(self.nv_prev), intp[:, 0], intp[:, 1]))
        ratio = np.concatenate(
            (np.ones(self.nv_prev), 0.5 * np.ones(2 * intp.shape[0])))
        intp = sparse.csc_matrix((ratio, (i, j)),
                                 shape=(self.nv, self.nv_prev))
    else:
        intp = sparse.csc_matrix(np.eye(self.nv))
    # Compute vertex mean matrix
    self.G = G      # gradient matrix
    self.L = L      # laplacian matrix
    self.N = N      # normal vectors (per-triangle)
    self.NS = NS    # north-south vectors (per-triangle)
    self.EW = EW    # east-west vectors (per-triangle)
    self.F2V = F2V  # map face quantities to vertices
    self.M = M      # mass matrix (area of voronoi cell around node, for integration)
    self.Seq = self._rotseq(self.vertices)
    self.Intp = intp
def get_face_gradient_from_scalar_field(mesh, u, use_igl=True):
    """
    Finds face gradient from scalar field u. Scalar field u is given per vertex.

    Parameters
    ----------
    mesh : :class: 'compas.datastructures.Mesh'
    u : list, float (dimensions: #VN x 1)

    Returns
    ----------
    np.array (dimensions: #F x 3)
        One gradient vector per face.
    """
    logger.info('Computing per face gradient')

    if use_igl:
        try:
            import igl
            v, f = mesh.to_vertices_and_faces()
            G = igl.grad(np.array(v), np.array(f))
            X = G * u
            nf = len(list(mesh.faces()))
            X = np.array([[X[i], X[i + nf], X[i + 2 * nf]] for i in range(nf)])
            return X
        except ModuleNotFoundError:
            print("Could not calculate gradient with IGL because it is not installed. "
                  "Falling back to default function")

    grad = []
    for fkey in mesh.faces():
        A = mesh.face_area(fkey)
        N = mesh.face_normal(fkey)
        edge_0, edge_1, edge_2 = get_face_edge_vectors(mesh, fkey)
        v0, v1, v2 = mesh.face_vertices(fkey)
        u0 = u[v0]
        u1 = u[v1]
        u2 = u[v2]
        vc0 = np.array(mesh.vertex_coordinates(v0))
        vc1 = np.array(mesh.vertex_coordinates(v1))
        vc2 = np.array(mesh.vertex_coordinates(v2))
        # grad_u = -1 * ((u1-u0) * np.cross(vc0-vc2, N) + (u2-u0) * np.cross(vc1-vc0, N)) / (2 * A)
        grad_u = ((u1 - u0) * np.cross(vc0 - vc2, N)
                  + (u2 - u0) * np.cross(vc1 - vc0, N)) / (2 * A)
        # grad_u = (np.cross(N, edge_0) * u2 +
        #           np.cross(N, edge_1) * u0 +
        #           np.cross(N, edge_2) * u1) / (2 * A)
        grad.append(grad_u)
    return np.array(grad)
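
# Hedged usage sketch for get_face_gradient_from_scalar_field above. Assumes
# the function (and its module-level logger / helpers) is importable and that
# compas is installed; the mesh and field values are illustrative only.
from compas.datastructures import Mesh

vertices = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
faces = [[0, 1, 2], [0, 2, 3]]
mesh = Mesh.from_vertices_and_faces(vertices, faces)

# per-vertex scalar field: the x-coordinate of each vertex
u = [mesh.vertex_coordinates(vkey)[0] for vkey in mesh.vertices()]

X = get_face_gradient_from_scalar_field(mesh, u, use_igl=True)
print(X.shape)   # (#F, 3), one gradient vector per face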
def construct_mesh_matrices_de(vertices, faces):
    v_num, f_num = vertices.shape[0], faces.shape[0]
    G = igl.grad(vertices, faces)
    L = igl.cotmatrix(vertices, faces)
    A = igl.doublearea(vertices, faces)
    XN = np.array([1, 0, 0], dtype=np.float32)
    YN = np.array([0, 1, 0], dtype=np.float32)
    # F2V: area-weighted face-to-vertex averaging matrix
    i = faces.ravel()
    j = np.arange(f_num).repeat(3)
    one = np.ones(f_num * 3)
    adj = sparse.csc_matrix((one, (i, j)), shape=(v_num, f_num))
    tot_area = adj.dot(A)
    norm_area = A.ravel().repeat(3) / np.squeeze(tot_area[i])
    F2V = sparse.csc_matrix((norm_area, (i, j)), shape=(v_num, f_num))
    return {'G': G, 'L': L, 'A': A, 'XN': XN, 'YN': YN, 'F2V': F2V}
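
# Hedged sketch of how the F2V matrix built above is typically used: it maps a
# per-face quantity to an area-weighted per-vertex average. Assumes
# construct_mesh_matrices_de above is in scope, plus numpy, scipy, and igl.
import numpy as np
import igl

v = np.array([[0.0, 0.0, 0.0],
              [1.0, 0.0, 0.0],
              [1.0, 1.0, 0.0],
              [0.0, 1.0, 0.0]])
f = np.array([[0, 1, 2], [0, 2, 3]])

ops = construct_mesh_matrices_de(v, f)
per_face = igl.doublearea(v, f) * 0.5   # any per-face scalar, here triangle areas
per_vertex = ops['F2V'].dot(per_face)   # shape (#V,), weights sum to 1 per vertex
print(per_vertex)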
def construct_mesh_matrices(vertices, faces):
    v_num, f_num = vertices.shape[0], faces.shape[0]
    G = igl.grad(vertices, faces)
    # L = igl.cotmatrix(vertices, faces)
    L = cotmatrix_firstvonly(vertices, faces)
    A = igl.doublearea(vertices, faces)
    XN = np.array([1, 0, 0], dtype=np.float32)
    YN = np.array([0, 1, 0], dtype=np.float32)
    i = faces[:, 0].ravel()  # each triangle only belongs to the first vertex!
    j = np.arange(f_num)
    one = np.ones(f_num)
    adj = sparse.csc_matrix((one, (i, j)), shape=(v_num, f_num))
    tot_area = adj.dot(A)
    norm_area = A.ravel() / np.squeeze(tot_area[i] + 1e-6)
    F2V = sparse.csc_matrix((norm_area, (i, j)), shape=(v_num, f_num))
    return {'G': G, 'L': L, 'A': A, 'XN': XN, 'YN': YN, 'F2V': F2V}
def construct_mesh_matrices_de(vertices, faces, lat):
    v_num, f_num = vertices.shape[0], faces.shape[0]
    G = igl.grad(vertices, faces)
    L = igl.cotmatrix(vertices, faces)
    A = igl.doublearea(vertices, faces)
    N = igl.per_face_normals(vertices, faces, vertices)
    # latitude gradient gives the north-south direction; east-west is its
    # cross product with the face normal
    YN = np.reshape(G.dot(lat), [f_num, 3], order='F')
    YN = YN / (np.linalg.norm(YN, axis=1)[:, np.newaxis] + 1e-6)
    XN = np.cross(YN, N)
    i = faces.ravel()
    j = np.arange(f_num).repeat(3)
    one = np.ones(f_num * 3)
    adj = sparse.csc_matrix((one, (i, j)), shape=(v_num, f_num))
    tot_area = adj.dot(A)
    norm_area = A.ravel().repeat(3) / np.squeeze(tot_area[i] + 1e-6)
    F2V = sparse.csc_matrix((norm_area, (i, j)), shape=(v_num, f_num))
    return {'G': G, 'L': L, 'A': A, 'XN': XN, 'YN': YN, 'F2V': F2V}
def construct_mesh_matrices(vertices, faces, lat):
    v_num, f_num = vertices.shape[0], faces.shape[0]
    G = igl.grad(vertices, faces)
    # L = igl.cotmatrix(vertices, faces)
    L = cotmatrix_firstvonly(vertices, faces)
    A = igl.doublearea(vertices, faces)
    N = igl.per_face_normals(vertices, faces, vertices)
    # XN = np.array([1, 0, 0], dtype=np.float32)
    # YN = np.array([0, 1, 0], dtype=np.float32)
    YN = np.reshape(G.dot(lat), [f_num, 3], order='F')
    YN = YN / (np.linalg.norm(YN, axis=1)[:, np.newaxis] + 1e-6)
    XN = np.cross(YN, N)
    i = faces[:, 0].ravel()  # each triangle only belongs to the first vertex!
    j = np.arange(f_num)
    one = np.ones(f_num)
    adj = sparse.csc_matrix((one, (i, j)), shape=(v_num, f_num))
    tot_area = adj.dot(A)
    norm_area = A.ravel() / np.squeeze(tot_area[i] + 1e-6)
    F2V = sparse.csc_matrix((norm_area, (i, j)), shape=(v_num, f_num))
    return {'G': G, 'L': L, 'A': A, 'XN': XN, 'YN': YN, 'F2V': F2V, 'N': N}
import igl

V = igl.eigen.MatrixXd()
F = igl.eigen.MatrixXi()
L = igl.eigen.SparseMatrixd()

viewer = igl.viewer.Viewer()

# Load a mesh in OFF format
igl.readOFF("../../tutorial/shared/cow.off", V, F)

# Compute Laplace-Beltrami operator: #V by #V
igl.cotmatrix(V, F, L)

# Alternative construction of the same Laplacian
G = igl.eigen.SparseMatrixd()
K = igl.eigen.SparseMatrixd()

# Gradient/Divergence
igl.grad(V, F, G)

# Diagonal per-triangle "mass matrix"
dblA = igl.eigen.MatrixXd()
igl.doublearea(V, F, dblA)

# Place areas along diagonal #dim times
T = (dblA.replicate(3, 1) * 0.5).asDiagonal() * 1

# Laplacian K built as discrete divergence of gradient, or equivalently
# the discrete Dirichlet energy Hessian
K = -G.transpose() * T * G
print("|K-L|: ", (K - L).norm())
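
# The same divergence-of-gradient identity, sketched with the scipy-based igl
# Python bindings used elsewhere in this file (an assumption about the
# installed bindings). T repeats half the double areas three times because
# igl.grad stacks its x-, y-, and z-rows block-wise.
import numpy as np
import scipy.sparse as sparse
import igl

v = np.array([[0.0, 0.0, 0.0],
              [1.0, 0.0, 0.0],
              [1.0, 1.0, 0.0],
              [0.0, 1.0, 0.0]])
f = np.array([[0, 1, 2], [0, 2, 3]])

G = igl.grad(v, f)
dblA = igl.doublearea(v, f)
T = sparse.diags(np.tile(0.5 * dblA, 3))
K = -G.T @ T @ G
L = igl.cotmatrix(v, f)
print(abs(K - L).max())   # expect a value close to zero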
import igl

V = igl.eigen.MatrixXd()
F = igl.eigen.MatrixXi()

# Load a mesh in OFF format
igl.readOFF("../../tutorial/shared/cheburashka.off", V, F)

# Read scalar function values from a file, U: #V by 1
U = igl.eigen.MatrixXd()
igl.readDMAT("../../tutorial/shared/cheburashka-scalar.dmat", U)
U = U.col(0)

# Compute gradient operator: #F*3 by #V
G = igl.eigen.SparseMatrixd()
igl.grad(V, F, G)

# Compute gradient of U
GU = (G * U).MapMatrix(F.rows(), 3)

# Compute gradient magnitude
GU_mag = GU.rowwiseNorm()

viewer = igl.viewer.Viewer()
viewer.data.set_mesh(V, F)

# Compute pseudocolor for original function
C = igl.eigen.MatrixXd()
igl.jet(U, True, C)
def lie_derivative(u, tens, covariant=False, density=False, v=None, f=None,
                   delta_t=1, grid_coords=None):
    """
    Compute the Lie derivative of an arbitrary rank tensor field in 2d or 3d.

    Can calculate the Lie derivative either in a chart or on an embedded
    triangular mesh. If #time points == 1, the autonomous Lie derivative is
    calculated.

    Chart: input the values of t, u in that chart (assumed to be a 2d/3d
    rectangular grid, not necessarily evenly spaced, with n_x/n_y/n_z =
    #points along x-axis/y-axis/z-axis).

    Mesh: input u, t in cartesian components, defined on vertices v of a mesh.
    Assumes that u and t are expressed in cartesian/embedding components,
    i.e. NOT in a local frame attached to mesh vertices or faces.

    By default, the code assumes the 'chart' case. To use the mesh case, pass
    the vertices and faces via the arguments v, f.

    IMPORTANT CONVENTIONS FOR CHARTS:
    Arrays/matrices represent images/tensors defined on points on a Cartesian
    grid. For a python array: arr[i, j] = row i, column j. My convention for
    the map array indices -> cartesian coordinates is that row == y-axis,
    column == x-axis. The x/y-coordinates of a row/column are specified using
    the grid_coords argument. In case of a 3d grid, the array index order is
    y-axis - x-axis - z-axis. By default, x-coords are ascending and
    y-coordinates descending: arr[i, j] corresponds to x-coord = j,
    y-coord = #rows - i.

    Beyond the indices standing for positions on the rectangular grid, there
    are also vector and tensor indices. Here, my convention is:
    vector index (k=0) == x-component, (k=1) == y-component,
    (k=2) == z-component, ...

    The Lie derivative can be calculated for tensor fields of any rank. The
    number of tensor components is inferred automatically from the input, but
    the user must specify whether the tensor is co-, contravariant or mixed
    (i.e. which indices are "upper"/"lower") via the "covariant" keyword.
    Lie derivatives of tensor densities are computed via the "density"
    keyword.

    Parameters
    ----------
    u : np.array of shape (#time points, n_y, n_x, 2),
        (#time points, n_y, n_x, n_z, 3) or (#time points, #vertices, 3)
        Time dependent vector field.
    tens : np.array of shape (#time points, n_y, n_x, ...),
        (#time points, n_y, n_x, n_z, ...) or (#time points, #vertices, ...)
        Time dependent tensor field: function, vector, or rank (r, s) tensor,
        depending on the shape. ... are the tensor component indices.
        No indices: tens is a scalar function, one index: tens is a
        (co)vector, ...
    covariant : bool or list of bool, optional
        Which tensor indices are covariant. True: index is covariant,
        False: index is contravariant. If a single True/False is supplied,
        then all/no indices are covariant. The default is False.
    density : bool, optional
        Whether the input transforms as a tensor density. The default is
        False.
    v : np.array of shape (#vertices, dim) or None
        Vertices of triangular mesh. If None is passed, the code assumes that
        fields are defined on a rectangular grid ('chart' case).
    f : np.array of shape (#faces, dim) or None
        Faces of triangular mesh. Each row is a triple of vertex indices
        belonging to a face. If None is passed, the code assumes that fields
        are defined on a rectangular grid ('chart' case).
    delta_t : float, optional
        Spacing in time. Choose so that the output is compatible with the
        units of u, e.g. if u is in pixels/minute and the sample rate is 30s,
        take delta_t=0.5. The default is 1.
    grid_coords : list of np.arrays, optional
        Coordinates of rectangular grid, grid_coords[0] = y-axis coords,
        grid_coords[1] = x-axis, ... (e.g. grid_coords[0][i] == y-coordinate
        of tens[#time point, i, :]). Only used if v is None and f is None.
        The default is unitary spacing in all dimensions, with x-coords
        ascending and y-coordinates descending (see above).

    Returns
    -------
    lie_tens : np.array of same shape as tens.
        Lie derivative L_u tens.
    """
    # preliminary argument parsing
    dim = u.shape[-1]
    tensor_rank = (len(tens.shape) - dim - 1 if (v is None and f is None)
                   else len(tens.shape) - 2)  # depends on chart vs. mesh case
    covariant = (covariant if isinstance(covariant, list)
                 else (covariant, ) * tensor_rank)
    if grid_coords is None:  # default grid coordinates (unit spacing)
        grid_coords = [
            np.arange(tens.shape[2]),
            np.arange(tens.shape[1])[::-1]
        ]
        if dim == 3:
            grid_coords.append(np.arange(tens.shape[3]))
    grid_coords = (grid_coords if isinstance(grid_coords, list)
                   else list(grid_coords))
    if (v is None and f is None):
        assert len(u.shape) == 2+dim and len(tens.shape) >= 1+dim, \
            "shapes incorrect"
        assert u.shape[:1+dim] == tens.shape[:1+dim], \
            "u, tens shapes incompatible"

        def my_grad(x):
            # axis order is due to coordinate convention
            return np.stack(
                np.gradient(x, *grid_coords,
                            axis=(2, 1) + tuple(range(2 + 1, dim + 1))))
        s_ind = 'xyz'[:dim]
    else:
        assert len(u.shape) == 3 and len(tens.shape) >= 2, "shapes incorrect"
        assert u.shape[:2] == tens.shape[:2], "u, tens shapes incompatible"
        grad_matrix = igl.grad(v, f)

        def my_grad(x):
            return tri_grad(x, v, f, grad_matrix=grad_matrix)
        s_ind = 'v'

    # get matrix of partial derivatives of vector and tensor field
    dc_u = my_grad(u)
    dc_tens = my_grad(tens)

    # first term in Lie derivative, always the same.
    # f-strings create the right index expression for chart & trimesh case
    uc_dc_tens = np.einsum(f't{s_ind}c,ct{s_ind}...->t{s_ind}...', u, dc_tens)
    # partial time derivative - 0 in time-independent case
    dt_tens = np.gradient(tens, delta_t, axis=0) if tens.shape[0] > 1 else 0
    # density term
    div_u = np.einsum(f'ct{s_ind}c->t{s_ind}', dc_u) * tens if density else 0
    # sum up all the contributions so far
    lie_tens = dt_tens + uc_dc_tens + div_u

    # Now, iterate over the co- and contra-variant indices.
    # The challenge is to construct the correct np.einsum str
    tensor_indices = string.ascii_lowercase[9:9 + tensor_rank]
    base_str = f'ct{s_ind}i,t{s_ind}' + tensor_indices
    for index, is_cov in enumerate(covariant):
        contr_index = tensor_indices[index]
        if is_cov:
            # rename the tensor index to be contracted to i
            contr_str = base_str.replace(contr_index, 'i')
            # make the target index order
            target_str = f'->t{s_ind}' + tensor_indices.replace(
                contr_index, 'c')
            lie_tens += np.einsum(contr_str + target_str, dc_u, tens)
        else:
            contr_str = base_str.replace(contr_index, 'c')
            target_str = f'->t{s_ind}' + tensor_indices.replace(
                contr_index, 'i')
            lie_tens += -np.einsum(contr_str + target_str, dc_u, tens)
    return lie_tens
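
# Hedged usage sketch for lie_derivative above in the 'chart' case (v=None,
# f=None); assumes the function and its module imports (numpy, string, igl)
# are in scope. A rotation flow and a scalar field on a 32x32 grid with the
# default (unit) spacing, sampled at two time points.
import numpy as np

n_t, n_y, n_x = 2, 32, 32
y, x = np.meshgrid(np.arange(n_y)[::-1], np.arange(n_x), indexing='ij')

# rotation flow about the grid centre, constant in time
u = np.zeros((n_t, n_y, n_x, 2))
u[..., 0] = -(y - n_y / 2)   # x-component of the flow
u[..., 1] = x - n_x / 2      # y-component of the flow

# scalar field (rank-0 tensor), also constant in time
tens = np.stack([x * 1.0, x * 1.0])

lie_tens = lie_derivative(u, tens, delta_t=1)
print(lie_tens.shape)        # (2, 32, 32), same shape as tens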