Example #1
def compute_mesh_laplacian(verts, tris):
    """
    computes a sparse matrix representing the discretized Laplace-Beltrami operator of the mesh
    given by n vertex positions ("verts") and m triangles ("tris")
    
    verts: (n, 3) array (float)
    tris: (m, 3) array (int) - indices into the verts array

    computes the conformal weights ("cotangent weights") for the mesh, i.e.:
    w_ij = - .5 * (cot \alpha + cot \beta)

    See:
        Olga Sorkine, "Laplacian Mesh Processing"
        and for theoretical comparison of different discretizations, see 
        Max Wardetzky et al., "Discrete Laplace operators: No free lunch"

    returns matrix L that computes the laplacian coordinates, e.g. L * x = delta
    """
    n = len(verts)
    W_ij = np.empty(0)
    I = np.empty(0, np.int32)
    J = np.empty(0, np.int32)
    for i1, i2, i3 in [(0, 1, 2), (1, 2, 0),
                       (2, 0, 1)]:  # for edge i2 --> i3 facing vertex i1
        vi1 = tris[:, i1]  # vertex index of i1
        vi2 = tris[:, i2]
        vi3 = tris[:, i3]
        # vertex vi1 faces the edge between vi2--vi3
        # compute the angle at v1
        # add cotangent angle at v1 to opposite edge v2--v3
        # the cotangent weights are symmetric
        u = verts[vi2] - verts[vi1]
        v = verts[vi3] - verts[vi1]
        cotan = (u * v).sum(axis=1) / veclen(np.cross(u, v))
        W_ij = np.append(W_ij, 0.5 * cotan)
        I = np.append(I, vi2)
        J = np.append(J, vi3)
        W_ij = np.append(W_ij, 0.5 * cotan)
        I = np.append(I, vi3)
        J = np.append(J, vi2)
    L = sparse.csr_matrix((W_ij, (I, J)), shape=(n, n))
    # compute diagonal entries
    L = L - sparse.spdiags(L * np.ones(n), 0, n, n)
    L = L.tocsr()
    # area matrix
    e1 = verts[tris[:, 1]] - verts[tris[:, 0]]
    e2 = verts[tris[:, 2]] - verts[tris[:, 0]]
    normals = np.cross(e1, e2)
    triangle_area = .5 * veclen(normals)
    # compute per-vertex area
    vertex_area = np.zeros(len(verts))
    ta3 = triangle_area / 3
    for i in range(tris.shape[1]):
        bc = np.bincount(tris[:, i].astype(int), ta3)
        vertex_area[:len(bc)] += bc
    VA = sparse.spdiags(vertex_area, 0, len(verts), len(verts))
    return L, VA
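
Note: the snippets on this page are shown without their surrounding module, so they implicitly assume "import numpy as np" and "from scipy import sparse", as well as two small vector helpers, veclen and normalized, that never appear in the listing. A minimal sketch of what those helpers presumably look like (an assumption, not the project's actual code):

import numpy as np

def veclen(vectors):
    # Euclidean length of each row vector of an (..., 3) array (assumed helper)
    return np.sqrt(np.sum(vectors ** 2, axis=-1))

def normalized(vectors):
    # row vectors rescaled to unit length (assumed helper)
    return vectors / veclen(vectors)[..., np.newaxis]
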
Example #2
def compute_mesh_laplacian(verts, tris):
    """
    computes a sparse matrix representing the discretized Laplace-Beltrami operator of the mesh
    given by n vertex positions ("verts") and m triangles ("tris")
    
    verts: (n, 3) array (float)
    tris: (m, 3) array (int) - indices into the verts array

    computes the conformal weights ("cotangent weights") for the mesh, i.e.:
    w_ij = - .5 * (cot \alpha + cot \beta)

    See:
        Olga Sorkine, "Laplacian Mesh Processing"
        and for theoretical comparison of different discretizations, see 
        Max Wardetzky et al., "Discrete Laplace operators: No free lunch"

    returns matrix L that computes the laplacian coordinates, e.g. L * x = delta
    """
    n = len(verts)
    W_ij = np.empty(0)
    I = np.empty(0, np.int32)
    J = np.empty(0, np.int32)
    for i1, i2, i3 in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]: # for edge i2 --> i3 facing vertex i1
        vi1 = tris[:,i1] # vertex index of i1
        vi2 = tris[:,i2]
        vi3 = tris[:,i3]
        # vertex vi1 faces the edge between vi2--vi3
        # compute the angle at v1
        # add cotangent angle at v1 to opposite edge v2--v3
        # the cotangent weights are symmetric
        u = verts[vi2] - verts[vi1]
        v = verts[vi3] - verts[vi1]
        cotan = (u * v).sum(axis=1) / veclen(np.cross(u, v))
        W_ij = np.append(W_ij, 0.5 * cotan)
        I = np.append(I, vi2)
        J = np.append(J, vi3)
        W_ij = np.append(W_ij, 0.5 * cotan)
        I = np.append(I, vi3)
        J = np.append(J, vi2)
    L = sparse.csr_matrix((W_ij, (I, J)), shape=(n, n))
    # compute diagonal entries
    L = L - sparse.spdiags(L * np.ones(n), 0, n, n)
    L = L.tocsr()
    # area matrix
    e1 = verts[tris[:,1]] - verts[tris[:,0]]
    e2 = verts[tris[:,2]] - verts[tris[:,0]]
    normals = np.cross(e1, e2)
    triangle_area = .5 * veclen(normals)
    # compute per-vertex area
    vertex_area = np.zeros(len(verts))
    ta3 = triangle_area / 3
    for i in range(tris.shape[1]):
        bc = np.bincount(tris[:,i].astype(int), ta3)
        vertex_area[:len(bc)] += bc
    VA = sparse.spdiags(vertex_area, 0, len(verts), len(verts))
    return L, VA
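
A hypothetical usage sketch for the compute_mesh_laplacian shown above, applied to a single-triangle mesh (names and values are illustrative only):

verts = np.array([[0., 0., 0.],
                  [1., 0., 0.],
                  [0., 1., 0.]])
tris = np.array([[0, 1, 2]])
L, VA = compute_mesh_laplacian(verts, tris)
delta = L * verts  # Laplacian (differential) coordinates, one row per vertex
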
Example #3
 def update_plot(self):
     c = self._components[self.component]
     self.pd.points = self._Xmean + self.activation * c
     magnitude = veclen(c)
     self.pd.point_data.scalars = magnitude
     self.actor.mapper.scalar_range = (0, magnitude.max())
     self.scene.render()
Example #5
    def __call__(self, idx):
        """ 
        computes geodesic distances to all vertices in the mesh
        idx can be either an integer (single vertex index) or a list of vertex indices
        or an array of bools of length n (with n the number of vertices in the mesh) 
        """
        u0 = np.zeros(len(self._verts))
        u0[idx] = 1.0
        # heat method, step 1
        u = self._factored_AtLc(u0).ravel()
        # heat method step 2
        grad_u = 1 / (2 * self._triangle_area)[:,np.newaxis] * (
              self._unit_normal_cross_e01 * u[self._tris[:,2]][:,np.newaxis]
            + self._unit_normal_cross_e12 * u[self._tris[:,0]][:,np.newaxis]
            + self._unit_normal_cross_e20 * u[self._tris[:,1]][:,np.newaxis]
        )
        X = - grad_u / veclen(grad_u)[:,np.newaxis]
        # heat method step 3
        div_Xs = np.zeros(len(self._verts))
        for i1, i2, i3 in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]: # for edge i2 --> i3 facing vertex i1
            vi1, vi2, vi3 = self._tris[:,i1], self._tris[:,i2], self._tris[:,i3]
            e1 = self._verts[vi2] - self._verts[vi1]
            e2 = self._verts[vi3] - self._verts[vi1]
            e_opp = self._verts[vi3] - self._verts[vi2]
            cot1 = 1 / np.tan(np.arccos( 
                (normalized(-e2) * normalized(-e_opp)).sum(axis=1)))
            cot2 = 1 / np.tan(np.arccos(
                (normalized(-e1) * normalized( e_opp)).sum(axis=1)))
            div_Xs += np.bincount(
                vi1.astype(int),
                0.5 * (cot1 * (e1 * X).sum(axis=1) + cot2 * (e2 * X).sum(axis=1)),
                minlength=len(self._verts))
        phi = self._factored_L(div_Xs).ravel()
        phi -= phi.min()
        return phi
Example #6
 def __init__(self, verts, tris, m=1.0):
     self._verts = verts
     self._tris = tris
     # precompute some stuff needed later on
     e01 = verts[tris[:, 1]] - verts[tris[:, 0]]
     e12 = verts[tris[:, 2]] - verts[tris[:, 1]]
     e20 = verts[tris[:, 0]] - verts[tris[:, 2]]
     self._triangle_area = .5 * veclen(np.cross(e01, e12))
     unit_normal = normalized(np.cross(normalized(e01), normalized(e12)))
     self._un = unit_normal
     self._unit_normal_cross_e01 = np.cross(unit_normal, -e01)
     self._unit_normal_cross_e12 = np.cross(unit_normal, -e12)
     self._unit_normal_cross_e20 = np.cross(unit_normal, -e20)
     # parameters for heat method
     h = np.mean([veclen(e) for e in (e01, e12, e20)])
     t = m * h**2
     # pre-factorize poisson systems
     Lc, vertex_area = compute_mesh_laplacian(verts,
                                              tris,
                                              area_type='lumped_mass')
     A = sparse.spdiags(vertex_area, 0, len(verts), len(verts))
     #self._factored_AtLc = splu((A - t * Lc).tocsc()).solve
     self._factored_AtLc = cholesky((A - t * Lc).tocsc(), mode='simplicial')
     #self._factored_L = splu(Lc.tocsc()).solve
     self._factored_L = cholesky(Lc.tocsc(), mode='simplicial')
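
Examples #5 and #6 appear to be the __call__ and __init__ of one class implementing the heat method of Crane et al. ("Geodesics in Heat"). The cholesky used here presumably comes from scikit-sparse (from sksparse.cholmod import cholesky) and splu from scipy.sparse.linalg; those imports, and the class name below, are assumptions made for illustration:

compute_distance = GeodesicDistanceComputation(verts, tris, m=1.0)  # hypothetical class name
phi = compute_distance(0)    # approximate geodesic distance from vertex 0 to every vertex
farthest = phi.argmax()      # index of the vertex farthest from the source
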
Example #7
 def __call__(self, idx):
     """
     computes geodesic distances to all vertices in the mesh
     idx can be either an integer (single vertex index) or a list of vertex indices
     or an array of bools of length n (with n the number of vertices in the mesh) 
     """
     u0 = np.zeros(len(self._verts))
     u0[idx] = 1.0
     # -- heat method, step 1
     u = self._factored_AtLc(u0).ravel()
     # -- heat method step 2
     # magnitude that we use to normalize the heat values across triangles
     n_u = 1. / (u[self._tris].sum(axis=1))
     # compute gradient
     grad_u =  self._unit_normal_cross_e01 * (n_u * u[self._tris[:,2]])[:,np.newaxis] \
             + self._unit_normal_cross_e12 * (n_u * u[self._tris[:,0]])[:,np.newaxis] \
             + self._unit_normal_cross_e20 * (n_u * u[self._tris[:,1]])[:,np.newaxis]
     X = -grad_u / veclen(grad_u)[:, np.newaxis]
     # -- heat method step 3
     # TODO this recomputes the cotangent weights, which is not at all necessary
     div_Xs = np.zeros(len(self._verts))
     for i1, i2, i3 in [(0, 1, 2), (1, 2, 0),
                        (2, 0, 1)]:  # for edge i2 --> i3 facing vertex i1
         vi1, vi2, vi3 = self._tris[:, i1], self._tris[:, i2], self._tris[:, i3]
         e1 = self._verts[vi2] - self._verts[vi1]
         e2 = self._verts[vi3] - self._verts[vi1]
         e_opp = self._verts[vi3] - self._verts[vi2]
         #cot1 = 1 / np.tan(np.arccos(
         #    (normalized(-e2) * normalized(-e_opp)).sum(axis=1)))
         #cot1 = 1 / np.tan(np.arccos(
         #    (normalized(-e2) * normalized(-e_opp)).sum(axis=1)))
         cot1 = 0.5 * (e2 * e_opp).sum(axis=1) / veclen(np.cross(e2, -e_opp))
         #cot2 = 1 / np.tan(np.arccos(
         #    (normalized(-e1) * normalized( e_opp)).sum(axis=1)))
         cot2 = 0.5 * (e1 * -e_opp).sum(axis=1) / veclen(np.cross(e1, +e_opp))
         div_Xs += np.bincount(
             vi1.astype(int),
             (((cot1[:, np.newaxis] * e1) * X).sum(axis=1) +
              ((cot2[:, np.newaxis] * e2) * X).sum(axis=1)),
             minlength=len(self._verts))
     # solve poisson system
     phi = self._factored_L(div_Xs).ravel()
     phi = phi - phi.min()
     phi = phi.max() - phi
     return phi
Example #8
 def __call__(self, idx):
     """
     computes geodesic distances to all vertices in the mesh
     idx can be either an integer (single vertex index) or a list of vertex indices
     or an array of bools of length n (with n the number of vertices in the mesh) 
     """
     u0 = np.zeros(len(self._verts))
     u0[idx] = 1.0
     # -- heat method, step 1
     u = self._factored_AtLc(u0).ravel()
     # -- heat method step 2
     # magnitude that we use to normalize the heat values across triangles
     n_u = 1. / (u[self._tris].sum(axis=1))
     # compute gradient
     grad_u =  self._unit_normal_cross_e01 * (n_u * u[self._tris[:,2]])[:,np.newaxis] \
             + self._unit_normal_cross_e12 * (n_u * u[self._tris[:,0]])[:,np.newaxis] \
             + self._unit_normal_cross_e20 * (n_u * u[self._tris[:,1]])[:,np.newaxis]
     X = - grad_u / veclen(grad_u)[:,np.newaxis]
     # -- heat method step 3
     # TODO this recomputes the cotangent weights, which is not at all necessary
     div_Xs = np.zeros(len(self._verts))
     for i1, i2, i3 in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]: # for edge i2 --> i3 facing vertex i1
         vi1, vi2, vi3 = self._tris[:,i1], self._tris[:,i2], self._tris[:,i3]
         e1 = self._verts[vi2] - self._verts[vi1]
         e2 = self._verts[vi3] - self._verts[vi1]
         e_opp = self._verts[vi3] - self._verts[vi2]
         #cot1 = 1 / np.tan(np.arccos( 
         #    (normalized(-e2) * normalized(-e_opp)).sum(axis=1)))
         cot1 = 0.5 * (e2 *  e_opp).sum(axis=1) / veclen(np.cross(e2, -e_opp))
         #cot2 = 1 / np.tan(np.arccos(
         #    (normalized(-e1) * normalized( e_opp)).sum(axis=1)))
         cot2 = 0.5 * (e1 * -e_opp).sum(axis=1) / veclen(np.cross(e1, +e_opp))
         div_Xs += np.bincount(
             vi1.astype(int),
             (((cot1[:,np.newaxis] * e1) * X).sum(axis=1) + ((cot2[:,np.newaxis] * e2) * X).sum(axis=1)),
             minlength=len(self._verts))
     # solve poisson system
     phi = self._factored_L(div_Xs).ravel()
     phi = phi - phi.min()
     phi = phi.max() - phi
     return phi
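
The cot1/cot2 lines in Examples #7 and #8 replace the earlier 1 / np.tan(np.arccos(...)) expressions with a dot-product/cross-product ratio, using the identity cot(theta) = (u . v) / |u x v| for the angle theta between two vectors u and v (the extra 0.5 factor folds in the 1/2 that Examples #5 and #13 apply outside the cotangents). A quick numeric check of the identity, illustrative only and using the assumed helpers sketched earlier:

u = np.array([[3.0, 1.0, 0.0]])
v = np.array([[1.0, 2.0, 0.0]])
theta = np.arccos((normalized(u) * normalized(v)).sum(axis=1))
print(np.allclose(1 / np.tan(theta), (u * v).sum(axis=1) / veclen(np.cross(u, v))))  # True
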
Example #9
 def __init__(self, verts, tris, m=10.0):
     self._verts = verts
     self._tris = tris
     # precompute some stuff needed later on
     e01 = verts[tris[:, 1]] - verts[tris[:, 0]]
     e12 = verts[tris[:, 2]] - verts[tris[:, 1]]
     e20 = verts[tris[:, 0]] - verts[tris[:, 2]]
     self._triangle_area = .5 * veclen(np.cross(e01, e12))
     unit_normal = normalized(np.cross(normalized(e01), normalized(e12)))
     self._unit_normal_cross_e01 = np.cross(unit_normal, e01)
     self._unit_normal_cross_e12 = np.cross(unit_normal, e12)
     self._unit_normal_cross_e20 = np.cross(unit_normal, e20)
     # parameters for heat method
     h = np.mean([veclen(e) for e in (e01, e12, e20)])
     t = m * h**2
     # pre-factorize poisson systems
     Lc, A = compute_mesh_laplacian(verts, tris)
     self._factored_AtLc = splu((A - t * Lc).tocsc()).solve
     self._factored_L = splu(Lc.tocsc()).solve
Example #10
 def __init__(self, verts, tris, m=10.0):
     self._verts = verts
     self._tris = tris
     # precompute some stuff needed later on
     e01 = verts[tris[:,1]] - verts[tris[:,0]]
     e12 = verts[tris[:,2]] - verts[tris[:,1]]
     e20 = verts[tris[:,0]] - verts[tris[:,2]]
     self._triangle_area = .5 * veclen(np.cross(e01, e12))
     unit_normal = normalized(np.cross(normalized(e01), normalized(e12)))
     self._unit_normal_cross_e01 = np.cross(unit_normal, e01)
     self._unit_normal_cross_e12 = np.cross(unit_normal, e12)
     self._unit_normal_cross_e20 = np.cross(unit_normal, e20)
     # parameters for heat method
     h = np.mean([veclen(e) for e in (e01, e12, e20)])
     t = m * h ** 2
     # pre-factorize poisson systems
     Lc, A = compute_mesh_laplacian(verts, tris)
     self._factored_AtLc = splu((A - t * Lc).tocsc()).solve
     self._factored_L = splu(Lc.tocsc()).solve
Example #11
def preprocess_mesh_animation(verts, tris):
    """
    Preprocess the mesh animation:
        - remove zero-area triangles
        - keep only the biggest connected component of the mesh
        - normalize the animation into the -0.5 ... 0.5 cube
    """
    print "Vertices: ", verts.shape
    print "Triangles: ", verts.shape
    assert verts.ndim == 3
    assert tris.ndim == 2
    # check for zero-area triangles and filter
    e1 = verts[0, tris[:, 1]] - verts[0, tris[:, 0]]
    e2 = verts[0, tris[:, 2]] - verts[0, tris[:, 0]]
    n = np.cross(e1, e2)
    tris = tris[veclen(n) > 1.e-8]
    # remove unconnected vertices
    ij = np.r_[np.c_[tris[:, 0], tris[:, 1]], np.c_[tris[:, 0], tris[:, 2]],
               np.c_[tris[:, 1], tris[:, 2]]]
    G = csr_matrix((np.ones(len(ij)), ij.T),
                   shape=(verts.shape[1], verts.shape[1]))
    n_components, labels = connected_components(G, directed=False)
    if n_components > 1:
        size_components = np.bincount(labels)
        if len(size_components) > 1:
            print "[warning] found %d connected components in the mesh, keeping only the biggest one" % n_components
            print "component sizes: "
            print size_components
        keep_vert = labels == size_components.argmax()
    else:
        keep_vert = np.ones(verts.shape[1], bool)
    verts = verts[:, keep_vert, :]
    tris = filter_reindex(keep_vert, tris[keep_vert[tris].all(axis=1)])
    # normalize triangles to -0.5...0.5 cube
    verts_mean = verts.mean(axis=0).mean(axis=0)
    verts -= verts_mean
    verts_scale = np.abs(np.ptp(verts, axis=1)).max()
    verts /= verts_scale
    print "after preprocessing:"
    print "Vertices: ", verts.shape
    print "Triangles: ", verts.shape
    return verts, tris, ~keep_vert, verts_mean, verts_scale
Example #12
def preprocess_mesh_animation(verts, tris):
    """ 
    Preprocess the mesh animation:
        - remove zero-area triangles
        - keep only the biggest connected component of the mesh
        - normalize the animation into the -0.5 ... 0.5 cube
    """
    print "Vertices: ", verts.shape
    print "Triangles: ", verts.shape
    assert verts.ndim == 3
    assert tris.ndim == 2
    # check for zero-area triangles and filter
    e1 = verts[0, tris[:,1]] - verts[0, tris[:,0]]
    e2 = verts[0, tris[:,2]] - verts[0, tris[:,0]]
    n = np.cross(e1, e2)
    tris = tris[veclen(n) > 1.e-8]
    # remove unconnected vertices
    ij = np.r_[np.c_[tris[:,0], tris[:,1]], 
               np.c_[tris[:,0], tris[:,2]], 
               np.c_[tris[:,1], tris[:,2]]]
    G = csr_matrix((np.ones(len(ij)), ij.T), shape=(verts.shape[1], verts.shape[1]))
    n_components, labels = connected_components(G, directed=False)
    if n_components > 1:
        size_components = np.bincount(labels)
        if len(size_components) > 1:
            print "[warning] found %d connected components in the mesh, keeping only the biggest one" % n_components
            print "component sizes: "
            print size_components
        keep_vert = labels == size_components.argmax()
    else:
        keep_vert = np.ones(verts.shape[1], bool)
    verts = verts[:, keep_vert, :]
    tris = filter_reindex(keep_vert, tris[keep_vert[tris].all(axis=1)])
    # normalize triangles to -0.5...0.5 cube
    verts_mean = verts.mean(axis=0).mean(axis=0)
    verts -= verts_mean
    verts_scale = np.abs(np.ptp(verts, axis=1)).max()
    verts /= verts_scale
    print "after preprocessing:"
    print "Vertices: ", verts.shape
    print "Triangles: ", verts.shape
    return verts, tris, ~keep_vert, verts_mean, verts_scale
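
preprocess_mesh_animation relies on a filter_reindex helper that is not part of this listing. A minimal sketch of what it presumably does, assuming keep_vert is a boolean mask over the original vertices and the triangles passed in reference only kept vertices:

def filter_reindex(keep_vert, tris):
    # map old vertex indices to their positions in the filtered vertex array
    new_index = np.cumsum(keep_vert) - 1
    return new_index[tris]
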
Example #13
 def __call__(self, idx):
     """
     computes geodesic distances to all vertices in the mesh
     idx can be either an integer (single vertex index) or a list of vertex indices
     or an array of bools of length n (with n the number of vertices in the mesh)
     """
     u0 = np.zeros(len(self._verts))
     u0[idx] = 1.0
     # heat method, step 1
     u = self._factored_AtLc(u0).ravel()
     # heat method step 2
     grad_u = 1 / (2 * self._triangle_area)[:, np.newaxis] * (
         self._unit_normal_cross_e01 * u[self._tris[:, 2]][:, np.newaxis] +
         self._unit_normal_cross_e12 * u[self._tris[:, 0]][:, np.newaxis] +
         self._unit_normal_cross_e20 * u[self._tris[:, 1]][:, np.newaxis])
     X = -grad_u / veclen(grad_u)[:, np.newaxis]
     # heat method step 3
     div_Xs = np.zeros(len(self._verts))
     for i1, i2, i3 in [(0, 1, 2), (1, 2, 0),
                        (2, 0, 1)]:  # for edge i2 --> i3 facing vertex i1
         # 0 1 2
         # 1 2 0
         # 2 0 1
         vi1, vi2, vi3 = self._tris[:, i1], self._tris[:, i2], self._tris[:, i3]
         e1 = self._verts[vi2] - self._verts[vi1]
         e2 = self._verts[vi3] - self._verts[vi1]
         e_opp = self._verts[vi3] - self._verts[vi2]
         cot1 = 1 / np.tan(
             np.arccos((normalized(-e2) * normalized(-e_opp)).sum(axis=1)))
         cot2 = 1 / np.tan(
             np.arccos((normalized(-e1) * normalized(+e_opp)).sum(axis=1)))
         div_Xs += np.bincount(vi1.astype(int),
                               0.5 * (cot1 * (e1 * X).sum(axis=1) + cot2 *
                                      (e2 * X).sum(axis=1)),
                               minlength=len(self._verts))
     phi = self._factored_L(div_Xs).ravel()
     phi -= phi.min()
     return phi
Example #14
 def __init__(self, verts, tris, m=1.0):
     self._verts = verts
     self._tris = tris
     # precompute some stuff needed later on
     e01 = verts[tris[:,1]] - verts[tris[:,0]]
     e12 = verts[tris[:,2]] - verts[tris[:,1]]
     e20 = verts[tris[:,0]] - verts[tris[:,2]]
     self._triangle_area = .5 * veclen(np.cross(e01, e12))
     unit_normal = normalized(np.cross(normalized(e01), normalized(e12)))
     self._un = unit_normal
     self._unit_normal_cross_e01 = np.cross(unit_normal, -e01)
     self._unit_normal_cross_e12 = np.cross(unit_normal, -e12)
     self._unit_normal_cross_e20 = np.cross(unit_normal, -e20)
     # parameters for heat method
     h = np.mean([veclen(e) for e in (e01, e12, e20)])
     t = m * h ** 2
     # pre-factorize poisson systems
     Lc, vertex_area = compute_mesh_laplacian(verts, tris, area_type='lumped_mass')
     A = sparse.spdiags(vertex_area, 0, len(verts), len(verts))
     #self._factored_AtLc = splu((A - t * Lc).tocsc()).solve
     self._factored_AtLc = cholesky((A - t * Lc).tocsc(), mode='simplicial')
     #self._factored_L = splu(Lc.tocsc()).solve
     self._factored_L = cholesky(Lc.tocsc(), mode='simplicial')
Example #15
def compute_mesh_laplacian(verts, tris, weight_type='cotangent',
                           return_vertex_area=True, area_type='mixed',
                           add_diagonal=True):
    """
    computes a sparse matrix representing the
    discretized Laplace-Beltrami operator of the mesh
    given by n vertex positions ("verts") and m triangles ("tris")

    verts: (n, 3) array (float)
    tris: (m, 3) array (int) - indices into the verts array
    weight_type: either 'mean_value', 'uniform' or 'cotangent' (default)
    return_vertex_area: whether to also return an array with the per-vertex areas
    area_type: can be 'mixed' or 'lumped_mass'

    if weight_type == 'cotangent':
        computes the conformal weights ("cotangent weights") for the mesh, i.e.:
        w_ij = - .5 * (cot \alpha + cot \beta)

    if weight_type == 'mean_value':
        computes mean value coordinates for the mesh
        w_ij = - (tan(theta1_ij / 2) + tan(theta2_ij / 2)) / || v_i - v_j ||

    if weight_type == 'uniform':
        w_ij = - 1

    for all weight types:
        w_ii = sum(w_ij for j in [1..n])


    if area_type == 'mixed':
        compute the vertex area as the voronoi area for non-obtuse triangles,
        use the barycentric area for obtuse triangles 
        (according to Mark Meyer's 2002 paper)

    if area_type == 'lumped_mass':
        compute the vertex area by dividing each triangle's area equally among its vertices,
        i.e. the area of vertex i is the sum of the areas of its adjacent triangles divided by 3

    See:
        Olga Sorkine, "Laplacian Mesh Processing"
        and also
        Mark Meyer et al., "Discrete Differential-Geometry Operators for Triangulated 2-Manifolds"
        and for theoretical comparison of different discretizations, see
        Max Wardetzky et al., "Discrete Laplace operators: No free lunch"

    returns matrix L that computes the laplacian coordinates, e.g. L * x = delta
    """
    if area_type not in ['mixed', 'lumped_mass']:
        raise ValueError('unknown area type: %s' % area_type)
    if weight_type not in ['cotangent', 'mean_value', 'uniform']:
        raise ValueError('unknown weight type: %s' % weight_type)

    n = len(verts)
    # we consider the triangle P, Q, R
    iP = tris[:, 0]
    iQ = tris[:, 1]
    iR = tris[:, 2]
    # edges forming the triangle
    PQ = verts[iP] - verts[iQ] # P--Q
    QR = verts[iQ] - verts[iR] # Q--R
    RP = verts[iR] - verts[iP] # R--P
    if weight_type == 'cotangent' or (return_vertex_area and area_type == 'mixed'):
        # compute cotangent at all 3 points in triangle PQR
        cotP = -1 * (PQ * RP).sum(axis=1) / veclen(np.cross(PQ, RP)) # angle at vertex P
        cotQ = -1 * (QR * PQ).sum(axis=1) / veclen(np.cross(QR, PQ)) # angle at vertex Q
        cotR = -1 * (RP * QR).sum(axis=1) / veclen(np.cross(RP, QR)) # angle at vertex R

    # compute weights and indices
    if weight_type == 'cotangent':
        I =       np.concatenate((  iP,   iR,    iP,   iQ,    iQ,   iR))
        J =       np.concatenate((  iR,   iP,    iQ,   iP,    iR,   iQ))
        W = 0.5 * np.concatenate((cotQ, cotQ,  cotR, cotR,  cotP, cotP))

    elif weight_type == 'mean_value':
        # TODO: I didn't check this code yet
        PQlen = 1 / veclen(PQ)
        QRlen = 1 / veclen(QR)
        RPlen = 1 / veclen(RP)
        PQn = PQ * PQlen[:,np.newaxis] # normalized
        QRn = QR * QRlen[:,np.newaxis]
        RPn = RP * RPlen[:,np.newaxis]
        # TODO pretty sure there is a simpler solution to those 3 formulas
        tP = np.tan(0.5 * np.arccos((PQn * -RPn).sum(axis=1)))
        tQ = np.tan(0.5 * np.arccos((-PQn * QRn).sum(axis=1)))
        tR = np.tan(0.5 * np.arccos((RPn * -QRn).sum(axis=1)))
        I = np.concatenate((      iP,       iP,       iQ,       iQ,       iR,       iR))
        J = np.concatenate((      iQ,       iR,       iP,       iR,       iP,       iQ))
        W = np.concatenate((tP*PQlen, tP*RPlen, tQ*PQlen, tQ*QRlen, tR*RPlen, tR*QRlen))

    elif weight_type == 'uniform':
        # this might add an edge twice to the matrix
        # but prevents the problem of boundary edges going only in one direction
        # we fix this problem after the matrix L is constructed
        I = np.concatenate((iP, iQ,  iQ, iR,  iR, iP))
        J = np.concatenate((iQ, iP,  iR, iQ,  iP, iR))
        W = np.ones(len(tris) * 6)

    # construct sparse matrix
    # notice that this will also sum duplicate entries of (i,j), 
    # which is explicitly assumed by the code above
    L = sparse.csr_matrix((W, (I, J)), shape=(n, n))
    if weight_type == 'uniform':
        # because we probably add weights in both directions of an edge earlier, 
        # and the csr_matrix constructor sums them, some values in L might be 2 instead of 1
        # so reset them
        L.data[:] = 1
    # add diagonal entries as the sum across rows
    if add_diagonal:
        L = L - sparse.spdiags(L * np.ones(n), 0, n, n)

    if return_vertex_area:
        if area_type == 'mixed':
            # compute voronoi cell areas
            aP = 1/8. * (cotR * (PQ**2).sum(axis=1) + cotQ * (RP**2).sum(axis=1)) # area at point P
            aQ = 1/8. * (cotR * (PQ**2).sum(axis=1) + cotP * (QR**2).sum(axis=1)) # area at point Q
            aR = 1/8. * (cotQ * (RP**2).sum(axis=1) + cotP * (QR**2).sum(axis=1)) # area at point R
            # replace by barycentric areas for obtuse triangles
            triangle_area = .5 * veclen(np.cross(PQ, RP))
            for i, c in enumerate([cotP, cotQ, cotR]):
                is_x_obtuse = c < 0 # obtuse at point?
                # TODO: the paper by Desbrun says that we should divide by 1/2 or 1/4,
                #       but according to other code I found we should divide by 1 or 1/2
                #       check which scheme is correct!
                aP[is_x_obtuse] = triangle_area[is_x_obtuse] * (1 if i == 0 else 1/2.)
                aQ[is_x_obtuse] = triangle_area[is_x_obtuse] * (1 if i == 1 else 1/2.)
                aR[is_x_obtuse] = triangle_area[is_x_obtuse] * (1 if i == 2 else 1/2.)
            area = np.bincount(iP, aP, minlength=n) + \
                    np.bincount(iQ, aQ, minlength=n) + np.bincount(iR, aR, minlength=n)

        elif area_type == 'lumped_mass':
            lump_area = veclen(np.cross(PQ, RP)) / 6.
            area = sum(np.bincount(tris[:,i], lump_area, minlength=n) for i in range(3))

        return L, area
    else:
        return L
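
A hypothetical usage sketch for this full-featured variant, exercising the weight_type and area_type options described in the docstring (the two-triangle mesh is illustrative only):

verts = np.array([[0., 0., 0.],
                  [1., 0., 0.],
                  [0., 1., 0.],
                  [1., 1., 0.]])
tris = np.array([[0, 1, 2],
                 [2, 1, 3]])
L_cot, vertex_area = compute_mesh_laplacian(verts, tris,
                                            weight_type='cotangent',
                                            area_type='lumped_mass')
L_uni = compute_mesh_laplacian(verts, tris, weight_type='uniform',
                               return_vertex_area=False)
delta = L_cot * verts  # Laplacian coordinates of the vertex positions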