Example #1
    def test_solver_parameters(self):
        A = poisson((50, 50), format='csr')

        for method in methods:
            # method = ('richardson', {'omega':4.0/3.0})
            ml = smoothed_aggregation_solver(A, presmoother=method,
                                             postsmoother=method,
                                             max_coarse=10)

            residuals = profile_solver(ml)
            assert((residuals[-1]/residuals[0])**(1.0/len(residuals)) < 0.95)
            assert(ml.symmetric_smoothing)

        for method in methods2:
            ml = smoothed_aggregation_solver(A, max_coarse=10)
            change_smoothers(ml, presmoother=method[0], postsmoother=method[1])

            residuals = profile_solver(ml)
            assert((residuals[-1]/residuals[0])**(1.0/len(residuals)) < 0.95)
            assert(not ml.symmetric_smoothing)

        for method in methods3:
            ml = smoothed_aggregation_solver(A, max_coarse=10)
            change_smoothers(ml, presmoother=method[0], postsmoother=method[1])
            assert(ml.symmetric_smoothing)

        for method in methods4:
            ml = smoothed_aggregation_solver(A, max_coarse=10)
            change_smoothers(ml, presmoother=method[0], postsmoother=method[1])
            assert(not ml.symmetric_smoothing)
Example #2
 def build_pyamg(self,A):
     try:
         import pyamg
     except ImportError:
         raise ImportError(f"*** pyamg not found {self.linearsolver=} ***")
     B = pyamg.solver_configuration(A, verb=False)['B']
     # Alternative nonsymmetric configuration, kept for reference (it was overwritten below):
     # symmetry = 'nonsymmetric'
     # smoother = 'gauss_seidel_nr'
     # smooth = ('energy', {'krylov': 'gmres'})
     # improve_candidates = [('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 4}), None]
     symmetry = 'symmetric'
     smooth = ('energy', {'krylov': 'gmres'})
     smoother = 'gauss_seidel'
     improve_candidates = None
     SA_build_args = {
         'max_levels': 10, 'max_coarse': 25,
         'coarse_solver': 'pinv',
         'symmetry': symmetry
     }
     strength = [('evolution', {'k': 2, 'epsilon': 10.0})]
     presmoother = (smoother, {'sweep': 'symmetric', 'iterations': 3})
     postsmoother = (smoother, {'sweep': 'symmetric', 'iterations': 3})
     return pyamg.smoothed_aggregation_solver(A, B, smooth=smooth, strength=strength, presmoother=presmoother,
                                              postsmoother=postsmoother, improve_candidates=improve_candidates,
                                             **SA_build_args)
     # Unreachable simpler variant, kept for reference:
     # return pyamg.smoothed_aggregation_solver(A, B=B, smooth='energy')
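A hedged usage sketch for a hierarchy built with the settings above; the gallery matrix and right-hand side are stand-ins, since in the original `A` comes from the caller and `self.linearsolver` is configured elsewhere.

import numpy as np
import pyamg

# Stand-in problem; the original assembles A elsewhere.
A = pyamg.gallery.poisson((100, 100), format='csr')
B = pyamg.solver_configuration(A, verb=False)['B']   # near-null-space candidates, as above
ml = pyamg.smoothed_aggregation_solver(
    A, B,
    smooth=('energy', {'krylov': 'gmres'}),
    strength=[('evolution', {'k': 2, 'epsilon': 10.0})],
    presmoother=('gauss_seidel', {'sweep': 'symmetric', 'iterations': 3}),
    postsmoother=('gauss_seidel', {'sweep': 'symmetric', 'iterations': 3}),
    max_levels=10, max_coarse=25, coarse_solver='pinv', symmetry='symmetric',
)
b = np.random.rand(A.shape[0])
residuals = []
x = ml.solve(b, tol=1e-10, residuals=residuals, accel='gmres')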
Example #3
    def test_solver_parameters(self):
        A = poisson((50, 50), format='csr')

        for method in methods:
            # method = ('richardson', {'omega':4.0/3.0})
            ml = smoothed_aggregation_solver(A,
                                             presmoother=method,
                                             postsmoother=method,
                                             max_coarse=10)

            residuals = profile_solver(ml)
            assert ((residuals[-1] / residuals[0])**(1.0 / len(residuals)) <
                    0.95)
            assert (ml.symmetric_smoothing)

        for method in methods2:
            ml = smoothed_aggregation_solver(A, max_coarse=10)
            change_smoothers(ml, presmoother=method[0], postsmoother=method[1])

            residuals = profile_solver(ml)
            assert ((residuals[-1] / residuals[0])**(1.0 / len(residuals)) <
                    0.95)
            assert (not ml.symmetric_smoothing)

        for method in methods3:
            ml = smoothed_aggregation_solver(A, max_coarse=10)
            change_smoothers(ml, presmoother=method[0], postsmoother=method[1])
            assert (ml.symmetric_smoothing)

        for method in methods4:
            ml = smoothed_aggregation_solver(A, max_coarse=10)
            change_smoothers(ml, presmoother=method[0], postsmoother=method[1])
            assert (not ml.symmetric_smoothing)
Example #4
    def test_solver_parameters(self):
        A = poisson((50, 50), format='csr')

        for method in methods:
            #method = ('richardson', {'omega':4.0/3.0})
            ml = smoothed_aggregation_solver(A,
                                             presmoother=method,
                                             postsmoother=method,
                                             max_coarse=10)

            residuals = profile_solver(ml)
            #print "method",method
            #print "residuals",residuals
            #print "convergence rate:",(residuals[-1]/residuals[0])**(1.0/len(residuals))
            assert ((residuals[-1] / residuals[0])**(1.0 / len(residuals)) <
                    0.95)

        for method in methods2:
            ml = smoothed_aggregation_solver(A, max_coarse=10)
            change_smoothers(ml, presmoother=method[0], postsmoother=method[1])

            residuals = profile_solver(ml)
            #print "method",method
            #print "residuals",residuals
            #print "convergence rate:",(residuals[-1]/residuals[0])**(1.0/len(residuals))
            assert ((residuals[-1] / residuals[0])**(1.0 / len(residuals)) <
                    0.95)
Example #5
def test():
    class EnergyEdgeKernel(object):
        def __init__(self):
            self.subdomains = [None]
            return

        def eval(self, mesh, cell_mask):
            edge_ce_ratio = mesh.ce_ratios[..., cell_mask]
            beta = 1.0
            return numpy.array(
                [
                    [edge_ce_ratio, -edge_ce_ratio * numpy.exp(1j * beta)],
                    [-edge_ce_ratio * numpy.exp(-1j * beta), edge_ce_ratio],
                ]
            )

    vertices, cells = meshzoo.rectangle(0.0, 2.0, 0.0, 1.0, 101, 51)
    mesh = meshplex.MeshTri(vertices, cells)

    matrix = pyfvm.get_fvm_matrix(mesh, [EnergyEdgeKernel()], [], [], [])
    rhs = mesh.control_volumes.copy()

    sa = pyamg.smoothed_aggregation_solver(matrix, smooth="energy")
    u = sa.solve(rhs, tol=1e-10)

    # Cannot write complex data to VTU; split real and imaginary parts first.
    # <http://stackoverflow.com/a/38902227/353337>
    mesh.write("out.vtk", point_data={"u": u.view("(2,)float")})
    return
Example #6
def random_walker(mask, prior, gamma):
    """
    Assume prior is given on the mask, of shape (NPTS, K).
    Return random walker probability map.
    """
    gc.enable()

    # Assembling graph Laplacian
    L = make_laplacian(mask)
    n = L.shape[0]
    L = L + sparse.coo_matrix((gamma * prior.sum(axis=1),
                               (range(n), range(n))))

    # Creating sparse solver
    mls = smoothed_aggregation_solver(L.tocsr())
    del L
    gc.collect()

    # Loop over classes
    X = []
    for k in range(prior.shape[-1]):
        X += [mls.solve(gamma * prior[:, k])]

    del mls
    gc.collect()
    return np.array(X).T
Example #7
def _get_amg_solver(pyom):
    matrix = _assemble_poisson_matrix(pyom)
    if pyom.backend_name == "bohrium":
        near_null_space = np.ones(matrix.shape[0], bohrium=False)
    else:
        near_null_space = np.ones(matrix.shape[0])
    ml = pyamg.smoothed_aggregation_solver(matrix, near_null_space[:,
                                                                   np.newaxis])

    def amg_solver(rhs, x0):
        if pyom.backend_name == "bohrium":
            rhs = rhs.copy2numpy()
            x0 = x0.copy2numpy()
        residuals = []
        tolerance = pyom.congr_epsilon * 1e-8  # to achieve the same precision as the preconditioned scipy solver
        solution = ml.solve(b=rhs.flatten(),
                            x0=x0.flatten(),
                            tol=tolerance,
                            residuals=residuals,
                            accel="bicgstab")
        rel_res = residuals[-1] / residuals[0]
        if rel_res > tolerance:
            warnings.warn(
                "Streamfunction solver did not converge - residual: {:.2e}".
                format(rel_res))
        return np.asarray(solution)

    return amg_solver
Example #8
 def setup(self):
     n = 1000
     self.A = pyamg.gallery.poisson((n, n), format="csr")
     self.settings = {
         "B": None,
         "BH": None,
         "symmetry": "hermitian",
         "strength": "symmetric",
         "aggregate": "standard",
         "smooth": ("jacobi", {
             "omega": 4.0 / 3.0
         }),
         "presmoother": ("gauss_seidel", {
             "sweep": "symmetric"
         }),
         "postsmoother": ("gauss_seidel", {
             "sweep": "symmetric"
         }),
         "improve_candidates": None,
         "max_levels": 10,
         "max_coarse": 10,
         "diagonal_dominance": False,
         "keep": False
     }
     self.b = np.zeros(self.A.shape[0])
     self.x0 = np.random.rand(self.A.shape[0])
     self.ml = pyamg.smoothed_aggregation_solver(self.A, **self.settings)
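A hedged sketch of the benchmark step such a setup presumably feeds; the problem size, tolerance, and accelerator below are assumptions, not part of the original.

import numpy as np
import pyamg

# Build a hierarchy as in setup() above and run a CG-accelerated V-cycle
# solve with a zero right-hand side and a random initial guess.
A = pyamg.gallery.poisson((300, 300), format="csr")
ml = pyamg.smoothed_aggregation_solver(A)
b = np.zeros(A.shape[0])
x0 = np.random.rand(A.shape[0])

residuals = []
x = ml.solve(b, x0=x0, tol=1e-8, residuals=residuals, accel="cg")
print(len(residuals), "iterations")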
Example #9
def solve(name, A, b, x0=None, grid=None):
    t0 = time.time()
    if name == "direct":
        x, iter = splinalg.spsolve(A.tocsc(), b), -1
    elif name == "mg":
        assert grid is not None
        x, res = multigrid.solve(A, b, grid=grid, x0=x0, verbose=False)
        iter = len(res)
    elif name == 'pyamg':
        import warnings
        warnings.simplefilter("ignore", sp.sparse.SparseEfficiencyWarning)
        res = []
        B = np.ones((A.shape[0], 1))
        SA_build_args = {
            'max_levels': 10,
            'max_coarse': 10,
            'coarse_solver': 'lu',
            'symmetry': 'hermitian'}
        # smooth = ('energy', {'krylov': 'cg'})
        # strength = [('evolution', {'k': 2, 'epsilon': 0.2})]
        presmoother = ('gauss_seidel', {'sweep': 'symmetric', 'iterations': 2})
        postsmoother = ('gauss_seidel', {'sweep': 'symmetric', 'iterations': 2})
        ml = pyamg.smoothed_aggregation_solver(A, B=B, **SA_build_args)
        SA_solve_args = {'cycle': 'V', 'maxiter': 200, 'tol': 1e-10}
        x, iter = ml.solve(b=b, x0=x0, residuals=res, **SA_solve_args), len(res)
        # uh = pyamg.solve(A, b, verb=0, tol=1e-10, x0=u0, residuals=res)
    else:
        raise ValueError(f"Problem: unknown solver name={name}")
    t1 = time.time()
    return x, t1-t0, iter
Example #10
def edgeAMG(Anode, Acurl, D):
    nodalAMG = pyamg.smoothed_aggregation_solver(Anode,
                                                 max_coarse=10,
                                                 keep=True)

    # construct multilevel structure
    levels = []
    levels.append(pyamg.multilevel_solver.level())
    levels[-1].A = Acurl
    levels[-1].D = D
    for i in range(1, len(nodalAMG.levels)):
        A = levels[-1].A
        Pnode = nodalAMG.levels[i - 1].AggOp
        P = findPEdge(D, Pnode)
        R = P.T
        levels[-1].P = P
        levels[-1].R = R
        levels.append(pyamg.multilevel_solver.level())
        A = R * A * P
        M = sparse.dia_matrix((1.0 / ((P.T * P).diagonal()), 0),
                              shape=(P.shape[1], P.shape[1]))
        D = M * (P.T * D * Pnode)
        D = D.tocsr()
        levels[-1].A = A
        levels[-1].D = D

    edgeML = pyamg.multilevel_solver(levels)
    for i in range(0, len(edgeML.levels)):
        edgeML.levels[i].presmoother = setup_hiptmair(levels[i])
        edgeML.levels[i].postsmoother = setup_hiptmair(levels[i])
    return edgeML
Example #11
    def initialize_linear_solver(self) -> None:

        solver = self.params.get("linear_solver", "direct")

        if solver == "direct":
            """ In theory, it should be possible to instruct SuperLU to reuse the
            symbolic factorization from one iteration to the next. However, it seems
            the scipy wrapper around SuperLU has not implemented the necessary
            functionality, as discussed in

                https://github.com/scipy/scipy/issues/8227

            We will therefore pass here, and pay the price of long computation times.
            """
            self.linear_solver = "direct"

        elif solver == "pyamg":
            self.linear_solver = solver
            import pyamg

            assembler = self.assembler

            A, _ = assembler.assemble_matrix_rhs()

            g = self.gb.grids_of_dimension(self.Nd)[0]
            mechanics_dof = assembler.dof_ind(g, self.displacement_variable)

            pyamg_solver = pyamg.smoothed_aggregation_solver(
                A[mechanics_dof][:, mechanics_dof])
            self.mechanics_precond = pyamg_solver.aspreconditioner(cycle="W")

        else:
            raise ValueError("unknown linear solver " + solver)
Example #12
def locally_linear_embedding(X, n_neighbors, out_dim, tol=1e-6, max_iter=200):

    #W = neighbors.kneighbors_graph(
    #   X, n_neighbors=n_neighbors, mode='distance')
    W = barycenter_kneighbors_graph(X, n_neighbors)
    print(W)
    # M = (I-W)' (I-W)
    A = eye(*W.shape, format=W.format) - W
    A = (A.T).dot(A).tocsr()

    # initial approximation to the eigenvectors
    X = np.random.rand(W.shape[0], out_dim)
    ml = smoothed_aggregation_solver(A, symmetry='symmetric')
    prec = ml.aspreconditioner()

    # compute eigenvalues and eigenvectors with LOBPCG
    eigen_values, eigen_vectors = linalg.lobpcg(A,
                                                X,
                                                M=prec,
                                                largest=False,
                                                tol=tol,
                                                maxiter=max_iter)

    index = np.argsort(eigen_values)
    return eigen_vectors[:, index], np.sum(eigen_values)
Example #13
    def make_eigenvector_matrix(self, A):
        # Dense diagonal degree matrix built from the row sums of A
        D = np.diag(np.ravel(np.sum(A, axis=1)))
        # Square root of the inverse of the matrix D
        Dinvsq = np.sqrt(np.linalg.inv(D))

        L = D-A
        L = Dinvsq.dot(L)
        L = L.dot(Dinvsq)

        # Find the K smallest eigenvectors of L
        # Use this method from the multigrid solver
        ml = pyamg.smoothed_aggregation_solver(L, D)
        # Perform multigrid 'preconditioning'
        M = ml.aspreconditioner()
        # Find the k smallest eigenvalues and corresponding eigenvectors of the matrix
        eigvals, eigvects = scipy.sparse.linalg.eigs(M, k=self.num_clusters, which='SM')
        # eigvals, eigvects = np.linalg.eigh(M)

        # lowest_eigs = eigvects[:, :self.num_clusters]
        # print("Found %i lowest Eigenvalues" % self.num_clusters)
        # print(eigvals[:self.num_clusters])

        LX = Dinvsq.dot(eigvects)
        LX = (LX.T / np.linalg.norm(LX, axis=1)).T
        self._LX = LX
        return self._LX
Example #14
    def compliance(self, x):
        """
        Compute the structural compliance
        """

        # Compute the filtered compliance. Note that 'dot' is scipy
        # matrix-vector multiplicataion
        self._rho = self.F.dot(x)

        # Compute the stiffness matrix
        solid_lib.computekmat(self.conn.T, self.dof.T, self.X.T, self.qval,
                              self.C.T, self._rho, self.rowp, self.cols,
                              self.Kvals)

        # Form the matrix
        Kmat = sparse.csr_matrix((self.Kvals, self.cols, self.rowp),
                                 shape=(self.ndof, self.ndof))

        if self.use_pyamg:
            self.ml = pyamg.smoothed_aggregation_solver(Kmat, max_coarse=10)
            self.u = self.ml.solve(self.force,
                                   accel='cg',
                                   cycle='V',
                                   tol=1e-10)

        else:
            self.Kmat = Kmat.tocsc()
            self.LU = linalg.dsolve.factorized(self.Kmat)

            # Compute the solution to the linear system K*u = f
            self.u = self.LU(self.force)

        # Return the compliance
        return np.dot(self.force, self.u)
Example #15
def edgeAMG(Anode, Acurl, D):
    nodalAMG = pyamg.smoothed_aggregation_solver(Anode, max_coarse=10, keep=True)

    # construct multilevel structure
    levels = []
    levels.append(pyamg.multilevel_solver.level())
    levels[-1].A = Acurl
    levels[-1].D = D
    for i in range(1, len(nodalAMG.levels)):
        A = levels[-1].A
        Pnode = nodalAMG.levels[i - 1].AggOp
        P = findPEdge(D, Pnode)
        R = P.T
        levels[-1].P = P
        levels[-1].R = R
        levels.append(pyamg.multilevel_solver.level())
        A = R * A * P
        M = sparse.dia_matrix((1.0 / ((P.T * P).diagonal()), 0),
                              shape=(P.shape[1], P.shape[1]))
        D = M * (P.T * D * Pnode)
        D = D.tocsr()
        levels[-1].A = A
        levels[-1].D = D

    edgeML = pyamg.multilevel_solver(levels)
    for i in range(0, len(edgeML.levels)):
        edgeML.levels[i].presmoother = setup_hiptmair(levels[i])
        edgeML.levels[i].postsmoother = setup_hiptmair(levels[i])
    return edgeML
Example #16
def initialize_solver(vs):
    matrix = _assemble_poisson_matrix(vs)
    preconditioner = _jacobi_preconditioner(vs, matrix)
    matrix = preconditioner * matrix
    extra_args = {}

    if vs.use_amg_preconditioner:
        if has_pyamg:
            ml = pyamg.smoothed_aggregation_solver(matrix)
            extra_args["M"] = ml.aspreconditioner()
        else:
            warnings.warn(
                "pyamg was not found, falling back to un-preconditioned CG solver"
            )

    def scipy_solver(rhs, x0):
        rhs = rhs.flatten() * preconditioner.diagonal()
        solution, info = spalg.bicgstab(matrix,
                                        rhs,
                                        x0=x0.flatten(),
                                        tol=vs.congr_epsilon,
                                        maxiter=vs.congr_max_iterations,
                                        **extra_args)
        if info > 0:
            warnings.warn(
                "Streamfunction solver did not converge after {} iterations".
                format(info))
        return solution

    vs.poisson_solver = scipy_solver
Example #17
def eig_multi(A, B=None, n_components=2, tol=1E-12, random_state=None):
    """Solves the generalized Eigenvalue problem:
    A x = lambda B x using the multigrid method.
    Works well with very large matrices but there are some
    instabilities sometimes.
    """
    random_state = check_random_state(random_state)
    # convert matrix A and B to float
    A = A.astype(np.float64)

    if B is not None:
        B = B.astype(np.float64)

    # import the solver
    ml = smoothed_aggregation_solver(check_array(A, accept_sparse=['csr']))

    # preconditioner
    M = ml.aspreconditioner()

    n_nodes = A.shape[0]
    n_find = min(n_nodes, 5 + 2 * n_components)
    # initial guess for X
    # np.random.RandomState(seed=1234)  # no-op in the original; random_state above controls seeding
    X = random_state.rand(n_nodes, n_find)

    # solve using the lobpcg algorithm (largest must be a bool, not the string 'False')
    eigVals, eigVecs = lobpcg(A, X, M=M, B=B, tol=tol, largest=False)

    sort_order = np.argsort(eigVals)
    eigVals = eigVals[sort_order]
    eigVecs = eigVecs[:, sort_order]

    eigVals = eigVals[:n_components]
    eigVecs = eigVecs[:, :n_components]
    return eigVals, eigVecs
Example #18
    def test_accel(self):
        from pyamg import smoothed_aggregation_solver
        from pyamg.krylov import cg, bicgstab

        A = poisson((50, 50), format='csr')
        b = rand(A.shape[0])

        ml = smoothed_aggregation_solver(A)

        # cg halts based on the preconditioner norm
        for accel in ['cg', cg]:
            x = ml.solve(b, maxiter=30, tol=1e-8, accel=accel)
            assert(precon_norm(b - A*x, ml) < 1e-8*precon_norm(b, ml))
            residuals = []
            x = ml.solve(b, maxiter=30, tol=1e-8, residuals=residuals,
                         accel=accel)
            assert(precon_norm(b - A*x, ml) < 1e-8*precon_norm(b, ml))
            # print residuals
            assert_almost_equal(precon_norm(b - A*x, ml), residuals[-1])

        # cgs and bicgstab use the Euclidean norm
        for accel in ['bicgstab', 'cgs', bicgstab]:
            x = ml.solve(b, maxiter=30, tol=1e-8, accel=accel)
            assert(norm(b - A*x) < 1e-8*norm(b))
            residuals = []
            x = ml.solve(b, maxiter=30, tol=1e-8, residuals=residuals,
                         accel=accel)
            assert(norm(b - A*x) < 1e-8*norm(b))
            # print residuals
            assert_almost_equal(norm(b - A*x), residuals[-1])
Example #19
def solveMG(A, b):
    ml = pyamg.smoothed_aggregation_solver(A)

    x = np.zeros(b.shape)
    for bi in range(3):
        x[:, bi] = ml.solve(b[:, bi], tol=1e-10)
    return x
Example #20
def test():
    class EnergyEdgeKernel:
        def __init__(self):
            self.subdomains = [None]
            return

        def eval(self, mesh, cell_mask):
            edge_ce_ratio = mesh.ce_ratios[..., cell_mask]
            beta = 1.0
            return numpy.array([
                [edge_ce_ratio, -edge_ce_ratio * numpy.exp(1j * beta)],
                [-edge_ce_ratio * numpy.exp(-1j * beta), edge_ce_ratio],
            ])

    vertices, cells = meshzoo.rectangle(0.0, 2.0, 0.0, 1.0, 101, 51)
    mesh = meshplex.MeshTri(vertices, cells)

    matrix = pyfvm.get_fvm_matrix(mesh, [EnergyEdgeKernel()], [], [], [])
    rhs = mesh.control_volumes.copy()

    sa = pyamg.smoothed_aggregation_solver(matrix, smooth="energy")
    u = sa.solve(rhs, tol=1e-10)

    # Cannot write complex data to VTU; split real and imaginary parts first.
    # <http://stackoverflow.com/a/38902227/353337>
    mesh.write("out.vtk", point_data={"u": u.view("(2,)float")})
    return
Example #21
def solveMG(A, b):
    ml = pyamg.smoothed_aggregation_solver(A)

    x = np.zeros(b.shape)
    for bi in range(3):
        x[:, bi] = ml.solve(b[:, bi], tol=1e-10)
    return x
Example #22
def random_walker(mask, prior, gamma):
    """
    Assume prior is given on the mask, of shape (NPTS, K)
    """
    gc.enable()

    print('Assembling graph Laplacian...')
    L = make_laplacian(mask)
    n = L.shape[0]
    L = L + sparse.coo_matrix((gamma * prior.sum(axis=1),
                               (range(n), range(n))))

    print('Creating sparse solver...')
    mls = smoothed_aggregation_solver(L.tocsr())
    del L
    gc.collect()

    print('Loop over classes...')
    X = []
    for k in range(prior.shape[-1]):
        print('  Doing class %d...' % k)
        X += [mls.solve(gamma * prior[:, k])]

    del mls
    gc.collect()
    return np.array(X).T
Example #23
    def test_steepest_descent(self):
        # Ensure repeatability
        np.random.seed(0)

        for case in self.spd_cases:
            A = case['A']
            b = case['b']
            x0 = case['x0']
            maxiter = case['maxiter']
            reduction_factor = case['reduction_factor']

            # This function should always decrease
            fvals = []

            def callback(x):
                fvals.append(
                    0.5 *
                    np.dot(np.ravel(x), np.ravel(A.dot(x.reshape(-1, 1)))) -
                    np.dot(np.ravel(b), np.ravel(x)))

            x, _ = steepest_descent(A,
                                    b,
                                    x0=x0,
                                    tol=1e-16,
                                    maxiter=maxiter,
                                    callback=callback)
            norm1 = norm(np.ravel(b) - np.ravel(A.dot(x.reshape(-1, 1))))
            norm2 = norm(np.ravel(b) - np.ravel(A.dot(x0.reshape(-1, 1))))
            actual_factor = norm1 / norm2
            assert (actual_factor < reduction_factor)

            if A.dtype != complex:
                for i in range(len(fvals) - 1):
                    assert (fvals[i + 1] <= fvals[i])

        # Test preconditioning
        A = pyamg.gallery.poisson((10, 10), format='csr')
        b = np.random.rand(A.shape[0], 1)
        x0 = np.random.rand(A.shape[0], 1)
        fvals = []

        def callback(x):
            fvals.append(
                0.5 * np.dot(np.ravel(x), np.ravel(A.dot(x.reshape(-1, 1)))) -
                np.dot(np.ravel(b), np.ravel(x)))

        resvec = []
        sa = pyamg.smoothed_aggregation_solver(A)
        x, _ = steepest_descent(A,
                                b,
                                x0,
                                tol=1e-8,
                                maxiter=20,
                                residuals=resvec,
                                M=sa.aspreconditioner(),
                                callback=callback)
        assert (resvec[-1] / resvec[0] < 1e-8)
        for i in range(len(fvals) - 1):
            assert (fvals[i + 1] <= fvals[i])
Example #24
def laplacian_basis(W, k, largest = False, method = "arpack"):
    """Build laplacian basis matrix with k bases from weighted adjacency matrix W."""

    logger.info(
        "solving for %i %s eigenvector(s) of the Laplacian (using %s)",
        k,
        "largest" if largest else "smallest",
        method,
        )

    L = laplacian_operator(W) 

    assert isinstance(L, scipy.sparse.csr_matrix)
    assert k > 0
    assert k < L.shape[0]

    if method == "amg":
        solver = pyamg.smoothed_aggregation_solver(L)
        pre = solver.aspreconditioner()
        initial = scipy.rand(L.shape[0], k)

        (evals, basis) = scipy.sparse.linalg.lobpcg(L, initial, M = pre, tol = 1e-10, largest = largest)
        logger.info('amg eigen values: '+str(evals))
    elif method == "arpack":
        logger.info('using arpack')
        if largest:
            which = "LM"
        else:
            which = "SM"

        if hasattr(scipy.sparse.linalg, "eigsh"): # check scipy version
            which = "LM" # use sigma=0 and ask for the large eigenvalues (shift trick, see arpack doc)
            (evals, basis) = scipy.sparse.linalg.eigsh(L, k, which = which, tol=1e-10, sigma = 0, maxiter=15000)
            try:
                # Diagnostic check: eigenvectors are the columns of `basis`.
                for i in range(basis.shape[1]):
                    b = basis[:, i]
                    print('basis vector shape:', b.shape)
                    residual = np.linalg.norm(L.dot(b) - evals[i] * b)
                    perp_test = np.linalg.norm(np.dot(basis[:, i], basis[:, 0]))
                    logger.info('eigenvalue residual: %f', residual)
                    logger.info('dot of %ith eigenvector with first: %f', i, perp_test)

            except Exception:
                print('error in eigensolver test code')

            logger.info('arpack eigen values: '+str(evals))
        else: 
            (evals, basis) = scipy.sparse.linalg.eigen_symmetric(L, k, which = which)
            logger.info('arpack (old) eigen values: '+str(evals))
    elif method == "dense":
        (evals, full_basis) = np.linalg.eigh(L.todense())

        basis = full_basis[:, :k]
    else:
        raise ValueError("unrecognized eigenvector method name")

    assert basis.shape[1] == k

    return basis
Example #25
    def test_minimal_residual(self):
        # Ensure repeatability
        random.seed(0)

        self.definite_cases.extend(self.spd_cases)

        for case in self.definite_cases:
            A = case['A']
            maxiter = case['maxiter']
            x0 = rand(A.shape[0], )
            b = zeros_like(x0)
            reduction_factor = case['reduction_factor']
            if A.dtype != complex:

                # This function should always decrease (assuming zero RHS)
                fvals = []

                def callback(x):
                    fvals.append(
                        sqrt(dot(ravel(x), ravel(A * x.reshape(-1, 1)))))

                #
                (x, flag) = minimal_residual(A,
                                             b,
                                             x0=x0,
                                             tol=1e-16,
                                             maxiter=maxiter,
                                             callback=callback)
                actual_factor = (norm(ravel(b) - ravel(A * x.reshape(-1, 1))) /
                                 norm(ravel(b) - ravel(A * x0.reshape(-1, 1))))
                assert (actual_factor < reduction_factor)
                if A.dtype != complex:
                    for i in range(len(fvals) - 1):
                        assert (fvals[i + 1] <= fvals[i])

        # Test preconditioning
        A = pyamg.gallery.poisson((10, 10), format='csr')
        x0 = rand(A.shape[0], 1)
        b = zeros_like(x0)
        fvals = []

        def callback(x):
            fvals.append(sqrt(dot(ravel(x), ravel(A * x.reshape(-1, 1)))))

        #
        resvec = []
        sa = pyamg.smoothed_aggregation_solver(A)
        (x, flag) = minimal_residual(A,
                                     b,
                                     x0,
                                     tol=1e-8,
                                     maxiter=20,
                                     residuals=resvec,
                                     M=sa.aspreconditioner(),
                                     callback=callback)
        assert (resvec[-1] < 1e-8)
        for i in range(len(fvals) - 1):
            assert (fvals[i + 1] <= fvals[i])
Example #26
def solver(A, config):
    """
    Given a matrix A and a solver configuration dictionary, generate a
    smoothed_aggregation_solver

    Parameters
    ----------
    A : {array, matrix, csr_matrix, bsr_matrix}
        Matrix to invert, CSR or BSR format preferred for efficiency
    config : {dict}
        A dictionary of solver configuration parameters that is used to
        generate a smoothed aggregation solver

    Returns
    -------
    ml : {smoothed_aggregation_solver}
        smoothed aggregation hierarchy

    Notes
    -----
    config must contain the following parameter entries for
    smoothed_aggregation_solver:
        symmetry, smooth, presmoother, postsmoother, B, strength,
        max_levels, max_coarse, coarse_solver, aggregate, keep

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg import solver_configuration,solver
    >>> A = poisson((40,40),format='csr')
    >>> config = solver_configuration(A,verb=False)
    >>> ml = solver(A,config)

    """

    # Convert A to acceptable format
    A = make_csr(A)

    # Generate smoothed aggregation solver
    try:
        return smoothed_aggregation_solver(
            A,
            B=config["B"],
            BH=config["BH"],
            smooth=config["smooth"],
            strength=config["strength"],
            max_levels=config["max_levels"],
            max_coarse=config["max_coarse"],
            coarse_solver=config["coarse_solver"],
            symmetry=config["symmetry"],
            aggregate=config["aggregate"],
            presmoother=config["presmoother"],
            postsmoother=config["postsmoother"],
            keep=config["keep"],
        )
    except:
        raise TypeError("Failed generating smoothed_aggregation_solver")
Example #27
    def solve_with_initial_guess(self, initial_guess, solver="LU"):
        A = self.matrix.get_csr_matrix()

        if(solver == "AMG"):
            ml = smoothed_aggregation_solver(A)
            M = ml.aspreconditioner()
            self.sol = gmres(A, self.rhs.val, x0=initial_guess, M=M)[0]
        elif( solver == "LU"):
            self.sol = linsolve.spsolve(A, self.rhs.val)
Example #28
 def solve(self, solver="AMG"):
     A = self.matrix.get_csr_matrix()
     if(solver == "AMG"):
         ml = smoothed_aggregation_solver(A)
         M = ml.aspreconditioner()
         self.sol = gmres(A, self.rhs.val, M=M)[0]
     elif( solver == "LU"):
         lu = get_lu_factorization_memoized(A)
         self.sol = lu.solve(self.rhs.val)
Example #29
def solver(A, config):
    """
    Given a matrix A and a solver configuration dictionary, generate a
    smoothed_aggregation_solver

    Parameters
    ----------
    A : {array, matrix, csr_matrix, bsr_matrix}
        Matrix to invert, CSR or BSR format preferred for efficiency
    config : {dict}
        A dictionary of solver configuration parameters that is used to
        generate a smoothed aggregation solver

    Returns
    -------
    ml : {smoothed_aggregation_solver}
        smoothed aggregation hierarchy

    Notes
    -----
    config must contain the following parameter entries for
    smoothed_aggregation_solver:
        symmetry, smooth, presmoother, postsmoother, B, strength,
        max_levels, max_coarse, coarse_solver, aggregate, keep

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg import solver_configuration,solver
    >>> A = poisson((40,40),format='csr')
    >>> config = solver_configuration(A,verb=False)
    >>> ml = solver(A,config)

    """

    # Convert A to acceptable format
    A = make_csr(A)

    # Generate smoothed aggregation solver
    try:
        return \
            smoothed_aggregation_solver(A,
                                        B=config['B'],
                                        BH=config['BH'],
                                        smooth=config['smooth'],
                                        strength=config['strength'],
                                        max_levels=config['max_levels'],
                                        max_coarse=config['max_coarse'],
                                        coarse_solver=config['coarse_solver'],
                                        symmetry=config['symmetry'],
                                        aggregate=config['aggregate'],
                                        presmoother=config['presmoother'],
                                        postsmoother=config['postsmoother'],
                                        keep=config['keep'])
    except:
        raise TypeError('Failed generating smoothed_aggregation_solver')
Example #30
def MGsetup(nx):
    import numpy as np
    import scipy as sp
    import scipy.sparse
    import pyamg
    import scipy.io
    # scipy.io.savemat('A.mat', {'A': A})
    A = scipy.io.loadmat('A.mat')['A'].tocsr()
    ml = pyamg.smoothed_aggregation_solver(A, max_coarse=10)
    b = np.random.rand(A.shape[0])
Example #31
def MGsetup(nx):
    import numpy as np
    import scipy as sp
    import scipy.sparse
    import pyamg
    import scipy.io
    # scipy.io.savemat('A.mat', {'A': A})
    A = scipy.io.loadmat('A.mat')['A'].tocsr()
    ml = pyamg.smoothed_aggregation_solver(A, max_coarse=10)
    b = np.random.rand(A.shape[0])
Example #32
    def __init__(self, system_of_eqs, method=None):
        """

        Arguments
        ---------

        """
        if method is None and sparse.issparse(system_of_eqs):
            method = "lu"
        elif method is None:
            method = "chol"
        
        method = method.lower()

        if (not sparse.issparse(system_of_eqs)) and (method not in self.supported_dense):
            warn(f"'{method}' is not supported for dense matrices. Resolving to 'chol'.")
            method = "chol"

        if method == "amg":
            system_of_eqs = sparse.csr_matrix(system_of_eqs)
            self.amg_grid = pyamg.smoothed_aggregation_solver(system_of_eqs)
        elif method == "amg_cg":
            system_of_eqs = sparse.csr_matrix(system_of_eqs)
            self.preconditioner = pyamg.smoothed_aggregation_solver(system_of_eqs).aspreconditioner()
        elif method == "lu":
            self.lu = spla.splu(system_of_eqs)
        elif method == "ilu_cg":
            self.spilu = spla.spilu(system_of_eqs)
            self.preconditioner = spla.LinearOperator(system_of_eqs.shape, lambda x: self.spilu.solve(x))
        elif method == "chol":
            self.chol = sla.cho_factor(system_of_eqs)
        elif method == "cg":
            self.preconditioner = None
        else:
            raise ValueError(dedent(
                f"""\
                Unsupported method {method}
                  * For dense matrices, choose one of {self.supported_dense}
                  * For sparse matrices, choose one of {self.supported_sparse}
                """
            ))
        
        self.system_of_eqs = system_of_eqs
        self.method = method
Example #33
def amg_linsolve(A, b):
    """

    :param A:
    :param b:
    :return:
    """
    ml = pyamg.smoothed_aggregation_solver(
        A)  # construct the multigrid hierarchy
    return ml.solve(b, tol=1e-5)
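A small usage sketch; the gallery Poisson matrix and random right-hand side are assumptions for illustration.

import numpy as np
import pyamg

A = pyamg.gallery.poisson((200, 200), format="csr")   # model 2-D Poisson matrix
b = np.random.rand(A.shape[0])
x = amg_linsolve(A, b)
# Relative residual; expected to be near the 1e-5 tolerance used inside amg_linsolve.
print(np.linalg.norm(b - A @ x) / np.linalg.norm(b))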
Example #34
    def __init__(self,
                 linear_op: LinearOperator,
                 heuristic: str = 'ruge_stuben',
                 cycle: str = 'F',
                 n_cycles: int = 1,
                 **kwargs):
        """
        Abstract representation for Algebraic Multi-grid (AMG) class of preconditioners. Available heuristics are the
        one from the PyAMG Python library.

        :param linear_op: Linear operator to build the algebraic multi-grid preconditioner on.
        :param heuristic: Name of the algebraic multi-grid heuristic used for multi-level hierarchy construction.
        :param cycle: Type of cycle of the multigrid method, either "V", "W", "F" or "AMLI".
        :param n_cycles: Number of cycles done per application of the multi-grid method as a preconditioner.
        """
        # Sanitize the heuristic argument
        if heuristic not in ['ruge_stuben', 'smoothed_aggregated', 'rootnode']:
            raise PreconditionerError(
                'AMG heuristic {} unknown.'.format(heuristic))

        matrix_repr = linear_op.mat

        # Setup multi-grid hierarchical structure with corresponding heuristic
        if heuristic == 'ruge_stuben':
            self.amg = pyamg.ruge_stuben_solver(matrix_repr.tocsr(), **kwargs)

        elif heuristic == 'smoothed_aggregated':
            self.amg = pyamg.smoothed_aggregation_solver(
                matrix_repr.tocsr(), **kwargs)

        elif heuristic == 'rootnode':
            self.amg = pyamg.rootnode_solver(matrix_repr.tocsr(), **kwargs)

        # Sanitize the number of cycles argument
        if not isinstance(n_cycles, int) or n_cycles < 1:
            raise PreconditionerError(
                'Number of cycles must be a positive integer, received {}.'.
                format(n_cycles))

        # Sanitize the cycle argument
        if cycle not in ['V', 'F', 'W', 'AMLI']:
            raise PreconditionerError(
                'AMG cycle type {} unknown.'.format(cycle))

        self.cycle = cycle
        self.n_cycles = n_cycles

        # Get hierarchy key attributes
        self.cycle_complexity = self.amg.cycle_complexity(cycle=self.cycle)
        self.operator_complexity = self.amg.operator_complexity()

        self.levels = self.amg.levels
        self.amg = self.amg.aspreconditioner(cycle=self.cycle)

        super().__init__(linear_op)
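For comparison, a minimal standalone sketch of the smoothed-aggregation branch above, used directly as a preconditioner for SciPy's CG; the test matrix and cycle choice are illustrative assumptions.

import numpy as np
import pyamg
from scipy.sparse.linalg import cg

A = pyamg.gallery.poisson((150, 150), format="csr")   # SPD test matrix
b = np.random.rand(A.shape[0])

amg = pyamg.smoothed_aggregation_solver(A)
M = amg.aspreconditioner(cycle="V")                   # one V-cycle per preconditioner application

x, info = cg(A, b, M=M)                               # info == 0 on convergence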
Example #35
    def keo_amg(self, psi):
        """Algebraic multigrid solve.
        """
        import pyamg

        if self._keo_amg_solver is None:
            if self._modeleval._keo is None:
                self._modeleval._assemble_keo()
            self._keo_amg_solver = pyamg.smoothed_aggregation_solver(
                self._modeleval._keo)
        return self._keo_amg_solver.solve(psi, tol=1e-12, accel=None)
Example #36
def solve(A, b, tol=1e-8):
    ml = pyamg.smoothed_aggregation_solver(A)

    if len(b.shape) == 1:
        x = ml.solve(b, tol=tol)
        return x

    x = np.zeros(b.shape)
    for bi in range(b.shape[1]):
        x[:, bi] = ml.solve(b[:, bi], tol=tol)
    return x
Example #37
    def _linear_analysis(self, method, **kwargs):
        """Performs the linear analysis, in which the pressure and flow fields
        are computed.
        INPUT: method: This can be either 'direct' or 'iterative'
               **kwargs
               precision: The accuracy to which the linear system is to be
                          solved. If not supplied, machine accuracy will be
                          used. (This only applies to the iterative solver.)
        OUTPUT: The maximum, mean, and median pressure change. Moreover,
                pressure and flow are modified in-place.
        """

        G = self._G
        A = self._A.tocsr()
        if method == 'direct':
            linalg.use_solver(useUmfpack=True)
            x = linalg.spsolve(A, self._b)
        elif method == 'iterative':
            if 'precision' in kwargs:
                eps = kwargs['precision']
            else:
                eps = self._eps
            AA = smoothed_aggregation_solver(A, max_levels=10, max_coarse=500)
            x = abs(AA.solve(self._b, x0=None, tol=eps, accel='cg', cycle='V', maxiter=150))
            # abs required, as (small) negative pressures may arise
        elif method == 'iterative2':
            # Set linear solver
            ml = rootnode_solver(A, smooth=('energy', {'degree': 2}), strength='evolution')
            M = ml.aspreconditioner(cycle='V')
            # Solve pressure system
            #x, info = gmres(A, self._b, tol=self._eps, maxiter=50, M=M, x0=self._x)
            #x, info = gmres(A, self._b, tol=self._eps/10000000000000, maxiter=50, M=M)
            x, info = gmres(A, self._b, tol=self._eps/10000, maxiter=50, M=M)
            if info != 0:
                print('SOLVEERROR in Solving the Matrix')

        pdiff = [abs((p - xx) / p) if p > 0 else 0.0
                 for p, xx in zip(G.vs['pressure'], x)]
        maxPDiff = max(pdiff)
        meanPDiff = np.mean(pdiff)
        medianPDiff = np.median(pdiff)
        log.debug(np.nonzero(np.array(pdiff) == maxPDiff)[0])

        G.vs['pressure'] = x
        G.es['flow'] = [abs(G.vs[edge.source]['pressure'] -   \
                            G.vs[edge.target]['pressure']) *  \
                        edge['conductance'] for edge in G.es]

        self._maxPDiff = maxPDiff
        self._meanPDiff = meanPDiff
        self._medianPDiff = medianPDiff

        return maxPDiff, meanPDiff, medianPDiff
Example #38
 def solver(A, b, rtol=None, max_it=None, x0=None, **kwargs):
     r"""
     Wrapper method for PyAMG sparse linear solvers.
     """
     import pyamg
     ml = pyamg.smoothed_aggregation_solver(A)
     x = ml.solve(b=b,
                  x0=x0,
                  tol=rtol,
                  maxiter=max_it,
                  accel="bicgstab")
     return x
Example #39
    def amg(self, A, null_space=None, as_precond=True, **kwargs):
        """ Wrapper around the pyamg solver by Bell, Olson and Schroder.

        For the moment, the method creates a smoothed aggregation amg solver.
        More elaborate options may be added in the future if the need arises.
        If you need other types of solvers or functionality, access pyamg
        directly.

        For documentation of pyamg, including parameters options, confer
        https://github.com/pyamg/pyamg.

        This wrapper can either produce a solver or a preconditioner
        (LinearOperator). For the moment we provide limited parsing of options,
        the solver will be a GMRES accelerated V-cycle, while the
        preconditioner is simply a V-cycle. Expanding this is not difficult,
        but tedious.

        Parameters:
            A (Matrix): To be factorized.
            null_space (optional): Null space of the matrix. Accurate
                information here is essential to creating a good coarse space
                hierarchy. Defaults to vector of ones, which is the correct
                choice for standard elliptic equations.
            as_precond (optional, defaults to True): Whether to return a solver
                or a preconditioner function.
            **kwargs: For the moment not in use.

        Returns:
            Function: Either a LinearOperator to be used as preconditioner,
                or a solver.

        """

        if null_space is None:
            null_space = np.ones(A.shape[0])
        try:
            ml = pyamg.smoothed_aggregation_solver(A, B=null_space)
        except NameError:
            raise ImportError(
                'Using amg requires the pyamg package. pyamg was not imported'
            )

        def solve(b, res=None, **kwargs):
            if res is None:
                return ml.solve(b, accel='gmres', cycle='V')
            else:
                return ml.solve(b, residuals=res, accel='gmres', cycle='V')

        if as_precond:
            M_x = lambda x: ml.solve(x, tol=1e-20, maxiter=10, cycle='W')
            return spl.LinearOperator(A.shape, M_x)
        else:
            return solve
Example #40
    def keo_amg(self, psi):
        """Algebraic multigrid solve.
        """
        import pyamg

        if self._keo_amg_solver is None:
            if self._modeleval._keo is None:
                self._modeleval._assemble_keo()
            self._keo_amg_solver = pyamg.smoothed_aggregation_solver(
                self._modeleval._keo
            )
        return self._keo_amg_solver.solve(psi, tol=1e-12, accel=None)
Example #41
def fiedler(adj_list, plot=False, fn="FiedlerPlots", n_fied=2):
    """calculate the first fiedler vector of a graph adjascancy list and optionally write associated plots to file.

    Takes:
    adj_list:
    An Nx2 nested list of ints of the form:
    [[node1,node2],
    ...]
    Representing the adjacency list.

    plot=False: make plots or not.
    fn="FiedlerPlots": filename to prepend to the plot png file names
    n_fied=2: the number of fiedler vectors to calculate (values above 2 will not be output)

    Returns a Dictionary of the form:



    {"f1": the first fiedler vector,
    "f2": (if caclulated) the second fideler vector
    "d": the node degrees,
    "r1": the rank of each node in the first fiedler vector
    "r2": the rank of each node in the second fiedler vector}


    """

    A = graph_laplacian(adj_list)

    # construct preconditioner
    ml = smoothed_aggregation_solver(A, coarse_solver='pinv2', max_coarse=10)
    M = ml.aspreconditioner()

    # solve for lowest two modes: constant vector and Fiedler vector
    X = scipy.rand(A.shape[0], n_fied + 1)
    (eval,evec,res) = lobpcg(A, X, M=None, tol=1e-12, largest=False, \
            verbosityLevel=0, retResidualNormsHistory=True)

    if plot:
        doPlots(evec[:, 1], evec[:, 2], A.diagonal(), adj_list, fn)

    out = {
        "f1": list(evec[:, 1]),
        "d": list(A.diagonal()),
        "r1": [int(i) for i in list(numpy.argsort(numpy.argsort(evec[:, 1])))]
    }
    if n_fied > 1:
        out["f2"] = list(evec[:, 2])
        out["r2"] = [
            int(i) for i in list(numpy.argsort(numpy.argsort(evec[:, 2])))
        ]
    return out
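A self-contained sketch of the same computation on a toy graph, using SciPy's csgraph Laplacian in place of the `graph_laplacian` helper and actually passing the AMG preconditioner to LOBPCG; the path graph and tolerances are illustrative assumptions.

import numpy as np
import scipy.sparse as sparse
from scipy.sparse.csgraph import laplacian
from scipy.sparse.linalg import lobpcg
import pyamg

# Path graph on 50 nodes, given as an edge list like adj_list above.
edges = np.array([[i, i + 1] for i in range(49)])
n = edges.max() + 1
W = sparse.coo_matrix((np.ones(len(edges)), (edges[:, 0], edges[:, 1])), shape=(n, n))
W = W + W.T                                       # symmetric adjacency matrix
L = sparse.csr_matrix(laplacian(W))

ml = pyamg.smoothed_aggregation_solver(L, max_coarse=10)
M = ml.aspreconditioner()

X = np.random.rand(n, 2)                          # constant mode + Fiedler vector
vals, vecs = lobpcg(L, X, M=M, tol=1e-8, largest=False, maxiter=200)
fiedler_vector = vecs[:, np.argsort(vals)[1]]     # eigenvector of the second-smallest eigenvalue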
Example #42
    def test_solver_parameters(self):
        A = poisson((50,50), format='csr')

        for method in methods:
            #method = ('richardson', {'omega':4.0/3.0})
            ml = smoothed_aggregation_solver(A, presmoother=method, postsmoother=method, max_coarse=10)

            residuals = profile_solver(ml)
            #print "method",method
            #print "residuals",residuals
            #print "convergence rate:",(residuals[-1]/residuals[0])**(1.0/len(residuals))
            assert( (residuals[-1]/residuals[0])**(1.0/len(residuals)) < 0.95 )

        for method in methods2:
            ml = smoothed_aggregation_solver(A, max_coarse=10)
            change_smoothers(ml, presmoother=method[0], postsmoother=method[1])

            residuals = profile_solver(ml)
            #print "method",method
            #print "residuals",residuals
            #print "convergence rate:",(residuals[-1]/residuals[0])**(1.0/len(residuals))
            assert( (residuals[-1]/residuals[0])**(1.0/len(residuals)) < 0.95 )
Example #43
    def test_minimal_residual(self):
        # Ensure repeatability
        np.random.seed(0)

        self.definite_cases.extend(self.spd_cases)

        for case in self.definite_cases:
            A = case['A']
            maxiter = case['maxiter']
            x0 = np.random.rand(A.shape[0],)
            b = np.zeros_like(x0)
            reduction_factor = case['reduction_factor']
            if A.dtype != complex:

                # This function should always decrease (assuming zero RHS)
                fvals = []

                def callback(x):
                    fvals.append(np.sqrt(np.dot(np.ravel(x),
                                 np.ravel(A*x.reshape(-1, 1)))))
                #
                (x, flag) = minimal_residual(A, b, x0=x0,
                                             tol=1e-16, maxiter=maxiter,
                                             callback=callback)
                actual_factor = (norm(np.ravel(b) -
                                 np.ravel(A * x.reshape(-1, 1))) /
                                 norm(np.ravel(b) -
                                 np.ravel(A * x0.reshape(-1, 1))))
                assert(actual_factor < reduction_factor)
                if A.dtype != complex:
                    for i in range(len(fvals)-1):
                        assert(fvals[i+1] <= fvals[i])

        # Test preconditioning
        A = pyamg.gallery.poisson((10, 10), format='csr')
        x0 = np.random.rand(A.shape[0], 1)
        b = np.zeros_like(x0)
        fvals = []

        def callback(x):
            fvals.append(np.sqrt(np.dot(np.ravel(x),
                                        np.ravel(A*x.reshape(-1, 1)))))
        #
        resvec = []
        sa = pyamg.smoothed_aggregation_solver(A)
        (x, flag) = minimal_residual(A, b, x0, tol=1e-8, maxiter=20,
                                     residuals=resvec, M=sa.aspreconditioner(),
                                     callback=callback)
        assert(resvec[-1]/resvec[0] < 1e-8)
        for i in range(len(fvals)-1):
            assert(fvals[i+1] <= fvals[i])
Example #44
    def transform(self):
        # self.n_components = min(self.n_components, self.data.shape[1])
        laplacian, dd = csgraph_laplacian(self.data,
                                          normed=self.norm_laplacian,
                                          return_diag=True)
        laplacian = check_array(laplacian,
                                dtype=np.float64,
                                accept_sparse=True)
        laplacian = _set_diag(laplacian, 1, self.norm_laplacian)

        ## Seed the global number generator because the pyamg
        ## interface apparently uses that...
        ## Also, see https://github.com/pyamg/pyamg/issues/139
        np.random.seed(self.random_state.randint(2**31 - 1))

        diag_shift = 1e-5 * sparse.eye(laplacian.shape[0])
        laplacian += diag_shift
        ml = smoothed_aggregation_solver(check_array(laplacian, "csr"))
        laplacian -= diag_shift

        M = ml.aspreconditioner()
        X = self.random_state.rand(laplacian.shape[0], self.n_components + 1)
        X[:, 0] = dd.ravel()

        # laplacian *= -1
        # v0 = self.random_state.uniform(-1, 1, laplacian.shape[0])
        # eigvals, diffusion_map = eigsh(
        #     laplacian, k=self.n_components + 1, sigma=1.0, which="LM", tol=0.0, v0=v0
        # )
        # # eigsh needs reversing
        # embedding = diffusion_map.T[::-1]

        eigvals, diffusion_map = lobpcg(laplacian,
                                        X,
                                        M=M,
                                        tol=1.0e-5,
                                        largest=False)
        embedding = diffusion_map.T
        if self.norm_laplacian:
            embedding = embedding / dd

        if self.drop_first:
            self.data_ = embedding[1:self.n_components].T
            eigvals = eigvals[1:self.n_components]
        else:
            self.data_ = embedding[:self.n_components].T

        self.eigvals_ = eigvals[::
                                -1]  # reverse direction to have the largest first
Example #45
  def solve_on_coarse_level(self):

    if comm.rank == 0:
      if self.verbosity >= 2:
        print(pid+"  Solving on coarse level")

      timer = Timer("Coarse level solution")

      if self.problem.switch_matrices_on_coarse_level:
        A = self.B_coarse
        B = self.A_coarse
        largest = True
        which = 'LM'
      else:
        A = self.A_coarse
        B = self.B_coarse
        largest = False
        which = 'SM'

      # Set initial approximation
      self.v_coarse.fill(0.0)
      self.v_coarse[0] = 1.0

      if self.use_lobpcg_on_coarse_level:
        if self.precond_lobpcg_by_ml:
          if self.update_lobpcg_prec or self.M is None:
            if self.verbosity >= 3:
              print0(pid+"    Creating coarse level preconditioner")

            ml = smoothed_aggregation_solver(A)
            self.M = ml.aspreconditioner()

        w, v, h = lobpcg(A, self.v_coarse, B, self.M, tol=self.coarse_level_tol, maxiter=self.coarse_level_maxit,
                         largest=largest, verbosityLevel=self.lobpcg_verb, retResidualNormsHistory=True)
      else:
        if self.problem.sym:
          w, v = eigsh(A, 1, B, which=which, v0=self.v_coarse,
                       ncv=self.coarse_level_num_ritz_vec, maxiter=self.coarse_level_maxit, tol=self.coarse_level_tol)
        else:
          w, v = eigs(A, 1, B, which=which, v0=self.v_coarse,
                      ncv=self.coarse_level_num_ritz_vec, maxiter=self.coarse_level_maxit, tol=self.coarse_level_tol)

      self.lam = w[0]
      self.v_coarse = v[0]

      try:
        self.num_it_coarse += len(h)
      except NameError:
        pass  # There seems to be no way to obtain number of iterations for eigs/eigsh
Example #46
def fiedler(adj_list,plot=False,fn="FiedlerPlots",n_fied=2):
    """calculate the first fiedler vector of a graph adjascancy list and optionally write associated plots to file.

    Takes:
    adj_list:
    An Nx2 nested list of ints of the form:
    [[node1,node2],
    ...]
    Representing the adjacency list.

    plot=False: make plots or not.
    fn="FiedlerPlots": filename to prepend to the plot png file names
    n_fied=2: the number of fiedler vectors to calculate (values above 2 will not be output)

    Returns a Dictionary of the form:



    {"f1": the first fiedler vector,
    "f2": (if caclulated) the second fideler vector
    "d": the node degrees,
    "r1": the rank of each node in the first fiedler vector
    "r2": the rank of each node in the second fiedler vector}


    """
    

    A = graph_laplacian(adj_list)

    # construct preconditioner
    ml = smoothed_aggregation_solver(A, coarse_solver='pinv2',max_coarse=10)
    M = ml.aspreconditioner()

    # solve for lowest two modes: constant vector and Fiedler vector
    X = scipy.rand(A.shape[0], n_fied+1)
    (eval,evec,res) = lobpcg(A, X, M=None, tol=1e-12, largest=False, \
            verbosityLevel=0, retResidualNormsHistory=True)

    if plot:
        doPlots(evec[:,1],evec[:,2],A.diagonal(),adj_list,fn)
        
        
    out = {"f1":list(evec[:,1]),"d":list(A.diagonal()),"r1":[int(i) for i in list(numpy.argsort(numpy.argsort(evec[:,1])))]}
    if n_fied > 1:
        out["f2"]=list(evec[:,2])
        out["r2"]=[int(i) for i in list(numpy.argsort(numpy.argsort(evec[:,2])))]
    return out
Example #47
    def test_steepest_descent(self):
        # Ensure repeatability
        np.random.seed(0)

        for case in self.spd_cases:
            A = case['A']
            b = case['b']
            x0 = case['x0']
            maxiter = case['maxiter']
            reduction_factor = case['reduction_factor']

            # This function should always decrease
            fvals = []

            def callback(x):
                fvals.append(0.5*np.dot(np.ravel(x),
                                        np.ravel(A*x.reshape(-1, 1))) -
                             np.dot(np.ravel(b), np.ravel(x)))

            (x, flag) = steepest_descent(A, b, x0=x0, tol=1e-16,
                                         maxiter=maxiter, callback=callback)
            actual_factor = (norm(np.ravel(b) - np.ravel(A*x.reshape(-1, 1))) /
                             norm(np.ravel(b) - np.ravel(A*x0.reshape(-1, 1))))
            assert(actual_factor < reduction_factor)

            if A.dtype != complex:
                for i in range(len(fvals)-1):
                    assert(fvals[i+1] <= fvals[i])

        # Test preconditioning
        A = pyamg.gallery.poisson((10, 10), format='csr')
        b = np.random.rand(A.shape[0], 1)
        x0 = np.random.rand(A.shape[0], 1)
        fvals = []

        def callback(x):
            fvals.append(0.5*np.dot(np.ravel(x),
                                    np.ravel(A*x.reshape(-1, 1))) -
                         np.dot(np.ravel(b), np.ravel(x)))

        resvec = []
        sa = pyamg.smoothed_aggregation_solver(A)
        (x, flag) = steepest_descent(A, b, x0, tol=1e-8, maxiter=20,
                                     residuals=resvec, M=sa.aspreconditioner(),
                                     callback=callback)
        assert(resvec[-1]/resvec[0] < 1e-8)
        for i in range(len(fvals)-1):
            assert(fvals[i+1] <= fvals[i])
Example #48
0
	def __init__(self, p):
		"""Parses the file _infoSolver to initialize the solver.
		
		Arguments
		---------
		p -- field variable (Variable object) of the Poisson equation.
		"""
		with open(Case.path+'/_infoSolver.yaml', 'r') as infile:
			info = yaml.safe_load(infile)['poisson']
		self.solver = Poisson.solvers[info['solver']]
		self.tol = info['tol']
		self.maxiter = info['maxiter']
		self.M = None
		if 'precond' in info:
			ml = pyamg.smoothed_aggregation_solver(p.laplacian.mat)
			self.M = ml.aspreconditioner(cycle=info['precond']['cycle'])
		self.iterations, self.residuals = [], []
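
The constructor above reads only a handful of keys from the YAML file; a sketch of the _infoSolver.yaml layout it appears to expect is given below as a comment. The key names come from the lookups in the code, while the values are purely illustrative.

# Illustrative _infoSolver.yaml content (values are made up, not from the source):
#
# poisson:
#     solver: cg          # looked up in Poisson.solvers
#     tol: 1.0e-08
#     maxiter: 1000
#     precond:            # optional; enables the pyamg preconditioner
#         cycle: V
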
Example #49
0
def locally_linear_embedding(X, n_neighbors, out_dim, tol=1e-6, max_iter=200):
    W = neighbors.kneighbors_graph(X, n_neighbors=n_neighbors, mode="barycenter")

    # M = (I-W)' (I-W)
    A = eye(*W.shape, format=W.format) - W
    A = (A.T).dot(A).tocsr()

    # initial approximation to the eigenvectors
    X = np.random.rand(W.shape[0], out_dim)
    ml = smoothed_aggregation_solver(A, symmetry="symmetric")
    prec = ml.aspreconditioner()

    # compute eigenvalues and eigenvectors with LOBPCG
    eigen_values, eigen_vectors = linalg.lobpcg(A, X, M=prec, largest=False, tol=tol, maxiter=max_iter)

    index = np.argsort(eigen_values)
    return eigen_vectors[:, index], np.sum(eigen_values)
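
A hypothetical call to the routine above, assuming scikit-learn's neighbors module and the scipy/numpy names used in the snippet are already imported:

# Usage sketch (not from the original source): embed 100 random 3-D points
# into 2 dimensions using 10 nearest neighbors.
import numpy as np
pts = np.random.rand(100, 3)
Y, cost = locally_linear_embedding(pts, n_neighbors=10, out_dim=2)
print(Y.shape)   # (100, 2)
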
Example #50
0
def solver_diagnostic(A):
    ##
    # Generate B
    B = ones((A.shape[0],1), dtype=A.dtype); BH = B.copy()

    ##
    # Random initial guess, zero right-hand side
    random.seed(0)
    b = zeros((A.shape[0],1))
    x0 = rand(A.shape[0],1)

    ##
    # Create solver
    ml = smoothed_aggregation_solver(A, B=B, BH=BH,
        strength=('symmetric', {'theta': 0.0}),
        smooth=('energy', {'weighting': 'local', 'krylov': 'gmres', 'degree': 1, 'maxiter': 2}),
        improve_candidates=[('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 4}), None],
        aggregate="standard",
        presmoother=('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 2}),
        postsmoother=('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 2}),
        max_levels=15,
        max_coarse=300,
        coarse_solver="pinv")

    ##
    # Solve system
    res = []
    x = ml.solve(b, x0=x0, tol=1e-08, residuals=res, accel="gmres", maxiter=300, cycle="V")
    res_rate = (res[-1]/res[0])**(1.0/(len(res)-1.))
    normr0 = norm(ravel(b) - ravel(A*x0))
    print(" ")
    print(ml)
    print("System size:                " + str(A.shape))
    print("Avg. Resid Reduction:       %1.2f" % res_rate)
    print("Iterations:                 %d" % len(res))
    print("Operator Complexity:        %1.2f" % ml.operator_complexity())
    print("Work per DOA:               %1.2f" % (ml.cycle_complexity() / abs(log10(res_rate))))
    print("Relative residual norm:     %1.2e" % (norm(ravel(b) - ravel(A*x)) / normr0))

    ##
    # Plot residual history
    pylab.semilogy(array(res)/normr0)
    pylab.title('Residual Histories')
    pylab.xlabel('Iteration')
    pylab.ylabel('Relative Residual Norm')
    pylab.show()
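
A hypothetical driver for solver_diagnostic(), assuming the script's own numpy/pylab imports are in place; the gallery matrix is chosen only for demonstration.

# Usage sketch (assumption, not from the original script): run the diagnostic
# on a 2-D Poisson problem from the pyamg gallery.
from pyamg.gallery import poisson
A = poisson((100, 100), format='csr')
solver_diagnostic(A)
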
Example #51
0
def spectral_partition(A):
    ml = smoothed_aggregation_solver(A,
            coarse_solver='pinv2',max_coarse=100,smooth=None, strength=None)
    print(ml)

    M = ml.aspreconditioner()

    X = np.random.rand(A.shape[0], 2)
    (evals,evecs,res) = lobpcg(A, X, M=M, tol=1e-12, largest=False, \
        verbosityLevel=0, retResidualNormsHistory=True, maxiter=200)

    fiedler = evecs[:,1]
    vmed = np.median(fiedler)
    v = np.zeros((A.shape[0],))
    K = np.where(fiedler<=vmed)[0]
    v[K]=-1
    K = np.where(fiedler>vmed)[0]
    v[K]=1
    return v, res
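
For reference, a hypothetical call to spectral_partition() on a gallery matrix (not from the original source); np and lobpcg are assumed to be imported as in the snippet.

# Usage sketch (assumption): bipartition the graph of a 2-D Poisson matrix.
import pyamg.gallery
A = pyamg.gallery.poisson((30, 30), format='csr')
parts, history = spectral_partition(A)
print((parts == 1).sum(), (parts == -1).sum())   # sizes of the two parts
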
Example #52
0
def isoperimetric(A, ground=None, residuals=None) :

  #from pyamg.graph import pseudo_peripheral_node
  #ground = pseudo_peripheral_node(A)[0]

  # select random ground 'dead' node
  if ground is None :
    seed()
    ground = randint(0,A.shape[0]-1)

  coarse = numpy.arange(0,A.shape[0])
  coarse = numpy.delete(coarse,ground,0)

  # remove ground node row and column
  L = A[coarse,:][:,coarse]
  r = numpy.ones((L.shape[0],))

  # use the caller-supplied list if given, otherwise collect residuals locally
  res = residuals if residuals is not None else []

  # construct preconditioner
  ml = smoothed_aggregation_solver(L,coarse_solver='pinv2')
  M = ml.aspreconditioner()

  # solve system using cg
  (x,flag) = cg(L,r,residuals=res,tol=1e-12,M=M)

  # use the median of solution, x, as the separator
  vmed = numpy.median(x)
  vmin = numpy.min(x)
  P1 = coarse[numpy.where(x<=vmed)[0]]
  P2 = coarse[numpy.where(x>vmed)[0]]

  weights = numpy.zeros((A.shape[0],))
  weights[P1] = x[numpy.where(x<=vmed)[0]]
  weights[P2] = x[numpy.where(x>vmed)[0]]
  weights[ground] = vmin-1

  P1 = numpy.append(P1,ground)

  return P1,P2,weights
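
A hypothetical call to isoperimetric(); the gallery Poisson matrix is used here only because it has the symmetric, Laplacian-like structure the routine assumes, and numpy, cg and smoothed_aggregation_solver are taken to be imported as in the snippet.

# Usage sketch (assumption): split the graph of a 2-D Poisson matrix in two.
from pyamg.gallery import poisson
A = poisson((20, 20), format='csr')
P1, P2, weights = isoperimetric(A)
print(len(P1), len(P2))
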
Example #53
0
    def test_aspreconditioner(self):
        from pyamg import smoothed_aggregation_solver
        from scipy.sparse.linalg import cg
        from pyamg.krylov import fgmres

        A = poisson((50, 50), format='csr')
        b = rand(A.shape[0])

        ml = smoothed_aggregation_solver(A)

        for cycle in ['V', 'W', 'F']:
            M = ml.aspreconditioner(cycle=cycle)
            x, info = cg(A, b, tol=1e-8, maxiter=30, M=M)
            # cg satisfies convergence in the preconditioner norm
            assert(precon_norm(b - A*x, ml) < 1e-8*precon_norm(b, ml))

        for cycle in ['AMLI']:
            M = ml.aspreconditioner(cycle=cycle)
            x, info = fgmres(A, b, tol=1e-8, maxiter=30, M=M)
            # fgmres satisfies convergence in the 2-norm
            assert(norm(b - A*x) < 1e-8*norm(b))
Example #54
0
def spectral(A,eval=None,evec=None,plot=False,method='lobpcg') :

  # solve for lowest two modes: constant vector and Fiedler vector
  X = numpy.random.rand(A.shape[0], 2)

  if method == 'lobpcg':
    # specify lowest eigenvector and orthonormalize fiedler against it
    X[:, 0] = numpy.ones((A.shape[0],))
    X = numpy.linalg.qr(X, mode='reduced')[0]

    # construct preconditioner
    ml = smoothed_aggregation_solver(A, coarse_solver='pinv2')
    M = ml.aspreconditioner()

    (eval, evec, res) = lobpcg(A, X, M=M, tol=1e-5, largest=False,
                               verbosityLevel=0, retResidualNormsHistory=True,
                               maxiter=200)
  elif method == 'tracemin':
    res = []
    evec = tracemin_fiedler(A, residuals=res, tol=1e-5)
    evec[:, 1] = rqi(A, evec[:, 1], k=3)[1]
  else:
    raise ValueError('Unknown method')
  
  # use the median of fiedler, as the separator
  fiedler = evec[:,1]
  vmed = numpy.median(fiedler)
  P1 = numpy.where(fiedler<=vmed)[0]
  P2 = numpy.where(fiedler>vmed)[0]

  if plot is True :
     from matplotlib.pyplot import semilogy,figure,show,title,xlabel,ylabel
     figure()
     semilogy(res)
     xlabel('Iteration')
     ylabel('Residual norm')
     title('Spectral convergence history')
     show()

  return P1,P2,fiedler
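
A hypothetical call to spectral() using the default LOBPCG branch; the tracemin branch additionally needs tracemin_fiedler and rqi from the same source file.

# Usage sketch (assumption): partition the graph of a 2-D Poisson matrix.
from pyamg.gallery import poisson
A = poisson((25, 25), format='csr')
P1, P2, fiedler_vec = spectral(A, method='lobpcg')
print(len(P1), len(P2))
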
Example #55
0
def eig_multi(A, B=None, n_components=2, tol=1E-12, random_state=None):
    """Solves the generalized Eigenvalue problem:
    A x = lambda B x using the multigrid method.
    Works well for very large matrices, but can occasionally
    be numerically unstable.
    """
    random_state = check_random_state(random_state)
    # convert matrix A and B to float
    A = A.astype(np.float64);

    if B is not None:
        B = B.astype(np.float64)

    # build the smoothed-aggregation multigrid hierarchy
    ml = smoothed_aggregation_solver(check_array(A, accept_sparse = ['csr']))

    # preconditioner
    M = ml.aspreconditioner()

    n_nodes = A.shape[0]
    n_find = min(n_nodes, 5 + 2*n_components)
    # initial guess for X
    X = random_state.rand(n_nodes, n_find)

    # solve using the lobpcg algorithm
    # note: largest must be a real boolean; the string 'False' is truthy
    eigVals, eigVecs = lobpcg(A, X, M=M, B=B,
                              tol=tol,
                              largest=False)

    sort_order = np.argsort(eigVals)
    eigVals = eigVals[sort_order]
    eigVecs = eigVecs[:, sort_order]

    eigVals = eigVals[:n_components]
    eigVecs = eigVecs[:, :n_components]
    return eigVals, eigVecs
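
A hypothetical call to eig_multi() for a standard (B=None) problem; check_random_state and check_array are assumed to be the scikit-learn utilities imported elsewhere in the source file.

# Usage sketch (assumption): smallest two eigenpairs of a 2-D Poisson matrix.
from pyamg.gallery import poisson
A = poisson((40, 40), format='csr')
vals, vecs = eig_multi(A, n_components=2, tol=1e-8, random_state=0)
print(vals)
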
Example #56
0
    def _solve_(self, L, x, b):
        relres = []

        # create scipy.sparse matrix view
        (data, row, col) = L.matrix.find()
        A = sparse.csr_matrix((data, (row, col)), shape=L.matrix.shape)

        # solve and deep copy data
        ml = pyamg.smoothed_aggregation_solver(A, **self.MGSetupOpts)
        x[:] = ml.solve(b=b, residuals=relres, **self.MGSolveOpts)

        # normalize the residual history and set the convergence flag
        info = 0
        if len(relres) > 0:
            relres = np.array(relres) / relres[0]
        iter = len(relres)

        if self.verbosity:
            print(ml)
            print('MG iterations: %d' % iter)
            print('MG convergence factor: %g' % ((relres[-1])**(1.0 / iter)))
            print('MG residual history: ', relres)

        self._raiseWarning(info, iter, relres)
    
Example #57
0
    ##
    # For demonstration, show that a naive SA solver 
    # yields unsatisfactory convergence
    smooth=('jacobi', {'filter' : True})
    strength=('symmetric', {'theta' : 0.1})
    SA_solve_args={'cycle':'W', 'maxiter':20, 'tol':1e-8, 'accel' : 'cg'}
    SA_build_args={'max_levels':10, 'max_coarse':25, 'coarse_solver':'pinv2', \
            'symmetry':'hermitian', 'keep':True}
    presmoother =('gauss_seidel', {'sweep':'symmetric', 'iterations':1})
    postsmoother=('gauss_seidel', {'sweep':'symmetric', 'iterations':1})
    
    ##
    # Construct solver and solve
    sa = smoothed_aggregation_solver(A, B=B, smooth=smooth, \
             strength=strength, presmoother=presmoother, \
             postsmoother=postsmoother, **SA_build_args)
    resvec = []
    x = sa.solve(b, x0=x0, residuals=resvec, **SA_solve_args)
    print("\n*************************************************************")
    print("*************************************************************")
    print("Observe that standard SA parameters for this p=5 discontinuous \n" +
          "Galerkin system yield an inefficient solver.\n")
    print_cycle_history(resvec, sa, verbose=True, plotting=False)

    ##
    # Now, construct and solve with appropriate parameters 
    p = 5
    improve_candidates = [('block_gauss_seidel', {'sweep':'symmetric', 'iterations':p}),
                ('gauss_seidel', {'sweep':'symmetric', 'iterations':p})]   
    aggregate = ['naive', 'standard']
Example #58
0
def spectral_embedding(adjacency, k=8, mode=None):
    """ Spectral embedding: project the sample on the k first
        eigenvectors of the graph Laplacian.

        Parameters
        -----------
        adjacency: array-like or sparse matrix, shape: (p, p)
            The adjacency matrix of the graph to embed.
        k: integer, optional
            The dimension of the projection subspace.
        mode: {None, 'arpack' or 'amg'}
            The eigenvalue decomposition strategy to use. AMG (Algebraic
            MultiGrid) is much faster, but requires pyamg to be
            installed.

        Returns
        --------
        embedding: array, shape: (p, k)
            The reduced samples

        Notes
        ------
        The graph should contain only one connected component;
        otherwise the results make little sense.
    """

    from scipy import sparse
    from scipy.sparse.linalg import eigsh
    from scipy.sparse.linalg import lobpcg
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False 

    n_nodes = adjacency.shape[0]
    # XXX: Should we check that the given matrix is symmetric?
    if not amg_loaded:
        warnings.warn('pyamg not available, using scipy.sparse')
    if mode is None:
        mode = ('amg' if amg_loaded else 'arpack')
    laplacian, dd = graph_laplacian(adjacency,
                                    normed=True, return_diag=True)
    if (mode == 'arpack' 
        or not sparse.isspmatrix(laplacian)
        or n_nodes < 5*k # This is the threshold under which lobpcg has bugs 
       ):
        # We need to put the diagonal at zero
        if not sparse.isspmatrix(laplacian):
            laplacian[::n_nodes+1] = 0
        else:
            laplacian = laplacian.tocoo()
            diag_idx = (laplacian.row == laplacian.col)
            laplacian.data[diag_idx] = 0
            # If the matrix has a small number of diagonals (as in the
            # case of structured matrices coming from images), the
            # dia format might be best suited for matvec products:
            n_diags = np.unique(laplacian.row - laplacian.col).size
            if n_diags <= 7:
                # 3 or less outer diagonals on each side
                laplacian = laplacian.todia()
            else:
                # csr has the fastest matvec and is thus best suited to
                # arpack
                laplacian = laplacian.tocsr()
        lambdas, diffusion_map = eigsh(-laplacian, k=k, which='LA')
        embedding = diffusion_map.T[::-1]*dd
    elif mode == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        laplacian = laplacian.astype(np.float64)  # lobpcg needs double precision floats
        ml = smoothed_aggregation_solver(laplacian.tocsr())
        X = np.random.rand(laplacian.shape[0], k)
        X[:, 0] = 1. / dd.ravel()
        M = ml.aspreconditioner()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12, 
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1: raise ValueError
    else:
        raise ValueError("Unknown value for mode: '%s'." % mode)
    return embedding
Example #59
0
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
                       random_state=None, eigen_tol=0.0,
                       norm_laplacian=True, drop_first=True):
    """Project the sample on the first eigenvectors of the graph Laplacian.

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigenvectors associated to the
    smallest eigenvalues) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However, care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.

    Note : Laplacian Eigenmaps is the actual algorithm implemented here.

    Read more in the :ref:`User Guide <spectral_embedding>`.

    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components : integer, optional, default 8
        The dimension of the projection subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int, RandomState instance or None, optional, default: None
        A pseudo random number generator used for the initialization of the
        lobpcg eigenvectors decomposition.  If int, random_state is the seed
        used by the random number generator; If RandomState instance,
        random_state is the random number generator; If None, the random number
        generator is the RandomState instance used by `np.random`. Used when
        ``eigen_solver`` == 'amg'.

    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    norm_laplacian : bool, optional, default=True
        If True, then compute normalized Laplacian.

    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.

    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.

    Notes
    -----
    Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
    has one connected component. If the graph has many components, the first
    few eigenvectors will simply uncover the connected components of the graph.

    References
    ----------
    * https://en.wikipedia.org/wiki/LOBPCG

    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    adjacency = check_symmetric(adjacency)

    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if eigen_solver == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")

    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'"
                         % eigen_solver)

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1

    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")

    laplacian, dd = sparse.csgraph.laplacian(adjacency, normed=norm_laplacian,
                                             return_diag=True)
    if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
       (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1, norm_laplacian)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            # We are computing the opposite of the laplacian inplace so as
            # to spare a memory allocation of a possibly very large array
            laplacian *= -1
            v0 = random_state.uniform(-1, 1, laplacian.shape[0])
            lambdas, diffusion_map = eigsh(laplacian, k=n_components,
                                           sigma=1.0, which='LM',
                                           tol=eigen_tol, v0=v0)
            embedding = diffusion_map.T[n_components::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            eigen_solver = "lobpcg"
            # Revert the laplacian to its opposite to have lobpcg work
            laplacian *= -1

    if eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian, dtype=np.float64,
                                accept_sparse=True)
        laplacian = _set_diag(laplacian, 1, norm_laplacian)
        ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError

    elif eigen_solver == "lobpcg":
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian, dtype=np.float64,
                                accept_sparse=True)
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to eigh, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.toarray()
            lambdas, diffusion_map = eigh(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            laplacian = _set_diag(laplacian, 1, norm_laplacian)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
                                            largest=False, maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError

    embedding = _deterministic_vector_sign_flip(embedding)
    if drop_first:
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
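
Finally, a hypothetical call exercising the 'amg' path of the scikit-learn spectral_embedding() above; the k-NN affinity construction is illustrative and pyamg must be installed for this solver choice.

# Usage sketch (assumption): embed a symmetrized k-NN graph with AMG + LOBPCG.
import numpy as np
from sklearn.neighbors import kneighbors_graph

X = np.random.rand(300, 3)
affinity = kneighbors_graph(X, n_neighbors=10, include_self=True)
affinity = 0.5 * (affinity + affinity.T)   # make the affinity symmetric
emb = spectral_embedding(affinity, n_components=2, eigen_solver='amg',
                         random_state=0)
print(emb.shape)   # (300, 2)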