Example #1
def _build_staggered_first_derivative_matrix_part(npoints, order_accuracy, h=1.0, lbc="d", rbc="d"):
    # npoints is the number of regular grid points.

    if order_accuracy % 2:
        raise ValueError("Only even accuracy orders supported.")

    # coefficients for the first derivative evaluated in between two regular grid points.
    stagger_coeffs = staggered_difference(1, order_accuracy) / h
    # Use the old 'stencil_grid' routine.
    # Because we do a staggered grid we need to shift the coeffs one entry and the matrix will not be square
    incorrect_mtx = stencil_grid(np.insert(stagger_coeffs, 0, 0), (npoints,), format="lil")
    # Get rid of the last row, which we don't want in our staggered approach
    mtx = incorrect_mtx[0:-1, :]

    if "n" in lbc or "n" in rbc:
        raise ValueError(
            "Did not yet implement Neumann boundaries. Perhaps looking at the centered grid implementation would be a good start?"
        )

    if "g" in lbc or "g" in rbc:
        raise ValueError(
            "Did not yet implement this boundary condition yet. Perhaps looking at the centered grid implementation would be a good start?"
        )

    # For dirichlet we don't need to alter the matrix for the first derivative for the boundary nodes as is done in the centered approach
    # The reason is that the first staggered point we evaluate at is in the interior of the domain.
    return mtx.tocsr()
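# A minimal usage sketch of the construction above, assuming second-order
# accuracy, where the staggered first-derivative coefficients reduce to
# [-1, 1] / h.  'staggered_difference' belongs to the enclosing package, so the
# coefficients are written out by hand; 'stencil_grid' is the pyamg.gallery
# routine used above.
import numpy as np
from pyamg.gallery import stencil_grid

npoints, h = 6, 0.5
stagger_coeffs = np.array([-1.0, 1.0]) / h       # 2nd-order staggered stencil
shifted = np.insert(stagger_coeffs, 0, 0)        # shift the coeffs one entry
D = stencil_grid(shifted, (npoints,), format='lil')[:-1, :].tocsr()

print(D.shape)      # (npoints - 1, npoints): one row per staggered midpoint
print(D.toarray())  # each row holds [-1/h, 1/h] straddling two regular points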
Example #2
def build_poisson(n, epsilon, theta, randomize, symmetric_scale=False):
    data = {}
    h = 1. / float(n + 1)

    print "Assembling diffusion using Q1 on a regular mesh with epsilon = " + \
          str(epsilon) + " and theta = " + str(theta) + " ..."
    stencil = diffusion_stencil_2d(type='FE', epsilon=epsilon, theta=theta)
    A = stencil_grid(stencil, (n, n), format='csr')
    X, Y = meshgrid(linspace(h, 1.0 - h, n), linspace(h, 1.0 - h, n))
    data['X'] = X
    data['Y'] = Y

    if randomize:
        print "Random diagonal scaling..."
        D = my_rand(A.shape[0], 1)
        D[D < 0.] -= 1e-3
        D[D >= 0.] += 1e-3
        data['D'] = D
        D_inv = 1. / D
        data['D_inv'] = D_inv
        A = scale_rows(A, D)
        A = scale_columns(A, D)

    if symmetric_scale:
        symmetric_rescaling(A, copy=False)

    print "Ratio of largest to smallest (in magnitude) diagonal element in A: %1.3e"% \
          (abs(A.diagonal()).max() / abs(A.diagonal()).min())
    data['A'] = A
    print 'Generate initial guess (which is random)...'
    data['x0'] = my_rand(data['A'].shape[0], 1)
    print 'Generate rhs (which is zero)...'
    data['b'] = numpy.zeros((data['A'].shape[0], 1))

    return data
Example #3
def build_linear_interpolation_matrix_part(npoints):
    # same logic as in '_build_staggered_first_derivative_matrix_part'
    coeffs = np.array([0.5, 0.5])
    incorrect_mtx = stencil_grid(np.insert(coeffs, 0, 0), (npoints, ),
                                 format='lil')
    mtx = incorrect_mtx[0:-1, :]
    return mtx.tocsr()
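# A quick check of the averaging behaviour of the matrix built above (same
# shift-and-drop trick, coefficients [0.5, 0.5]); stencil_grid comes from
# pyamg.gallery as in the other examples.
import numpy as np
from pyamg.gallery import stencil_grid

npoints = 5
P = stencil_grid(np.insert(np.array([0.5, 0.5]), 0, 0), (npoints,),
                 format='lil')[:-1, :].tocsr()

v = np.arange(npoints, dtype=float)  # values on the regular grid
print(P @ v)                         # midpoint averages: [0.5, 1.5, 2.5, 3.5]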
Example #4
def _build_staggered_first_derivative_matrix_part(npoints,
                                                  order_accuracy,
                                                  h=1.0,
                                                  lbc='d',
                                                  rbc='d'):
    #npoints is the number of regular grid points.

    if order_accuracy % 2:
        raise ValueError('Only even accuracy orders supported.')

    #coefficients for the first derivative evaluated in between two regular grid points.
    stagger_coeffs = staggered_difference(1, order_accuracy) / h
    #Use the old 'stencil_grid' routine.
    #Because we do a staggered grid we need to shift the coeffs one entry and the matrix will not be square
    incorrect_mtx = stencil_grid(np.insert(stagger_coeffs, 0, 0), (npoints, ),
                                 format='lil')
    #Get rid of the last row, which we don't want in our staggered approach
    mtx = incorrect_mtx[0:-1, :]

    if 'n' in lbc or 'n' in rbc:
        raise ValueError(
            'Did not yet implement Neumann boundaries. Perhaps looking at the centered grid implementation would be a good start?'
        )

    if 'g' in lbc or 'g' in rbc:
        raise ValueError(
            'This boundary condition is not implemented yet. Perhaps looking at the centered grid implementation would be a good start?'
        )

    #For dirichlet we don't need to alter the matrix for the first derivative for the boundary nodes as is done in the centered approach
    #The reason is that the first staggered point we evaluate at is in the interior of the domain.
    return mtx.tocsr()
Example #5
def _build_derivative_matrix_part(
    npoints, derivative, order_accuracy, h=1.0, lbc="d", rbc="d", use_shifted_differences=False
):

    if order_accuracy % 2:
        raise ValueError("Only even accuracy orders supported.")

    centered_coeffs = centered_difference(derivative, order_accuracy) / (h ** derivative)

    mtx = stencil_grid(centered_coeffs, (npoints,), format="lil")

    max_shift = order_accuracy // 2

    if use_shifted_differences:
        # Left side
        odd_even_offset = 1 - derivative % 2
        for i in range(0, max_shift):
            coeffs = shifted_difference(derivative, order_accuracy, -(max_shift + odd_even_offset) + i)
            mtx[i, 0 : len(coeffs)] = coeffs / (h ** derivative)

        # Right side
        for i in range(-1, -max_shift - 1, -1):
            coeffs = shifted_difference(derivative, order_accuracy, max_shift + i + odd_even_offset)
            mtx[i, slice(-1, -(len(coeffs) + 1), -1)] = coeffs[::-1] / (h ** derivative)

    if "d" in lbc:  # dirichlet
        mtx[0, :] = 0
        mtx[0, 0] = 1.0
    elif "n" in lbc:  # neumann
        mtx[0, :] = 0
        coeffs = shifted_difference(1, order_accuracy, -max_shift) / h
        coeffs /= -1 * coeffs[0]
        coeffs[0] = 0.0
        mtx[0, 0 : len(coeffs)] = coeffs
    elif type(lbc) is tuple and "g" in lbc[0]:  # ghost
        n_ghost_points = int(lbc[1])
        mtx[0:n_ghost_points, :] = 0
        for i in range(n_ghost_points):
            mtx[i, i] = 1.0

    if "d" in rbc:
        mtx[-1, :] = 0
        mtx[-1, -1] = 1.0
    elif "n" in rbc:
        mtx[-1, :] = 0
        coeffs = shifted_difference(1, order_accuracy, max_shift) / h
        coeffs /= -1 * coeffs[-1]
        coeffs[-1] = 0.0
        mtx[-1, slice(-1, -(len(coeffs) + 1), -1)] = coeffs[::-1]
    elif type(rbc) is tuple and "g" in rbc[0]:
        n_ghost_points = int(rbc[1])
        mtx[slice(-1, -(n_ghost_points + 1), -1), :] = 0
        for i in range(n_ghost_points):
            mtx[-i - 1, -i - 1] = 1.0

    return mtx.tocsr()
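# A sketch of what the Dirichlet branch above produces for derivative=2,
# order_accuracy=2.  centered_difference(2, 2) would give the familiar
# [1, -2, 1] stencil; that helper lives in the enclosing package, so the
# coefficients are hard-coded here.
import numpy as np
from pyamg.gallery import stencil_grid

npoints, h = 5, 0.25
coeffs = np.array([1.0, -2.0, 1.0]) / h**2
mtx = stencil_grid(coeffs, (npoints,), format='lil')

# Dirichlet rows at both ends, exactly as in the function above.
mtx[0, :] = 0
mtx[0, 0] = 1.0
mtx[-1, :] = 0
mtx[-1, -1] = 1.0

print(mtx.tocsr().toarray())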
Example #6
# task1.3
from pyamg.gallery.diffusion import diffusion_stencil_2d
from pyamg.gallery import stencil_grid
sten = diffusion_stencil_2d(type='FD', \
       epsilon=0.001, theta=3.1416/3.0)
A = stencil_grid(sten, (100, 100), format='csr')

# task1.4
from pyamg import *
ml = smoothed_aggregation_solver(A)

# task1.5
from numpy import ones
b = ones((A.shape[0], 1))
res = []
x = ml.solve(b, tol=1e-8, \
    residuals=res)

from pylab import *
semilogy(res[1:])
xlabel('iteration')
ylabel('residual norm')
title('Residual History')
show()
Example #7
    mcoarse = 10  # Max coarse grid size
    prepost = ('gauss_seidel', {'sweep': 'symmetric',
                                'iterations': 1})  # pre/post smoother
    smooth = ('energy', {'maxiter': 9, 'degree': 3})  # Prolongation Smoother
    classic_theta = 0.0  # Classic Strength Measure
    #    Drop Tolerance
    evolution_theta = 4.0  # evolution Strength Measure
    #    Drop Tolerance

    for n in nlist:
        nx = n
        ny = n
        print "Running Grid = (%d x %d)" % (nx, ny)

        # Rotated Anisotropic Diffusion Operator
        stencil = diffusion_stencil_2d(type='FE', epsilon=epsilon, theta=theta)
        A = stencil_grid(stencil, (nx, ny), format='csr')

        # Random initial guess, zero RHS
        x0 = scipy.rand(A.shape[0])
        b = numpy.zeros((A.shape[0], ))

        # Classic SA strength measure
        ml = smoothed_aggregation_solver(A,
                                         max_coarse=mcoarse,
                                         coarse_solver='pinv2',
                                         presmoother=prepost,
                                         postsmoother=prepost,
                                         smooth=smooth,
                                         strength=('symmetric', {
                                             'theta': classic_theta
                                         }))
Example #8
def build_1D_fd(deriv, order, length, delta, lbc=None, rbc=None, limit_boundary=True):
    """ Builds the finite difference stencil matrix in 1D that can be kroncker producted to build higher dimensional operators.

    None in the BC slot leaves the purest form of the operator.

    """

    bulk_npoints = deriv + order - (1 if not deriv%2 else 0)
    bulk_center = int(math.floor(bulk_npoints/2))

    boundary_npoints = deriv + order

    stencil = fd_coeffs(deriv, (bulk_npoints, bulk_center))
    stencil[np.abs(stencil) < 1e-12] = 0.0
    L = stencil_grid(stencil, (length,), format='lil')

    if not limit_boundary:
        L /= (delta**deriv)
        return L.tocsr()

    # left side
    for i in range(bulk_center):
        boundary_center = i
        if i == 0:
            if lbc != 'dirichlet':
                warnings.warn('Only Dirichlet boundaries are supported in this matrix construction.')
            L[i,:] = 0.0
            L[0,0]=1.0
#           else: #lbc == 'neumann'
#               # Not sure that this is correct...neumann likely need to be patched after the time step...
#               L[i,:] = 0.0
#               coeffs = -fd_coeffs(1, (1+order,boundary_center))
#               coeffs /= coeffs[0]
#               coeffs[0] = 0.0
#               L[i,0:(1+order)] = coeffs
        else:
            L[i,:] = 0
            stencil = fd_coeffs(deriv, (boundary_npoints,boundary_center))
            stencil[np.abs(stencil) < 1e-12] = 0.0
            L[i,0:boundary_npoints] = stencil

    # right side
    print(boundary_npoints-bulk_center-1)

    for i in range(-1, -(boundary_npoints-bulk_center-deriv+1), -1):
        boundary_center = boundary_npoints + i
        idx = i
        print(i, boundary_center, idx)
        if idx == -1:
            if rbc != 'dirichlet':
                warnings.warn('Only Dirichlet boundaries are supported in this matrix construction.')
            L[idx,:] = 0.0
            L[-1,-1] = 1.0
#           else: #lbc == 'neumann'
#               # Not sure that this is correct...neumann likely need to be patched after the time step...
#               L[i,:] = 0.0
#               coeffs = -fd_coeffs(1, (1+order,boundary_center))
#               coeffs /= coeffs[0]
#               coeffs[0] = 0.0
#               L[i,0:(1+order)] = coeffs
        else:
            L[idx,:] = 0
            stencil = fd_coeffs(deriv, (boundary_npoints,boundary_center))
            stencil[np.abs(stencil)<1e-12] = 0.0
            L[idx,-boundary_npoints::] = stencil

    L /= (delta**deriv)

    return L.tocsr()
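# The docstring above mentions building higher-dimensional operators via
# Kronecker products; a minimal sketch of that lift.  'build_1D_fd' and
# 'fd_coeffs' belong to the enclosing module, so a plain [1, -2, 1]
# second-derivative matrix stands in for the 1D operator here.
import numpy as np
import scipy.sparse as sp
from pyamg.gallery import stencil_grid

n, h = 8, 0.1
Lx = stencil_grid(np.array([1.0, -2.0, 1.0]) / h**2, (n,), format='csr')
I = sp.identity(n, format='csr')

# 2D Laplacian on an n x n grid: one term differentiates along x, the other along y.
L2d = sp.kron(I, Lx) + sp.kron(Lx, I)
print(L2d.shape)  # (n*n, n*n)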
Example #9
def build_linear_interpolation_matrix_part(npoints):
    #same logic as in '_build_staggered_first_derivative_matrix_part'
    coeffs = np.array([0.5, 0.5])
    incorrect_mtx = stencil_grid(np.insert(coeffs,0,0), (npoints, ), format='lil')
    mtx = incorrect_mtx[0:-1,:]
    return mtx.tocsr()
Example #10
    def test_evolution_strength_of_connection(self):
        cases = []

        # Single near nullspace candidate
        stencil = [[0.0, -1.0, 0.0], [-0.001, 2.002, -0.001], [0.0, -1.0, 0.0]]
        A = 1.0j*stencil_grid(stencil, (4, 4), format='csr')
        B = 1.0j*np.ones((A.shape[0], 1))
        B[0] = 1.2 - 12.0j
        B[11] = -14.2
        cases.append({'A': A.copy(), 'B': B.copy(), 'epsilon': 4.0, 'k': 2,
                      'proj': 'l2'})

        # Multiple near nullspace candidate
        B = 1.0j*np.ones((A.shape[0], 2))
        B[0:-1:2, 0] = 0.0
        B[1:-1:2, 1] = 0.0
        B[-1, 0] = 0.0
        B[11, 1] = -14.2
        B[0, 0] = 1.2 - 12.0j
        cases.append({'A': A.copy(), 'B': B.copy(), 'epsilon': 4.0, 'k': 2,
                      'proj': 'l2'})
        Absr = A.tobsr(blocksize=(2, 2))
        cases.append({'A': Absr.copy(), 'B': B.copy(), 'epsilon': 4.0, 'k': 2,
                      'proj': 'l2'})

        for ca in cases:
            scipy.random.seed(0)  # make results deterministic
            result = evolution_soc(ca['A'], ca['B'], epsilon=ca['epsilon'],
                                   k=ca['k'], proj_type=ca['proj'],
                                   symmetrize_measure=False)
            scipy.random.seed(0)  # make results deterministic
            expected = reference_evolution_soc(ca['A'], ca['B'],
                                               epsilon=ca['epsilon'],
                                               k=ca['k'], proj_type=ca['proj'])
            assert_array_almost_equal(result.todense(), expected.todense())

        # Test Scale Invariance for a single candidate
        A = 1.0j*poisson((5, 5), format='csr')
        B = 1.0j*arange(1, A.shape[0]+1, dtype=float).reshape(-1, 1)
        scipy.random.seed(0)  # make results deterministic
        result_unscaled = evolution_soc(A, B, epsilon=4.0, k=2,
                                        proj_type="D_A",
                                        symmetrize_measure=False)
        # create scaled A
        D = spdiags([arange(A.shape[0], 2*A.shape[0], dtype=float)],
                    [0], A.shape[0], A.shape[0], format='csr')
        Dinv = spdiags([1.0/arange(A.shape[0], 2*A.shape[0], dtype=float)],
                       [0], A.shape[0], A.shape[0], format='csr')
        scipy.random.seed(0)  # make results deterministic
        result_scaled = evolution_soc(D*A*D, Dinv*B, epsilon=4.0, k=2,
                                      proj_type="D_A",
                                      symmetrize_measure=False)
        assert_array_almost_equal(result_scaled.todense(),
                                  result_unscaled.todense(), decimal=2)

        # Test that the l2 and D_A are the same for the 1 candidate case
        scipy.random.seed(0)  # make results deterministic
        resultDA = evolution_soc(D*A*D, Dinv*B, epsilon=4.0,
                                 k=2, proj_type="D_A",
                                 symmetrize_measure=False)
        scipy.random.seed(0)  # make results deterministic
        resultl2 = evolution_soc(D*A*D, Dinv*B, epsilon=4.0,
                                 k=2, proj_type="l2",
                                 symmetrize_measure=False)
        assert_array_almost_equal(resultDA.todense(), resultl2.todense())

        # Test Scale Invariance for multiple candidates
        (A, B) = linear_elasticity((5, 5), format='bsr')
        A = 1.0j*A
        B = 1.0j*B
        scipy.random.seed(0)  # make results deterministic
        result_unscaled = evolution_soc(A, B, epsilon=4.0, k=2,
                                        proj_type="D_A",
                                        symmetrize_measure=False)
        # create scaled A
        D = spdiags([arange(A.shape[0], 2*A.shape[0], dtype=float)],
                    [0], A.shape[0], A.shape[0], format='csr')
        Dinv = spdiags([1.0/arange(A.shape[0], 2*A.shape[0], dtype=float)],
                       [0], A.shape[0], A.shape[0], format='csr')
        scipy.random.seed(0)  # make results deterministic
        result_scaled = evolution_soc((D*A*D).tobsr(blocksize=(2, 2)), Dinv*B,
                                      epsilon=4.0, k=2, proj_type="D_A",
                                      symmetrize_measure=False)
        assert_array_almost_equal(result_scaled.todense(),
                                  result_unscaled.todense(), decimal=2)
Example #11
# task1.3
from pyamg.gallery.diffusion import diffusion_stencil_2d
from pyamg.gallery import stencil_grid
sten = diffusion_stencil_2d(type='FD', \
       epsilon=0.001, theta=3.1416/3.0)
A = stencil_grid(sten, (100,100), format='csr')

# task1.4
from pyamg import *
ml = smoothed_aggregation_solver(A)

# task1.5
from numpy import ones
b = ones((A.shape[0],1))
res = []
x = ml.solve(b, tol=1e-8, \
    residuals=res)

from pylab import *
semilogy(res[1:])
xlabel('iteration')
ylabel('residual norm')
title('Residual History')
show()
Example #12
        if iter == maxiter:
            return (postprocess(x), iter)

if __name__ == '__main__':
    # from numpy import diag
    # A = random((4,4))
    # A = A*A.transpose() + diag([10,10,10,10])
    # b = random((4,1))
    # x0 = random((4,1))

    from pyamg.gallery import stencil_grid
    from numpy.random import random
    import time
    from pyamg.krylov._gmres import gmres

    A = stencil_grid([[0, -1, 0], [-1, 4, -1], [0, -1, 0]], (100, 100),
                     dtype=float, format='csr')
    b = random((A.shape[0],))
    x0 = random((A.shape[0],))

    print('\n\nTesting CR with %d x %d 2D Laplace Matrix' %
          (A.shape[0], A.shape[0]))
    t1 = time.time()
    r = []
    (x, flag) = cr(A, b, x0, tol=1e-8, maxiter=100, residuals=r)
    t2 = time.time()
    print('%s took %0.3f ms' % ('cr', (t2-t1)*1000.0))
    print('norm = %g' % (norm(b - A*x)))
    print('info flag = %d' % (flag))

    t1 = time.time()
    r2 = []
Example #13
import numpy
import scipy

from pyamg.gallery import stencil_grid
from pyamg.gallery.diffusion import diffusion_stencil_2d
from pyamg.strength import classical_strength_of_connection
from pyamg.classical.classical import ruge_stuben_solver

from convergence_tools import print_cycle_history

if __name__ == '__main__':
    n = 100
    nx = n
    ny = n

    # Rotated Anisotropic Diffusion
    stencil = diffusion_stencil_2d(type='FE',epsilon=0.001,theta=scipy.pi/3)

    A = stencil_grid(stencil, (nx,ny), format='csr')
    S = classical_strength_of_connection(A, 0.0)

    numpy.random.seed(625)
    x = scipy.rand(A.shape[0])
    b = A*scipy.rand(A.shape[0])

    ml = ruge_stuben_solver(A, max_coarse=10)

    resvec = []
    x = ml.solve(b, x0=x, maxiter=20, tol=1e-14, residuals=resvec)

    print_cycle_history(resvec, ml, verbose=True, plotting=True)
Example #14
def _build_derivative_matrix_part(npoints,
                                  derivative,
                                  order_accuracy,
                                  h=1.0,
                                  lbc='d',
                                  rbc='d',
                                  use_shifted_differences=False):

    if order_accuracy % 2:
        raise ValueError('Only even accuracy orders supported.')

    centered_coeffs = centered_difference(derivative,
                                          order_accuracy) / (h**derivative)

    mtx = stencil_grid(centered_coeffs, (npoints, ), format='lil')

    max_shift = order_accuracy // 2

    if use_shifted_differences:
        # Left side
        odd_even_offset = 1 - derivative % 2
        for i in range(0, max_shift):
            coeffs = shifted_difference(derivative, order_accuracy,
                                        -(max_shift + odd_even_offset) + i)
            mtx[i, 0:len(coeffs)] = coeffs / (h**derivative)

        # Right side
        for i in range(-1, -max_shift - 1, -1):
            coeffs = shifted_difference(derivative, order_accuracy,
                                        max_shift + i + odd_even_offset)
            mtx[i, slice(-1, -(len(coeffs) +
                               1), -1)] = coeffs[::-1] / (h**derivative)

    if 'd' in lbc:  #dirichlet
        mtx[0, :] = 0
        mtx[0, 0] = 1.0
    elif 'n' in lbc:  #neumann
        mtx[0, :] = 0
        coeffs = shifted_difference(1, order_accuracy, -max_shift) / h
        coeffs /= (-1 * coeffs[0])
        coeffs[0] = 0.0
        mtx[0, 0:len(coeffs)] = coeffs
    elif type(lbc) is tuple and 'g' in lbc[0]:  #ghost
        n_ghost_points = int(lbc[1])
        mtx[0:n_ghost_points, :] = 0
        for i in range(n_ghost_points):
            mtx[i, i] = 1.0

    if 'd' in rbc:
        mtx[-1, :] = 0
        mtx[-1, -1] = 1.0
    elif 'n' in rbc:
        mtx[-1, :] = 0
        coeffs = shifted_difference(1, order_accuracy, max_shift) / h
        coeffs /= (-1 * coeffs[-1])
        coeffs[-1] = 0.0
        mtx[-1, slice(-1, -(len(coeffs) + 1), -1)] = coeffs[::-1]
    elif type(rbc) is tuple and 'g' in rbc[0]:
        n_ghost_points = int(rbc[1])
        mtx[slice(-1, -(n_ghost_points + 1), -1), :] = 0
        for i in range(n_ghost_points):
            mtx[-i - 1, -i - 1] = 1.0

    return mtx.tocsr()
Example #15
from pyamg.gallery.diffusion import diffusion_stencil_2d
from pyamg.gallery import stencil_grid
from numpy import set_printoptions

set_printoptions(precision=2)
sten = diffusion_stencil_2d(type="FD", epsilon=0.001, theta=3.1416 / 3.0)
A = stencil_grid(sten, (100, 100), format="csr")
# print the matrix stencil
print(A[5050, :].data)
print(sten)
Example #16
    def test_evolution_strength_of_connection(self):
        cases = []

        # Single near nullspace candidate
        stencil = [[0.0, -1.0, 0.0], [-0.001, 2.002, -0.001], [0.0, -1.0, 0.0]]
        A = 1.0j * stencil_grid(stencil, (4, 4), format='csr')
        B = 1.0j * np.ones((A.shape[0], 1))
        B[0] = 1.2 - 12.0j
        B[11] = -14.2
        cases.append({
            'A': A.copy(),
            'B': B.copy(),
            'epsilon': 4.0,
            'k': 2,
            'proj': 'l2'
        })

        # Multiple near nullspace candidate
        B = 1.0j * np.ones((A.shape[0], 2))
        B[0:-1:2, 0] = 0.0
        B[1:-1:2, 1] = 0.0
        B[-1, 0] = 0.0
        B[11, 1] = -14.2
        B[0, 0] = 1.2 - 12.0j
        cases.append({
            'A': A.copy(),
            'B': B.copy(),
            'epsilon': 4.0,
            'k': 2,
            'proj': 'l2'
        })
        Absr = A.tobsr(blocksize=(2, 2))
        cases.append({
            'A': Absr.copy(),
            'B': B.copy(),
            'epsilon': 4.0,
            'k': 2,
            'proj': 'l2'
        })

        for ca in cases:
            scipy.random.seed(0)  # make results deterministic
            result = evolution_soc(ca['A'],
                                   ca['B'],
                                   epsilon=ca['epsilon'],
                                   k=ca['k'],
                                   proj_type=ca['proj'],
                                   symmetrize_measure=False)
            scipy.random.seed(0)  # make results deterministic
            expected = reference_evolution_soc(ca['A'],
                                               ca['B'],
                                               epsilon=ca['epsilon'],
                                               k=ca['k'],
                                               proj_type=ca['proj'])
            assert_array_almost_equal(result.todense(), expected.todense())

            scipy.random.seed(0)  # make results deterministic
            result = evolution_soc(ca['A'],
                                   ca['B'],
                                   epsilon=ca['epsilon'],
                                   k=ca['k'],
                                   proj_type=ca['proj'],
                                   symmetrize_measure=False,
                                   weighting='local')
            scipy.random.seed(0)  # make results deterministic
            expected = reference_evolution_soc(ca['A'],
                                               ca['B'],
                                               epsilon=ca['epsilon'],
                                               k=ca['k'],
                                               proj_type=ca['proj'],
                                               weighting='local')
            assert_array_almost_equal(result.todense(), expected.todense())

        # Test Scale Invariance for a single candidate
        A = 1.0j * poisson((5, 5), format='csr')
        B = 1.0j * arange(1, A.shape[0] + 1, dtype=float).reshape(-1, 1)
        scipy.random.seed(0)  # make results deterministic
        result_unscaled = evolution_soc(A,
                                        B,
                                        epsilon=4.0,
                                        k=2,
                                        proj_type="D_A",
                                        symmetrize_measure=False)
        # create scaled A
        D = spdiags([arange(A.shape[0], 2 * A.shape[0], dtype=float)], [0],
                    A.shape[0],
                    A.shape[0],
                    format='csr')
        Dinv = spdiags([1.0 / arange(A.shape[0], 2 * A.shape[0], dtype=float)],
                       [0],
                       A.shape[0],
                       A.shape[0],
                       format='csr')
        scipy.random.seed(0)  # make results deterministic
        result_scaled = evolution_soc(D * A * D,
                                      Dinv * B,
                                      epsilon=4.0,
                                      k=2,
                                      proj_type="D_A",
                                      symmetrize_measure=False)
        assert_array_almost_equal(result_scaled.todense(),
                                  result_unscaled.todense(),
                                  decimal=2)

        # Test that the l2 and D_A are the same for the 1 candidate case
        scipy.random.seed(0)  # make results deterministic
        resultDA = evolution_soc(D * A * D,
                                 Dinv * B,
                                 epsilon=4.0,
                                 k=2,
                                 proj_type="D_A",
                                 symmetrize_measure=False)
        scipy.random.seed(0)  # make results deterministic
        resultl2 = evolution_soc(D * A * D,
                                 Dinv * B,
                                 epsilon=4.0,
                                 k=2,
                                 proj_type="l2",
                                 symmetrize_measure=False)
        assert_array_almost_equal(resultDA.todense(), resultl2.todense())

        # Test Scale Invariance for multiple candidates
        (A, B) = linear_elasticity((5, 5), format='bsr')
        A = 1.0j * A
        B = 1.0j * B
        scipy.random.seed(0)  # make results deterministic
        result_unscaled = evolution_soc(A,
                                        B,
                                        epsilon=4.0,
                                        k=2,
                                        proj_type="D_A",
                                        symmetrize_measure=False)
        # create scaled A
        D = spdiags([arange(A.shape[0], 2 * A.shape[0], dtype=float)], [0],
                    A.shape[0],
                    A.shape[0],
                    format='csr')
        Dinv = spdiags([1.0 / arange(A.shape[0], 2 * A.shape[0], dtype=float)],
                       [0],
                       A.shape[0],
                       A.shape[0],
                       format='csr')
        scipy.random.seed(0)  # make results deterministic
        result_scaled = evolution_soc((D * A * D).tobsr(blocksize=(2, 2)),
                                      Dinv * B,
                                      epsilon=4.0,
                                      k=2,
                                      proj_type="D_A",
                                      symmetrize_measure=False)
        assert_array_almost_equal(result_scaled.todense(),
                                  result_unscaled.todense(),
                                  decimal=2)
Example #17
                              CSR matrix for a nonsymmetric recirculating flow problem  

Many more solver parameters may be specified than outlined in the below
examples.  Only the most basic are shown.

Run with 
    >>> python demo.py
and examine the on-screen output and file output.
'''
from scipy import pi
from pyamg import gallery
from pyamg.gallery import diffusion
stencil = diffusion.diffusion_stencil_2d(type='FE',
                                         epsilon=0.001,
                                         theta=2 * pi / 16.0)
A = gallery.stencil_grid(stencil, (50, 50), format='csr')

from pyamg import gallery
from solver_diagnostics import solver_diagnostics
from scipy import pi
from pyamg.gallery import diffusion

choice = int(input('\nThere are four different test problems.  Enter \n' +
                   '1:  Isotropic diffusion example\n' +
                   '2:  Anisotropic diffusion example\n' +
                   '3:  Elasticity example\n' +
                   '4:  Nonsymmetric flow example\n\n '))

if choice == 1:
    ##
    # Try a basic isotropic diffusion problem from finite differences
Example #18
        if iter == maxiter:
            return (postprocess(x), iter)

if __name__ == '__main__':
    # from numpy import diag
    # A = random((4,4))
    # A = A*A.transpose() + diag([10,10,10,10])
    # b = random((4,1))
    # x0 = random((4,1))

    from pyamg.gallery import stencil_grid
    from numpy.random import random
    import time
    from pyamg.krylov._gmres import gmres

    A = stencil_grid([[0, -1, 0], [-1, 4, -1], [0, -1, 0]], (100, 100),
                     dtype=float, format='csr')
    b = random((A.shape[0],))
    x0 = random((A.shape[0],))

    print('\n\nTesting CR with %d x %d 2D Laplace Matrix' %
          (A.shape[0], A.shape[0]))
    t1 = time.time()
    r = []
    (x, flag) = cr(A, b, x0, tol=1e-8, maxiter=100, residuals=r)
    t2 = time.time()
    print('%s took %0.3f ms' % ('cr', (t2-t1)*1000.0))
    print('norm = %g' % (norm(b - A*x)))
    print('info flag = %d' % (flag))

    t1 = time.time()
    r2 = []
Example #19
# Step 1: import scipy and pyamg packages
#------------------------------------------------------------------
from numpy import meshgrid, linspace
from scipy import rand, pi
from scipy.linalg import norm
from pyamg import *
from pyamg.gallery import stencil_grid
from pyamg.gallery.diffusion import diffusion_stencil_2d

#------------------------------------------------------------------
# Step 2: setup up the system using pyamg.gallery
#------------------------------------------------------------------
n=200
X,Y = meshgrid(linspace(0,1,n),linspace(0,1,n))
stencil = diffusion_stencil_2d(type='FE',epsilon=0.001,theta=pi/3)
A = stencil_grid(stencil, (n,n), format='csr')
b = rand(A.shape[0])                     # pick a random right hand side

#------------------------------------------------------------------
# Step 3: setup of the multigrid hierarchy
#------------------------------------------------------------------
ml = smoothed_aggregation_solver(A)      # construct the multigrid hierarchy

#------------------------------------------------------------------
# Step 4: solve the system
#------------------------------------------------------------------
res1 = []
x = ml.solve(b, tol=1e-12, residuals=res1)# solve Ax=b to a tolerance of 1e-12

#------------------------------------------------------------------
# Step 5: print details
Example #20
# Step 1: import scipy and pyamg packages
#------------------------------------------------------------------
from numpy import meshgrid, linspace
from scipy import rand, pi
from scipy.linalg import norm
from pyamg import *
from pyamg.gallery import stencil_grid
from pyamg.gallery.diffusion import diffusion_stencil_2d

#------------------------------------------------------------------
# Step 2: setup up the system using pyamg.gallery
#------------------------------------------------------------------
n=200
X,Y = meshgrid(linspace(0,1,n),linspace(0,1,n))
stencil = diffusion_stencil_2d(type='FE',epsilon=0.001,theta=pi/3)
A = stencil_grid(stencil, (n,n), format='csr')
b = rand(A.shape[0])                     # pick a random right hand side

#------------------------------------------------------------------
# Step 3: setup of the multigrid hierarchy
#------------------------------------------------------------------
ml = smoothed_aggregation_solver(A)      # construct the multigrid hierarchy

#------------------------------------------------------------------
# Step 4: solve the system
#------------------------------------------------------------------
res1 = []
x = ml.solve(b, tol=1e-12, residuals=res1)# solve Ax=b to a tolerance of 1e-12

#------------------------------------------------------------------
# Step 5: print details
Example #21
    mcoarse = 10  # Max coarse grid size
    prepost = ("gauss_seidel", {"sweep": "symmetric", "iterations": 1})  # pre/post smoother
    smooth = ("energy", {"maxiter": 9, "degree": 3})  # Prolongation Smoother
    classic_theta = 0.0  # Classic Strength Measure
    #    Drop Tolerance
    evolution_theta = 4.0  # evolution Strength Measure
    #    Drop Tolerance

    for n in nlist:
        nx = n
        ny = n
        print "Running Grid = (%d x %d)" % (nx, ny)

        # Rotated Anisotropic Diffusion Operator
        stencil = diffusion_stencil_2d(type="FE", epsilon=epsilon, theta=theta)
        A = stencil_grid(stencil, (nx, ny), format="csr")

        # Random initial guess, zero RHS
        x0 = scipy.rand(A.shape[0])
        b = numpy.zeros((A.shape[0],))

        # Classic SA strength measure
        ml = smoothed_aggregation_solver(
            A,
            max_coarse=mcoarse,
            coarse_solver="pinv2",
            presmoother=prepost,
            postsmoother=prepost,
            smooth=smooth,
            strength=("symmetric", {"theta": classic_theta}),
        )