Example #1
def build_poisson(n, epsilon, theta, randomize, symmetric_scale=False):
    data = {}
    h = 1. / float(n + 1)

    print "Assembling diffusion using Q1 on a regular mesh with epsilon = " + \
          str(epsilon) + " and theta = " + str(theta) + " ..."
    stencil = diffusion_stencil_2d(type='FE', epsilon=epsilon, theta=theta)
    A = stencil_grid(stencil, (n, n), format='csr')
    X, Y = meshgrid(linspace(h, 1.0 - h, n), linspace(h, 1.0 - h, n))
    data['X'] = X
    data['Y'] = Y

    if randomize:
        print "Random diagonal scaling..."
        D = my_rand(A.shape[0], 1)
        D[D < 0.] -= 1e-3
        D[D >= 0.] += 1e-3
        data['D'] = D
        D_inv = 1. / D
        data['D_inv'] = D_inv
        A = scale_rows(A, D)
        A = scale_columns(A, D)

    if symmetric_scale:
        symmetric_rescaling(A, copy=False)

    print "Ratio of largest to smallest (in magnitude) diagonal element in A: %1.3e"% \
          (abs(A.diagonal()).max() / abs(A.diagonal()).min())
    data['A'] = A
    print('Generate initial guess (which is random)...')
    data['x0'] = my_rand(data['A'].shape[0], 1)
    print('Generate rhs (which is zero)...')
    data['b'] = numpy.zeros((data['A'].shape[0], 1))

    return data
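
A minimal usage sketch for build_poisson (not part of the original snippet). It assumes the helpers above come from pyamg (diffusion_stencil_2d, stencil_grid, scale_rows, scale_columns, symmetric_rescaling) and that my_rand is a local random-vector helper; the smoothed_aggregation_solver call is just one plausible way to consume the returned dictionary.

import numpy as np
import pyamg

# Build a rotated anisotropic diffusion problem on a 50x50 grid and solve it
# with a default smoothed aggregation hierarchy (illustrative parameters only).
data = build_poisson(n=50, epsilon=0.1, theta=np.pi / 8.0,
                     randomize=True, symmetric_scale=False)
ml = pyamg.smoothed_aggregation_solver(data['A'])
residuals = []
x = ml.solve(np.ravel(data['b']), x0=np.ravel(data['x0']),
             tol=1e-8, residuals=residuals)
print("Residual history length:", len(residuals), " final:", residuals[-1])
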
Example #2
# ------------------------------------------------------------------------- #
# ------------------------------------------------------------------------- #

import math

import numpy as np
from scipy.sparse import csr_matrix

from pyamg.gallery.diffusion import diffusion_stencil_2d
from pyamg.gallery.stencil import stencil_grid
from pyamg.strength import (evolution_strength_of_connection,
                            symmetric_strength_of_connection)
from pyamg.util.utils import symmetric_rescaling
from pyamg.aggregation.aggregate import standard_aggregation
from pyamg.classical.cr import CR

SOC = 'evol'
SOC_drop = 4.0
SA = 1  # 1: use SA-type coarsening; 0: use CR
pow_G = 1
epsilon = 0.1
theta = 3.0 * math.pi / 16.0
N = 40
n = N * N
grid_dims = [N, N]
stencil = diffusion_stencil_2d(epsilon, theta)
A = stencil_grid(stencil, grid_dims, format='csr')
[d, d, A] = symmetric_rescaling(A)
A = csr_matrix(A)
B = np.kron(np.ones((A.shape[0] // 1, 1), dtype=A.dtype), np.eye(1))  # constant near-nullspace candidate
tol = 1e-12  # Drop tolerance for singular values

if SA:
    if SOC == 'evol':
        C = evolution_strength_of_connection(A, B, epsilon=SOC_drop, k=2)
    else:
        SOC = 'symm'
        C = symmetric_strength_of_connection(A, theta=SOC_drop)

    AggOp, Cpts = standard_aggregation(C)
else:
    splitting = CR(A, method='habituated')
    Cpts = [i for i in range(0, n) if splitting[i] == 1]
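
A hedged follow-up sketch, not in the original excerpt: in the SA branch, AggOp and the candidate matrix B are typically turned into a tentative prolongator with pyamg's fit_candidates, after which a Galerkin triple product gives the coarse operator. The names T, B_coarse, and A_coarse are illustrative.

from pyamg.aggregation.tentative import fit_candidates

if SA:
    # Fit the near-nullspace candidates over each aggregate: T is the tentative
    # prolongator, B_coarse the candidates restricted to the coarse grid.
    T, B_coarse = fit_candidates(AggOp, B)
    A_coarse = T.T * A * T  # Galerkin coarse-grid operator (A is symmetric here)
    print("Coarse grid size:", A_coarse.shape[0], " nnz:", A_coarse.nnz)
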
Example #3
# ---------------------------------

rand_guess = True
zero_rhs = True
theta = np.pi / 3.0  # Must be in (0, pi/2)
num_rows = 150
num_cols = 150
sigt = 1.0
rn_residuals = []

L = lumped_transport(num_rows=num_rows,
                     num_cols=num_cols,
                     theta=theta,
                     sigt=sigt)
A = csr_matrix(L.T * L)
[d, d, A] = symmetric_rescaling(A)  # This is not symmetric...
vec_size = A.shape[0]

# Zero right hand side or sin(pi x)
if zero_rhs:
    b = np.zeros((vec_size, 1))
    # If zero rhs and zero initial guess, warn that convergence is trivial
    if not rand_guess:
        print("Zero rhs and zero initial guess converges trivially.")

# Random vs. zero initial guess
if rand_guess:
    x0 = np.random.rand(vec_size, 1)
else:
    x0 = np.zeros((vec_size, 1))
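
A possible continuation, sketched as an assumption rather than the original driver: solve the rescaled normal-equations operator with a default smoothed aggregation hierarchy, collecting the residual history in rn_residuals (declared above but unused in this excerpt).

import pyamg

ml = pyamg.smoothed_aggregation_solver(A)
x = ml.solve(np.ravel(b), x0=np.ravel(x0), tol=1e-10, maxiter=100,
             residuals=rn_residuals)
print("Number of V-cycles:", len(rn_residuals) - 1)
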
Example #4
def extend_hierarchy(levels, strength, aggregate, smooth, improve_candidates,
                     diagonal_dominance=False, keep=True, test_ind=0):
    """Service routine to implement the strength of connection, aggregation,
    tentative prolongation construction, and prolongation smoothing.  Called by
    smoothed_aggregation_solver.
    """

    def unpack_arg(v):
        if isinstance(v, tuple):
            return v[0], v[1]
        else:
            return v, {}

    A = levels[-1].A
    B = levels[-1].B
    if A.symmetry == "nonsymmetric":
        AH = A.H.asformat(A.format)
        BH = levels[-1].BH

    # Improve near nullspace candidates by relaxing on A B = 0
    fn, kwargs = unpack_arg(improve_candidates[len(levels)-1])
    if fn is not None:
        b = np.zeros((A.shape[0], 1), dtype=A.dtype)
        B = relaxation_as_linear_operator((fn, kwargs), A, b) * B
        levels[-1].B = B
        if A.symmetry == "nonsymmetric":
            BH = relaxation_as_linear_operator((fn, kwargs), AH, b) * BH
            levels[-1].BH = BH

    # Compute the strength-of-connection matrix C, where larger
    # C[i, j] denote stronger couplings between i and j.
    fn, kwargs = unpack_arg(strength[len(levels)-1])
    if fn == 'symmetric':
        C = symmetric_strength_of_connection(A, **kwargs)
    elif fn == 'classical':
        C = classical_strength_of_connection(A, **kwargs)
    elif fn == 'distance':
        C = distance_strength_of_connection(A, **kwargs)
    elif (fn == 'ode') or (fn == 'evolution'):
        if 'B' in kwargs:
            C = evolution_strength_of_connection(A, **kwargs)
        else:
            C = evolution_strength_of_connection(A, B, **kwargs)
    elif fn == 'energy_based':
        C = energy_based_strength_of_connection(A, **kwargs)
    elif fn == 'predefined':
        C = kwargs['C'].tocsr()
    elif fn == 'algebraic_distance':
        C = algebraic_distance(A, **kwargs)
    elif fn is None:
        C = A.tocsr()
    else:
        raise ValueError('unrecognized strength of connection method: %s' %
                         str(fn))

    # Avoid coarsening diagonally dominant rows
    flag, kwargs = unpack_arg(diagonal_dominance)
    if flag:
        C = eliminate_diag_dom_nodes(A, C, **kwargs)

    # Compute the aggregation matrix AggOp (i.e., the nodal coarsening of A).
    # AggOp is a boolean matrix, where the sparsity pattern for the k-th column
    # denotes the fine-grid nodes agglomerated into k-th coarse-grid node.
    fn, kwargs = unpack_arg(aggregate[len(levels)-1])
    if fn == 'standard':
        AggOp, Cnodes = standard_aggregation(C, **kwargs)
    elif fn == 'naive':
        AggOp, Cnodes = naive_aggregation(C, **kwargs)
    elif fn == 'lloyd':
        AggOp, Cnodes = lloyd_aggregation(C, **kwargs)
    elif fn == 'pairwise':
        AggOp, Cnodes = pairwise_aggregation(A, B, **kwargs)
    elif fn == 'predefined':
        AggOp = kwargs['AggOp'].tocsr()
        Cnodes = kwargs['Cnodes']
    else:
        raise ValueError('unrecognized aggregation method %s' % str(fn))

# ----------------------------------------------------------------------------- #
# ------------------- New ideal interpolation constructed --------------------  #
# ----------------------------------------------------------------------------- #

    # pdb.set_trace()

    # splitting = CR(A)
    # Cpts = [i for i in range(0,AggOp.shape[0]) if splitting[i]==1]

    # Compute prolongation operator.
    if test_ind == 0:
        T = new_ideal_interpolation(A=A, AggOp=AggOp, Cnodes=Cnodes,
                                    B=B[:, 0:blocksize(A)], SOC=C)
    else:
        T = py_ideal_interpolation(A=A, AggOp=AggOp, Cnodes=Cnodes,
                                   B=B[:, 0:blocksize(A)], SOC=C)

    print "\nSize of sparsity pattern - ", T.nnz

    # Smooth the tentative prolongator, so that its accuracy is greatly
    # improved for algebraically smooth error.
    # fn, kwargs = unpack_arg(smooth[len(levels)-1])
    # if fn == 'jacobi':
    #     P = jacobi_prolongation_smoother(A, T, C, B, **kwargs)
    # elif fn == 'richardson':
    #     P = richardson_prolongation_smoother(A, T, **kwargs)
    # elif fn == 'energy':
    #     P = energy_prolongation_smoother(A, T, C, B, None, (False, {}),
    #                                      **kwargs)
    # elif fn is None:
    #     P = T
    # else:
    #     raise ValueError('unrecognized prolongation smoother method %s' %
    #                      str(fn))
    P = T
  
# ----------------------------------------------------------------------------- #
# ----------------------------------------------------------------------------- #

    # Compute the restriction matrix R, which interpolates from the fine-grid
    # to the coarse-grid.  If A is nonsymmetric, then R must be constructed
    # based on A.H.  Otherwise R = P.H or P.T.
    symmetry = A.symmetry
    if symmetry == 'hermitian':
        # symmetrically scale out the diagonal, include scaling in P, R
        A = P.H * A * P
        [dum, Dinv, dum] = symmetric_rescaling(A, copy=False)
        P = bsr_matrix(P * diags(Dinv, offsets=0, format='csr'),
                       blocksize=A.blocksize)
        del dum
        R = P.H
    elif symmetry == 'symmetric':
        # symmetrically scale out the diagonal, include scaling in P, R
        A = P.T * A * P
        [dum, Dinv, dum] = symmetric_rescaling(A, copy=False)
        P = bsr_matrix(P * diags(Dinv, offsets=0, format='csr'),
                       blocksize=A.blocksize)
        del dum
        R = P.T
    elif symmetry == 'nonsymmetric':
        raise TypeError('New ideal interpolation not implemented for non-symmetric matrix.')

    if keep:
        levels[-1].C = C                        # strength of connection matrix
        levels[-1].AggOp = AggOp                # aggregation operator
        levels[-1].Fpts = [i for i in range(0,AggOp.shape[0]) if i not in Cnodes]

    levels[-1].P = P                            # smoothed prolongator
    levels[-1].R = R                            # restriction operator
    levels[-1].Cpts = Cnodes                    # Cpts (i.e., rootnodes)

    levels.append(multilevel_solver.level())

    A.symmetry = symmetry
    levels[-1].A = A
    levels[-1].B = R*B                     # right near nullspace candidates

    test = A.tocsr()
    print("\nSize of coarse operator - ", test.nnz)

    if A.symmetry == "nonsymmetric":
        levels[-1].BH = BH                      # left near nullspace candidates
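
For context, a sketch of the loop that typically drives a routine like extend_hierarchy, mirroring the structure of pyamg's smoothed_aggregation_solver. The wrapper name build_hierarchy and the parameter choices below are illustrative assumptions, not taken from the original.

from pyamg.multilevel import multilevel_solver


def build_hierarchy(A, B, max_levels=10, max_coarse=50):
    # extend_hierarchy reads A.symmetry and indexes the per-level parameter
    # lists by the current hierarchy depth, so provide one entry per level.
    A.symmetry = 'symmetric'
    strength = [('symmetric', {})] * max_levels
    aggregate = [('standard', {})] * max_levels
    smooth = [None] * max_levels  # prolongation smoothing is bypassed above (P = T)
    improve_candidates = [('gauss_seidel', {'sweep': 'symmetric'})] * max_levels

    # Seed the hierarchy with the finest level, then append coarser levels
    # until the coarsest operator is small enough.
    levels = [multilevel_solver.level()]
    levels[0].A = A
    levels[0].B = B
    while len(levels) < max_levels and levels[-1].A.shape[0] > max_coarse:
        extend_hierarchy(levels, strength, aggregate, smooth,
                         improve_candidates, keep=True)
    return multilevel_solver(levels)

# Hypothetical invocation with a constant near-nullspace candidate:
# ml = build_hierarchy(A, np.ones((A.shape[0], 1), dtype=A.dtype))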