Example #1
    def test_solve_large_2d_using_c_ext_with_jacobi(self):
        """Standard 2d laplacian"""

        n = 20
        m = 10

        A = Sparse(m*n, m*n)

        for i in num.arange(0, n):
            for j in num.arange(0, m):
                I = j+m*i
                A[I, I] = 4.0
                if i > 0:
                    A[I, I-m] = -1.0
                if i < n-1:
                    A[I, I+m] = -1.0
                if j > 0:
                    A[I, I-1] = -1.0
                if j < m-1:
                    A[I, I+1] = -1.0

        xe = num.ones((n*m,), num.float)
        A = Sparse_CSR(A)
        b = A*xe
        x = conjugate_gradient(
            A, b, b, iprint=1, use_c_cg=True, precon='Jacobi')

        assert num.allclose(x, xe)
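The nested loop in this and the following 2D examples assembles the standard five-point finite-difference Laplacian on an n-by-m grid, with unknowns numbered I = j + m*i. Reading the assembly code, row I of A encodes

\[
(Au)_I = 4\,u_{i,j} - u_{i-1,j} - u_{i+1,j} - u_{i,j-1} - u_{i,j+1},
\qquad I = j + m\,i,
\]

with off-grid neighbour terms simply omitted in the boundary rows. Since xe is a vector of ones, the right-hand side b = A*xe is 0 at interior unknowns, 1 along the edges and 2 at the corners, and the solver is expected to recover the all-ones vector.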
Example #2
    def test_solve_large_2d_with_default_guess_using_c_ext(self):
        """Standard 2d laplacian using default first guess"""

        n = 20
        m = 10

        A = Sparse(m*n, m*n)

        for i in num.arange(0, n):
            for j in num.arange(0, m):
                I = j+m*i
                A[I, I] = 4.0
                if i > 0:
                    A[I, I-m] = -1.0
                if i < n-1:
                    A[I, I+m] = -1.0
                if j > 0:
                    A[I, I-1] = -1.0
                if j < m-1:
                    A[I, I+1] = -1.0

        xe = num.ones((n*m,), num.float)
        A = Sparse_CSR(A)
        b = A*xe
        x = conjugate_gradient(A, b, use_c_cg=True)

        assert num.allclose(x, xe)
Example #3
    def test_solve_large_2d_csr_matrix_using_c_ext(self):
        """Standard 2d laplacian with csr format
        """

        n = 100
        m = 100

        A = Sparse(m*n, m*n)

        for i in num.arange(0, n):
            for j in num.arange(0, m):
                I = j+m*i
                A[I, I] = 4.0
                if i > 0:
                    A[I, I-m] = -1.0
                if i < n-1:
                    A[I, I+m] = -1.0
                if j > 0:
                    A[I, I-1] = -1.0
                if j < m-1:
                    A[I, I+1] = -1.0

        xe = num.ones((n*m,), num.float)

        # Convert to csr format
        # print 'start convert'
        A = Sparse_CSR(A)
        # print 'finish convert'
        b = A*xe
        x = conjugate_gradient(A, b, b, iprint=20, use_c_cg=True)

        assert num.allclose(x, xe)
Example #4
    def test_sparse_solve_using_c_ext_with_jacobi(self):
        """Solve Small Sparse Matrix"""

        A = [[2.0, -1.0, 0.0, 0.0], [-1.0, 2.0, -1.0, 0.0],
             [0.0, -1.0, 2.0, -1.0], [0.0, 0.0, -1.0, 2.0]]

        A = Sparse_CSR(Sparse(A))

        xe = [0.0, 1.0, 2.0, 3.0]
        b = A * xe
        x = [0.0, 0.0, 0.0, 0.0]
        x = conjugate_gradient(A, b, x, use_c_cg=True, precon='Jacobi')

        assert num.allclose(x, xe)
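For reference, the right-hand side in this small example can be checked by hand:

\[
b = A x_e =
\begin{pmatrix} 2 & -1 & 0 & 0 \\ -1 & 2 & -1 & 0 \\ 0 & -1 & 2 & -1 \\ 0 & 0 & -1 & 2 \end{pmatrix}
\begin{pmatrix} 0 \\ 1 \\ 2 \\ 3 \end{pmatrix}
=
\begin{pmatrix} -1 \\ 0 \\ 0 \\ 4 \end{pmatrix},
\]

so the solver is asked to recover xe = (0, 1, 2, 3) from b and the zero initial guess. The same matrix is reused in the next two examples; with the two-column xe used there, each column of b is this same vector.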
Example #5
    def test_sparse_solve_matrix_using_c_ext(self):
        """Solve Small Sparse Matrix"""

        A = [[2.0, -1.0, 0.0, 0.0], [-1.0, 2.0, -1.0, 0.0],
             [0.0, -1.0, 2.0, -1.0], [0.0, 0.0, -1.0, 2.0]]

        A = Sparse_CSR(Sparse(A))

        xe = [[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]
        b = A * xe
        x = [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]
        x = conjugate_gradient(A, b, x, iprint=0, use_c_cg=True)

        assert num.allclose(x, xe)
Example #6
    def test_sparse_solve_matrix(self):
        """Solve Small Sparse Matrix"""

        A = [[2.0, -1.0, 0.0, 0.0], [-1.0, 2.0, -1.0, 0.0],
             [0.0, -1.0, 2.0, -1.0], [0.0, 0.0, -1.0, 2.0]]

        A = Sparse(A)

        xe = [[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]
        b = A * xe
        x = [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]
        x = conjugate_gradient(A, b, x, iprint=0)

        assert num.allclose(x, xe)
Example #7
    def test_vector_shape_error(self):
        """Raise VectorShapeError"""

        A = [[2.0, -1.0, 0.0, 0.0], [-1.0, 2.0, -1.0, 0.0],
             [0.0, -1.0, 2.0, -1.0], [0.0, 0.0, -1.0, 2.0]]

        A = Sparse(A)

        xe = [[0.0, 2.0], [1.0, 3.0], [2.0, 4.0], [3.0, 2.0]]

        try:
            x = _conjugate_gradient(A, xe, xe, iprint=0)
        except VectorShapeError:
            pass
        else:
            msg = 'Should have raised exception'
            raise TestError(msg)
Example #8
    def test_solve_large(self):
        """Standard 1d laplacian """

        n = 50
        A = Sparse(n, n)

        for i in num.arange(0, n):
            A[i, i] = 1.0
            if i > 0:
                A[i, i - 1] = -0.5
            if i < n - 1:
                A[i, i + 1] = -0.5

        xe = num.ones((n, ), num.float)

        b = A * xe
        x = conjugate_gradient(A, b, b, tol=1.0e-5)

        assert num.allclose(x, xe)
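With xe a vector of ones and the tridiagonal matrix built above (1 on the diagonal, -0.5 on the off-diagonals), the right-hand side has a simple closed form:

\[
b_i = (A x_e)_i =
\begin{cases}
0.5, & i = 0 \ \text{or}\ i = n-1,\\
0, & \text{otherwise},
\end{cases}
\]

and CG is started from the initial guess x0 = b with a tolerance of 1.0e-5, so it should reproduce the all-ones vector.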
Example #9
    def test_max_iter(self):
        """Test max iteration Small Sparse Matrix"""

        A = [[2.0, -1.0, 0.0, 0.0], [-1.0, 2.0, -1.0, 0.0],
             [0.0, -1.0, 2.0, -1.0], [0.0, 0.0, -1.0, 2.0]]

        A = Sparse(A)

        xe = [0.0, 1.0, 2.0, 3.0]
        b = A * xe
        x = [0.0, 0.0, 0.0, 0.0]

        try:
            x = conjugate_gradient(A, b, x, imax=2)
        except ConvergenceError:
            pass
        else:
            msg = 'Should have raised exception'
            raise TestError(msg)
Example #10
    def test_max_iter_using_c_ext_with_jacobi(self):
        """Test max iteration Small Sparse Matrix"""

        A = [[2.0, -1.0, 0.0, 0.0],
             [-1.0, 2.0, -1.0, 0.0],
             [0.0, -1.0, 2.0, -1.0],
             [0.0, 0.0, -1.0, 2.0]]

        A = Sparse_CSR(Sparse(A))

        xe = [0.0, 1.0, 2.0, 3.0]
        b = A*xe
        x = [0.0, 0.0, 0.0, 0.0]

        try:
            x = conjugate_gradient(
                A, b, x, imax=2, use_c_cg=True, precon='Jacobi')
        except ConvergenceError:
            pass
        else:
            msg = 'Should have raised exception'
            raise TestError(msg)
Example #11
    def test_solve_large_using_c_ext_with_jacobi(self):
        """Standard 1d laplacian """

        n = 50
        A = Sparse(n, n)

        for i in num.arange(0, n):
            A[i, i] = 1.0
            if i > 0:
                A[i, i-1] = -0.5
            if i < n-1:
                A[i, i+1] = -0.5

        xe = num.ones((n,), num.float)

        b = A*xe

        A = Sparse_CSR(A)

        x = conjugate_gradient(A, b, b, tol=1.0e-5,
                               use_c_cg=True, precon='Jacobi')

        assert num.allclose(x, xe)
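A small observation on this test: every diagonal entry of A is 1.0, so if precon='Jacobi' builds its preconditioner from the diagonal of A (which is how _conjugate_gradient_preconditioned in the next example uses M, via D[i, i] = 1/M[i]; how the caller builds M is an assumption here), then

\[
M = \operatorname{diag}(A) = I \quad\Longrightarrow\quad z = M^{-1} r = r,
\]

and the preconditioned iteration reduces to plain CG; the test mainly exercises the C extension code path rather than the preconditioner itself.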
Example #12
def _conjugate_gradient_preconditioned(A, b, x0, M, imax=10000, tol=1.0e-8,
                                       atol=1.0e-10, iprint=None, Type='None'):
    """
   Try to solve linear equation Ax = b using
   preconditioned conjugate gradient method

   Input
   A: matrix or function which applies a matrix, assumed symmetric
      A can be either dense or sparse or a function
      (__mul__ just needs to be defined)
   b: right hand side
   x0: inital guess (default the 0 vector)
   imax: max number of iterations
   tol: tolerance used for residual

   Output
   x: approximate solution
   """

    # Padarn note: This is temporary while the Jacobi preconditioner is the
    # only one available.
    D = []
    if Type != 'Jacobi':
        log.warning('Only the Jacobi preconditioner is implemented in cg_solve python')
        msg = 'Only the Jacobi preconditioner is implemented in cg_solve python'
        raise PreconditionerError(msg)
    else:
        D = Sparse(A.M, A.M)
        for i in range(A.M):
            D[i, i] = 1.0 / M[i]
        D = Sparse_CSR(D)

    stats = Stats()

    b = num.array(b, dtype=num.float)
    if len(b.shape) != 1:
        raise VectorShapeError('input vector should consist of only one column')

    if x0 is None:
        x0 = num.zeros(b.shape, dtype=num.float)
    else:
        x0 = num.array(x0, dtype=num.float)

    stats.x0 = num.linalg.norm(x0)

    if iprint is None or iprint == 0:
        iprint = imax

    dx = 0.0
    
    i = 1
    x = x0
    r = b - A * x
    z = D * r
    d = r
    rTr = num.dot(r, z)
    rTr0 = rTr

    stats.rTr0 = rTr0
    
    #FIXME Let the iterations stop if starting with a small residual
    while (i < imax and rTr > tol ** 2 * rTr0 and rTr > atol ** 2):
        q = A * d
        alpha = rTr / num.dot(d, q)
        xold = x
        x = x + alpha * d

        dx = num.linalg.norm(x-xold)
        
        #if dx < atol :
        #    break

        # Padarn Note 26/11/12: This modification to the algorithm seems
        # unnecessary, but also seems to have been implemented incorrectly:
        # it was set to perform the more expensive r = b - A * x routine in
        # 49/50 iterations. Suggest this being either removed completely or
        # changed to 'if i % 50 == 0' (or equivalent).
        #if i % 50:
        if False:
            r = b - A * x
        else:
            r = r - alpha * q
        rTrOld = rTr
        z = D * r
        rTr = num.dot(r, z)
        bt = rTr / rTrOld

        d = z + bt * d
        i = i + 1
        if i % iprint == 0:
            log.info('i = %g rTr = %15.8e dx = %15.8e' % (i, rTr, dx))

        if i == imax:
            log.warning('max number of iterations attained')
            msg = 'Conjugate gradient solver did not converge: rTr==%20.15e' % rTr
            raise ConvergenceError(msg)

    stats.x = num.linalg.norm(x)
    stats.iter = i
    stats.rTr = rTr
    stats.dx = dx

    return x, stats
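For comparison with a textbook statement of the method, the loop above implements the preconditioned conjugate gradient recurrence, with the sparse diagonal matrix D playing the role of M^{-1}:

\begin{align*}
z_k &= M^{-1} r_k, &
\alpha_k &= \frac{r_k^{T} z_k}{d_k^{T} A d_k}, &
x_{k+1} &= x_k + \alpha_k d_k, \\
r_{k+1} &= r_k - \alpha_k A d_k, &
\beta_k &= \frac{r_{k+1}^{T} z_{k+1}}{r_k^{T} z_k}, &
d_{k+1} &= z_{k+1} + \beta_k d_k,
\end{align*}

stopping once r^T z falls to or below max(tol^2 * r_0^T z_0, atol^2), or raising a ConvergenceError when i reaches imax. One detail worth noting: the code initialises the search direction as d = r rather than the usual d_0 = z_0 = M^{-1} r_0; the two coincide only when the preconditioner is the identity.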
Example #13
    def __init__(self, domain, use_triangle_areas=True, verbose=False):
        if verbose:
            log.critical('Kinematic Viscosity: Beginning Initialisation')

        Operator.__init__(self, domain)

        #Expose the domain attributes
        self.mesh = self.domain.mesh
        self.boundary = domain.boundary
        self.boundary_enumeration = domain.boundary_enumeration

        # Setup a quantity as diffusivity
        # FIXME SR: Could/Should pass a quantity which already exists
        self.diffusivity = Quantity(self.domain)
        self.diffusivity.set_values(1.0)
        self.diffusivity.set_boundary_values(1.0)

        self.n = len(self.domain)

        self.dt = 0.0  #Need to set to domain.timestep
        self.dt_apply = 0.0

        self.boundary_len = len(self.domain.boundary)
        self.tot_len = self.n + self.boundary_len

        self.verbose = verbose

        #Geometric Information
        if verbose:
            log.critical('Kinematic Viscosity: Building geometric structure')

        self.geo_structure_indices = num.zeros((self.n, 3), num.int)
        self.geo_structure_values = num.zeros((self.n, 3), num.float)

        # Only needs to be built once; doesn't change
        kinematic_viscosity_operator_ext.build_geo_structure(self)

        # Setup type of scaling
        self.set_triangle_areas(use_triangle_areas)

        # FIXME SR: should this really be a matrix?
        temp = Sparse(self.n, self.n)
        for i in range(self.n):
            temp[i, i] = 1.0 / self.mesh.areas[i]

        self.triangle_areas = Sparse_CSR(temp)
        #self.triangle_areas

        # FIXME SR: More to do with solving equation
        self.qty_considered = 1  #1 or 2 (uh or vh respectively)

        #Sparse_CSR.data
        self.operator_data = num.zeros((4 * self.n, ), num.float)
        #Sparse_CSR.colind
        self.operator_colind = num.zeros((4 * self.n, ), num.int)
        #Sparse_CSR.rowptr (4 entries in every row, we know this already) = [0,4,8,...,4*n]
        self.operator_rowptr = 4 * num.arange(self.n + 1)

        # Build matrix self.elliptic_matrix [A B]
        self.build_elliptic_matrix(self.diffusivity)

        self.boundary_term = num.zeros((self.n, ), num.float)

        self.parabolic = False  #Are we doing a parabolic solve at the moment?

        self.u_stats = None
        self.v_stats = None

        if verbose: log.critical('Elliptic Operator: Initialisation Done')
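The triangle_areas matrix assembled above is just a diagonal scaling by 1/area. Below is a minimal sketch of what it does, assuming the Sparse/Sparse_CSR classes from the earlier examples live in anuga.utilities.sparse (the import path is an assumption to verify locally) and using a made-up areas array:

import numpy as num

# Assumed import path; adjust to wherever Sparse and Sparse_CSR are defined locally
from anuga.utilities.sparse import Sparse, Sparse_CSR

# Hypothetical areas for a 3-triangle mesh
areas = num.array([2.0, 4.0, 5.0])
n = len(areas)

# Same construction as in __init__: a diagonal matrix with 1/area entries
temp = Sparse(n, n)
for i in range(n):
    temp[i, i] = 1.0 / areas[i]
triangle_areas = Sparse_CSR(temp)

# Multiplying by a quantity vector divides it elementwise by the areas
q = num.array([1.0, 1.0, 1.0])
print(triangle_areas * q)    # expected: [0.5, 0.25, 0.2]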
Example #14
    def _build_interpolation_matrix_A(self,
                                      point_coordinates,
                                      output_centroids=False,
                                      verbose=False):
        """Build n x m interpolation matrix, where
        n is the number of data points and
        m is the number of basis functions phi_k (one per vertex)

        This algorithm uses a quad tree data structure for fast binning
        of data points
        origin is a 3-tuple consisting of UTM zone, easting and northing.
        If specified coordinates are assumed to be relative to this origin.

        This one will override any data_origin that may be specified in
        instance interpolation

        Preconditions:
            Point_coordindates and mesh vertices have the same origin.
        """

        if verbose: log.critical('Building interpolation matrix')

        # Convert point_coordinates to numeric arrays, in case it was a list.
        point_coordinates = ensure_numeric(point_coordinates, num.float)

        if verbose: log.critical('Getting indices inside mesh boundary')

        # Quick test against boundary, but will not deal with holes in the mesh,
        # that is done below
        inside_boundary_indices, outside_poly_indices = \
            in_and_outside_polygon(point_coordinates,
                                   self.mesh.get_boundary_polygon(),
                                   closed=True, verbose=verbose)

        # Build n x m interpolation matrix
        if verbose and len(outside_poly_indices) > 0:
            log.critical('WARNING: Points outside mesh boundary.')

        # Since you can block, throw a warning, not an error.
        if verbose and 0 == len(inside_boundary_indices):
            log.critical('WARNING: No points within the mesh!')

        m = self.mesh.number_of_nodes  # Nbr of basis functions (1/vertex)
        n = point_coordinates.shape[0]  # Nbr of data points

        if verbose: log.critical('Number of datapoints: %d' % n)
        if verbose: log.critical('Number of basis functions: %d' % m)

        A = Sparse(n, m)

        n = len(inside_boundary_indices)

        centroids = []
        inside_poly_indices = []

        # Compute matrix elements for points inside the mesh
        if verbose:
            log.critical('Building interpolation matrix from %d points' % n)

        for d, i in enumerate(inside_boundary_indices):
            # For each data_coordinate point
            if verbose and d % ((n + 10) // 10) == 0:
                log.critical('Doing %d of %d' % (d, n))

            x = point_coordinates[i]
            element_found, sigma0, sigma1, sigma2, k = self.root.search_fast(x)
            # Update interpolation matrix A if necessary
            if element_found is True:

                #if verbose:
                #    print 'Point is within mesh:', d, i

                inside_poly_indices.append(i)

                # Assign values to matrix A
                j0 = self.mesh.triangles[k, 0]  # Global vertex id for sigma0
                j1 = self.mesh.triangles[k, 1]  # Global vertex id for sigma1
                j2 = self.mesh.triangles[k, 2]  # Global vertex id for sigma2
                js = [j0, j1, j2]

                if output_centroids is False:
                    # Weight each vertex according to its distance from x
                    sigmas = {j0: sigma0, j1: sigma1, j2: sigma2}
                    for j in js:
                        A[i, j] = sigmas[j]
                else:
                    # If centroids are needed, weight all 3 vertices equally
                    for j in js:
                        A[i, j] = 1.0 / 3.0
                    centroids.append(self.mesh.centroid_coordinates[k])
            else:
                if verbose:
                    log.critical(
                        'Mesh has a hole - moving this point to outside list')

                # This is a numpy array, so we need to do a slow transfer
                outside_poly_indices = num.append(outside_poly_indices, [i],
                                                  axis=0)

        return A, inside_poly_indices, outside_poly_indices, centroids
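The sigma0, sigma1, sigma2 values returned by search_fast appear to be the barycentric (linear shape function) coordinates of the point x within triangle k, so row i of A performs linear interpolation from the three vertex values:

\[
(A f)_i = \sigma_0 f_{j_0} + \sigma_1 f_{j_1} + \sigma_2 f_{j_2},
\qquad \sigma_0 + \sigma_1 + \sigma_2 = 1,
\]

where f is a vector of values at the m mesh vertices. With output_centroids=True each weight is replaced by 1/3, i.e. the row returns the average of the triangle's three vertex values (and the triangle's centroid coordinates are collected separately).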