Example #1
    def collapse(self):
        '''Explicit matrix representation of the operator'''
        from xii.linalg.matrix_utils import diagonal_matrix, row_matrix
        from xii import ii_convert

        D = row_matrix(self.dual_basis)
        B = row_matrix(self.primal_basis)
        I = diagonal_matrix(B.size(1), 1.)
        return ii_convert(I - B.T * D)
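
A minimal sanity check, not part of the library: assuming the dual and primal bases are biorthogonal, P = I - B^T D is a projector, so its dense representation should be idempotent. The helper name and tolerance below are illustrative only.

import numpy as np

def check_projector(op, tol=1E-10):
    '''Idempotence check: with biorthogonal bases, P = I - B^T D satisfies P*P = P'''
    P = op.collapse().array()  # dense numpy array of the collapsed matrix
    return np.linalg.norm(P.dot(P) - P, np.inf) < tol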
Example #2
    def is_linear(L, u):
        '''Is the form L linear in u? Returns (mismatch, reference) norms, or None if dL/du vanishes'''
        # Compute the derivative dL/du
        dL = ii_convert(ii_assemble(ii_derivative(L, u)))

        if dL == 0:
            info('dL/du is zero')
            return None

        # Random perturbation direction w
        w = Function(u.function_space()).vector()
        w.set_local(np.random.rand(w.local_size()))
        # dw will hold the action of the derivative, dL(u)*w
        dw = PETScVector(as_backend_type(dL).mat().getVecLeft())
        dL.mult(w, dw)

        # Now L(u) + dL(u)*w
        Lu_dw = ii_assemble(L) + dw
        # If L is linear then L(u + w), assembled below, must equal L(u) + dL(u)*w
        u.vector().axpy(1, w)
        Lu_dw0 = ii_assemble(L)

        return (Lu_dw - Lu_dw0).norm('linf'), Lu_dw0.norm('linf')
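
The returned pair is meant for an assertion helper such as test used in Example #6. Its definition is not part of these snippets; the sketch below is one plausible version, with the tolerance being an assumption.

def test(error, reference, tol=1E-10):
    '''Accept the check if the mismatch is negligible relative to the reference norm'''
    assert error < tol*max(reference, 1), (error, reference)
    return error, reference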
Example #3
    def collapse(self):
        '''Explicit matrix representation of the operator'''
        from xii import ii_convert

        return ii_convert((self.P.M) * self.P.collapse())
Example #4
    ub, vb = TrialFunction(Vb), TestFunction(Vb)

    b = [[0, 0], [0, 0]]
    b[0][0] = inner(grad(u), grad(v)) * dx + inner(u, v) * dx
    b[0][1] = inner(grad(ub), grad(v)) * dx + inner(ub, v) * dx
    b[1][0] = inner(grad(u), grad(vb)) * dx + inner(u, vb) * dx
    b[1][1] = inner(grad(ub), grad(vb)) * dx + inner(ub, vb) * dx

    BB = ii_assemble(b)

    x = Function(V).vector()
    x.set_local(np.random.rand(x.local_size()))
    y = Function(Vb).vector()
    y.set_local(np.random.rand(y.local_size()))
    bb = block_vec([x, y])

    z_block = BB * bb

    # Make into a monolithic matrix
    BB_m = ii_convert(BB)

    R = ReductionOperator([2], W=[V, Vb])

    z = (R.T) * BB_m * (R * bb)

    print((z - z_block).norm())

    z_mono = BB_m * (R * bb)
    print(np.linalg.norm(
        np.hstack([bi.get_local() for bi in z_block]) - z_mono.get_local()))
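
One more hedged check, continuing the snippet above: assuming R.T simply splits the monolithic vector back into its blocks, the round trip through the ReductionOperator should reproduce bb.

    # Round trip block -> monolithic -> block should be the identity
    bb_back = R.T * (R * bb)
    print(max(np.linalg.norm(bi.get_local() - ci.get_local(), np.inf)
              for bi, ci in zip(bb, bb_back)))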
Example #5
def analyze_cond(problem, precond, ncases, alpha, get_cond, logfile):
    '''Condition number study of the problem over ncases refinements for fixed alpha'''
    # Annotate columns
    columns = ['ndofs', 'h', 'cond', 'flag']
    header = ' '.join(columns)
    
    # Formats for command-line progress printing: ndofs, h and (highlighted) cond with its increment
    formats = ['%d'] + ['%.2E'] + ['\033[1;37;34m%g(%g)\033[0m']
    msg = ' '.join(formats)

    mms_data = problem.setup_mms(alpha)

    case0 = 0
    with open(logfile, 'a') as stream:
        # Run in context manager to keep the data
        history = []
        for n in [4*2**i for i in range(case0, case0+ncases)]:
            # Setting up the problem yields a block_mat, block_vec and a list of
            # spaces, or a plain matrix, vector and a single function space
            try:
                AA, bb, W = problem.setup_problem(n, mms_data, alpha)
                Z = []
            except ValueError:
                AA, bb, W, Z = problem.setup_problem(n, mms_data, alpha)

            # Let's get the preconditioner as block operator
            try: 
                BB = precond(W, mms_data, alpha)
            except TypeError:
                try:
                    BB = precond(W, mms_data, alpha, AA)
                except TypeError:
                    BB = precond(W, mms_data, alpha, AA, Z)
                
            # Need a monolithic matrix
            A = ii_convert(AA)
            # spectrum expects matrices
            B = ii_convert(BB)   # ii_convert is identity for mat
            # Again monolithic kernel
            Z = list(map(ii_convert, Z)) if Z else Z
            # For Dirichlet cases we might not have a list of spaces
            h = W[0].mesh().hmin() if isinstance(W, list) else W.mesh().hmin()
            ndofs = A.size(0)

            cond, flag = get_cond(A, B, Z)
            # No convergence? 
            if cond is None: break
            # Log the current case and keep it for re-printing the table
            row = (ndofs, h, cond)
            stream.write('%d %g %.16f %s\n' % (ndofs, h, cond, flag))

            # Put the entire history because of eps monitor
            history.append(row)

            print('='*79)
            print(RED % str(alpha))
            print(GREEN % header)

            for i, row in enumerate(history):
                # Change of the condition number w.r.t. the previous refinement
                if i > 0:
                    increment = history[i][-1] - history[i-1][-1]
                else:
                    increment = 0

                print(msg % (row + (increment, )))

            print('='*79)
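
For completeness, a minimal sketch of a get_cond callable with the signature used above, assuming A and B are symmetric with B positive definite and small enough to densify; the nullspace Z is ignored here and the name dense_get_cond is illustrative.

import numpy as np
from scipy.linalg import eigvalsh

def dense_get_cond(A, B, Z):
    '''Condition number of B^{-1} A from the generalized problem A x = lambda B x'''
    lmbda = np.abs(eigvalsh(A.array(), B.array()))
    # Mimic the (cond, flag) contract; 'dense' only records how the number was obtained
    return lmbda.max()/lmbda.min(), 'dense'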
Example #6
    # These should be linear
    L = inner(Tu, q) * dxGamma
    assert is_linear(L, Function(V)) is None
    test(*is_linear(L, u))

    # ---------------------------------------------------------------

    L = inner(Tu + Tu, q) * dxGamma
    test(*is_linear(L, u))

    # ---------------------------------------------------------------

    # A simple nonlinearity where the derivative can be checked by hand
    L = inner(Tu**2, q) * dxGamma
    dL0 = inner(2 * Tu * Trace(TrialFunction(V), bmesh), q) * dxGamma
    A0 = ii_convert(ii_assemble(dL0)).array()

    dL = ii_derivative(L, u)
    A = ii_convert(ii_assemble(dL)).array()

    test(np.linalg.norm(A - A0, np.inf), np.linalg.norm(A0, np.inf))

    # ---------------------------------------------------------------

    L = inner(2 * Tu**3 - sin(Tu), q) * dxGamma
    dL0 = inner(
        (6 * Tu**2 - cos(Tu)) * Trace(TrialFunction(V), bmesh), q) * dxGamma
    A0 = ii_convert(ii_assemble(dL0)).array()

    dL = ii_derivative(L, u)
    A = ii_convert(ii_assemble(dL)).array()
Example #7
    def __init__(self, mesh, orientation):
        if orientation is None:
            # Assume a convex domain and take the coordinate mean (centroid) as the inside point
            orientation = mesh.coordinates().mean(axis=0)

        if isinstance(orientation, df.Mesh):
            # Define the surface normal as the outer normal with respect to the orientation mesh
            assert orientation.id() in mesh.parent_entity_map

            n = df.FacetNormal(orientation)
            hA = df.FacetArea(orientation)
            # Project the facet normal into DLT0, i.e. a Function on the orientation mesh
            DLT = df.VectorFunctionSpace(orientation,
                                         'Discontinuous Lagrange Trace', 0)
            n_ = df.Function(DLT)
            df.assemble((1 / hA) * df.inner(n, df.TestFunction(DLT)) * df.ds,
                        tensor=n_.vector())

            # Now bring it onto the manifold mesh via the trace
            dx_ = df.Measure('dx', domain=mesh)
            V = df.VectorFunctionSpace(mesh, 'DG', 0)

            df.Function.__init__(self, V)

            hK = df.CellVolume(mesh)
            n_vec = xii.ii_convert(
                xii.ii_assemble(
                    (1 / hK) *
                    df.inner(xii.Trace(n_, mesh), df.TestFunction(V)) * dx_))

            self.vector()[:] = n_vec

            return

        # Manifold assumption
        assert 1 <= mesh.topology().dim() < mesh.geometry().dim()
        gdim = mesh.geometry().dim()

        # Orientation from inside point
        if isinstance(orientation, (list, np.ndarray, tuple)):
            assert len(orientation) == gdim

            kwargs = {'x0%d' % i: val for i, val in enumerate(orientation)}
            orientation = ['x[%d] - x0%d' % (i, i) for i in range(gdim)]
            orientation = df.Expression(orientation, degree=1, **kwargs)

        assert orientation.ufl_shape == (gdim, )

        V = df.VectorFunctionSpace(mesh, 'DG', 0, gdim)
        df.Function.__init__(self, V)
        n_values = self.vector().get_local()

        values = []

        for cell in df.cells(mesh):
            n = cell.cell_normal().array()[:gdim]

            x = cell.midpoint().array()[:gdim]
            # Flip the cell normal if it disagrees with the orientation field
            if np.inner(orientation(x), n) < 0:
                n *= -1.
            values.append(n / np.linalg.norm(n))

        values = np.array(values)

        for sub in range(gdim):
            dofs = V.sub(sub).dofmap().dofs()
            n_values[dofs] = values[:, sub]
        self.vector().set_local(n_values)
        self.vector().apply('insert')
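
A hedged usage sketch of the class whose __init__ is shown above: the class name OuterNormal is an assumption (only the constructor body appears here), and the check merely verifies that the stored cell-wise normals have unit length.

import dolfin as df
import numpy as np

mesh = df.UnitSquareMesh(16, 16)
bmesh = df.BoundaryMesh(mesh, 'exterior')       # 1d manifold embedded in 2d
# OuterNormal is a hypothetical name for the class defined above
n = OuterNormal(bmesh, orientation=[0.5, 0.5])  # orient away from an interior point

V = n.function_space()
vals = n.vector().get_local()
nx = vals[V.sub(0).dofmap().dofs()]
ny = vals[V.sub(1).dofmap().dofs()]
assert np.all(np.abs(np.hypot(nx, ny) - 1) < 1E-10)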