Example #1
def test_pointsource_mixed_space(mesh, point):
    """Tests point source when given constructor PointSource(V, point,
    mag) applied to a vector for a mixed function space, with the point not
    placed at a node, for 1D, 2D and 3D. Global points given to constructor from
    rank 0 processor.

    """

    rank = MPI.rank(mesh.mpi_comm())
    ele1 = FiniteElement("CG", mesh.ufl_cell(), 1)
    ele2 = FiniteElement("DG", mesh.ufl_cell(), 2)
    ele3 = VectorElement("CG", mesh.ufl_cell(), 2)
    V = FunctionSpace(mesh, MixedElement([ele1, ele2, ele3]))
    value_dimension = V.element().value_dimension(0)
    v = TestFunction(V)
    b = assemble(dot(Constant([0.0]*value_dimension), v)*dx)
    if rank == 0:
        ps = PointSource(V, point, 10.0)
    else:
        ps = PointSource(V, [])
    ps.apply(b)

    # Checks array sums to correct value
    b_sum = b.sum()
    assert round(b_sum - 10.0*value_dimension) == 0
Example #2
def test_fenics_vector_copy():
    mesh = UnitSquare(3, 3)
    fs = FunctionSpace(mesh, "CG", 1)
    vec1 = FEniCSVector(Function(fs))
    vec1.coeffs = np.array(range(fs.dim()))
    assert_equal(vec1.coeffs[0], 0)
    vec2 = vec1.copy()
    vec2.coeffs[0] = 5
    assert_equal(vec1.coeffs[0], 0)
Example #3
def weighted_H1_norm(w, vec, piecewise=False):
    if piecewise:
        DG = FunctionSpace(vec.basis.mesh, "DG", 0)
        s = TestFunction(DG)
        ae = assemble(w * inner(nabla_grad(vec._fefunc), nabla_grad(vec._fefunc)) * s * dx)
        norm_vec = np.array([sqrt(e) for e in ae])
        # map DG dofs to cell indices
        dofs = [DG.dofmap().cell_dofs(c.index())[0] for c in cells(vec.basis.mesh)]
        norm_vec = norm_vec[dofs]
    else:
        ae = assemble(w * inner(nabla_grad(vec._fefunc), nabla_grad(vec._fefunc)) * dx)
        norm_vec = sqrt(ae)
    return norm_vec
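
A hedged usage sketch (assuming, as above, that vec is a spuq FEniCSVector-style wrapper exposing .basis.mesh and ._fefunc, and that the dolfin names are imported):

w = Constant(1.0)                                        # trivial weight
total = weighted_H1_norm(w, vec)                         # scalar weighted H1 seminorm
per_cell = weighted_H1_norm(w, vec, piecewise=True)      # one value per mesh cell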
Example #4
File: problem.py  Project: mhanus/EVC
  def __init__(self, coarse_mesh, nref, p_coarse, p_fine, sym=False):
    """

    :param dolfin.cpp.mesh.Mesh coarse_mesh:
    :param int nref:
    :param int p_coarse:
    :param int p_fine:
    :param bool sym:
    :return:
    """

    print0("Creating approximation spaces")

    self.V_coarse = FunctionSpace(coarse_mesh, "CG", p_coarse)
    self.ndof_coarse = self.V_coarse.dim()

    refined_mesh = coarse_mesh
    for ref in xrange(nref):
      refined_mesh = refine(refined_mesh)   # creates a new Mesh, initial coarse mesh is unchanged

    self.V_fine = FunctionSpace(refined_mesh, "CG", p_fine)
    self.ndof_fine = self.V_fine.dim()

    H = coarse_mesh.hmax()
    h = refined_mesh.hmax()
    self.alpha = log(H)/log(h)
    self.beta = p_fine + 1

    if comm.rank == 0:
      prop = Table("Approximation properties")
      prop.set("ndof", "coarse", self.ndof_coarse)
      prop.set("ndof", "fine", self.ndof_fine)
      prop.set("h", "coarse", H)
      prop.set("h", "fine", h)

      info(prop)

      print "alpha = {}, beta = {}".format(self.alpha, self.beta)

    self.bc_coarse = None

    self.A_fine = PETScMatrix()
    self.B_fine = PETScMatrix()
    self.A_coarse = PETScMatrix()
    self.B_coarse = PETScMatrix()

    self.sym = sym
    self.switch_gep_matrices = False
Example #5
  def __init__(self, problem, verbosity=0):
    """

    :param ProblemData problem:
    :param int verbosity:
    :return:
    """
    self.parameters = parameters["discretization"]

    self.verb = verbosity
    self.vis_folder = os.path.join(problem.out_folder, "MESH")
    self.core = problem.core
    self.G = problem.G

    if self.verb > 1: print pid+"Loading mesh"
        
    t_load = Timer("DD: Data loading")

    if not problem.mesh_module:
      if self.verb > 1: print pid + "  mesh data"
      self.mesh = Mesh(problem.mesh_files.mesh)

      if self.verb > 1: print pid + "  physical data"
      self.cell_regions_fun = MeshFunction("size_t", self.mesh, problem.mesh_files.physical_regions)

      if self.verb > 1: print pid + "  boundary data"
      self.boundaries = MeshFunction("size_t", self.mesh, problem.mesh_files.facet_regions)
    else:
      self.mesh = problem.mesh_module.mesh
      self.cell_regions_fun = problem.mesh_module.regions

      try:
        self.boundaries = problem.mesh_module.boundaries
      except AttributeError:
        self.boundaries = None

    assert self.mesh
    assert self.boundaries is None or self.boundaries.array().size > 0

    if self.verb > 2:
      print pid+"  mesh info: " + str(self.mesh)

    if self.verb > 1: print0("Defining function spaces" )

    self.t_spaces = Timer("DD: Function spaces construction")

    # Spaces that must be specified by the respective subclasses
    self.V = None     # solution space
    self.Vphi1 = None # 1-g scalar flux space
    
    # XS / TH space
    self.V0 = FunctionSpace(self.mesh, "DG", 0)
    self.ndof0 = self.V0.dim()

    dofmap = self.V0.dofmap()
    self.local_ndof0 = dofmap.local_dimension("owned")

    self.cell_regions = self.cell_regions_fun.array()
    assert self.cell_regions.size == self.local_ndof0
Example #6
def test_multi_ps_matrix_node(mesh):
    """Tests point source when given constructor PointSource(V, source)
    with a matrix when points are placed at 3 nodes for 1D, 2D and
    3D. Global points given to constructor from rank 0 processor.

    """

    point = [0.0, 0.5, 1.0]
    rank = MPI.rank(mesh.mpi_comm())
    V = FunctionSpace(mesh, "CG", 1)
    u, v = TrialFunction(V), TestFunction(V)
    w = Function(V)
    A = assemble(Constant(0.0)*u*v*dx)
    dim = mesh.geometry().dim()

    source = []

    point_coords = np.zeros(dim)
    for p in point:
        for i in range(dim):
            point_coords[i-1] = p
        if rank == 0:
            source.append((Point(point_coords), 10.0))
    ps = PointSource(V, source)
    ps.apply(A)

    # Checks matrix sums to correct value.
    A.get_diagonal(w.vector())
    a_sum = MPI.sum(mesh.mpi_comm(), np.sum(A.array()))
    assert round(a_sum - len(point)*10) == 0

    # Check if coordinates are in portion of mesh and if so check that
    # diagonal components sum to the correct value.
    mesh_coords = V.tabulate_dof_coordinates()
    for p in point:
        for i in range(dim):
            point_coords[i-1] = p

        j = 0
        for i in range(len(mesh_coords)//(dim)):
            mesh_coords_check = mesh_coords[j:j+dim-1]
            if np.array_equal(point_coords, mesh_coords_check) is True:
                assert np.round(w.vector()[j//(dim)]-10.0) == 0.0
            j += dim
Example #7
def test_multi_ps_vector_node(mesh):
    """Tests point source when given constructor PointSource(V, V, point,
    mag) with a matrix when points placed at 3 node for 1D, 2D and
    3D. Global points given to constructor from rank 0 processor.

    """

    point = [0.0, 0.5, 1.0]
    dim = mesh.geometry().dim()
    rank = MPI.rank(mesh.mpi_comm())
    V = FunctionSpace(mesh, "CG", 1)
    v = TestFunction(V)
    b = assemble(Constant(0.0)*v*dx)

    source = []
    point_coords = np.zeros(dim)
    for p in point:
        for i in range(dim):
            point_coords[i-1] = p
        if rank == 0:
            source.append((Point(point_coords), 10.0))
    ps = PointSource(V, source)
    ps.apply(b)

    # Checks b sums to correct value
    b_sum = b.sum()
    assert round(b_sum - len(point)*10.0) == 0

    # Checks values added to correct part of vector
    mesh_coords = V.tabulate_dof_coordinates()
    for p in point:
        for i in range(dim):
            point_coords[i] = p

        j = 0
        for i in range(len(mesh_coords)//(dim)):
            mesh_coords_check = mesh_coords[j:j + dim - 1]
            if np.array_equal(point_coords, mesh_coords_check) is True:
                assert np.round(b.array()[j//(dim)]-10.0) == 0.0
            j += dim
Example #8
def test_estimator():
    # setup solution multi vector
    mis = [Multiindex([0]),
           Multiindex([1]),
           Multiindex([0, 1]),
           Multiindex([0, 2])]
    mesh = UnitSquare(4, 4)
    fs = FunctionSpace(mesh, "CG", 1)
    F = [interpolate(Expression("*".join(["x[0]"] * i)), fs) for i in range(1, 5)]
    vecs = [FEniCSVector(f) for f in F]

    w = MultiVectorWithProjection()
    for mi, vec in zip(mis, vecs):
        w[mi] = vec
        #    v = A * w

    # define coefficient field
    aN = 4
    a = [Expression('2.+sin(20.*pi*I*x[0]*x[1])', I=i, degree=3, element=fs.ufl_element())
         for i in range(1, aN)]
    rvs = [UniformRV(), NormalRV(mu=0.5)]
    coeff_field = ListCoefficientField(a[0], a[1:], rvs)

    # define source term
    f = Constant("1.0")

    # evaluate residual and projection error estimators
    resind, reserr = ResidualEstimator.evaluateResidualEstimator(w, coeff_field, f)
    projind, projerr = ResidualEstimator.evaluateProjectionError(w, coeff_field)
    print resind[mis[0]].as_array().shape, projind[mis[0]].as_array().shape
    print "RESIDUAL:", resind[mis[0]].as_array()
    print "PROJECTION:", projind[mis[0]].as_array()
    print "residual error estimate for mu"
    for mu in reserr:
        print "\t eta", mu, " is ", reserr[mu]
        print "\t delta", mu, " is ", projerr[mu]

    assert_equal(w.active_indices(), resind.active_indices())
    print "active indices are ", resind.active_indices()
Example #9
 def setUp(self):
     mesh = UnitSquareMesh(5, 5, 'crossed')
     self.V = FunctionSpace(mesh, 'Lagrange', 5)
     self.u = Function(self.V)
     self.uM = Function(self.V)
     self.uMdiag = Function(self.V)
     test = TestFunction(self.V)
     trial = TrialFunction(self.V)
     m = test*trial*dx
     self.M = assemble(m)
     self.solver = LUSolver()
     self.solver.parameters['reuse_factorization'] = True
     self.solver.parameters['symmetric'] = True
     self.solver.set_operator(self.M)
     self.ones = np.ones(self.V.dim())
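
A hedged sketch of a test that could be built on this fixture (a hypothetical method, not from the source; it exercises the cached LU factorization set up above):

    def test_mass_roundtrip(self):
        self.u.vector()[:] = self.ones            # set u to 1 everywhere
        b = self.M * self.u.vector()              # b = M * 1
        self.solver.solve(self.uM.vector(), b)    # reuses the cached LU factors
        assert np.allclose(self.uM.vector().array(), 1.0)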
Example #10
    def __init__(self, mesh, num, denom, method="CR"):
        """ Assemble matrices and cache matrices. """
        self.V = FunctionSpace(mesh, method, 1)
        # save coordinates of DOFs
        self.dofs = np.reshape(self.V.dofmap().tabulate_all_coordinates(mesh),
                               (-1, 2))
        u = TrialFunction(self.V)
        v = TestFunction(self.V)
        self.boundary = MeshFunction("size_t", mesh, 1)
        # assemble matrices
        a = inner(grad(u), grad(v)) * dx
        b = u * v * dx
        self.A = uBLASSparseMatrix()
        self.A = assemble(a, tensor=self.A)
        self.A.compress()
        self.B = uBLASSparseMatrix()
        self.B = assemble(b, tensor=self.B)
        self.B.compress()
        size = mesh.size(2)
        # cell diameter calculation
        self.H2 = (num ** 2 + denom ** 2) * 1.0 / size
        # print "Theoretical cell diameter: ", sqrt(self.H2)
        # print "FEniCS calculated: ", mesh.hmax()

        # Matrices have rational entries. We can rescale to get integers
        # and save as scipy matrices
        scaleB = 6.0*size/num/denom
        self.scaleB = scaleB
        self.B *= scaleB
        r, c, val = self.B.data()
        val = np.round(val)
        self.B = sps.csr_matrix((val, c, r))
        # check if B is diagonal
        assert len(self.B.diagonal()) == self.B.nnz
        # find B inverse
        self.Binv = sps.csr_matrix((1.0/val, c, r))
        scaleA = 1.0*num*denom/2
        self.scaleA = scaleA
        self.A *= scaleA
        r, c, val = self.A.data()
        self.A = sps.csr_matrix((np.round(val), c, r))

        self.scale = scaleA/scaleB
        print 'scaling A by: ', scaleA
        print 'scaling B by: ', scaleB
        print 'eigenvalues scale by:', self.scale
Example #11
File: sampling.py  Project: SpuqTeam/spuq
def get_projection_basis(mesh0, mesh_refinements=None, maxh=None, degree=1, sub_spaces=None, family='CG'):
    if mesh_refinements is not None:
        mesh = mesh0
        for _ in range(mesh_refinements):
            mesh = refine(mesh)
        if sub_spaces is None or sub_spaces == 0:
            V = FunctionSpace(mesh, family, degree)
        else:
            V = VectorFunctionSpace(mesh, family, degree)
            assert V.num_sub_spaces() == sub_spaces
        return FEniCSBasis(V)
    else:
        assert maxh is not None
        if sub_spaces is None or sub_spaces == 0:
            V = FunctionSpace(mesh0, family, degree)
        else:
            V = VectorFunctionSpace(mesh0, family, degree)
            assert V.num_sub_spaces() == sub_spaces
        B = FEniCSBasis(V)
        return B.refine_maxh(maxh, True)[0]
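
A hedged usage sketch of the two branches above (mesh0 is any dolfin Mesh; refine_maxh is the spuq FEniCSBasis method called in the else branch):

basis_fixed = get_projection_basis(mesh0, mesh_refinements=2, degree=1)  # refine twice
basis_maxh = get_projection_basis(mesh0, maxh=0.05, degree=1)            # refine until hmax is below maxh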
def test_single_block_no_restriction_from_list(mesh, FunctionSpace):
    V = FunctionSpace(mesh)
    block_V = BlockFunctionSpace([V])
    assert_dof_map_single_block_no_restriction(V, block_V)
Example #13
            def __call__(self, degree=3, y=0., standard_deviation=1.):
                V = FunctionSpace(self._mesh, "CG", degree)
                v = TestFunction(V)
                potential_trial = TrialFunction(V)
                potential = Function(V)
                dx = Measure("dx")(subdomain_data=self._subdomains)
                # ds = Measure("ds")(subdomain_data=self._boundaries)
                csd = Expression(f'''
                                  x[1] >= {self.H} ?
                                  0 :
                                  a * exp({-0.5 / standard_deviation ** 2}
                                          * ((x[0])*(x[0])
                                             + (x[1] - {y})*(x[1] - {y})
                                             + (x[2])*(x[2])
                                             ))
                                  ''',
                                 degree=degree,
                                 a=1.0)

                self.a = csd.a = 1.0 / assemble(
                    csd * Measure("dx", self._mesh))
                # print(assemble(csd * Measure("dx", self._mesh)))
                L = csd * v * dx

                known_terms = assemble(L)
                # a = (inner(grad(potential_trial), grad(v))
                #      * (Constant(self.SALINE_CONDUCTIVITY) * dx(self.SALINE_VOL)
                #         + Constant(self.SLICE_CONDUCTIVITY) * (dx(self.SLICE_VOL)
                #                                                + dx(self.ROI_VOL))))
                a = sum(
                    Constant(conductivity) *
                    inner(grad(potential_trial), grad(v)) * dx(domain)
                    for domain, conductivity in [
                        (self.SALINE_VOL, self.SALINE_CONDUCTIVITY),
                        (self.SLICE_VOL, self.SLICE_CONDUCTIVITY),
                        (self.ROI_VOL, self.SLICE_CONDUCTIVITY),
                    ])
                terms_with_unknown = assemble(a)
                dirchlet_bc = DirichletBC(
                    V,
                    Constant(2.0 * 0.25 /
                             (self.RADIUS * np.pi * self.SALINE_CONDUCTIVITY)),
                    # 2.0 because of the dielectric base duplicating
                    # the current source
                    # slice conductivity and thickness considered
                    # negligible
                    self._boundaries,
                    self.MODEL_DOME)
                dirchlet_bc.apply(terms_with_unknown, known_terms)
                solver = KrylovSolver("cg", "ilu")
                solver.parameters["maximum_iterations"] = MAX_ITER
                solver.parameters["absolute_tolerance"] = 1E-8
                # solver.parameters["monitor_convergence"] = True
                start = datetime.datetime.now()
                try:
                    self.iterations = solver.solve(terms_with_unknown,
                                                   potential.vector(),
                                                   known_terms)
                    return potential

                except RuntimeError as e:
                    self.iterations = MAX_ITER
                    logger.warning("Solver failed: {}".format(repr(e)))
                    return None

                finally:
                    self.time = datetime.datetime.now() - start
Example #14
 def __init__(self, Th, N):
     self.N = N
     mesh = UnitSquareMesh(Th, Th)
     self.V = FunctionSpace(mesh, "Lagrange", 1)
def test_heat_equation_fenics():
    # Define problem
    class Heat(object):
        '''
        u' = \\Delta u + f
        '''
        def __init__(self, V):
            self.V = V
            u = TrialFunction(V)
            v = TestFunction(V)
            self.M = assemble(u * v * dx)
            self.A = assemble(-dot(grad(u), grad(v)) * dx)
            self.b = assemble(1.0 * v * dx)
            self.bcs = DirichletBC(self.V, 0.0, 'on_boundary')
            return

        # pylint: disable=unused-argument
        def eval_alpha_M_beta_F(self, alpha, beta, u, t):
            # Evaluate  alpha * M * u + beta * F(u, t).
            uvec = u.vector()
            return alpha * (self.M * uvec) + beta * (self.A * uvec + self.b)

        def solve_alpha_M_beta_F(self, alpha, beta, b, t):
            # Solve  alpha * M * u + beta * F(u, t) = b  for u.
            A = alpha * self.M + beta * self.A

            rhs = b - beta * self.b
            self.bcs.apply(A, rhs)

            solver = KrylovSolver('gmres', 'ilu')
            solver.parameters['relative_tolerance'] = 1.0e-13
            solver.parameters['absolute_tolerance'] = 0.0
            solver.parameters['maximum_iterations'] = 100
            solver.parameters['monitor_convergence'] = True
            solver.set_operator(A)

            u = Function(self.V)
            solver.solve(u.vector(), rhs)
            return u

    # create initial guess
    mesh = UnitSquareMesh(20, 20, 'crossed')
    V = FunctionSpace(mesh, 'CG', 1)
    u = TrialFunction(V)
    v = TestFunction(V)
    u0 = Function(V)
    solve(u * v * dx == Constant(0.0) * v * dx, u0)

    u1 = Function(V)
    u1.assign(u0)

    # create time stepper
    # stepper = parabolic.Dummy(Heat(V))
    # stepper = parabolic.ExplicitEuler(Heat(V))
    stepper = parabolic.ImplicitEuler(Heat(V))
    # stepper = parabolic.Trapezoidal(Heat(V))

    # step
    t = 0.0
    dt = 1.0e-3
    with XDMFFile('heat.xdmf') as xf:
        xf.write(u1, t)
        for _ in range(10):
            u1.assign(stepper.step(u0, t, dt))
            u0.assign(u1)
            t += dt
            xf.write(u1, t)
    return
Example #16
class Solver(object):

    """
    First order FEM with CR elements.

    Nonconforming CR elements give lower bounds for eigenvalues
    after appropriate postprocessing is applied.
    """

    def __init__(self, mesh, num, denom, method="CR"):
        """ Assemble matrices and cache matrices. """
        self.V = FunctionSpace(mesh, method, 1)
        # save coordinates of DOFs
        self.dofs = np.reshape(self.V.dofmap().tabulate_all_coordinates(mesh),
                               (-1, 2))
        u = TrialFunction(self.V)
        v = TestFunction(self.V)
        self.boundary = MeshFunction("size_t", mesh, 1)
        # assemble matrices
        a = inner(grad(u), grad(v)) * dx
        b = u * v * dx
        self.A = uBLASSparseMatrix()
        self.A = assemble(a, tensor=self.A)
        self.A.compress()
        self.B = uBLASSparseMatrix()
        self.B = assemble(b, tensor=self.B)
        self.B.compress()
        size = mesh.size(2)
        # cell diameter calculation
        self.H2 = (num ** 2 + denom ** 2) * 1.0 / size
        # print "Theoretical cell diameter: ", sqrt(self.H2)
        # print "FEniCS calculated: ", mesh.hmax()

        # Matrices have rational entries. We can rescale to get integers
        # and save as scipy matrices
        scaleB = 6.0*size/num/denom
        self.scaleB = scaleB
        self.B *= scaleB
        r, c, val = self.B.data()
        val = np.round(val)
        self.B = sps.csr_matrix((val, c, r))
        # check if B is diagonal
        assert len(self.B.diagonal()) == self.B.nnz
        # find B inverse
        self.Binv = sps.csr_matrix((1.0/val, c, r))
        scaleA = 1.0*num*denom/2
        self.scaleA = scaleA
        self.A *= scaleA
        r, c, val = self.A.data()
        self.A = sps.csr_matrix((np.round(val), c, r))

        self.scale = scaleA/scaleB
        print 'scaling A by: ', scaleA
        print 'scaling B by: ', scaleB
        print 'eigenvalues scale by:', self.scale

    def solve(self, dirichlet=lambda x: False, plotted=None):
        """
        Find eigenvalues given a boundary condition function.

        dirichlet is a function returning True if x is on Dirichlet BC.
        """
        self.boundary.set_all(0)
        d = Dirichlet()
        d.init(dirichlet)
        d.mark(self.boundary, 1)
        if plotted is not None:
            plotted.plot(self.boundary)
        #    plotted.write_png()

        # indices for non-Dirichlet rows/columns
        indices = np.nonzero(np.apply_along_axis(
            lambda x: not dirichlet(x), 1, self.dofs))[0]
        # remove Dirichlet rows and columns from A
        self.AA = (self.A[indices, :]).tocsc()[:, indices]
        # remove Dirichlet rows and columns from B
        self.BB = (self.B[indices, :]).tocsc()[:, indices]
        self.BBinv = (self.Binv[indices, :]).tocsc()[:, indices]
        # solve using scipy
        eigs, eigfs = ssl.eigsh(self.AA, k=2, M=self.BB, sigma=0, which='LM')
        self.raweigs = [eigs[0], eigs[1]]
        eig = eigs[0]
        # turn eigf into a function
        eigf = np.array(eigfs[:, 0]).flatten()
        # we were solving only for some dofs, rest is 0
        # u = Function(self.V)
        # u.vector()[:] = 0
        # u.vector()[indices] = eigf
        # find algebraic residual
        # both L2 norm of u and B norm of eigf should equal 1
        # print assemble(u*u*dx), self.BB.dot(eigf).dot(eigf)
        res = self.AA.dot(eigf) - eig * self.BB.dot(eigf)
        resnorm = np.sqrt(self.BBinv.dot(res).dot(res))
        self.residual = [resnorm, 0]
        # apply Carstensen-Gedicke transformations to eig
        #
        # kappa^2 less than 0.1932
        eig = (eig - resnorm) / (1 + 0.1932 * (eig - resnorm) *
                                 self.H2 / self.scale)
        # scale back the eigenvalue
        eig = eig/self.scale
        # find residual for the second eigenvalue (for gap calculations)
        eigf = np.array(eigfs[:, 1]).flatten()
        res = self.AA.dot(eigf) - eigs[1] * self.BB.dot(eigf)
        resnorm = np.sqrt(self.BBinv.dot(res).dot(res))
        self.residual[1] = resnorm
        # return (eig, u)  # pair (eigenvalue,eigenfunctions)
        return (eig, None)  # pair (eigenvalue,eigenfunctions)
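
A hedged usage sketch (an assumption, not from the source: num and denom are taken to be the integer side lengths of the triangulated rectangle, consistent with the H2 formula in __init__):

solver = Solver(mesh, num, denom, method="CR")
eig, _ = solver.solve(dirichlet=lambda x: x[0] < 1e-12)  # Dirichlet on the left edge
print 'lower bound for the first eigenvalue:', eig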
Example #17
    def solve(self):
        """ Find eigenvalues for transformed mesh. """
        self.progress("Building mesh.")
        # build transformed mesh
        mesh = self.refineMesh()
        # dim = mesh.topology().dim()
        if self.bcLast:
            mesh = transform_mesh(mesh, self.transformList)
            Robin, Steklov, shift, bcs = get_bc_parts(mesh, self.bcList)
        else:
            Robin, Steklov, shift, bcs = get_bc_parts(mesh, self.bcList)
            mesh = transform_mesh(mesh, self.transformList)
            # boundary conditions computed on non-transformed mesh
            # copy the values to transformed mesh
            fun = FacetFunction("size_t", mesh, shift)
            fun.array()[:] = bcs.array()[:]
            bcs = fun
        ds = Measure('ds', domain=mesh, subdomain_data=bcs)
        V = FunctionSpace(mesh, self.method, self.deg)
        u = TrialFunction(V)
        v = TestFunction(V)
        self.progress("Assembling matrices.")
        wTop = Expression(self.wTop)
        wBottom = Expression(self.wBottom)

        #
        # build stiffness matrix form
        #
        s = dot(grad(u), grad(v)) * wTop * dx
        # add Robin parts
        for bc in Robin:
            s += Constant(bc.parValue) * u * v * wTop * ds(bc.value + shift)

        #
        # build mass matrix form
        #
        if len(Steklov) > 0:
            m = 0
            for bc in Steklov:
                m += Constant(
                    bc.parValue) * u * v * wBottom * ds(bc.value + shift)
        else:
            m = u * v * wBottom * dx

        # assemble
        # if USE_EIGEN:
        #     S, M = EigenMatrix(), EigenMatrix()
        # tempv = EigenVector()
        # else:
        S, M = PETScMatrix(), PETScMatrix()
        # tempv = PETScVector()

        if not np.any(bcs.array() == shift + 1):
            # no Dirichlet parts
            assemble(s, tensor=S)
            assemble(m, tensor=M)
        else:
            #
            # with EIGEN we could
            #   apply Dirichlet condition symmetrically
            #   completely remove rows and columns
            #
            # Dirichlet parts are marked with shift+1
            #
            # temp = Constant(0)*v*dx
            bc = DirichletBC(V, Constant(0.0), bcs, shift + 1)
            # assemble_system(s, temp, bc, A_tensor=S, b_tensor=tempv)
            # assemble_system(m, temp, bc, A_tensor=M, b_tensor=tempv)
            assemble(s, tensor=S)
            bc.apply(S)
            assemble(m, tensor=M)
            # bc.zero(M)

        # if USE_EIGEN:
        #    M = M.sparray()
        #    M.eliminate_zeros()
        #    print M.shape
        #    indices = M.indptr[:-1] - M.indptr[1:] < 0
        #    M = M[indices, :].tocsc()[:, indices]
        #    S = S.sparray()[indices, :].tocsc()[:, indices]
        #    print M.shape
        #
        # solve the eigenvalue problem
        #
        self.progress("Solving eigenvalue problem.")
        eigensolver = SLEPcEigenSolver(S, M)
        eigensolver.parameters["problem_type"] = "gen_hermitian"
        eigensolver.parameters["solver"] = "krylov-schur"
        if self.target is not None:
            eigensolver.parameters["spectrum"] = "target real"
            eigensolver.parameters["spectral_shift"] = self.target
        else:
            eigensolver.parameters["spectrum"] = "smallest magnitude"
            eigensolver.parameters["spectral_shift"] = -0.01
        eigensolver.parameters["spectral_transform"] = "shift-and-invert"
        eigensolver.solve(self.number)
        self.progress("Generating eigenfunctions.")
        if eigensolver.get_number_converged() == 0:
            return None
        eigf = []
        eigv = []
        if self.deg > 1:
            mesh = refine(mesh)
        W = FunctionSpace(mesh, 'CG', 1)
        for i in range(eigensolver.get_number_converged()):
            pair = eigensolver.get_eigenpair(i)[::2]
            eigv.append(pair[0])
            u = Function(V)
            u.vector()[:] = pair[1]
            eigf.append(interpolate(u, W))
        return eigv, eigf
def dphi_dt_print(mesh, phi, phi0, A_0, c, p, Temp, Omega, rho_solid):
    DG0 = FunctionSpace(mesh, "DG", 0)
    A_s = A_s_cal(phi, phi0, A_0)
    R_c = R_c_cal(c, p, Temp)
    porosity_from_chemical_expr = R_c * A_s / Omega / rho_solid
    return project(porosity_from_chemical_expr, DG0)
Example #19
def test_fenics_vector_inner():
    mesh = UnitSquare(3, 3)
    fs = FunctionSpace(mesh, "CG", 1)
    vec = FEniCSVector(Function(fs))
    vec.coeffs = np.array(range(fs.dim()))
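    # 16 CG1 dofs on UnitSquare(3, 3); sum of squares 0**2 + 1**2 + ... + 15**2 = 1240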
    assert_equal(vec.__inner__(vec), 1240)
Example #20
        #        return x[0] < 0.4

        #Left().mark(mf, 1)
        cell_domains = CellFunction("size_t", mesh)
        cell_domains.set_all(0)
        subdomains = AutoSubDomain(lambda x: x[0] < 0.5)
        subdomains.mark(cell_domains, 1)

        if MPI.size(mpi_comm_world()) == 1:
            submesh = SubMesh(mesh, cell_domains, 1)
        else:
            submesh = create_submesh(mesh, cell_domains, 1)

        #MPI.barrier(mpi_comm_world())
        #continue
        V = FunctionSpace(submesh, "CG", 2)
        expr = Expression("x[0]*x[1]*x[1]+4*x[2]", degree=2)
        u = project(expr, V)

        MPI.barrier(mpi_comm_world())

        s0 = submesh.size_global(0)
        s3 = submesh.size_global(submesh.ufl_cell().topological_dimension())
        a = assemble(u * dx)
        v = assemble(Constant(1) * dx(domain=submesh))
        if MPI.rank(mpi_comm_world()) == 0:
            print "Num vertices: ", s0
            print "Num cells: ", s3
            print "assemble(u*dx): ", a
            print "Volume: ", v
        #u = Function(V)
def to_dg0(mesh, subdomains, boundaries, p):
    PM = FunctionSpace(mesh, 'DG', 0)
    return project(p, PM)
def test_poisson(k):
    # Polynomial order and mesh resolution
    nx_list = [4, 8, 16]

    # Error list
    error_u_l2, error_u_h1 = [], []

    for nx in nx_list:
        mesh = UnitSquareMesh(nx, nx)

        # Define FunctionSpaces and functions
        V = FunctionSpace(mesh, "DG", k)
        Vbar = FunctionSpace(mesh,
                             FiniteElement("CG", mesh.ufl_cell(), k)["facet"])

        u_soln = Expression("sin(pi*x[0])*sin(pi*x[1])",
                            degree=k + 1,
                            domain=mesh)
        f = Expression("2*pi*pi*sin(pi*x[0])*sin(pi*x[1])", degree=k + 1)
        u, v = Function(V), TestFunction(V)
        ubar, vbar = Function(Vbar), TestFunction(Vbar)

        n = FacetNormal(mesh)
        h = CellDiameter(mesh)
        alpha = Constant(6 * k * k)
        penalty = alpha / h

        def facet_integral(integrand):
            return integrand('-') * dS + integrand('+') * dS + integrand * ds

        u_flux = ubar
        F_v_flux = grad(u) + penalty * outer(u_flux - u, n)

        residual_local = inner(grad(u), grad(v)) * dx
        residual_local += facet_integral(inner(outer(u_flux - u, n), grad(v)))
        residual_local -= facet_integral(inner(F_v_flux, outer(v, n)))
        residual_local -= f * v * dx

        residual_global = facet_integral(inner(F_v_flux, outer(vbar, n)))

        a_ll = derivative(residual_local, u)
        a_lg = derivative(residual_local, ubar)
        a_gl = derivative(residual_global, u)
        a_gg = derivative(residual_global, ubar)

        l_l = -residual_local
        l_g = -residual_global

        bcs = [DirichletBC(Vbar, u_soln, "on_boundary")]

        # Initialize static condensation assembler
        assembler = AssemblerStaticCondensation(a_ll, a_lg, a_gl, a_gg, l_l,
                                                l_g, bcs)

        A_g, b_g = PETScMatrix(), PETScVector()
        assembler.assemble_global_lhs(A_g)
        assembler.assemble_global_rhs(b_g)

        for bc in bcs:
            bc.apply(A_g, b_g)

        solver = PETScKrylovSolver()
        solver.set_operator(A_g)
        PETScOptions.set("ksp_type", "preonly")
        PETScOptions.set("pc_type", "lu")
        PETScOptions.set("pc_factor_mat_solver_type", "mumps")
        solver.set_from_options()

        solver.solve(ubar.vector(), b_g)
        assembler.backsubstitute(ubar._cpp_object, u._cpp_object)

        # Compute L2 and H1 norms
        e_u_l2 = assemble((u - u_soln)**2 * dx)**0.5
        e_u_h1 = assemble(grad(u - u_soln)**2 * dx)**0.5

        if mesh.mpi_comm().rank == 0:
            error_u_l2.append(e_u_l2)
            error_u_h1.append(e_u_h1)

    if mesh.mpi_comm().rank == 0:
        iterator_list = [1.0 / float(nx) for nx in nx_list]
        conv_u_l2 = compute_convergence(iterator_list, error_u_l2)
        conv_u_h1 = compute_convergence(iterator_list, error_u_h1)

        # Optimal rate of k + 1 - tolerance
        assert np.all(conv_u_l2 >= (k + 1.0 - 0.15))
        # Optimal rate of k - tolerance
        assert np.all(conv_u_h1 >= (k - 0.1))
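
The test relies on an external helper compute_convergence; a minimal sketch of what such a helper might compute (an assumption, not the project's implementation):

def compute_convergence(h_list, error_list):
    # observed rate between successive refinements:
    #   rate_i = log(e_i / e_{i+1}) / log(h_i / h_{i+1})
    h = np.asarray(h_list, dtype=float)
    e = np.asarray(error_list, dtype=float)
    return np.log(e[:-1] / e[1:]) / np.log(h[:-1] / h[1:])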
Example #23
            def u_numpy(t, x, y):
                return 0.5 * (t**2+1) * np.sin(2*x*pi) * np.sin(2*y*pi)

            def u_T_numpy(x, y):
                return u_numpy(T[1], x, y)


            # Define right-hand side
            u_x = u(t_, x, y_)
            by_x = by(t_, x, y_)

            f = diff(u_x, t_) + c * u_x + inner(bx, grad(u_x)) + \
                by_x * diff(u_x, y_) + inner(ax, grad(grad(u_x)))

        # We have to use at least quadratic polynomials here
        Vx = FunctionSpace(mx, 'CG', 2)
        Vy = FunctionSpace(my, 'CG', 1)

        phi = TrialFunction(Vx)
        psi = TestFunction(Vx)

        v = Function(Vx)

        gamma = 1.0

        # Jump penalty term
        stab1 = 2.

        nE = FacetNormal(mx)
        hE = FacetArea(mx)
def RunJob(Tb, mu_value, path):
    runtimeInit = clock()

    tfile = File(path + '/t6t.pvd')
    mufile = File(path + "/mu.pvd")
    ufile = File(path + '/velocity.pvd')
    gradpfile = File(path + '/gradp.pvd')
    pfile = File(path + '/pstar.pvd')
    parameters = open(path + '/parameters', 'w', 0)
    vmeltfile = File(path + '/vmelt.pvd')
    rhofile = File(path + '/rhosolid.pvd')

    for name in dir():
        ev = str(eval(name))
        if name[0] != '_' and ev[0] != '<':
            parameters.write(name + ' = ' + ev + '\n')

    temp_values = [27. + 273, Tb + 273, 1300. + 273, 1305. + 273]
    dTemp = temp_values[3] - temp_values[0]
    temp_values = [x / dTemp for x in temp_values]  # non dimensionalising temp

    mu_a = mu_value  # this was taken from the blankenbach paper, can change..
    
    Ep = b / dTemp

    mu_bot = exp(-Ep * (temp_values[3] * dTemp - 1573) + cc) * mu_a

    Ra = rho_0 * alpha * g * dTemp * h**3 / (kappa_0 * mu_a)
    w0 = rho_0 * alpha * g * dTemp * h**2 / mu_a
    tau = h / w0
    p0 = mu_a * w0 / h

    print(mu_a, mu_bot, Ra, w0, p0)

    vslipx = 1.6e-09 / w0
    vslip = Constant((vslipx, 0.0))  # nondimensional
    noslip = Constant((0.0, 0.0))

    dt = 3.E11 / tau
    tEnd = 3.E13 / tau  # non-dimensionalising times

    class PeriodicBoundary(SubDomain):
        def inside(self, x, on_boundary):
            return left(x, on_boundary)

        def map(self, x, y):
            y[0] = x[0] - MeshWidth
            y[1] = x[1]

    pbc = PeriodicBoundary()

    class TempExp(Expression):
        def eval(self, value, x):
            if x[1] >= LAB(x):
                value[0] = temp_values[0] + (temp_values[1] - temp_values[0]) * (MeshHeight - x[1]) / (MeshHeight - LAB(x))
            else:
                value[0] = temp_values[3] - (temp_values[3] - temp_values[2]) * (x[1]) / (LAB(x))

    class FluidTemp(Expression):
        def eval(self, value, x):
            if value[0] < 1295:
                value[0] = 1295

    mesh = RectangleMesh(Point(0.0, 0.0), Point(MeshWidth, MeshHeight), nx, ny)

    Svel = VectorFunctionSpace(mesh, 'CG', 2, constrained_domain=pbc)
    Spre = FunctionSpace(mesh, 'CG', 1, constrained_domain=pbc)
    Stemp = FunctionSpace(mesh, 'CG', 1, constrained_domain=pbc)
    Smu = FunctionSpace(mesh, 'CG', 1, constrained_domain=pbc)
    Sgradp = VectorFunctionSpace(mesh, 'CG', 2, constrained_domain=pbc)
    Srho = FunctionSpace(mesh, 'CG', 1, constrained_domain=pbc)
    S0 = MixedFunctionSpace([Svel, Spre, Stemp])

    u = Function(S0)
    v, p, T = split(u)
    v_t, p_t, T_t = TestFunctions(S0)

    T0 = interpolate(TempExp(), Stemp)

    muExp = Expression('exp(-Ep * (T_val * dTemp - 1573) + cc * x[2] / meshHeight)', Smu.ufl_element(),
                        Ep=Ep, dTemp=dTemp, cc=cc, meshHeight=MeshHeight, T_val=T0)

    mu = interpolate(muExp, Smu)

    rhosolid = Function(Srho)
    deltarho = Function(Srho)

    v0 = Function(Svel)
    vmelt = Function(Svel)

    v_theta = (1. - theta)*v0 + theta*v

    T_theta = (1. - theta)*T + theta*T0

    r_v = (inner(sym(grad(v_t)), 2.*mu*sym(grad(v))) \
        - div(v_t)*p \
        - T*v_t[1] )*dx

    r_p = p_t*div(v)*dx

    r_T = (T_t*((T - T0) \
        + dt*inner(v_theta, grad(T_theta))) \
        + (dt/Ra)*inner(grad(T_t), grad(T_theta)) )*dx
#           + k_s*(Tf-T_theta)*dt

    Tf = T0.interpolate(FluidTemp())
    # Tf = T0.interpolate(Expression('value[0] >= 1295.0 ? value[0] : 1295.0'))

    # Tf.interpolate(Expression('value[0] >= 1295 ? value[0] : 1295'))
    # project(Expression('value[0] >= 1295 ? value[0] : 1295'), Tf)
# Alex, a question for you:
# can you see if there is a way to set Tf = T in regions where T >=1295 celsius
#
# 1295 celsius is my arbitrary choice for the LAB isotherm.  In regions
# where T < 1295 C, set Tf to be some constant for now, such as 1295 C.
# Once we do this, then we can add in a term like that last line above where
# it will only be non-zero when the solid temperature, T, is cooler than 1295
# can you do this? After this is done, we will then worry about a calculation
# where we solve for Tf as a function of time in the regions cooler than 1295 C
# Makes sense?  If not, we can skype soon -- email me with questions
# 3/19/16
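# A possible sketch (an assumption, not part of this script): UFL's conditional()
# can express "Tf = T wherever T >= T_lab, else T_lab", e.g.
#     T_lab = Constant(1295.0)   # hypothetical cutoff; rescale to match the
#                                # non-dimensionalisation applied to T0 above
#     Tf = project(conditional(ge(T0, T_lab), T0, T_lab), Stemp)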
    r = r_v + r_p + r_T

    bcv0 = DirichletBC(S0.sub(0), noslip, top)
    bcv1 = DirichletBC(S0.sub(0), vslip, bottom)
    bcp0 = DirichletBC(S0.sub(1), Constant(0.0), bottom)
    bct0 = DirichletBC(S0.sub(2), Constant(temp_values[0]), top)
    bct1 = DirichletBC(S0.sub(2), Constant(temp_values[3]), bottom)

    bcs = [bcv0, bcv1, bcp0, bct0, bct1]

    t = 0
    count = 0
    while (t < tEnd):
        solve(r == 0, u, bcs)
        t += dt
        nV, nP, nT = u.split()
        gp = grad(nP)
        rhosolid = rho_0 * (1 - alpha * (nT * dTemp - 1573))
        deltarho = rhosolid - rhomelt
        yvec = Constant((0.0, 1.0))
        vmelt = nV * w0 - darcy * (gp * p0 / h - deltarho * yvec * g)
        if (count % 100 == 0):
            pfile << nP
            ufile << nV
            tfile << nT
            mufile << mu
            gradpfile << project(grad(nP), Sgradp)
            mufile << project(mu * mu_a, Smu)
            rhofile << project(rhosolid, Srho)
            vmeltfile << project(vmelt, Svel)
        count += 1
        assign(T0, nT)
        assign(v0, nV)
        mu.interpolate(muExp)

    print('Case mu=%g, Tb=%g complete.' % (mu_a, Tb), ' Run time =', clock() - runtimeInit, 's')
def test_eim_approximation_10(expression_type, basis_generation):
    """
    This test is an extension of test 01.
    The aim of this script is to test the detection of time dependent parametrized expression.
    * EIM: test the case when the expression to be interpolated is time dependent.
    * DEIM: test interpolation of form with a time dependent integrand function.
    """
    class MockTimeDependentProblem(ParametrizedProblem):
        def __init__(self, V, **kwargs):
            ParametrizedProblem.__init__(self, "")
            self.V = V

            # Minimal subset of a time dependent ParametrizedDifferentialProblem
            self.t0 = 0.
            self.t = 0.
            self.dt = 0.
            self.T = 0.

        def name(self):
            return "MockTimeDependentProblem_10_" + expression_type + "_" + basis_generation

        def set_initial_time(self, t0):
            self.t0 = t0

        def set_time(self, t):
            self.t = t

        def set_time_step_size(self, dt):
            self.dt = dt

        def set_final_time(self, T):
            self.T = T

    class ParametrizedFunctionApproximation(TimeDependentEIMApproximation):
        def __init__(self, V, expression_type, basis_generation):
            self.V = V
            # Parametrized function to be interpolated
            mock_time_dependent_problem = MockTimeDependentProblem(V)
            f = ParametrizedExpression(
                mock_time_dependent_problem,
                "(1-x[0])*cos(3*pi*(1+t)*(1+x[0]))*exp(-(1+t)*(1+x[0]))",
                mu=(),
                t=0.,
                element=V.ufl_element())
            #
            folder_prefix = os.path.join("test_eim_approximation_10_tempdir",
                                         expression_type, basis_generation)
            assert expression_type in ("Function", "Vector", "Matrix")
            if expression_type == "Function":
                # Call Parent constructor
                TimeDependentEIMApproximation.__init__(
                    self, mock_time_dependent_problem,
                    ParametrizedExpressionFactory(f), folder_prefix,
                    basis_generation)
            elif expression_type == "Vector":
                v = TestFunction(V)
                form = f * v * dx
                # Call Parent constructor
                TimeDependentEIMApproximation.__init__(
                    self, mock_time_dependent_problem,
                    ParametrizedTensorFactory(form), folder_prefix,
                    basis_generation)
            elif expression_type == "Matrix":
                u = TrialFunction(V)
                v = TestFunction(V)
                form = f * u * v * dx
                # Call Parent constructor
                TimeDependentEIMApproximation.__init__(
                    self, mock_time_dependent_problem,
                    ParametrizedTensorFactory(form), folder_prefix,
                    basis_generation)
            else:  # impossible to arrive here anyway thanks to the assert
                raise AssertionError("Invalid expression_type")

    # 1. Create the mesh for this test
    mesh = IntervalMesh(100, -1., 1.)

    # 2. Create Finite Element space (Lagrange P1)
    V = FunctionSpace(mesh, "Lagrange", 1)

    # 3. Allocate an object of the ParametrizedFunctionApproximation class
    parametrized_function_approximation = ParametrizedFunctionApproximation(
        V, expression_type, basis_generation)
    mu_range = []
    parametrized_function_approximation.set_mu_range(mu_range)
    parametrized_function_approximation.set_time_step_size(1.e-10)
    parametrized_function_approximation.set_final_time(pi - 1)

    # 4. Prepare reduction with EIM
    parametrized_function_reduction_method = TimeDependentEIMApproximationReductionMethod(
        parametrized_function_approximation)
    parametrized_function_reduction_method.set_Nmax(30)
    parametrized_function_reduction_method.set_tolerance(0.)

    # 5. Perform the offline phase
    parametrized_function_reduction_method.initialize_training_set(
        51, time_sampling=EquispacedDistribution())
    reduced_parametrized_function_approximation = parametrized_function_reduction_method.offline(
    )

    # 6. Perform an online solve
    online_mu = ()
    online_t = 0.
    reduced_parametrized_function_approximation.set_mu(online_mu)
    reduced_parametrized_function_approximation.set_time(online_t)
    reduced_parametrized_function_approximation.solve()

    # 7. Perform an error analysis
    parametrized_function_reduction_method.initialize_testing_set(100)
    parametrized_function_reduction_method.error_analysis()
def cal_delta_pm(mesh, p, p1):
    DG0 = FunctionSpace(mesh, "DG", 0)
    return_p = Function(DG0)
    return_p.vector()[:] = p.vector()[:] - p1.vector()[:]
    return return_p
Example #27
def visualize_form(A, b, c):
    '''
    Study the quadratic form phi(x) = 0.5*x*A*x - b*x + c
        - definiteness
        - extrema
        - constraints
    '''

    # Input check
    assert isinstance(A, np.ndarray)
    assert A.shape == (2, 2)
    assert isinstance(b, np.ndarray)
    assert b.shape == (2, )
    assert isinstance(c, (float, int))

    # Decide symmetry
    is_sym = la.norm(A - A.T) < _TOL_
    is_skew = la.norm(A + A.T) < _TOL_

    print 'A is symmetric', is_sym, 'A is skew', is_skew

    # Decide definitness from eigenvalues
    vals, vecs = la.eig(A)
    vecs = vecs.T        # Eigenvectors are now rows, easier to iterate over
    print 'A eigenvalues', vals
    print 'A eigenvectors', vecs[0], vecs[1]

    is_singular, is_posdef, is_negdef, is_indef = False, False, False, False
    # Don't know about definiteness of forms from A with complex eigenvalues
    if not vals.dtype == 'complex128':
        if np.any(np.abs(vals) < _TOL_):
            is_singular = True
        else:
            if np.all(vals > 0):
                is_posdef = True
            elif np.all(vals < 0):
                is_negdef = True
            elif abs(np.sum(np.sign(vals))) < _TOL_:
                is_indef = True

        print 'A is singular', is_singular,\
              'A is posdef', is_posdef,\
              'A is negdef', is_negdef,\
              'A is indef', is_indef

    # lambda for computing the quadratic form value, and the gradient
    phi = lambda x: 0.5*x.dot(A.dot(x)) - b.dot(x) + c
    grad_phi = lambda x: A.dot(x) - b

    # Width for grids
    w = 3

    # If the system is not singular we can solve Ax = b directly
    # and use x to decide the visualization grid width
    if not is_singular:
        if is_sym:
            x = la.solve(A, b)
        elif not is_skew:
            # If the system is not symmetric then the minimum is sought
            # via the `symmetrized` system
            x = la.solve(0.5*(A + A.T), b)
        else:
            # For the skew system the above gives the zero matrix and the gradient
            # is just b -- the quadratic surface is just a plane perpendicular to b
            x = b

    else:
        # For the singular system only constrained minimization makes sense
        # The functional is modified to include the constraint that x is
        # perpendicular to the nullspace vector

        # Get eigenvalues and eigenvectors of A
        # Find the eigenvector corresponding to 0. Is in the nullspace
        z = None
        for vec in vecs:
            if abs(vec.dot(A.dot(vec.T))) < _TOL_:
                z = vec

        # Build the system that yields the extrema of
        # phi(x) + constraint of orthogonality to z
        AA = np.zeros((3, 3))
        AA[:2, :2] = A
        AA[2, :2] = z
        AA[:2, 2] = z.T

        bb = np.zeros(3)
        bb[:2] = b

        xx = la.solve(AA, bb)

        # Extract x and the multiplier
        x = xx[:2]
        lmbda = xx[-1]
        print 'Multiplier', lmbda

        # lambda for computing surface whose contours are lines that satisfy
        # the constraint
        psi = lambda x: x.dot(z)

        # These are points on the line that satisfies the constraint and passes
        # through the solution
        cs = lambda s: x + np.array([z[1], -z[0]])*s

        # Line perpendicular to cs
        cs_perp = lambda s: x + z*s

        # Chi is the constrained functional whose value we want to plot
        chi = lambda x: phi(x[:2]) + x[-1]*psi(x[:2])

        mesh = BoxMesh(x[0]-w, x[1]-w, lmbda-w, x[0]+w, x[1]+w, lmbda+w,
                       10, 10, 10)
        V = FunctionSpace(mesh, 'CG', 1)
        dofs_x = V.dofmap().tabulate_all_coordinates(mesh).reshape((-1, 3))

        values = np.array([chi(dof_x) for dof_x in dofs_x])

        # The plot should show that the solution has chosen a plane at a certain
        # height (= the Lagrange multiplier) whose normal is z
        chi_f = Function(V)
        chi_f.vector().set_local(values)
        chi_f.vector().apply('')
        plot(chi_f, interactive=True)

        print 'Saved constrained functional to chi.pvd'
        File('chi.pvd') << chi_f

        # Generate data for plot showing the quadratic function values over
        # cs line
        s_array = np.linspace(-2, 2, 1000)
        PHI_cs = np.zeros_like(s_array)
        for i, s in enumerate(s_array):
            P = cs(s)
            PHI_cs[i] = phi(P)

        # Add three contour plot x, y @ a*
        X, Y = np.meshgrid(np.linspace(x[0]-w, x[0]+w, 100),
                           np.linspace(x[1]-w, x[1]+w, 100))
        PHI_a = np.zeros_like(X)
        for i in range(PHI_a.shape[0]):
            for j in range(PHI_a.shape[1]):
                P = np.array([X[i, j], Y[i, j], lmbda])
                PHI_a[i, j] = chi(P)

        plt.figure()
        plt.suptitle('lambda constant')
        plt.pcolor(X, Y, PHI_a)
        plt.contour(X, Y, PHI_a, 10, colors='k')
        plt.plot(x[0], x[1], 'rx')

        # #                        x, a @ y* eval chi
        X, LMBDA = np.meshgrid(np.linspace(x[0]-w, x[0]+w, 100),
                               np.linspace(lmbda-w, lmbda+w, 100))
        PHI_y = np.zeros_like(X)
        for i in range(PHI_y.shape[0]):
            for j in range(PHI_y.shape[1]):
                P = np.array([X[i, j], x[1], LMBDA[i, j]])
                PHI_y[i, j] = chi(P)

        plt.figure()
        plt.suptitle('x[1] constant')
        plt.pcolor(X, LMBDA, PHI_y)
        plt.contour(X, LMBDA, PHI_y, 10, colors='k')
        plt.plot(x[0], lmbda, 'rx')

        #                        y, a @ x*
        Y, LMBDA = np.meshgrid(np.linspace(x[1]-w, x[1]+w, 100),
                               np.linspace(lmbda-w, lmbda+w, 100))
        PHI_x = np.zeros_like(Y)
        for i in range(PHI_x.shape[0]):
            for j in range(PHI_x.shape[1]):
                P = np.array([x[0], Y[i, j], LMBDA[i, j]])
                PHI_x[i, j] = chi(P)

        plt.figure()
        plt.suptitle('x[0] constant')
        plt.pcolor(Y, LMBDA, PHI_x)
        plt.contour(Y, LMBDA, PHI_x, 10, colors='k')
        plt.plot(x[1], lmbda, 'rx')

        # Slice through selected nullspace line and perpr to it
        s = np.linspace(-2, 2, 100)
        S, T = np.meshgrid(s, s)
        Z = np.zeros_like(S)
        Z_perp = np.zeros_like(S)
        for i in range(len(s)):
            for j in range(len(s)):
                P = np.r_[cs(s[j]), lmbda+s[i]]
                # print P, psi(cs(s[j])), phi(cs(s[j])), chi(cs(s[j]))
                # On the line phi is chi!
                Z[i, j] = chi(P)

                P = np.r_[cs_perp(s[j]), lmbda+s[i]]
                Z_perp[i, j] = chi(P)

        plt.figure()
        plt.suptitle('Nullspace slice')
        pcolor = plt.pcolor(S, T, Z)
        plt.colorbar(pcolor)
        plt.contour(S, T, Z, 10, colors='k')

        plt.figure()
        plt.suptitle('Nullspace perp. slice')
        plt.pcolor(S, T, Z_perp)
        plt.contour(S, T, Z_perp, 10, colors='k')


    print 'Potential extremum', x

    X, Y = np.meshgrid(np.linspace(x[0]-w, x[0]+w, 100),
                       np.linspace(x[1]-w, x[1]+w, 100))

    PHI = np.zeros_like(X)
    GRAD_PHI = np.zeros((X.shape[0], X.shape[1], 2))

    # Generate values of the quadratic function and its gradient
    for i in range(PHI.shape[0]):
        for j in range(PHI.shape[1]):
            P = np.array([X[i, j], Y[i, j]])
            PHI[i, j] = phi(P)
            GRAD_PHI[i, j, :] = grad_phi(P)

    # Plot the quadratic form as 3d + the extreme point
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(X, Y, PHI)
    ax.plot([x[0]], [x[1]], [phi(x)], 'rx')
    # ------------------------------------------------------------------------

    # Plot contours of the quadratic function
    plt.figure()
    plt.pcolor(X, Y, PHI)
    plt.contour(X, Y, PHI, 10, colors='k')
    plt.plot(x[0], x[1], 'rx')

    # Plot eigenvectors
    e0 = x + vecs[0]
    plt.plot(np.linspace(x[0], e0[0], 10), np.linspace(x[1], e0[1], 10), 'k')
    plt.text(0.5*x[0] + 0.5*e0[0], 0.5*x[1] + 0.5*e0[1], str(vals[0]))
    e1 = x + vecs[1]
    plt.plot(np.linspace(x[0], e1[0], 10), np.linspace(x[1], e1[1], 10), 'k')
    plt.text(0.5*x[0] + 0.5*e1[0], 0.5*x[1] + 0.5*e1[1], str(vals[1]))

    # For singular system add 'constraint lines'
    if is_singular:
        PSI = np.zeros_like(X)
        for i in range(PSI.shape[0]):
            for j in range(PSI.shape[1]):
                P = np.array([X[i, j], Y[i, j]])
                PSI[i, j] = psi(P)

        plt.contour(X, Y, PSI, 10, colors='g')
    # ------------------------------------------------------------------------

    # Finally plot curves tangent to the gradient (better for visualization than
    # quiver)
    plt.figure()
    plt.streamplot(X, Y, GRAD_PHI[:, :, 0], GRAD_PHI[:, :, 1])
    plt.plot(x[0], x[1], 'rx')
    if is_singular:
        plt.contour(X, Y, PSI, 10, colors='g')
    # ------------------------------------------------------------------------

    # Add the plot of phi(cs) to check that found point is the extremum
    if is_singular:
        plt.figure()
        plt.plot(s_array, PHI_cs, 'b')
        plt.plot([0], [phi(cs(0))], 'rx')

        plt.figure()
        plt.plot(s_array, np.array([chi(np.array([x[0], x[1], s])) for s
                                    in s_array]), label='via min')
        plt.plot(s_array, np.array([chi(np.array([x[0]+0.01, x[1]-0.01, s])) for s
                                    in s_array]), label='no min')
        plt.legend()
    # ------------------------------------------------------------------------

    plt.show()

    # For positive definite and negative definite forms check that we found
    # minima and maxima respectively
    # Something similar could be done for cs with singular system but the plot
    # tells the same story
    if is_posdef or is_negdef or is_indef:
        phi_s = np.array([PHI[random_index(PHI.shape[0], PHI.shape[1])]
                          for i in range(1000)])
        phi_s -= phi(x)

        if is_posdef:
            assert np.all(phi_s > 0)
        elif is_negdef:
            assert np.all(phi_s < 0)
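
A hedged usage sketch (illustrative values, not from the source): for a symmetric positive definite A the stationary point solves A x = b, here x = (0.5, 1.0).

A_demo = np.array([[2.0, 0.0], [0.0, 1.0]])
b_demo = np.array([1.0, 1.0])
visualize_form(A_demo, b_demo, 0.0)   # minimum of 0.5*x*A*x - b*x + c at x = (0.5, 1.0)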
def porosity_update_from_flow(mesh, phi_1, alpha, K, p, p_1):
    DG0 = FunctionSpace(mesh, "DG", 0)
    porosity_from_flow_expr = phi_1 + (alpha - phi_1) / K * (p - p_1)
    porosity_from_flow = project(porosity_from_flow_expr, DG0)
    return porosity_from_flow
Example #29
    def __init__(self):
        import os
        from dolfin import Mesh, MeshFunction, SubMesh, SubDomain, \
            FacetFunction, DirichletBC, dot, grad, FunctionSpace, \
            MixedFunctionSpace, Expression, FacetNormal, pi, Function, \
            Constant, TestFunction, MPI, mpi_comm_world, File
        import numpy
        import warnings

        from maelstrom import heat_cylindrical as cyl_heat
        from maelstrom import materials_database as md

        GMSH_EPS = 1.0e-15

        current_path = os.path.dirname(os.path.realpath(__file__))
        base = os.path.join(
            current_path,
            '../../meshes/2d/crucible-with-coils'
            )
        self.mesh = Mesh(base + '.xml')
        self.subdomains = MeshFunction('size_t',
                                       self.mesh,
                                       base + '_physical_region.xml'
                                       )

        self.subdomain_materials = {
            1: md.get_material('porcelain'),
            2: md.get_material('argon'),
            3: md.get_material('GaAs (solid)'),
            4: md.get_material('GaAs (liquid)'),
            27: md.get_material('air')
            }

        # coils
        for k in range(5, 27):
            self.subdomain_materials[k] = md.get_material('graphite EK90')

        # Define the subdomains which together form a single coil.
        self.coil_domains = [
            [5, 6, 7, 8, 9],
            [10, 11, 12, 13, 14],
            [15, 16, 17, 18, 19],
            [20, 21, 22, 23],
            [24, 25, 26]
            ]

        self.wpi = 4
        # http://fenicsproject.org/qa/2026/submesh-workaround-for-parallel-computation
        submesh_parallel_bug_fixed = False
        if submesh_parallel_bug_fixed:
            submesh_workpiece = SubMesh(self.mesh, self.subdomains, self.wpi)
        else:
            # To get the mesh in parallel, we need to read it in from a file.
            # Writing out can only happen in serial mode, though. :/
            base = os.path.join(current_path,
                                '../../meshes/2d/crucible-with-coils-submesh'
                                )
            filename = base + '.xml'
            if not os.path.isfile(filename):
                warnings.warn(
                    'Submesh file \'%s\' does not exist. Creating... '
                    % filename
                    )
                if MPI.size(mpi_comm_world()) > 1:
                    raise RuntimeError(
                        'Can only write submesh in serial mode.'
                        )
                submesh_workpiece = \
                    SubMesh(self.mesh, self.subdomains, self.wpi)
                output_stream = File(filename)
                output_stream << submesh_workpiece
            # Read the mesh
            submesh_workpiece = Mesh(base + '.xml')

        coords = submesh_workpiece.coordinates()
        ymin = min(coords[:, 1])
        ymax = max(coords[:, 1])

        # Find the top right point.
        k = numpy.argmax(numpy.sum(coords, 1))
        topright = coords[k, :]

        # Initialize mesh function for boundary domains
        class Left(SubDomain):
            def inside(self, x, on_boundary):
                # Explicitly exclude the lowest and the highest point of the
                # symmetry axis.
                # It is necessary for the consistency of the pressure-Poisson
                # system in the Navier-Stokes solver that the velocity is
                # exactly 0 at the boundary r>0. Hence, at the corner points
                # (r=0, melt-crucible, melt-crystal) we must enforce u=0
                # already and cannot have a component in z-direction.
                return on_boundary \
                    and x[0] < GMSH_EPS \
                    and x[1] < ymax - GMSH_EPS \
                    and x[1] > ymin + GMSH_EPS

        class Crucible(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary \
                    and ((x[0] > GMSH_EPS and x[1] < ymax - GMSH_EPS)
                         or (x[0] > topright[0] - GMSH_EPS
                             and x[1] > topright[1] - GMSH_EPS)
                         or (x[0] < GMSH_EPS and x[1] < ymin + GMSH_EPS)
                         )

        # At the top right part (boundary melt--gas), slip is allowed, so only
        # n.u=0 is enforced. Very weirdly, the PPE is consistent if and only if
        # the end points of UpperRight are in UpperRight. This contrasts with
        # Left(), where the end points must NOT belong to Left(). Judging from
        # the experiments, these settings do the right thing.
        # TODO try to better understand the PPE system/dolfin's boundary
        # settings
        class Upper(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary \
                    and x[1] > ymax - GMSH_EPS

        class UpperRight(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary \
                    and x[1] > ymax - GMSH_EPS \
                    and x[0] > 0.038 - GMSH_EPS

        # The crystal boundary is taken to reach up to 0.038 where the
        # Dirichlet boundary data is about the melting point of the crystal,
        # 1511K. This setting gives pretty acceptable results when there is no
        # convection except the one induced by buoyancy. If there is any more
        # stirring going on, though, the end point of the crystal with its
        # fixed temperature of 1511K might be the hottest point globally. This
        # looks rather unphysical.
        # TODO check out alternatives
        class UpperLeft(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary \
                    and x[1] > ymax - GMSH_EPS \
                    and x[0] < 0.038 + GMSH_EPS

        left = Left()
        crucible = Crucible()
        upper_left = UpperLeft()
        upper_right = UpperRight()

        self.wp_boundaries = FacetFunction('size_t', submesh_workpiece)
        self.wp_boundaries.set_all(0)
        left.mark(self.wp_boundaries, 1)
        crucible.mark(self.wp_boundaries, 2)
        upper_right.mark(self.wp_boundaries, 3)
        upper_left.mark(self.wp_boundaries, 4)

        if DEBUG:
            from dolfin import plot, interactive
            plot(self.wp_boundaries, title='Boundaries')
            interactive()

        submesh_boundary_indices = {
            'left': 1,
            'crucible': 2,
            'upper right': 3,
            'upper left': 4
            }

        # Boundary conditions for the velocity.
        #
        # [1] Incompressible flow and the finite element method; volume two;
        #     Isothermal Laminar Flow;
        #     P.M. Gresho, R.L. Sani;
        #
        # For the choice of function space, [1] says:
        #     "In 2D, the triangular elements P_2^+P_1 and P_2^+P_{-1} are very
        #      good [...]. [...] If you wish to avoid bubble functions on
        #      triangular elements, P_2P_1 is not bad, and P_2(P_1+P_0) is even
        #      better [...]."
        #
        # It turns out that adding the bubble space significantly hampers the
        # convergence of the Stokes solver and also considerably increases the
        # time it takes to construct the Jacobian matrix of the Navier--Stokes
        # problem if no optimization is applied.
        V = FunctionSpace(submesh_workpiece, 'CG', 2)
        with_bubbles = False
        if with_bubbles:
            V += FunctionSpace(submesh_workpiece, 'B', 3)
        self.W = MixedFunctionSpace([V, V, V])

        self.u_bcs = [
            DirichletBC(self.W,
                        Expression(
                            ('0.0', '0.0', '-2*pi*x[0] * 5.0/60.0'),
                            degree=1
                            ),
                        #(0.0, 0.0, 0.0),
                        crucible),
            DirichletBC(self.W.sub(0), 0.0, left),
            DirichletBC(self.W.sub(2), 0.0, left),
            # Make sure that u[2] is 0 at r=0.
            DirichletBC(self.W,
                        Expression(
                            ('0.0', '0.0', '2*pi*x[0] * 5.0/60.0'),
                            degree=1
                            ),
                        upper_left),
            DirichletBC(self.W.sub(1), 0.0, upper_right),
            ]
        self.p_bcs = []

        self.P = FunctionSpace(submesh_workpiece, 'CG', 1)

        # Boundary conditions for heat equation.
        self.Q = FunctionSpace(submesh_workpiece, 'CG', 2)
        # Dirichlet.
        # This is a bit of a tough call since the boundary conditions need to
        # be read from a Tecplot file here.
        import tecplot_reader
        filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'data/crucible-boundary.dat'
            )
        data = tecplot_reader.read(filename)
        RZ = numpy.c_[data['ZONE T']['node data']['r'],
                      data['ZONE T']['node data']['z']
                      ]
        T_vals = data['ZONE T']['node data']['temp. [K]']

        class TecplotDirichletBC(Expression):
            # TODO specify degree
            def eval(self, value, x):
                # Find on which edge x sits, and raise exception if it doesn't.
                edge_found = False
                for edge in data['ZONE T']['element data']:
                    # Given a point X and an edge X0--X1,
                    #
                    #     (1 - theta) X0 + theta X1,
                    #
                    # the minimum distance is assumed for
                    #
                    #    argmin_theta ||(1-theta) X0  + theta X1 - X||^2
                    #    = <X1 - X0, X - X0> / ||X1 - X0||^2.
                    #
                    # If the distance is 0 and 0<=theta<=1, we found the edge.
                    #
                    # Note that edges are 1-based in Tecplot.
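                    # (A standalone numpy sketch of this projection is given
                    # right after this example.)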
                    X0 = RZ[edge[0] - 1]
                    X1 = RZ[edge[1] - 1]
                    theta = numpy.dot(X1-X0, x-X0) / numpy.dot(X1-X0, X1-X0)
                    diff = (1.0-theta)*X0 + theta*X1 - x
                    if numpy.dot(diff, diff) < 1.0e-10 and \
                            0.0 <= theta and theta <= 1.0:
                        # Linear interpolation of the temperature value.
                        value[0] = (1.0-theta) * T_vals[edge[0]-1] \
                                 + theta       * T_vals[edge[1]-1]
                        edge_found = True
                        break
                # This class is supposed to be used for Dirichlet boundary
                # conditions. For some reason, FEniCS also evaluates
                # DirichletBC objects at coordinates which do not sit on the
                # boundary, see
                # <http://fenicsproject.org/qa/1033/dirichletbc-expressions-evaluated-away-from-the-boundary>.
                # The assigned values have no meaning though, so not assigning
                # values[0] here is okay.
                #
                #from matplotlib import pyplot as pp
                #pp.plot(x[0], x[1], 'xg')
                if not edge_found:
                    value[0] = 0.0
                    warnings.warn('Coordinate (%e, %e) doesn\'t sit on edge.'
                                  % (x[0], x[1]))
                    #pp.plot(RZ[:, 0], RZ[:, 1], '.k')
                    #pp.plot(x[0], x[1], 'xr')
                    #pp.show()
                    #raise RuntimeError('Input coordinate '
                    #                   '%r is not on boundary.' % x)
                return

        tecplot_dbc = TecplotDirichletBC()
        self.theta_bcs_d = [
            DirichletBC(self.Q, tecplot_dbc, upper_left)
            ]
        theta_bcs_d_strict = [
            DirichletBC(self.Q, tecplot_dbc, upper_right),
            DirichletBC(self.Q, tecplot_dbc, crucible),
            DirichletBC(self.Q, tecplot_dbc, upper_left)
            ]

        # Neumann
        dTdr_vals = data['ZONE T']['node data']['dTempdx [K/m]']
        dTdz_vals = data['ZONE T']['node data']['dTempdz [K/m]']

        class TecplotNeumannBC(Expression):
            # TODO specify degree
            def eval(self, value, x):
                # Same problem as above: This expression is not only evaluated
                # at boundaries.
                for edge in data['ZONE T']['element data']:
                    X0 = RZ[edge[0] - 1]
                    X1 = RZ[edge[1] - 1]
                    theta = numpy.dot(X1-X0, x-X0) / numpy.dot(X1-X0, X1-X0)
                    dist = numpy.linalg.norm((1-theta)*X0 + theta*X1 - x)
                    if dist < 1.0e-5 and 0.0 <= theta and theta <= 1.0:
                        value[0] = (1-theta) * dTdr_vals[edge[0]-1] \
                            + theta * dTdr_vals[edge[1]-1]
                        value[1] = (1-theta) * dTdz_vals[edge[0]-1] \
                            + theta * dTdz_vals[edge[1]-1]
                        break
                return

            def value_shape(self):
                return (2,)

        tecplot_nbc = TecplotNeumannBC()
        n = FacetNormal(self.Q.mesh())
        self.theta_bcs_n = {
            submesh_boundary_indices['upper right']: dot(n, tecplot_nbc),
            submesh_boundary_indices['crucible']: dot(n, tecplot_nbc)
            }
        self.theta_bcs_r = {}

        # It seems that the boundary conditions from above are inconsistent in
        # that solving with Dirichlet overall and mixed Dirichlet-Neumann give
        # different results; the value *cannot* correspond to one solution.
        # From looking at the solutions, the pure Dirichlet setting appears
        # correct, so extract the Neumann values directly from that solution.
        zeta = TestFunction(self.Q)

        theta_reference = Function(self.Q, name='temperature (Dirichlet)')
        theta_reference.vector()[:] = 0.0

        # Solve the *quasilinear* PDE (coefficients may depend on theta).
        # This is to avoid setting a fixed temperature for the coefficients.

        # Get material parameters
        wp_material = self.subdomain_materials[self.wpi]
        if isinstance(wp_material.specific_heat_capacity, float):
            cp = wp_material.specific_heat_capacity
        else:
            cp = wp_material.specific_heat_capacity(theta_reference)
        if isinstance(wp_material.density, float):
            rho = wp_material.density
        else:
            rho = wp_material.density(theta_reference)
        if isinstance(wp_material.thermal_conductivity, float):
            k = wp_material.thermal_conductivity
        else:
            k = wp_material.thermal_conductivity(theta_reference)

        reference_problem = cyl_heat.HeatCylindrical(
            self.Q, theta_reference,
            zeta,
            b=Constant((0.0, 0.0, 0.0)),
            kappa=k,
            rho=rho,
            cp=cp,
            source=Constant(0.0),
            dirichlet_bcs=theta_bcs_d_strict
            )

        from dolfin import solve
        solve(reference_problem.F0 == 0,
              theta_reference,
              bcs=theta_bcs_d_strict
              )

        # Create equivalent boundary conditions from theta_reference. This
        # makes sure that the potentially expensive Expression evaluation in
        # theta_bcs_* is replaced by something reasonably cheap.
        # (Loop variables renamed so the thermal conductivity `k` above is not
        # clobbered; it is used again as kappa in the DEBUG block below.)
        for i, bc in enumerate(self.theta_bcs_d):
            self.theta_bcs_d[i] = DirichletBC(bc.function_space(),
                                              theta_reference,
                                              bc.domain_args[0]
                                              )
        # Adapt Neumann conditions.
        n = FacetNormal(self.Q.mesh())
        for key in self.theta_bcs_n:
            self.theta_bcs_n[key] = dot(n, grad(theta_reference))

        if DEBUG:
            # Solve the heat equation with the mixed Dirichlet-Neumann
            # boundary conditions and compare it to the Dirichlet-only
            # solution.
            theta_new = Function(
                self.Q,
                name='temperature (Neumann + Dirichlet)'
                )
            from dolfin import Measure
            ds_workpiece = Measure('ds')[self.wp_boundaries]
            problem_new = cyl_heat.HeatCylindrical(
                self.Q, theta_new,
                zeta,
                b=Constant((0.0, 0.0, 0.0)),
                kappa=k,
                rho=rho,
                cp=cp,
                source=Constant(0.0),
                dirichlet_bcs=self.theta_bcs_d,
                neumann_bcs=self.theta_bcs_n,
                ds=ds_workpiece
                )

            from dolfin import solve
            solve(problem_new.F0 == 0,
                  theta_new,
                  bcs=problem_new.dirichlet_bcs
                  )
            from dolfin import plot, interactive, errornorm
            print('||theta_new - theta_ref|| = %e'
                  % errornorm(theta_new, theta_reference)
                  )
            plot(theta_reference)
            plot(theta_new)
            plot(
                theta_reference - theta_new,
                title='theta_ref - theta_new'
                )
            interactive()

        #omega = 2 * pi * 10.0e3
        self.omega = 2 * pi * 300.0

        return
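
As a side note, the edge-projection interpolation used by TecplotDirichletBC and TecplotNeumannBC in the example above can be factored into a small standalone helper. A minimal numpy sketch; the function name and signature are illustrative and not part of the original module:

import numpy as np

def interpolate_on_edges(x, nodes, edges, vals, tol=1.0e-10):
    # Project the query point x onto each edge X0--X1; if x lies on the edge
    # (squared distance below tol and 0 <= theta <= 1), return the linearly
    # interpolated nodal value. Edge indices are 1-based, as in Tecplot.
    x = np.asarray(x, dtype=float)
    for edge in edges:
        X0, X1 = nodes[edge[0] - 1], nodes[edge[1] - 1]
        theta = np.dot(X1 - X0, x - X0) / np.dot(X1 - X0, X1 - X0)
        diff = (1.0 - theta) * X0 + theta * X1 - x
        if np.dot(diff, diff) < tol and 0.0 <= theta <= 1.0:
            return (1.0 - theta) * vals[edge[0] - 1] + theta * vals[edge[1] - 1]
    return None  # x does not sit on any boundary edge
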
def porosity_update_from_mechanics(mesh, phi_0, alpha, K, u, u_0, p, p_0):
    DG0 = FunctionSpace(mesh, "DG", 0)
    porosity_from_mechanics_expr = \
        phi_0 + (alpha - phi_0)*(vol_strain(u) - vol_strain(u_0)) \
        + (alpha - phi_0)*(1. - alpha)/K*(p - p_0)
    porosity_from_mechanics = project(porosity_from_mechanics_expr, DG0)
    return porosity_from_mechanics
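
For reference, the update above is a restatement of (assuming vol_strain(u) denotes the volumetric strain tr(eps(u)) = div u, alpha the Biot coefficient, and K the bulk modulus):

$$\phi = \phi_0 + (\alpha - \phi_0)\bigl(\varepsilon_v(u) - \varepsilon_v(u_0)\bigr) + \frac{(\alpha - \phi_0)(1 - \alpha)}{K}\,(p - p_0)$$
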
Example #31
0
def test(show=False):
    problem = problems.Crucible()
    # The voltage is defined as
    #
    #     v(t) = Im(exp(i omega t) v)
    #          = Im(exp(i (omega t + arg(v)))) |v|
    #          = sin(omega t + arg(v)) |v|.
    #
    # Hence, for a lagging voltage, arg(v) needs to be negative.
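    # (A small numerical check of this identity is given after this function.)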
    voltages = [
        38.0 * numpy.exp(-1j * 2 * pi * 2 * 70.0 / 360.0),
        38.0 * numpy.exp(-1j * 2 * pi * 1 * 70.0 / 360.0),
        38.0 * numpy.exp(-1j * 2 * pi * 0 * 70.0 / 360.0),
        25.0 * numpy.exp(-1j * 2 * pi * 0 * 70.0 / 360.0),
        25.0 * numpy.exp(-1j * 2 * pi * 1 * 70.0 / 360.0),
    ]

    lorentz, joule, Phi = get_lorentz_joule(problem, voltages, show=show)

    # Some assertions
    ref = 1.4627674791126285e-05
    assert abs(norm(Phi[0], "L2") - ref) < 1.0e-3 * ref
    ref = 3.161363929287592e-05
    assert abs(norm(Phi[1], "L2") - ref) < 1.0e-3 * ref
    #
    ref = 12.115309575057681
    assert abs(norm(lorentz, "L2") - ref) < 1.0e-3 * ref
    #
    ref = 1406.336109054347
    V = FunctionSpace(problem.submesh_workpiece, "CG", 1)
    jp = project(joule, V)
    jp.rename("s", "Joule heat source")
    assert abs(norm(jp, "L2") - ref) < 1.0e-3 * ref

    # check_currents = False
    # if check_currents:
    #     r = SpatialCoordinate(problem.mesh)[0]
    #     begin('Currents computed after the fact:')
    #     k = 0
    #     with XDMFFile('currents.xdmf') as xdmf_file:
    #         for coil in coils:
    #             for ii in coil['rings']:
    #                 J_r = sigma[ii] * (
    #                     voltages[k].real/(2*pi*r) + problem.omega * Phi[1]
    #                     )
    #                 J_i = sigma[ii] * (
    #                     voltages[k].imag/(2*pi*r) - problem.omega * Phi[0]
    #                     )
    #                 alpha = assemble(J_r * dx(ii))
    #                 beta = assemble(J_i * dx(ii))
    #                 info('J = {:e} + i {:e}'.format(alpha, beta))
    #                 info(
    #                     '|J|/sqrt(2) = {:e}'.format(
    #                         numpy.sqrt(0.5 * (alpha**2 + beta**2))
    #                     ))
    #                 submesh = SubMesh(problem.mesh, problem.subdomains, ii)
    #                 V1 = FunctionSpace(submesh, 'CG', 1)
    #                 # Those projections may take *very* long.
    #                 # TODO find out why
    #                 j_v1 = [
    #                     project(J_r, V1),
    #                     project(J_i, V1)
    #                     ]
    #                 # plot(j_v1[0], title='j_r')
    #                 # plot(j_v1[1], title='j_i')
    #                 current = project(as_vector(j_v1), V1*V1)
    #                 current.rename('j{}'.format(ii), 'current {}'.format(ii))
    #                 xdmf_file.write(current)
    #                 k += 1
    #     end()

    filename = "./maxwell.xdmf"
    with XDMFFile(filename) as xdmf_file:
        xdmf_file.parameters["flush_output"] = True
        xdmf_file.parameters["rewrite_function_mesh"] = False

        # Store phi
        info("Writing out Phi to {}...".format(filename))
        V = FunctionSpace(problem.mesh, "CG", 1)
        phi = Function(V, name="phi")
        Phi0 = project(Phi[0], V)
        Phi1 = project(Phi[1], V)
        omega = problem.omega
        for t in numpy.linspace(0.0, 2 * pi / omega, num=100, endpoint=False):
            # Im(Phi * exp(i*omega*t))
            phi.vector().zero()
            phi.vector().axpy(sin(problem.omega * t), Phi0.vector())
            phi.vector().axpy(cos(problem.omega * t), Phi1.vector())
            xdmf_file.write(phi, t)

        # Show the resulting magnetic field
        #
        #   B_r = -dphi/dz,
        #   B_z = 1/r d(rphi)/dr.
        #
        r = SpatialCoordinate(problem.mesh)[0]
        g = 1.0 / r * grad(r * Phi[0])
        V_element = FiniteElement("CG", V.mesh().ufl_cell(), 1)
        VV = FunctionSpace(V.mesh(), V_element * V_element)

        B_r = project(as_vector((-g[1], g[0])), VV)
        g = 1 / r * grad(r * Phi[1])
        B_i = project(as_vector((-g[1], g[0])), VV)
        info("Writing out B to {}...".format(filename))
        B = Function(VV)
        B.rename("B", "magnetic field")
        if abs(problem.omega) < DOLFIN_EPS:
            B.assign(B_r)
            xdmf_file.write(B)
            # plot(B_r, title='Re(B)')
            # plot(B_i, title='Im(B)')
        else:
            # Write those out to a file.
            lspace = numpy.linspace(
                0.0, 2 * pi / problem.omega, num=100, endpoint=False
            )
            for t in lspace:
                # Im(B * exp(i*omega*t))
                B.vector().zero()
                B.vector().axpy(sin(problem.omega * t), B_r.vector())
                B.vector().axpy(cos(problem.omega * t), B_i.vector())
                xdmf_file.write(B, t)

    filename = "./lorentz-joule.xdmf"
    info("Writing out Lorentz force and Joule heat source to {}...".format(filename))
    with XDMFFile(filename) as xdmf_file:
        xdmf_file.write(lorentz, 0.0)
        # xdmf_file.write(jp, 0.0)

    return
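
A quick standalone numpy check of the phase convention spelled out at the top of test(), i.e. v(t) = Im(v exp(i omega t)) = |v| sin(omega t + arg(v)); the frequency and amplitude below are simply the values used above:

import numpy

omega = 2 * numpy.pi * 300.0
v = 38.0 * numpy.exp(-1j * 2 * numpy.pi * 70.0 / 360.0)
t = numpy.linspace(0.0, 2 * numpy.pi / omega, num=7)
assert numpy.allclose(
    numpy.imag(v * numpy.exp(1j * omega * t)),
    numpy.abs(v) * numpy.sin(omega * t + numpy.angle(v)),
)
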
def linear_extrapolation(mesh, p, p_n1, p_n2, p_max, p_min, dt, dt_1, p2_max,
                         p2_min):
    DG0 = FunctionSpace(mesh, "DG", 0)
    p = (1.0 + dt/dt_1)*mu_newton_linear_adapt(p_n1, p_min, p_max, p2_min, p2_max) \
        - (dt/dt_1)*mu_newton_linear_adapt(p_n2, p_min, p_max, p2_min, p2_max)
    return project(p, DG0)
Example #33
0
def test_eim_approximation_00(expression_type, basis_generation):
    """
    This test deals with the trivial case of interpolating the zero function/vector/matrix,
    as it is a corner case. Next files will deal with more interesting cases.
    """
    class MockProblem(ParametrizedProblem):
        def __init__(self, V, **kwargs):
            ParametrizedProblem.__init__(self, "")
            self.V = V

        def name(self):
            return "MockProblem_00_" + expression_type + "_" + basis_generation

    class ParametrizedFunctionApproximation(EIMApproximation):
        def __init__(self, V, expression_type, basis_generation):
            self.V = V
            # Parametrized function to be interpolated
            mock_problem = MockProblem(V)
            f = ParametrizedExpression(mock_problem,
                                       "0",
                                       mu=(1., ),
                                       element=V.ufl_element())
            #
            folder_prefix = os.path.join("test_eim_approximation_00_tempdir",
                                         expression_type, basis_generation)
            assert expression_type in ("Function", "Vector", "Matrix")
            if expression_type == "Function":
                # Call Parent constructor
                EIMApproximation.__init__(self, mock_problem,
                                          ParametrizedExpressionFactory(f),
                                          folder_prefix, basis_generation)
            elif expression_type == "Vector":
                v = TestFunction(V)
                form = f * v * dx
                # Call Parent constructor
                EIMApproximation.__init__(self, mock_problem,
                                          ParametrizedTensorFactory(form),
                                          folder_prefix, basis_generation)
            elif expression_type == "Matrix":
                u = TrialFunction(V)
                v = TestFunction(V)
                form = f * u * v * dx
                # Call Parent constructor
                EIMApproximation.__init__(self, mock_problem,
                                          ParametrizedTensorFactory(form),
                                          folder_prefix, basis_generation)
            else:  # impossible to arrive here anyway thanks to the assert
                raise AssertionError("Invalid expression_type")

    # 1. Create the mesh for this test
    mesh = IntervalMesh(10, 0., 1.)

    # 2. Create Finite Element space (Lagrange P1)
    V = FunctionSpace(mesh, "Lagrange", 1)

    # 3. Allocate an object of the ParametrizedFunctionApproximation class
    parametrized_function_approximation = ParametrizedFunctionApproximation(
        V, expression_type, basis_generation)
    mu_range = [
        (0., 1.),
    ]
    parametrized_function_approximation.set_mu_range(mu_range)

    # 4. Prepare reduction with EIM
    parametrized_function_reduction_method = EIMApproximationReductionMethod(
        parametrized_function_approximation)
    parametrized_function_reduction_method.set_Nmax(1)

    # 5. Perform the offline phase
    parametrized_function_reduction_method.initialize_training_set(
        5, sampling=EquispacedDistribution())
    reduced_parametrized_function_approximation = parametrized_function_reduction_method.offline(
    )
    assert reduced_parametrized_function_approximation.N == 1

    # 6. Perform an online solve
    online_mu = (1., )
    reduced_parametrized_function_approximation.set_mu(online_mu)
    reduced_parametrized_function_approximation.solve()

    # 7. Perform an error analysis
    parametrized_function_reduction_method.initialize_testing_set(5)
    parametrized_function_reduction_method.error_analysis()
def linear_n1(mesh, p, p_n1, p_max, p_min, p2_max, p2_min):
    DG0 = FunctionSpace(mesh, "DG", 0)
    p = mu_newton_linear_adapt(p_n1, p_min, p_max, p2_min, p2_max)
    return project(p, DG0)
Example #35
0
# gen.parameters["edge_size"] = 0.05

print(f"Started meshing")
mesh = gen.generate(meshing_domain)
print(f"Finished meshing")

# mesh = generate_mesh(strap, 16, "cgal")
msh_file << mesh
print(f"Mesh saved!")

print(f"Mesh info:")
info(mesh)

sys.exit(-1)

V = FunctionSpace(mesh, 'P', 1)

# Define boundary condition
u_D = Expression(
    '1 + x[0]*x[0] + alpha*x[1]*x[1] + theta*x[2]*x[2] + beta*t',
    # '1 + beta*t',
    degree=2,
    alpha=alpha,
    theta=theta,
    beta=beta,
    t=0)


def boundary(x, on_boundary):
    return on_boundary
def linear_extrapolation_for_c(mesh, p, p_n1, p_n2, dt, dt_1):
    DG1 = FunctionSpace(mesh, "DG", 1)
    p = (1.0 + dt/dt_1)*p_n1 - (dt/dt_1)*p_n2
    return project(p, DG1)
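
A pure-numpy sanity check of the extrapolation weights used above (the numbers are illustrative): for a quantity that varies linearly in time, p(t) = a + b*t, the combination (1 + dt/dt_1)*p_n1 - (dt/dt_1)*p_n2 reproduces the value at the new time level exactly.

import numpy as np

a, b = 2.0, -3.0
p_of = lambda t: a + b * t
t_n2, dt_1, dt = 0.0, 0.4, 0.25   # two previous time levels and the new step size
t_n1 = t_n2 + dt_1
p_ext = (1.0 + dt / dt_1) * p_of(t_n1) - (dt / dt_1) * p_of(t_n2)
assert np.isclose(p_ext, p_of(t_n1 + dt))
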
Example #37
0
def V(mesh):
    return FunctionSpace(mesh, 'CG', 1)
def linear_n1_for_c(mesh, p, p_n1):
    DG1 = FunctionSpace(mesh, "DG", 1)
    p = p_n1
    return project(p, DG1)
def test_single_block_no_restriction_from_block_element(mesh, Element):
    V_element = Element(mesh)
    V = FunctionSpace(mesh, V_element)
    block_V_element = BlockElement(V_element)
    block_V = BlockFunctionSpace(mesh, block_V_element)
    assert_dof_map_single_block_no_restriction(V, block_V)
Example #40
0
    def solve(self, problem):
        self.problem = problem
        doSave = problem.doSave
        save_this_step = False
        onlyVel = problem.saveOnlyVel
        dt = self.metadata['dt']

        nu = Constant(self.problem.nu)
        # TODO check proper use of watches
        self.tc.init_watch('init', 'Initialization', True, count_to_percent=False)
        self.tc.init_watch('rhs', 'Assembled right hand side', True, count_to_percent=True)
        self.tc.init_watch('updateBC', 'Updated velocity BC', True, count_to_percent=True)
        self.tc.init_watch('applybc1', 'Applied velocity BC 1st step', True, count_to_percent=True)
        self.tc.init_watch('applybc3', 'Applied velocity BC 3rd step', True, count_to_percent=True)
        self.tc.init_watch('applybcP', 'Applied pressure BC or orthogonalized rhs', True, count_to_percent=True)
        self.tc.init_watch('assembleMatrices', 'Initial matrix assembly', False, count_to_percent=True)
        self.tc.init_watch('solve 1', 'Running solver on 1st step', True, count_to_percent=True)
        self.tc.init_watch('solve 2', 'Running solver on 2nd step', True, count_to_percent=True)
        self.tc.init_watch('solve 3', 'Running solver on 3rd step', True, count_to_percent=True)
        self.tc.init_watch('solve 4', 'Running solver on 4th step', True, count_to_percent=True)
        self.tc.init_watch('assembleA1', 'Assembled A1 matrix (without stabiliz.)', True, count_to_percent=True)
        self.tc.init_watch('assembleA1stab', 'Assembled A1 stabilization', True, count_to_percent=True)
        self.tc.init_watch('next', 'Next step assignments', True, count_to_percent=True)
        self.tc.init_watch('saveVel', 'Saved velocity', True)

        self.tc.start('init')

        # Define function spaces (P2-P1)
        mesh = self.problem.mesh
        self.V = VectorFunctionSpace(mesh, "Lagrange", 2)  # velocity
        self.Q = FunctionSpace(mesh, "Lagrange", 1)  # pressure
        self.PS = FunctionSpace(mesh, "Lagrange", 2)  # partial solution (must be same order as V)
        self.D = FunctionSpace(mesh, "Lagrange", 1)   # velocity divergence space
        if self.bc == 'lagrange':
            L = FunctionSpace(mesh, "R", 0)
            QL = self.Q*L

        problem.initialize(self.V, self.Q, self.PS, self.D)

        # Define trial and test functions
        u = TrialFunction(self.V)
        v = TestFunction(self.V)
        if self.bc == 'lagrange':
            (pQL, rQL) = TrialFunction(QL)
            (qQL, lQL) = TestFunction(QL)
        else:
            p = TrialFunction(self.Q)
            q = TestFunction(self.Q)

        n = FacetNormal(mesh)
        I = Identity(u.geometric_dimension())

        # Initial conditions: u0 = velocity at previous time step, u1 = velocity two time steps back, p0 = previous pressure
        [u1, u0, p0] = self.problem.get_initial_conditions([{'type': 'v', 'time': -dt},
                                                          {'type': 'v', 'time': 0.0},
                                                          {'type': 'p', 'time': 0.0}])

        if doSave:
            problem.save_vel(False, u0, 0.0)
            problem.save_vel(True, u0, 0.0)

        u_ = Function(self.V)         # current tentative velocity
        u_cor = Function(self.V)         # current corrected velocity
        if self.bc == 'lagrange':
            p_QL = Function(QL)    # current pressure or pressure help function from rotation scheme
            pQ = Function(self.Q)     # auxiliary function for conversion between QL.sub(0) and Q
        else:
            p_ = Function(self.Q)         # current pressure or pressure help function from rotation scheme
        p_mod = Function(self.Q)      # current modified pressure from rotation scheme

        # Define coefficients
        k = Constant(self.metadata['dt'])
        f = Constant((0, 0, 0))

        # Define forms
        # step 1: Tentative velocity, solve to u_
        u_ext = 1.5*u0 - 0.5*u1  # extrapolation for convection term

        # Stabilisation
        h = CellSize(mesh)
        # CBC delta:
        if self.cbcDelta:
            delta = Constant(self.stabCoef)*h/(sqrt(inner(u_ext, u_ext))+h)
        else:
            delta = Constant(self.stabCoef)*h**2/(2*nu*k + k*h*inner(u_ext, u_ext)+h**2)

        if self.use_full_SUPG:
            v1 = v + delta*0.5*k*dot(grad(v), u_ext)
            parameters['form_compiler']['quadrature_degree'] = 6
        else:
            v1 = v

        def nonlinearity(function):
            if self.use_ema:
                return 2*inner(dot(sym(grad(function)), u_ext), v1) * dx + inner(div(function)*u_ext, v1) * dx
                # return 2*inner(dot(sym(grad(function)), u_ext), v) * dx + inner(div(u_ext)*function, v) * dx
                # QQ implement this way?
            else:
                return inner(dot(grad(function), u_ext), v1) * dx

        def diffusion(fce):
            if self.useLaplace:
                return nu*inner(grad(fce), grad(v1)) * dx
            else:
                form = inner(nu * 2 * sym(grad(fce)), sym(grad(v1))) * dx
                if self.bcv == 'CDN':
                    # IMP will work only if p=0 on output, or we must add term
                    # inner(p0*n, v)*problem.get_outflow_measure_form() to avoid boundary layer
                    return form
                if self.bcv == 'LAP':
                    return form - inner(nu*dot(grad(fce).T, n), v1)  * problem.get_outflow_measure_form()
                if self.bcv == 'DDN':
                    # IMP will work only if p=0 on output, or we must add term
                    # inner(p0*n, v)*problem.get_outflow_measure_form() to avoid boundary layer
                    return form  # additional term must be added to non-constant part

        def pressure_rhs():
            if self.useLaplace or self.bcv == 'LAP':
                return inner(p0, div(v1)) * dx - inner(p0*n, v1) * problem.get_outflow_measure_form()
                # NT term inner(inner(p, n), v) is 0 when p=0 on outflow
            else:
                return inner(p0, div(v1)) * dx

        a1_const = (1./k)*inner(u, v1)*dx + diffusion(0.5*u)
        a1_change = nonlinearity(0.5*u)
        if self.bcv == 'DDN':
            # IMP Problem: Does not penalize influx for current step, only for the next one
            # IMP this can lead to oscillation: DDN corrects the next step, but then u_ext is OK, so in the next step DDN is not used, leading to new influx...
            # u and u_ext cannot be switched, min_value is nonlinear function
            a1_change += -0.5*min_value(Constant(0.), inner(u_ext, n))*inner(u, v1)*problem.get_outflow_measure_form()
            # IMP works only with uflacs compiler

        L1 = (1./k)*inner(u0, v1)*dx - nonlinearity(0.5*u0) - diffusion(0.5*u0) + pressure_rhs()
        if self.bcv == 'DDN':
            L1 += 0.5*min_value(0., inner(u_ext, n))*inner(u0, v1)*problem.get_outflow_measure_form()

        # Non-consistent SUPG stabilisation
        if self.stabilize and not self.use_full_SUPG:
            # a1_stab = delta*inner(dot(grad(u), u_ext), dot(grad(v), u_ext))*dx
            a1_stab = 0.5*delta*inner(dot(grad(u), u_ext), dot(grad(v), u_ext))*dx(None, {'quadrature_degree': 6})
            # NT optional: use Crank Nicolson in stabilisation term: change RHS
            # L1 += -0.5*delta*inner(dot(grad(u0), u_ext), dot(grad(v), u_ext))*dx(None, {'quadrature_degree': 6})

        outflow_area = Constant(problem.outflow_area)
        need_outflow = Constant(0.0)
        if self.useRotationScheme:
            # Rotation scheme
            if self.bc == 'lagrange':
                F2 = inner(grad(pQL), grad(qQL))*dx + (1./k)*qQL*div(u_)*dx + pQL*lQL*dx + qQL*rQL*dx
            else:
                F2 = inner(grad(p), grad(q))*dx + (1./k)*q*div(u_)*dx
        else:
            # Projection, solve to p_
            if self.bc == 'lagrange':
                F2 = inner(grad(pQL - p0), grad(qQL))*dx + (1./k)*qQL*div(u_)*dx + pQL*lQL*dx + qQL*rQL*dx
            else:
                if self.forceOutflow and problem.can_force_outflow:
                    info('Forcing outflow.')
                    F2 = inner(grad(p - p0), grad(q))*dx + (1./k)*q*div(u_)*dx
                    for m in problem.get_outflow_measures():
                        F2 += (1./k)*(1./outflow_area)*need_outflow*q*m
                else:
                    F2 = inner(grad(p - p0), grad(q))*dx + (1./k)*q*div(u_)*dx
        a2, L2 = system(F2)

        # step 3: Finalize, solve to u_
        if self.useRotationScheme:
            # Rotation scheme
            if self.bc == 'lagrange':
                F3 = (1./k)*inner(u - u_, v)*dx + inner(grad(p_QL.sub(0)), v)*dx
            else:
                F3 = (1./k)*inner(u - u_, v)*dx + inner(grad(p_), v)*dx
        else:
            if self.bc == 'lagrange':
                F3 = (1./k)*inner(u - u_, v)*dx + inner(grad(p_QL.sub(0) - p0), v)*dx
            else:
                F3 = (1./k)*inner(u - u_, v)*dx + inner(grad(p_ - p0), v)*dx
        a3, L3 = system(F3)

        if self.useRotationScheme:
            # Rotation scheme: modify pressure
            if self.bc == 'lagrange':
                pr = TrialFunction(self.Q)
                qr = TestFunction(self.Q)
                F4 = (pr - p0 - p_QL.sub(0) + nu*div(u_))*qr*dx
            else:
                F4 = (p - p0 - p_ + nu*div(u_))*q*dx
            # TODO try whether this would be faster: skip solving the system and use p.assign(...) instead; that requires project(div(u), Q), which amounts to solving a similar system
            # TODO try passing solver_type='lu' to project >> a direct solver should be more efficient
            a4, L4 = system(F4)

        # Assemble matrices
        self.tc.start('assembleMatrices')
        A1_const = assemble(a1_const)  # needs to be here so that A1 stays one Python object during repeated assembly
        A1_change = A1_const.copy()  # copy to get a matrix with the same sparsity structure (data will be overwritten)
        if self.stabilize and not self.use_full_SUPG:
            A1_stab = A1_const.copy()  # copy to get a matrix with the same sparsity structure (data will be overwritten)
        A2 = assemble(a2)
        A3 = assemble(a3)
        if self.useRotationScheme:
            A4 = assemble(a4)
        self.tc.end('assembleMatrices')

        if self.solvers == 'direct':
            self.solver_vel_tent = LUSolver('mumps')
            self.solver_vel_cor = LUSolver('mumps')
            self.solver_p = LUSolver('umfpack')
            if self.useRotationScheme:
                self.solver_rot = LUSolver('umfpack')
        else:
            # NT not needed, chosen not to use hypre_parasails
            # if self.prec_v == 'hypre_parasails':  # in FEniCS 1.6.0 inaccessible using KrylovSolver class
            #     self.solver_vel_tent = PETScKrylovSolver('gmres')   # PETSc4py object
            #     self.solver_vel_tent.ksp().getPC().setType('hypre')
            #     PETScOptions.set('pc_hypre_type', 'parasails')
            #     # this is global setting, but preconditioners for pressure solvers are set by their constructors
            # else:
            self.solver_vel_tent = KrylovSolver('gmres', self.prec_v)   # nonsymmetric > GMRES
            # IMP cannot use 'ilu' in parallel (choose a different default option)
            self.solver_vel_cor = KrylovSolver('cg', 'hypre_amg')   # symmetric > CG
            self.solver_p = KrylovSolver('cg', self.prec_p)          # symmetric > CG
            if self.useRotationScheme:
                self.solver_rot = KrylovSolver('cg', self.prec_p)

        solver_options = {'monitor_convergence': True, 'maximum_iterations': 1000, 'nonzero_initial_guess': True}
        # 'nonzero_initial_guess': True with solver.solve(A, u, b) means that
        # the solver will use whatever is stored in u as the initial guess.

        # Get the nullspace if there are no pressure boundary conditions
        foo = Function(self.Q)     # auxiliary vector for setting pressure nullspace
        if self.bc in ['nullspace', 'nullspace_s']:
            null_vec = Vector(foo.vector())
            self.Q.dofmap().set(null_vec, 1.0)
            null_vec *= 1.0/null_vec.norm('l2')
            self.null_space = VectorSpaceBasis([null_vec])
            if self.bc == 'nullspace':
                as_backend_type(A2).set_nullspace(self.null_space)

        # apply global options for Krylov solvers
        self.solver_vel_tent.parameters['relative_tolerance'] = 10 ** (-self.precision_rel_v_tent)
        self.solver_vel_tent.parameters['absolute_tolerance'] = 10 ** (-self.precision_abs_v_tent)
        self.solver_vel_cor.parameters['relative_tolerance'] = 10E-12
        self.solver_vel_cor.parameters['absolute_tolerance'] = 10E-4
        self.solver_p.parameters['relative_tolerance'] = 10**(-self.precision_p)
        self.solver_p.parameters['absolute_tolerance'] = 10E-10
        if self.useRotationScheme:
            self.solver_rot.parameters['relative_tolerance'] = 10**(-self.precision_p)
            self.solver_rot.parameters['absolute_tolerance'] = 10E-10

        if self.solvers == 'krylov':
            for solver in [self.solver_vel_tent, self.solver_vel_cor, self.solver_p, self.solver_rot] if \
                    self.useRotationScheme else [self.solver_vel_tent, self.solver_vel_cor, self.solver_p]:
                for key, value in solver_options.items():
                    try:
                        solver.parameters[key] = value
                    except KeyError:
                        info('Invalid option %s for KrylovSolver' % key)
                        return 1
                solver.parameters['preconditioner']['structure'] = 'same'
                # matrices A2-A4 do not change, so we can reuse preconditioners

        self.solver_vel_tent.parameters['preconditioner']['structure'] = 'same_nonzero_pattern'
        # matrix A1 changes every time step, so change of preconditioner must be allowed

        if self.bc == 'lagrange':
            fa = FunctionAssigner(self.Q, QL.sub(0))

        # boundary conditions
        bcu, bcp = problem.get_boundary_conditions(self.bc == 'outflow', self.V, self.Q)
        self.tc.end('init')
        # Time-stepping
        info("Running of Incremental pressure correction scheme n. 1")
        ttime = self.metadata['time']
        t = dt
        step = 1
        while t < (ttime + dt/2.0):
            info("t = %f" % t)
            self.problem.update_time(t, step)
            if self.MPI_rank == 0:
                problem.write_status_file(t)

            if doSave:
                save_this_step = problem.save_this_step

            # DDN debug
            # u_ext_in = assemble(inner(u_ext, n)*problem.get_outflow_measure_form())
            # DDN_triggered = assemble(min_value(Constant(0.), inner(u_ext, n))*problem.get_outflow_measure_form())
            # print('DDN: u_ext*n dSout = ', u_ext_in)
            # print('DDN: negative part of u_ext*n dSout = ', DDN_triggered)

            # assemble matrix (it depends on solution)
            self.tc.start('assembleA1')
            assemble(a1_change, tensor=A1_change)  # assembling into existing matrix is faster than assembling new one
            A1 = A1_const.copy()  # we dont want to change A1_const
            A1.axpy(1, A1_change, True)
            self.tc.end('assembleA1')
            self.tc.start('assembleA1stab')
            if self.stabilize and not self.use_full_SUPG:
                assemble(a1_stab, tensor=A1_stab)  # assembling into existing matrix is faster than assembling new one
                A1.axpy(1, A1_stab, True)
            self.tc.end('assembleA1stab')

            # Compute tentative velocity step
            begin("Computing tentative velocity")
            self.tc.start('rhs')
            b = assemble(L1)
            self.tc.end('rhs')
            self.tc.start('applybc1')
            [bc.apply(A1, b) for bc in bcu]
            self.tc.end('applybc1')
            try:
                self.tc.start('solve 1')
                self.solver_vel_tent.solve(A1, u_.vector(), b)
                self.tc.end('solve 1')
                if save_this_step:
                    self.tc.start('saveVel')
                    problem.save_vel(True, u_, t)
                    self.tc.end('saveVel')
                if save_this_step and not onlyVel:
                    problem.save_div(True, u_)
                problem.compute_err(True, u_, t)
                problem.compute_div(True, u_)
            except RuntimeError as inst:
                problem.report_fail(t)
                return 1
            end()

            # DDN debug
            # u_ext_in = assemble(inner(u_, n)*problem.get_outflow_measure_form())
            # DDN_triggered = assemble(min_value(Constant(0.), inner(u_, n))*problem.get_outflow_measure_form())
            # print('DDN: u_tent*n dSout = ', u_ext_in)
            # print('DDN: negative part of u_tent*n dSout = ', DDN_triggered)

            if self.useRotationScheme:
                begin("Computing tentative pressure")
            else:
                begin("Computing pressure")
            if self.forceOutflow and problem.can_force_outflow:
                out = problem.compute_outflow(u_)
                info('Tentative outflow: %f' % out)
                n_o = -problem.last_inflow-out
                info('Needed outflow: %f' % n_o)
                need_outflow.assign(n_o)
            self.tc.start('rhs')
            b = assemble(L2)
            self.tc.end('rhs')
            self.tc.start('applybcP')
            [bc.apply(A2, b) for bc in bcp]
            if self.bc in ['nullspace', 'nullspace_s']:
                self.null_space.orthogonalize(b)
            self.tc.end('applybcP')
            try:
                self.tc.start('solve 2')
                if self.bc == 'lagrange':
                    self.solver_p.solve(A2, p_QL.vector(), b)
                else:
                    self.solver_p.solve(A2, p_.vector(), b)
                self.tc.end('solve 2')
            except RuntimeError as inst:
                problem.report_fail(t)
                return 1
            if self.useRotationScheme:
                foo = Function(self.Q)
                if self.bc == 'lagrange':
                    fa.assign(pQ, p_QL.sub(0))
                    foo.assign(pQ + p0)
                else:
                    foo.assign(p_+p0)
                problem.averaging_pressure(foo)
                if save_this_step and not onlyVel:
                    problem.save_pressure(True, foo)
            else:
                if self.bc == 'lagrange':
                    fa.assign(pQ, p_QL.sub(0))
                    problem.averaging_pressure(pQ)
                    if save_this_step and not onlyVel:
                        problem.save_pressure(False, pQ)
                else:
                    # we do not want to change p=0 on outflow, it conflicts with do-nothing conditions
                    foo = Function(self.Q)
                    foo.assign(p_)
                    problem.averaging_pressure(foo)
                    if save_this_step and not onlyVel:
                        problem.save_pressure(False, foo)
            end()

            begin("Computing corrected velocity")
            self.tc.start('rhs')
            b = assemble(L3)
            self.tc.end('rhs')
            if not self.B:
                self.tc.start('applybc3')
                [bc.apply(A3, b) for bc in bcu]
                self.tc.end('applybc3')
            try:
                self.tc.start('solve 3')
                self.solver_vel_cor.solve(A3, u_cor.vector(), b)
                self.tc.end('solve 3')
                problem.compute_err(False, u_cor, t)
                problem.compute_div(False, u_cor)
            except RuntimeError as inst:
                problem.report_fail(t)
                return 1
            if save_this_step:
                self.tc.start('saveVel')
                problem.save_vel(False, u_cor, t)
                self.tc.end('saveVel')
            if save_this_step and not onlyVel:
                problem.save_div(False, u_cor)
            end()

            # DDN debug
            # u_ext_in = assemble(inner(u_cor, n)*problem.get_outflow_measure_form())
            # DDN_triggered = assemble(min_value(Constant(0.), inner(u_cor, n))*problem.get_outflow_measure_form())
            # print('DDN: u_cor*n dSout = ', u_ext_in)
            # print('DDN: negative part of u_cor*n dSout = ', DDN_triggered)

            if self.useRotationScheme:
                begin("Rotation scheme pressure correction")
                self.tc.start('rhs')
                b = assemble(L4)
                self.tc.end('rhs')
                try:
                    self.tc.start('solve 4')
                    self.solver_rot.solve(A4, p_mod.vector(), b)
                    self.tc.end('solve 4')
                except RuntimeError as inst:
                    problem.report_fail(t)
                    return 1
                problem.averaging_pressure(p_mod)
                if save_this_step and not onlyVel:
                    problem.save_pressure(False, p_mod)
                end()

            # compute functionals (e. g. forces)
            problem.compute_functionals(u_cor,
                                        p_mod if self.useRotationScheme else (pQ if self.bc == 'lagrange' else p_), t)

            # Move to next time step
            self.tc.start('next')
            u1.assign(u0)
            u0.assign(u_cor)
            u_.assign(u_cor)  # use corrected velocity as the initial guess in the first step

            if self.useRotationScheme:
                p0.assign(p_mod)
            else:
                if self.bc == 'lagrange':
                    p0.assign(pQ)
                else:
                    p0.assign(p_)

            t = round(t + dt, 6)  # round time step to 0.000001
            step += 1
            self.tc.end('next')

        info("Finished: Incremental pressure correction scheme n. 1")
        problem.report()
        return 0
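
The pressure steps above attach a constant nullspace to the Poisson matrix and orthogonalize the right-hand side against it whenever no pressure Dirichlet condition is given. A minimal standalone sketch of that pattern in legacy DOLFIN, reusing the same calls as the solver above; the Poisson problem itself is only a placeholder:

from dolfin import (UnitSquareMesh, FunctionSpace, TrialFunction, TestFunction,
                    Function, Expression, Vector, VectorSpaceBasis,
                    as_backend_type, assemble, inner, grad, dx, KrylovSolver)

mesh = UnitSquareMesh(16, 16)
Q = FunctionSpace(mesh, 'CG', 1)
p, q = TrialFunction(Q), TestFunction(Q)
A = assemble(inner(grad(p), grad(q)) * dx)
b = assemble(Expression('x[0] - 0.5', degree=1) * q * dx)

# Constant vector spanning the nullspace of the pure-Neumann Laplacian.
p_ = Function(Q)
null_vec = Vector(p_.vector())
Q.dofmap().set(null_vec, 1.0)
null_vec *= 1.0 / null_vec.norm('l2')
null_space = VectorSpaceBasis([null_vec])

as_backend_type(A).set_nullspace(null_space)
null_space.orthogonalize(b)   # make the right-hand side compatible

solver = KrylovSolver('cg', 'hypre_amg')
solver.solve(A, p_.vector(), b)
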
Example #41
0
#
# Gryphon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Gryphon. If not, see <http://www.gnu.org/licenses/>.

import numpy as np
from dolfin import UnitSquareMesh, FunctionSpace, TrialFunction, TestFunction, Constant, Expression, inner, grad, dx, \
    DOLFIN_EPS, DirichletBC, norm, Function
from gryphon import backwardEuler, ESDIRK

mesh = UnitSquareMesh(29, 29)
V = FunctionSpace(mesh, "Lagrange", 1)
u = TrialFunction(V)
v = TestFunction(V)

D = Constant(0.1)
domainSource = Expression(
    "10*sin(t)*exp(-((x[0]-0.7)*(x[0]-0.7) + (x[1]-0.5)*(x[1]-0.5))/0.01)",
    t=0.0,
    degree=1)
rhs = -D * inner(grad(u), grad(v)) * dx + domainSource * v * dx


# Define left and right boundary
def boundaryLeft(x, on_boundary):
    return x[0] < DOLFIN_EPS
Example #42
0
File: problem.py  Project: mhanus/EVC
class Problem(object):
  def __init__(self, coarse_mesh, nref, p_coarse, p_fine, sym=False):
    """

    :param dolfin.cpp.mesh.Mesh coarse_mesh:
    :param int nref:
    :param int p_coarse:
    :param int p_fine:
    :param bool sym:
    :return:
    """

    print0("Creating approximation spaces")

    self.V_coarse = FunctionSpace(coarse_mesh, "CG", p_coarse)
    self.ndof_coarse = self.V_coarse.dim()

    refined_mesh = coarse_mesh
    for ref in xrange(nref):
      refined_mesh = refine(refined_mesh)   # creates a new Mesh, initial coarse mesh is unchanged

    self.V_fine = FunctionSpace(refined_mesh, "CG", p_fine)
    self.ndof_fine = self.V_fine.dim()

    H = coarse_mesh.hmax()
    h = refined_mesh.hmax()
    self.alpha = log(H)/log(h)
    self.beta = p_fine + 1

    if comm.rank == 0:
      prop = Table("Approximation properties")
      prop.set("ndof", "coarse", self.ndof_coarse)
      prop.set("ndof", "fine", self.ndof_fine)
      prop.set("h", "coarse", H)
      prop.set("h", "fine", h)

      info(prop)

      print "alpha = {}, beta = {}".format(self.alpha, self.beta)

    self.bc_coarse = None

    self.A_fine = PETScMatrix()
    self.B_fine = PETScMatrix()
    self.A_coarse = PETScMatrix()
    self.B_coarse = PETScMatrix()

    self.sym = sym
    self.switch_gep_matrices = False

  def identity_at_coarse_level(self):
    I = PETScMatrix()
    u = TrialFunction(self.V_coarse)
    v = TestFunction(self.V_coarse)
    assemble(Constant(0)*u*v*dx, tensor=I)
    I.ident_zeros()
    return I

  def residual_norm(self, vec, lam, norm_type='l2', A=None, B=None):
    if A is None:
      A = self.A_fine
      B = self.B_fine

    r = PETScVector()
    A.mult(vec, r)

    if B.size(0) > 0:
      y = PETScVector()
      B.mult(vec, y)
    else:
      y = 1

    r -= lam*y

    return norm(r,norm_type)

  def rayleigh_quotient(self, vec, A=None, B=None):
    if A is None:
      A = self.A_fine
      B = self.B_fine

    r = PETScVector()
    A.mult(vec, r)
    nom = MPI_sum( numpy.dot(r, vec) )

    if B.size(0) > 0:
      B.mult(vec, r)
      denom = MPI_sum( numpy.dot(r, vec) )
    else:
      denom = sqr(norm(r, norm_type='l2'))

    return nom/denom
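
A small dense-numpy analogue of rayleigh_quotient() above: for a generalized eigenpair A v = lam B v, the quotient <A v, v> / <B v, v> recovers lam (the matrices below are illustrative only).

import numpy as np

A = np.array([[2.0, 0.0], [0.0, 5.0]])
B = np.array([[1.0, 0.0], [0.0, 2.0]])
v = np.array([0.0, 1.0])                  # A v = 2.5 * B v
quotient = np.dot(A @ v, v) / np.dot(B @ v, v)
assert np.isclose(quotient, 2.5)
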
Example #43
0
def test_eim_approximation_20(expression_type, basis_generation):
    """
    This test is the version of test 19 where the high fidelity solution is used in place of the reduced order one.
    """
    @StoreMapFromProblemNameToProblem
    @StoreMapFromProblemToTrainingStatus
    @StoreMapFromSolutionToProblem
    class MockProblem(ParametrizedProblem):
        def __init__(self, V, **kwargs):
            # Call parent
            ParametrizedProblem.__init__(
                self,
                os.path.join("test_eim_approximation_20_tempdir",
                             expression_type, basis_generation,
                             "mock_problem"))
            # Minimal subset of a ParametrizedDifferentialProblem
            self.V = V
            self._solution = Function(V)
            self.components = ["u", "s", "p"]
            # Parametrized function to be interpolated
            x = SpatialCoordinate(V.mesh())
            mu = SymbolicParameters(self, V, (-1., -1.))
            self.f00 = 1. / sqrt(
                pow(x[0] - mu[0], 2) + pow(x[1] - mu[1], 2) + 0.01)
            self.f01 = 1. / sqrt(
                pow(x[0] - mu[0], 4) + pow(x[1] - mu[1], 4) + 0.01)
            # Inner product
            f = TrialFunction(self.V)
            g = TestFunction(self.V)
            self.inner_product = assemble(inner(f, g) * dx)
            # Collapsed vector and space
            self.V0 = V.sub(0).collapse()
            self.V00 = V.sub(0).sub(0).collapse()
            self.V1 = V.sub(1).collapse()

        def name(self):
            return "MockProblem_20_" + expression_type + "_" + basis_generation

        def init(self):
            pass

        def solve(self):
            print("solving mock problem at mu =", self.mu)
            assert not hasattr(self, "_is_solving")
            self._is_solving = True
            f00 = project(self.f00, self.V00)
            f01 = project(self.f01, self.V00)
            assign(self._solution.sub(0).sub(0), f00)
            assign(self._solution.sub(0).sub(1), f01)
            delattr(self, "_is_solving")
            return self._solution

    @StoreMapFromProblemToReductionMethod
    class MockReductionMethod(ReductionMethod):
        def __init__(self, truth_problem, **kwargs):
            # Call parent
            ReductionMethod.__init__(
                self,
                os.path.join("test_eim_approximation_20_tempdir",
                             expression_type, basis_generation,
                             "mock_problem"))
            # Minimal subset of a DifferentialProblemReductionMethod
            self.truth_problem = truth_problem
            self.reduced_problem = None

        def initialize_training_set(self,
                                    ntrain,
                                    enable_import=True,
                                    sampling=None,
                                    **kwargs):
            return ReductionMethod.initialize_training_set(
                self, self.truth_problem.mu_range, ntrain, enable_import,
                sampling, **kwargs)

        def initialize_testing_set(self,
                                   ntest,
                                   enable_import=False,
                                   sampling=None,
                                   **kwargs):
            return ReductionMethod.initialize_testing_set(
                self, self.truth_problem.mu_range, ntest, enable_import,
                sampling, **kwargs)

        def offline(self):
            pass

        def update_basis_matrix(self, snapshot):
            pass

        def error_analysis(self, N=None, **kwargs):
            pass

        def speedup_analysis(self, N=None, **kwargs):
            pass

    class ParametrizedFunctionApproximation(EIMApproximation):
        def __init__(self, truth_problem, expression_type, basis_generation):
            self.V = truth_problem.V0
            (f0, _) = split(truth_problem._solution)
            #
            folder_prefix = os.path.join("test_eim_approximation_20_tempdir",
                                         expression_type, basis_generation)
            assert expression_type in ("Function", "Vector", "Matrix")
            if expression_type == "Function":
                # Call Parent constructor
                EIMApproximation.__init__(
                    self, truth_problem,
                    ParametrizedExpressionFactory(grad(f0)), folder_prefix,
                    basis_generation)
            elif expression_type == "Vector":
                v = TestFunction(self.V)
                form = inner(grad(f0), grad(v)) * dx
                # Call Parent constructor
                EIMApproximation.__init__(self, truth_problem,
                                          ParametrizedTensorFactory(form),
                                          folder_prefix, basis_generation)
            elif expression_type == "Matrix":
                u = TrialFunction(self.V)
                v = TestFunction(self.V)
                form = inner(grad(f0), grad(u)) * v[0] * dx
                # Call Parent constructor
                EIMApproximation.__init__(self, truth_problem,
                                          ParametrizedTensorFactory(form),
                                          folder_prefix, basis_generation)
            else:  # impossible to arrive here anyway thanks to the assert
                raise AssertionError("Invalid expression_type")

    # 1. Create the mesh for this test
    mesh = RectangleMesh(Point(0.1, 0.1), Point(0.9, 0.9), 20, 20)

    # 2. Create Finite Element space (Lagrange P1)
    element_0 = VectorElement("Lagrange", mesh.ufl_cell(), 2)
    element_1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
    element = MixedElement(element_0, element_1)
    V = FunctionSpace(mesh, element, components=[["u", "s"], "p"])

    # 3. Create a parametrized problem
    problem = MockProblem(V)
    mu_range = [(-1., -0.01), (-1., -0.01)]
    problem.set_mu_range(mu_range)

    # 4. Create a reduction method, but postpone generation of the reduced problem
    MockReductionMethod(problem)

    # 5. Allocate an object of the ParametrizedFunctionApproximation class
    parametrized_function_approximation = ParametrizedFunctionApproximation(
        problem, expression_type, basis_generation)
    parametrized_function_approximation.set_mu_range(mu_range)

    # 6. Prepare reduction with EIM
    parametrized_function_reduction_method = EIMApproximationReductionMethod(
        parametrized_function_approximation)
    parametrized_function_reduction_method.set_Nmax(16)
    parametrized_function_reduction_method.set_tolerance(0.)

    # 7. Perform EIM offline phase
    parametrized_function_reduction_method.initialize_training_set(
        64, sampling=EquispacedDistribution())
    reduced_parametrized_function_approximation = parametrized_function_reduction_method.offline()

    # 8. Perform EIM online solve
    online_mu = (-1., -1.)
    reduced_parametrized_function_approximation.set_mu(online_mu)
    reduced_parametrized_function_approximation.solve()

    # 9. Perform EIM error analysis
    parametrized_function_reduction_method.initialize_testing_set(100)
    parametrized_function_reduction_method.error_analysis()
Example #44
    def __init__(self):
        # https://fenicsproject.org/qa/12891/initialize-mesh-from-vertices-connectivities-at-once
        points, cells, _, cell_data, _ = meshes.ball_in_tube_cyl.generate()
        # 2018.1
        # self.mesh = Mesh(
        #     dolfin.mpi_comm_world(), dolfin.cpp.mesh.CellType.Type_triangle,
        #     points[:, :2], cells['triangle']
        #     )
        with TemporaryDirectory() as temp_dir:
            tmp_filename = os.path.join(temp_dir, "test.xml")
            meshio.write_points_cells(
                tmp_filename,
                points,
                cells,
                cell_data=cell_data,
                file_format="dolfin-xml",
            )
            self.mesh = Mesh(tmp_filename)

        V0_element = FiniteElement("CG", self.mesh.ufl_cell(), 2)
        V1_element = FiniteElement("B", self.mesh.ufl_cell(), 3)
        self.W = FunctionSpace(self.mesh, V0_element * V1_element)

        self.P = FunctionSpace(self.mesh, "CG", 1)

        # Define mesh and boundaries.
        class LeftBoundary(SubDomain):
            # pylint: disable=no-self-use
            def inside(self, x, on_boundary):
                return on_boundary and x[0] < GMSH_EPS

        left_boundary = LeftBoundary()

        class RightBoundary(SubDomain):
            # pylint: disable=no-self-use
            def inside(self, x, on_boundary):
                return on_boundary and x[0] > 1.0 - GMSH_EPS

        right_boundary = RightBoundary()

        class LowerBoundary(SubDomain):
            # pylint: disable=no-self-use
            def inside(self, x, on_boundary):
                return on_boundary and x[1] < GMSH_EPS

        lower_boundary = LowerBoundary()

        # class UpperBoundary(SubDomain):
        #     # pylint: disable=no-self-use
        #     def inside(self, x, on_boundary):
        #         return on_boundary and x[1] > 5.0-GMSH_EPS

        class CoilBoundary(SubDomain):
            # pylint: disable=no-self-use
            def inside(self, x, on_boundary):
                # One has to pay a little bit of attention when defining the
                # coil boundary; it's easy to miss the edges closest to x[0]=0.
                return (
                    on_boundary
                    and x[1] > 1.0 - GMSH_EPS
                    and x[1] < 2.0 + GMSH_EPS
                    and x[0] < 1.0 - GMSH_EPS
                )

        coil_boundary = CoilBoundary()

        self.u_bcs = [
            DirichletBC(self.W, (0.0, 0.0), right_boundary),
            DirichletBC(self.W.sub(0), 0.0, left_boundary),
            DirichletBC(self.W, (0.0, 0.0), lower_boundary),
            DirichletBC(self.W, (0.0, 0.0), coil_boundary),
        ]
        self.p_bcs = []
        # self.p_bcs = [DirichletBC(Q, 0.0, upper_boundary)]
        return
Example #45
def run_with_params(Tb, mu_value, k_s, path):
    run_time_init = clock()

    mesh = BoxMesh(Point(0.0, 0.0, 0.0), Point(mesh_width, mesh_width, mesh_height), nx, ny, nz)

    pbc = PeriodicBoundary()

    WE = VectorElement('CG', mesh.ufl_cell(), 2)
    SE = FiniteElement('CG', mesh.ufl_cell(), 1)
    WSSS = FunctionSpace(mesh, MixedElement(WE, SE, SE, SE), constrained_domain=pbc)
    # W = FunctionSpace(mesh, WE, constrained_domain=pbc)
    # S = FunctionSpace(mesh, SE, constrained_domain=pbc)
    W = WSSS.sub(0).collapse()
    S = WSSS.sub(1).collapse()

    temperature_vals = [27.0 + 273, Tb + 273, 1300.0 + 273, 1305.0 + 273]
    temp_prof = TemperatureProfile(temperature_vals, element=S.ufl_element())

    mu_a = mu_value  # this was taken from the Blankenbach paper, can change

    Ep = b / temp_prof.delta

    mu_bot = exp(-Ep * (temp_prof.bottom * temp_prof.delta - 1573.0) + cc) * mu_a

    # TODO: verify exponentiation
    Ra = rho_0 * alpha * g * temp_prof.delta * h ** 3 / (kappa_0 * mu_a)
    w0 = rho_0 * alpha * g * temp_prof.delta * h ** 2 / mu_a
    tau = h / w0
    p0 = mu_a * w0 / h

    log(mu_a, mu_bot, Ra, w0, p0)

    slip_vx = 1.6E-09 / w0  # Non-dimensional
    slip_velocity = Constant((slip_vx, 0.0, 0.0))
    zero_slip = Constant((0.0, 0.0, 0.0))

    time_step = 3.0E11 / tau * 2

    dt = Constant(time_step)
    t_end = 3.0E15 / tau / 5.0  # Non-dimensional times

    u = Function(WSSS)

    # Instead of TrialFunctions, we use split(u) for our non-linear problem
    v, p, T, Tf = split(u)
    v_t, p_t, T_t, Tf_t = TestFunctions(WSSS)

    T0 = interpolate(temp_prof, S)

    mu_exp = Expression('exp(-Ep * (T_val * dTemp - 1573.0) + cc * x[2] / mesh_height)',
                       Ep=Ep, dTemp=temp_prof.delta, cc=cc, mesh_height=mesh_height, T_val=T0,
                       element=S.ufl_element())

    Tf0 = interpolate(temp_prof, S)

    mu = Function(S)
    v0 = Function(W)

    v_theta = (1.0 - theta) * v0 + theta * v

    T_theta = (1.0 - theta) * T0 + theta * T

    Tf_theta = (1.0 - theta) * Tf0 + theta * Tf

    # TODO: Verify forms

    r_v = (inner(sym(grad(v_t)), 2.0 * mu * sym(grad(v)))
           - div(v_t) * p
           - T * v_t[2]) * dx

    r_p = p_t * div(v) * dx

    heat_transfer = Constant(k_s) * (Tf_theta - T_theta) * dt

    r_T = (T_t * ((T - T0) + dt * inner(v_theta, grad(T_theta)))  # TODO: Inner vs dot
           + (dt / Ra) * inner(grad(T_t), grad(T_theta))
           - T_t * heat_transfer) * dx

    v_melt = Function(W)
    z_hat = Constant((0.0, 0.0, 1.0))

    # TODO: inner -> dot, take out Tf_t
    r_Tf = (Tf_t * ((Tf - Tf0) + dt * inner(v_melt, grad(Tf_theta)))
            + Tf_t * heat_transfer) * dx

    r = r_v + r_p + r_T + r_Tf

    bcv0 = DirichletBC(WSSS.sub(0), zero_slip, top)
    bcv1 = DirichletBC(WSSS.sub(0), slip_velocity, bottom)
    bcv2 = DirichletBC(WSSS.sub(0).sub(1), Constant(0.0), back)
    bcv3 = DirichletBC(WSSS.sub(0).sub(1), Constant(0.0), front)

    bcp0 = DirichletBC(WSSS.sub(1), Constant(0.0), bottom)
    bct0 = DirichletBC(WSSS.sub(2), Constant(temp_prof.surface), top)
    bct1 = DirichletBC(WSSS.sub(2), Constant(temp_prof.bottom), bottom)
    bctf1 = DirichletBC(WSSS.sub(3), Constant(temp_prof.bottom), bottom)

    bcs = [bcv0, bcv1, bcv2, bcv3, bcp0, bct0, bct1, bctf1]

    t = 0
    count = 0
    files = DefaultDictByKey(partial(create_xdmf, path))

    while t < t_end:
        mu.interpolate(mu_exp)
        rhosolid = rho_0 * (1.0 - alpha * (T0 * temp_prof.delta - 1573.0))
        deltarho = rhosolid - rho_melt
        # TODO: project (accuracy) vs interpolate
        assign(v_melt, project(v0 - darcy * (grad(p) * p0 / h - deltarho * z_hat * g) / w0, W))
        # TODO: Written out one step later?
        # v_melt.assign(v0 - darcy * (grad(p) * p0 / h - deltarho * yvec * g) / w0)
        # TODO: use nP after to avoid projection?

        solve(r == 0, u, bcs)
        nV, nP, nT, nTf = u.split()  # TODO: write with Tf, ... etc

        if count % output_every == 0:
            time_left(count, t_end / time_step, run_time_init)  # TODO: timestep vs dt

            # TODO: Make sure all writes are to the same function for each time step
            files['T_fluid'].write(nTf, t)
            files['p'].write(nP, t)
            files['v_solid'].write(nV, t)
            files['T_solid'].write(nT, t)
            files['mu'].write(mu, t)
            files['v_melt'].write(v_melt, t)
            files['gradp'].write(project(grad(nP), W), t)
            files['rho'].write(project(rhosolid, S), t)
            files['Tf_grad'].write(project(grad(Tf), W), t)
            files['advect'].write(project(dt * dot(v_melt, grad(nTf))), t)
            files['ht'].write(project(heat_transfer, S), t)

        assign(T0, nT)
        assign(v0, nV)
        assign(Tf0, nTf)

        t += time_step
        count += 1

    log('Case mu={}, Tb={}, k={} complete. Run time = {:.2f} minutes'.format(mu_a, Tb, k_s, (clock() - run_time_init) / 60.0))
Example #46
    def _evaluateLocalEstimator(cls, mu, w, coeff_field, pde, f, quadrature_degree, epsilon=1e-5):
        """Evaluation of patch local equilibrated estimator."""

        # prepare numerical flux and f
        sigma_mu, f_mu = evaluate_numerical_flux(w, mu, coeff_field, f)

        # ###################
        # ## MIXED PROBLEM ##
        # ###################

        # get setup data for mixed problem
        V = w[mu]._fefunc.function_space()
        mesh = V.mesh()
        mesh.init()
        degree = element_degree(w[mu]._fefunc)

        # data for nodal bases
        V_dm = V.dofmap()
        V_dofs = dict([(i, V_dm.cell_dofs(i)) for i in range(mesh.num_cells())])
        V1 = FunctionSpace(mesh, 'CG', 1)   # V1 is to define nodal base functions
        phi_z = Function(V1)
        phi_coeffs = np.ndarray(V1.dim())
        vertex_dof_map = V1.dofmap().vertex_to_dof_map(mesh)
        # vertex_dof_map = vertex_to_dof_map(V1)
        dof_list = vertex_dof_map.tolist()
        # DG0 localisation
        DG0 = FunctionSpace(mesh, 'DG', 0)
        DG0_dofs = dict([(c.index(),DG0.dofmap().cell_dofs(c.index())[0]) for c in cells(mesh)])
        dg0 = TestFunction(DG0)
        # characteristic function of patch
        xi_z = Function(DG0)
        xi_coeffs = np.ndarray(DG0.dim())
        # mesh data
        h = CellSize(mesh)
        n = FacetNormal(mesh)
        cf = CellFunction('size_t', mesh)
        # setup error estimator vector
        eq_est = np.zeros(DG0.dim())

        # setup global equilibrated flux vector
        DG = VectorFunctionSpace(mesh, "DG", degree)
        DG_dofmap = DG.dofmap()

        # define form functions
        tau = TrialFunction(DG)
        v = TestFunction(DG)

        # define global tau
        tau_global = Function(DG)
        tau_global.vector()[:] = 0.0

        # iterate vertices
        for vertex in vertices(mesh):
            # get patch cell indices
            vid = vertex.index()
            patch_cid, FF_inner, FF_boundary = get_vertex_patch(vid, mesh, layers=1)

            # set nodal base function
            phi_coeffs[:] = 0
            phi_coeffs[dof_list.index(vid)] = 1
            phi_z.vector()[:] = phi_coeffs

            # set characteristic function and mark patch
            cf.set_all(0)
            xi_coeffs[:] = 0
            for cid in patch_cid:
                xi_coeffs[DG0_dofs[int(cid)]] = 1
                cf[int(cid)] = 1
            xi_z.vector()[:] = xi_coeffs

            # determine local dofs
            lDG_cell_dofs = dict([(cid, DG_dofmap.cell_dofs(cid)) for cid in patch_cid])
            lDG_dofs = [cd.tolist() for cd in lDG_cell_dofs.values()]
            lDG_dofs = list(iter.chain(*lDG_dofs))

            # print "\nlocal DG subspace has dimension", len(lDG_dofs), "degree", degree, "cells", len(patch_cid), patch_cid
            # print "local DG_cell_dofs", lDG_cell_dofs
            # print "local DG_dofs", lDG_dofs

            # create patch measures
            dx = Measure('dx')[cf]
            dS = Measure('dS')[FF_inner]

            # define forms
            alpha = Constant(1 / epsilon) / h
            a = inner(tau,v) * phi_z * dx(1) + alpha * div(tau) * div(v) * dx(1) + avg(alpha) * jump(tau,n) * jump(v,n) * dS(1)\
                + avg(alpha) * jump(xi_z * tau,n) * jump(v,n) * dS(2)
            L = -alpha * (div(sigma_mu) + f) * div(v) * phi_z * dx(1)\
                - avg(alpha) * jump(sigma_mu,n) * jump(v,n) * avg(phi_z)*dS(1)

    #        print "L2 f + div(sigma)", assemble((f + div(sigma)) * (f + div(sigma)) * dx(0))

            # assemble forms
            lhs = assemble(a, form_compiler_parameters={'quadrature_degree': quadrature_degree})
            rhs = assemble(L, form_compiler_parameters={'quadrature_degree': quadrature_degree})

            # convert DOLFIN representation to scipy sparse arrays
            rows, cols, values = lhs.data()
            lhsA = sps.csr_matrix((values, cols, rows)).tocoo()

            # slice sparse matrix and solve linear problem
            lhsA = coo_submatrix_pull(lhsA, lDG_dofs, lDG_dofs)
            lx = spsolve(lhsA, rhs.array()[lDG_dofs])
            # print ">>> local solution lx", type(lx), lx
            local_tau = Function(DG)
            local_tau.vector()[lDG_dofs] = lx
            # print "div(tau)", assemble(inner(div(local_tau),div(local_tau))*dx(1))

            # add up local fluxes
            tau_global.vector()[lDG_dofs] += lx

        # evaluate estimator
        # maybe TODO: re-define measure dx
        eq_est = assemble( inner(tau_global, tau_global) * dg0 * (dx(0)+dx(1)),\
                           form_compiler_parameters={'quadrature_degree': quadrature_degree})

        # reorder according to cell ids
        eq_est = eq_est[DG0_dofs.values()].array()
        global_est = np.sqrt(np.sum(eq_est))
        # eq_est_global = assemble( inner(tau_global, tau_global) * (dx(0)+dx(1)), form_compiler_parameters={'quadrature_degree': quadrature_degree} )
        # global_est2 = np.sqrt(np.sum(eq_est_global))
        return global_est, FlatVector(np.sqrt(eq_est))#, tau_global
Example #47
class Discretization(object):
  def __init__(self, problem, verbosity=0):
    """

    :param ProblemData problem:
    :param int verbosity:
    :return:
    """
    self.parameters = parameters["discretization"]

    self.verb = verbosity
    self.vis_folder = os.path.join(problem.out_folder, "MESH")
    self.core = problem.core
    self.G = problem.G

    if self.verb > 1: print pid+"Loading mesh"
        
    t_load = Timer("DD: Data loading")

    if not problem.mesh_module:
      if self.verb > 1: print pid + "  mesh data"
      self.mesh = Mesh(problem.mesh_files.mesh)

      if self.verb > 1: print pid + "  physical data"
      self.cell_regions_fun = MeshFunction("size_t", self.mesh, problem.mesh_files.physical_regions)

      if self.verb > 1: print pid + "  boundary data"
      self.boundaries = MeshFunction("size_t", self.mesh, problem.mesh_files.facet_regions)
    else:
      self.mesh = problem.mesh_module.mesh
      self.cell_regions_fun = problem.mesh_module.regions

      try:
        self.boundaries = problem.mesh_module.boundaries
      except AttributeError:
        self.boundaries = None

    assert self.mesh
    assert self.boundaries is None or self.boundaries.array().size > 0

    if self.verb > 2:
      print pid+"  mesh info: " + str(self.mesh)

    if self.verb > 1: print0("Defining function spaces" )

    self.t_spaces = Timer("DD: Function spaces construction")

    # Spaces that must be specified by the respective subclasses
    self.V = None     # solution space
    self.Vphi1 = None # 1-g scalar flux space
    
    # XS / TH space
    self.V0 = FunctionSpace(self.mesh, "DG", 0)
    self.ndof0 = self.V0.dim()

    dofmap = self.V0.dofmap()
    self.local_ndof0 = dofmap.local_dimension("owned")

    self.cell_regions = self.cell_regions_fun.array()
    assert self.cell_regions.size == self.local_ndof0

  def __create_cell_dof_mapping(self, dofmap):
    """
    Generate cell -> dof mapping for all cells of current partition.
    Note: in DG(0) space, there is one dof per element and no ghost cells.

    :param GenericDofMap dofmap: DG(0) dofmap
    """

    if self.verb > 2: print0("Constructing cell -> dof mapping")
    timer = Timer("DD: Cell->dof construction")

    code = \
    '''
      #include <dolfin/mesh/Cell.h>

      namespace dolfin
      {
        void fill_in(Array<int>& local_cell_dof_map, const Mesh& mesh, const GenericDofMap& dofmap)
        {
          std::size_t local_dof_range_start = dofmap.ownership_range().first;
          int* cell_dof_data = local_cell_dof_map.data();

          for (CellIterator c(mesh); !c.end(); ++c)
            *cell_dof_data++ = dofmap.cell_dofs(c->index())[0] - local_dof_range_start;
        }
      }
    '''

    cell_mapping_module = compile_extension_module(code)
    cell_dof_array = IntArray(self.local_ndof0)
    cell_mapping_module.fill_in(cell_dof_array, self.mesh, dofmap)
    self._local_cell_dof_map = cell_dof_array.array()

    timer.stop()

  def __create_cell_layers_mapping(self):
    """
    Generate a cell -> axial layer mapping for all cells of current partition. Note that keys are ordered by the
    associated DG(0) dof, not by the cell index in the mesh.
    """

    if self.verb > 2: print0("Constructing cell -> layer mapping")
    timer = Timer("DD: Cell->layer construction")

    code = \
    '''
      #include <dolfin/mesh/Cell.h>

      namespace dolfin
      {
        void fill_in(Array<int>& local_cell_layers,
                     const Mesh& mesh, const Array<int>& cell_dofs, const Array<double>& layer_boundaries)
        {
          std::size_t num_layers = layer_boundaries.size() - 1;
          unsigned int layer;

          for (CellIterator c(mesh); !c.end(); ++c)
          {
            double midz = c->midpoint().z();
            for (layer = 0; layer < num_layers; layer++)
              if (layer_boundaries[layer] <= midz && midz <= layer_boundaries[layer+1])
                break;

            int dof = cell_dofs[c->index()];
            local_cell_layers[dof] = layer;
          }
        }
      }
    '''

    cell_mapping_module = compile_extension_module(code)

    cell_layers_array =  IntArray(self.local_ndof0)
    cell_mapping_module.fill_in(cell_layers_array, self.mesh, self.local_cell_dof_map, self.core.layer_boundaries)
    self._local_cell_layers = cell_layers_array.array()

    timer.stop()

  def __create_cell_vol_mapping(self):
    """
    Generate cell -> volume mapping for all cells of current partition. Note that keys are ordered by the
    associated DG(0) dof, not by the cell index in the mesh.

    This map is required for calculating various densities from total region integrals (like cell power densities from
    cell-integrated powers).
    """

    if self.verb > 2: print0("Constructing cell -> volume mapping")
    timer = Timer("DD: Cell->vol construction")

    code = \
    '''
      #include <dolfin/mesh/Cell.h>

      namespace dolfin
      {
        void fill_in(Array<double>& cell_vols, const Mesh& mesh, const Array<int>& cell_dofs)
        {
          for (CellIterator c(mesh); !c.end(); ++c)
            cell_vols[cell_dofs[c->index()]] = c->volume();
        }
      }
    '''

    cell_mapping_module = compile_extension_module(code)
    cell_vol_array = DoubleArray(self.local_ndof0)
    cell_mapping_module.fill_in(cell_vol_array, self.mesh, self.local_cell_dof_map)
    self._local_cell_volumes = cell_vol_array.array()

    timer.stop()

  @property
  def local_cell_dof_map(self):
    try:
      self._local_cell_dof_map
    except AttributeError:
      self.__create_cell_dof_mapping(self.V0.dofmap())
    return self._local_cell_dof_map

  @property
  def local_cell_volumes(self):
    try:
      self._local_cell_volumes
    except AttributeError:
      self.__create_cell_vol_mapping()
    return self._local_cell_volumes

  @property
  def local_cell_layers(self):
    try:
      self._local_cell_layers
    except AttributeError:
      self.__create_cell_layers_mapping()
    return self._local_cell_layers

  def visualize_mesh_data(self):
    timer = Timer("DD: Mesh data visualization")
    if self.verb > 2: print0("Visualizing mesh data")

    File(os.path.join(self.vis_folder, "mesh.pvd"), "compressed") << self.mesh
    if self.boundaries:
      File(os.path.join(self.vis_folder, "boundaries.pvd"), "compressed") << self.boundaries
    File(os.path.join(self.vis_folder, "mesh_regions.pvd"), "compressed") << self.cell_regions_fun

    # Create MeshFunction to hold cell process rank
    processes = CellFunction('size_t', self.mesh, MPI.rank(comm))
    File(os.path.join(self.vis_folder, "mesh_partitioning.pvd"), "compressed") << processes

  def print_diagnostics(self):
    print "\nDiscretization diagnostics"

    print MPI.rank(comm), self.mesh.num_entities(self.mesh.topology().dim())

    dofmap = self.V0.dofmap()

    print MPI.rank(comm), dofmap.ownership_range()
    print MPI.rank(comm), numpy.min(dofmap.collapse(self.mesh)[1].values()), \
          numpy.max(dofmap.collapse(self.mesh)[1].values())

    print "#Owned by {}: {}".format(MPI.rank(comm), dofmap.local_dimension("owned"))
    print "#Unowned by {}: {}".format(MPI.rank(comm), dofmap.local_dimension("unowned"))
Example #48
File: state.py  Project: getzze/magnum.fe
  def __init__(self, mesh, celldomains = {'magnetic': 1, 'conducting': 1}, facetdomains = {'outermagnet': 1}, material = None, scale = 1.0, t = 0.0, **kwargs):
    """
    This class holds the complete state of the simulation and provides some
    convenience wrappers for the handling of multiple domains. Furthermore
    it provides an interface for attribute handling and caching.

    *Domain/Material Examples*
      .. code-block:: python

        state = State(mesh,
          celldomains = {'magnetic': (1,3), 'conducting': 2, 'iron': 1, 'cobalt': 3}
          m = Expression(...)
        )

        # Set materials for different regions
        state.material['iron']       = Material(...)
        state.material['cobalt']     = Material(...)
        state.material['conducting'] = Material(...)

        # Use integration measures with named domains
        assemble(Constant(1.0) * state.dx('all'))       # All named domains
        assemble(Constant(1.0) * state.dx('magnetic'))  # Magnetic region
        assemble(Constant(1.0) * state.dx('!magnetic')) # Nonmagnetic region
        assemble(Constant(1.0) * state.dx(1))           # Region by ID

        # Compute average of magnetization m
        state.m.average()       # Over whole space
        state.m.average('iron') # Over iron region

        # Crop magnetization to subdomain and save as PVD
        f = File("m_iron.pvd")
        f << state.m.crop('iron')

        # Normalize the magnetization
        state.m.normalize()

    *Attribute Examples*
      .. code-block:: python

        # initialize state with constant magnetization. m is automatically
        # interpolated on a suitable discrete function space.
        state = State(mesh,
          m = Constant((1.0, 0.0, 0.0)),
        )

        # define a current as function of the time
        state.j = lambda state: Constant((state.t * 1e12, 0.0, 0.0))

        # define some functional depending on m
        state.E = lambda state: (assemble(inner(state.m, state.m)*state.dx()), "m")

        # dependencies can be arbitrarily nested
        state.E_times_2 = lambda state: (2*state.E, "E")

        # all values are cached according to their dependencies
        state.E_times_2  # triggers computation of E_times_2
        state.E_times_2  # taken from cache, no computation

        state.m = Constant((0.0, 1.0, 0.0))

        state.E_times_2  # triggers computation of E_times_2

    *Arguments*
      mesh (:class:`dolfin.Mesh`)
        The mesh including all subdomains as :class:`dolfin.MeshDomains`
      celldomains (:class:`dict`)
        naming of the cell subdomains; at least the subdomains 'magnetic' and 'conducting' should be defined.
      facetdomains (:class:`dict`)
        naming of the facet subdomains
      material (:class:`Material`)
        the material of the sample. If material differs from subdomain to subdomain, use material setters instead.
      scale (:class:`float`)
        the spatial scaling of the mesh. Use 1e-9 if you use nanometers as length measure.
      t (:class:`float`)
        the time
      **kwargs (:class:`dict`)
        add any state variables like magnetization (m) or spin diffusion (s).
        Expressions are automatically interpolated on the corresponding discrete spaces.
        
    """

    self._uuid  = uuid.uuid4()
    self.mesh  = mesh
    self.scale = scale
    self.t = t
    self.set_celldomains(celldomains)
    self.set_facetdomains(facetdomains)

    # TODO add args to set domains via mesh functions directly
    self.cell_domains  = MeshFunction('size_t', mesh, 3, mesh.domains())
    self.facet_domains = MeshFunction('size_t', mesh, 2, mesh.domains())

    self._dx = Measure('dx', mesh)[self.cell_domains]
    self._ds = Measure('ds', mesh)[self.facet_domains]
    self._dS = Measure('dS', mesh)[self.facet_domains]
    self._dP = {}

    self._VS = FunctionSpace(self.mesh, 'CG', 1)          # TODO lazy initialize
    self._VV = VectorFunctionSpace(self.mesh, 'CG', 1, 3) # TODO lazy initialize

    self.material = material

    self._wrapped_meshes  = {}
    self._volumes         = {}
    self._func_attributes = {}
    self._M_inv_diag      = {}

    for key, value in kwargs.iteritems():
      setattr(self, key, value)
Example #49
    def __init__(self):
        n = 40
        self.mesh = UnitSquareMesh(n, n, "crossed")

        # Define mesh and boundaries.
        class LeftBoundary(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and x[0] < DOLFIN_EPS

        class RightBoundary(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and x[0] > 1.0 - DOLFIN_EPS

        class LowerBoundary(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and x[1] < DOLFIN_EPS

        class UpperBoundary(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and x[1] > 1.0 - DOLFIN_EPS

        class RestrictedUpperBoundary(SubDomain):
            def inside(self, x, on_boundary):
                return (
                    on_boundary
                    and x[1] > 1.0 - DOLFIN_EPS
                    and DOLFIN_EPS < x[0]
                    and x[0] < 0.5 - DOLFIN_EPS
                )

        left = LeftBoundary()
        right = RightBoundary()
        lower = LowerBoundary()
        upper = UpperBoundary()
        # restricted_upper = RestrictedUpperBoundary()

        # Be particularly careful with the boundary conditions.
        # The main problem here is that the PPE system is consistent if and
        # only if
        #
        #     \int_\Omega div(u) = \int_\Gamma n.u = 0.
        #
        # This is exactly and even pointwise fulfilled for the continuous
        # problem.  In the discrete case, we have to make sure that n.u is
        # 0 all along the boundary.
        # In the lid-driven cavity problem, of particular interest are the
        # corner points at the lid. One has to ensure that the y-component of u
        # is 0 all across the lid, and the x-component of u is 0 everywhere but
        # the lid.  Since u is L2-"continuous", the lid condition on u_x must
        # not be enforced in the corner points. The u_y component must be
        # enforced all over the lid, including the end points.
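        # A minimal, hedged sketch (not in the original code) of how the
        # compatibility condition above could be checked for a hypothetical
        # velocity Function `u_bc` on self.W that satisfies the Dirichlet data
        # defined below:
        #
        #     n = FacetNormal(self.mesh)
        #     flux = assemble(dot(u_bc, n) * ds)
        #     assert abs(flux) < 1e-10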
        V_element = FiniteElement("CG", self.mesh.ufl_cell(), 2)
        self.W = FunctionSpace(self.mesh, V_element * V_element)

        self.u_bcs = [
            DirichletBC(self.W, (0.0, 0.0), left),
            DirichletBC(self.W, (0.0, 0.0), right),
            # DirichletBC(self.W.sub(0), Expression('x[0]'), restricted_upper),
            DirichletBC(self.W, (0.0, 0.0), lower),
            DirichletBC(self.W.sub(0), Constant(1.0), upper),
            DirichletBC(self.W.sub(1), 0.0, upper),
            # DirichletBC(self.W.sub(0), Constant('-1.0'), lower),
            # DirichletBC(self.W.sub(1), 0.0, lower),
            # DirichletBC(self.W.sub(1), Constant('1.0'), left),
            # DirichletBC(self.W.sub(0), 0.0, left),
            # DirichletBC(self.W.sub(1), Constant('-1.0'), right),
            # DirichletBC(self.W.sub(0), 0.0, right),
        ]
        self.P = FunctionSpace(self.mesh, "CG", 1)
        self.p_bcs = []
        return
Example #50
def FunctionAndRealSpace(mesh, family, degree):
    function_and_real_element = FunctionAndRealElement(family, mesh.ufl_cell(),
                                                       degree)
    return FunctionSpace(mesh, function_and_real_element)
Example #51
def project_mu(p):
    DG0 = FunctionSpace(mesh, "DG", 0)
    return project(p, DG0)
Example #52
File: ipcs1.py  Project: j-hr/projection
    def solve(self, problem):
        self.problem = problem
        doSave = problem.doSave
        save_this_step = False
        onlyVel = problem.saveOnlyVel
        dt = self.metadata['dt']

        nu = Constant(self.problem.nu)
        self.tc.init_watch('init', 'Initialization', True, count_to_percent=False)
        self.tc.init_watch('rhs', 'Assembled right hand side', True, count_to_percent=True)
        self.tc.init_watch('applybc1', 'Applied velocity BC 1st step', True, count_to_percent=True)
        self.tc.init_watch('applybc3', 'Applied velocity BC 3rd step', True, count_to_percent=True)
        self.tc.init_watch('applybcP', 'Applied pressure BC or orthogonalized rhs', True, count_to_percent=True)
        self.tc.init_watch('assembleMatrices', 'Initial matrix assembly', False, count_to_percent=True)
        self.tc.init_watch('solve 1', 'Running solver on 1st step', True, count_to_percent=True)
        self.tc.init_watch('solve 2', 'Running solver on 2nd step', True, count_to_percent=True)
        self.tc.init_watch('solve 3', 'Running solver on 3rd step', True, count_to_percent=True)
        self.tc.init_watch('solve 4', 'Running solver on 4th step', True, count_to_percent=True)
        self.tc.init_watch('assembleA1', 'Assembled A1 matrix (without stabiliz.)', True, count_to_percent=True)
        self.tc.init_watch('assembleA1stab', 'Assembled A1 stabilization', True, count_to_percent=True)
        self.tc.init_watch('next', 'Next step assignments', True, count_to_percent=True)
        self.tc.init_watch('saveVel', 'Saved velocity', True)

        self.tc.start('init')

        # Define function spaces (P2-P1)
        mesh = self.problem.mesh
        self.V = VectorFunctionSpace(mesh, "Lagrange", 2)  # velocity
        self.Q = FunctionSpace(mesh, "Lagrange", 1)  # pressure
        self.PS = FunctionSpace(mesh, "Lagrange", 2)  # partial solution (must be same order as V)
        self.D = FunctionSpace(mesh, "Lagrange", 1)  # velocity divergence space

        problem.initialize(self.V, self.Q, self.PS, self.D)

        # Define trial and test functions
        u = TrialFunction(self.V)
        v = TestFunction(self.V)
        p = TrialFunction(self.Q)
        q = TestFunction(self.Q)

        n = FacetNormal(mesh)
        I = Identity(find_geometric_dimension(u))

        # Initial conditions: u0 = velocity at previous time step, u1 = velocity two time steps back, p0 = previous pressure
        [u1, u0, p0] = self.problem.get_initial_conditions([{'type': 'v', 'time': -dt},
                                                            {'type': 'v', 'time': 0.0},
                                                            {'type': 'p', 'time': 0.0}])

        u_ = Function(self.V)  # current tentative velocity
        u_cor = Function(self.V)  # current corrected velocity
        p_ = Function(self.Q)  # current pressure or pressure help function from rotation scheme
        p_mod = Function(self.Q)  # current modified pressure from rotation scheme

        # Define coefficients
        k = Constant(self.metadata['dt'])
        f = Constant((0, 0, 0))

        # Define forms
        # step 1: Tentative velocity, solve to u_
        u_ext = 1.5 * u0 - 0.5 * u1  # extrapolation for convection term

        # Stabilisation
        h = CellSize(mesh)
        if self.args.cbc_tau:
            # used in Simula cbcflow project
            tau = Constant(self.stabCoef) * h / (sqrt(inner(u_ext, u_ext)) + h)
        else:
            # proposed in R. Codina: On stabilized finite element methods for linear systems of
            # convection-diffusion-reaction equations.
            tau = Constant(self.stabCoef) * k * h ** 2 / (
                2 * nu * k + k * h * sqrt(DOLFIN_EPS + inner(u_ext, u_ext)) + h ** 2)
            # DOLFIN_EPS is added because of FEniCS bug that inner(u_ext, u_ext) can be negative when u_ext = 0

        if self.use_full_SUPG:
            v1 = v + tau * 0.5 * dot(grad(v), u_ext)
            parameters['form_compiler']['quadrature_degree'] = 6
        else:
            v1 = v

        def nonlinearity(function):
            if self.args.ema:
                return 2 * inner(dot(sym(grad(function)), u_ext), v1) * dx + inner(div(function) * u_ext, v1) * dx
            else:
                return inner(dot(grad(function), u_ext), v1) * dx

        def diffusion(fce):
            if self.useLaplace:
                return nu * inner(grad(fce), grad(v1)) * dx
            else:
                form = inner(nu * 2 * sym(grad(fce)), sym(grad(v1))) * dx
                if self.bcv == 'CDN':
                    return form
                if self.bcv == 'LAP':
                    return form - inner(nu * dot(grad(fce).T, n), v1) * problem.get_outflow_measure_form()
                if self.bcv == 'DDN':
                    return form  # additional term must be added to non-constant part

        def pressure_rhs():
            if self.args.bc == 'outflow':
                return inner(p0, div(v1)) * dx
            else:
                return inner(p0, div(v1)) * dx - inner(p0 * n, v1) * problem.get_outflow_measure_form()

        a1_const = (1. / k) * inner(u, v1) * dx + diffusion(0.5 * u)
        a1_change = nonlinearity(0.5 * u)
        if self.bcv == 'DDN':
            # does not penalize influx for current step, only for the next one
            # this can lead to oscillation:
            # DDN correct next step, but then u_ext is OK so in next step DDN is not used, leading to new influx...
            # u and u_ext cannot be switched, min_value is nonlinear function
            a1_change += -0.5 * min_value(Constant(0.), inner(u_ext, n)) * inner(u,
                                                                                 v1) * problem.get_outflow_measure_form()
            # NT works only with uflacs compiler

        L1 = (1. / k) * inner(u0, v1) * dx - nonlinearity(0.5 * u0) - diffusion(0.5 * u0) + pressure_rhs()
        if self.bcv == 'DDN':
            L1 += 0.5 * min_value(0., inner(u_ext, n)) * inner(u0, v1) * problem.get_outflow_measure_form()

        # Non-consistent SUPG stabilisation
        if self.stabilize and not self.use_full_SUPG:
            # a1_stab = tau*inner(dot(grad(u), u_ext), dot(grad(v), u_ext))*dx
            a1_stab = 0.5 * tau * inner(dot(grad(u), u_ext), dot(grad(v), u_ext)) * dx(None, {'quadrature_degree': 6})
            # optional: to use Crank Nicolson in stabilisation term following change of RHS is needed:
            # L1 += -0.5*tau*inner(dot(grad(u0), u_ext), dot(grad(v), u_ext))*dx(None, {'quadrature_degree': 6})

        outflow_area = Constant(problem.outflow_area)
        need_outflow = Constant(0.0)
        if self.useRotationScheme:
            # Rotation scheme
            F2 = inner(grad(p), grad(q)) * dx + (1. / k) * q * div(u_) * dx
        else:
            # Projection, solve to p_
            if self.forceOutflow and problem.can_force_outflow:
                info('Forcing outflow.')
                F2 = inner(grad(p - p0), grad(q)) * dx + (1. / k) * q * div(u_) * dx
                for m in problem.get_outflow_measures():
                    F2 += (1. / k) * (1. / outflow_area) * need_outflow * q * m
            else:
                F2 = inner(grad(p - p0), grad(q)) * dx + (1. / k) * q * div(u_) * dx
        a2, L2 = system(F2)

        # step 3: Finalize, solve to u_
        if self.useRotationScheme:
            # Rotation scheme
            F3 = (1. / k) * inner(u - u_, v) * dx + inner(grad(p_), v) * dx
        else:
            F3 = (1. / k) * inner(u - u_, v) * dx + inner(grad(p_ - p0), v) * dx
        a3, L3 = system(F3)

        if self.useRotationScheme:
            # Rotation scheme: modify pressure
            F4 = (p - p0 - p_ + nu * div(u_)) * q * dx
            a4, L4 = system(F4)

        # Assemble matrices
        self.tc.start('assembleMatrices')
        A1_const = assemble(a1_const)  # must be here, so A1 stays one Python object during repeated assembly
        A1_change = A1_const.copy()  # copy to get matrix with same sparse structure (data will be overwritten)
        if self.stabilize and not self.use_full_SUPG:
            A1_stab = A1_const.copy()  # copy to get matrix with same sparse structure (data will be overwritten)
        A2 = assemble(a2)
        A3 = assemble(a3)
        if self.useRotationScheme:
            A4 = assemble(a4)
        self.tc.end('assembleMatrices')

        if self.solvers == 'direct':
            self.solver_vel_tent = LUSolver('mumps')
            self.solver_vel_cor = LUSolver('mumps')
            self.solver_p = LUSolver('mumps')
            if self.useRotationScheme:
                self.solver_rot = LUSolver('mumps')
        else:
            # NT 2016-1  KrylovSolver >> PETScKrylovSolver

            # not needed, chosen not to use hypre_parasails:
            # if self.prec_v == 'hypre_parasails':  # in FEniCS 1.6.0 inaccessible using KrylovSolver class
            #     self.solver_vel_tent = PETScKrylovSolver('gmres')   # PETSc4py object
            #     self.solver_vel_tent.ksp().getPC().setType('hypre')
            #     PETScOptions.set('pc_hypre_type', 'parasails')
            #     # this is global setting, but preconditioners for pressure solvers are set by their constructors
            # else:
            self.solver_vel_tent = PETScKrylovSolver('gmres', self.args.precV)  # nonsymmetric > gmres
            # cannot use 'ilu' in parallel
            self.solver_vel_cor = PETScKrylovSolver('cg', self.args.precVC)
            self.solver_p = PETScKrylovSolver(self.args.solP, self.args.precP)  # almost (up to BC) symmetric > CG
            if self.useRotationScheme:
                self.solver_rot = PETScKrylovSolver('cg', 'hypre_amg')

        # setup Krylov solvers
        if self.solvers == 'krylov':
            # Get the nullspace if there are no pressure boundary conditions
            foo = Function(self.Q)  # auxiliary vector for setting pressure nullspace
            if self.args.bc == 'nullspace':
                null_vec = Vector(foo.vector())
                self.Q.dofmap().set(null_vec, 1.0)
                null_vec *= 1.0 / null_vec.norm('l2')
                self.null_space = VectorSpaceBasis([null_vec])
                as_backend_type(A2).set_nullspace(self.null_space)

            # apply global options for Krylov solvers
            solver_options = {'monitor_convergence': True, 'maximum_iterations': 10000, 'nonzero_initial_guess': True}
            # 'nonzero_initial_guess': True   with  solver.solve(A, u, b) means that
            # Solver will use anything stored in u as an initial guess
            for solver in [self.solver_vel_tent, self.solver_vel_cor, self.solver_rot, self.solver_p] if \
                    self.useRotationScheme else [self.solver_vel_tent, self.solver_vel_cor, self.solver_p]:
                for key, value in solver_options.items():
                    try:
                        solver.parameters[key] = value
                    except KeyError:
                        info('Invalid option %s for KrylovSolver' % key)
                        return 1

            if self.args.solP == 'richardson':
                self.solver_p.parameters['monitor_convergence'] = False

            self.solver_vel_tent.parameters['relative_tolerance'] = 10 ** (-self.args.prv1)
            self.solver_vel_tent.parameters['absolute_tolerance'] = 10 ** (-self.args.pav1)
            self.solver_vel_cor.parameters['relative_tolerance'] = 10E-12
            self.solver_vel_cor.parameters['absolute_tolerance'] = 10E-4
            self.solver_p.parameters['relative_tolerance'] = 10 ** (-self.args.prp)
            self.solver_p.parameters['absolute_tolerance'] = 10 ** (-self.args.pap)
            if self.useRotationScheme:
                self.solver_rot.parameters['relative_tolerance'] = 10E-10
                self.solver_rot.parameters['absolute_tolerance'] = 10E-10

            if self.args.Vrestart > 0:
                self.solver_vel_tent.parameters['gmres']['restart'] = self.args.Vrestart

            if self.args.solP == 'gmres' and self.args.Prestart > 0:
                self.solver_p.parameters['gmres']['restart'] = self.args.Prestart

        # boundary conditions
        bcu, bcp = problem.get_boundary_conditions(self.args.bc == 'outflow', self.V, self.Q)
        self.tc.end('init')
        # Time-stepping
        info("Running of Incremental pressure correction scheme n. 1")
        ttime = self.metadata['time']
        t = dt
        step = 1

        # debug function
        if problem.args.debug_rot:
            plot_cor_v = Function(self.V)

        while t < (ttime + dt / 2.0):
            self.problem.update_time(t, step)
            if self.MPI_rank == 0:
                problem.write_status_file(t)

            if doSave:
                save_this_step = problem.save_this_step

            # assemble matrix (it depends on solution)
            self.tc.start('assembleA1')
            assemble(a1_change, tensor=A1_change)  # assembling into existing matrix is faster than assembling new one
            A1 = A1_const.copy()  # we don't want to change A1_const
            A1.axpy(1, A1_change, True)
            self.tc.end('assembleA1')
            self.tc.start('assembleA1stab')
            if self.stabilize and not self.use_full_SUPG:
                assemble(a1_stab, tensor=A1_stab)  # assembling into existing matrix is faster than assembling new one
                A1.axpy(1, A1_stab, True)
            self.tc.end('assembleA1stab')

            # Compute tentative velocity step
            begin("Computing tentative velocity")
            self.tc.start('rhs')
            b = assemble(L1)
            self.tc.end('rhs')
            self.tc.start('applybc1')
            [bc.apply(A1, b) for bc in bcu]
            self.tc.end('applybc1')
            try:
                self.tc.start('solve 1')
                self.solver_vel_tent.solve(A1, u_.vector(), b)
                self.tc.end('solve 1')
                if save_this_step:
                    self.tc.start('saveVel')
                    problem.save_vel(True, u_)
                    self.tc.end('saveVel')
                if save_this_step and not onlyVel:
                    problem.save_div(True, u_)
                problem.compute_err(True, u_, t)
                problem.compute_div(True, u_)
            except RuntimeError as inst:
                problem.report_fail(t)
                return 1
            end()

            if self.useRotationScheme:
                begin("Computing tentative pressure")
            else:
                begin("Computing pressure")
            if self.forceOutflow and problem.can_force_outflow:
                out = problem.compute_outflow(u_)
                info('Tentative outflow: %f' % out)
                n_o = -problem.last_inflow - out
                info('Needed outflow: %f' % n_o)
                need_outflow.assign(n_o)
            self.tc.start('rhs')
            b = assemble(L2)
            self.tc.end('rhs')
            self.tc.start('applybcP')
            [bc.apply(A2, b) for bc in bcp]
            if self.args.bc == 'nullspace':
                self.null_space.orthogonalize(b)
            self.tc.end('applybcP')
            try:
                self.tc.start('solve 2')
                self.solver_p.solve(A2, p_.vector(), b)
                self.tc.end('solve 2')
            except RuntimeError as inst:
                problem.report_fail(t)
                return 1
            if self.useRotationScheme:
                foo = Function(self.Q)
                foo.assign(p_ + p0)
                if save_this_step and not onlyVel:
                    problem.averaging_pressure(foo)
                    problem.save_pressure(True, foo)
            else:
                foo = Function(self.Q)
                foo.assign(p_)  # we do not want to change p_ by averaging
                if save_this_step and not onlyVel:
                    problem.averaging_pressure(foo)
                    problem.save_pressure(False, foo)
            end()

            begin("Computing corrected velocity")
            self.tc.start('rhs')
            b = assemble(L3)
            self.tc.end('rhs')
            if not self.args.B:
                self.tc.start('applybc3')
                [bc.apply(A3, b) for bc in bcu]
                self.tc.end('applybc3')
            try:
                self.tc.start('solve 3')
                self.solver_vel_cor.solve(A3, u_cor.vector(), b)
                self.tc.end('solve 3')
                problem.compute_err(False, u_cor, t)
                problem.compute_div(False, u_cor)
            except RuntimeError as inst:
                problem.report_fail(t)
                return 1
            if save_this_step:
                self.tc.start('saveVel')
                problem.save_vel(False, u_cor)
                self.tc.end('saveVel')
            if save_this_step and not onlyVel:
                problem.save_div(False, u_cor)
            end()

            if self.useRotationScheme:
                begin("Rotation scheme pressure correction")
                self.tc.start('rhs')
                b = assemble(L4)
                self.tc.end('rhs')
                try:
                    self.tc.start('solve 4')
                    self.solver_rot.solve(A4, p_mod.vector(), b)
                    self.tc.end('solve 4')
                except RuntimeError as inst:
                    problem.report_fail(t)
                    return 1
                if save_this_step and not onlyVel:
                    problem.averaging_pressure(p_mod)
                    problem.save_pressure(False, p_mod)
                end()

                if problem.args.debug_rot:
                    # save applied pressure correction (expressed as a term added to RHS of next tentative vel. step)
                    # see comment next to argument definition
                    plot_cor_v.assign(project(k * grad(nu * div(u_)), self.V))
                    problem.fileDict['grad_cor']['file'].write(plot_cor_v, t)

            # compute functionals (e. g. forces)
            problem.compute_functionals(u_cor, p_mod if self.useRotationScheme else p_, t, step)

            # Move to next time step
            self.tc.start('next')
            u1.assign(u0)
            u0.assign(u_cor)
            u_.assign(u_cor)  # use corrected velocity as initial guess in first step

            if self.useRotationScheme:
                p0.assign(p_mod)
            else:
                p0.assign(p_)

            t = round(t + dt, 6)  # round time step to 0.000001
            step += 1
            self.tc.end('next')

        info("Finished: Incremental pressure correction scheme n. 1")
        problem.report()
        return 0
Example #53
class TestLumpedMass(unittest.TestCase):

    def setUp(self):
        mesh = UnitSquareMesh(5, 5, 'crossed')
        self.V = FunctionSpace(mesh, 'Lagrange', 5)
        self.u = Function(self.V)
        self.uM = Function(self.V)
        self.uMdiag = Function(self.V)
        test = TestFunction(self.V)
        trial = TrialFunction(self.V)
        m = test*trial*dx
        self.M = assemble(m)
        self.solver = LUSolver()
        self.solver.parameters['reuse_factorization'] = True
        self.solver.parameters['symmetric'] = True
        self.solver.set_operator(self.M)
        self.ones = np.ones(self.V.dim())
        

    def test00(self):
        """ Create a lumped solver """
        myobj = LumpedMatrixSolver(self.V)

    def test01_set(self):
        """ Set operator """
        myobj = LumpedMatrixSolver(self.V)
        myobj.set_operator(self.M)

    def test01_entries(self):
        """ Lump matrix """
        myobj = LumpedMatrixSolver(self.V)
        myobj.set_operator(self.M)
        err = 0.0
        for index, ii in enumerate(self.M.array()):
            err += abs(ii.sum() - myobj.Mdiag[index])
        self.assertTrue(err < index*1e-16)


    def test02(self):
        """ Invert lumped matrix """
        myobj = LumpedMatrixSolver(self.V)
        myobj.set_operator(self.M)
        err = 0.0
        for ii in range(len(myobj.Mdiag.array())):
            err += abs(1./myobj.Mdiag[ii] - myobj.invMdiag[ii])
        self.assertTrue(err < ii*1e-16)


    def test03_basic(self):
        """ solve """
        myobj = LumpedMatrixSolver(self.V)
        myobj.set_operator(self.M)
        myobj.solve(self.uMdiag.vector(), myobj.Mdiag)
        diff = (myobj.one - self.uMdiag.vector()).array()
        self.assertTrue(np.linalg.norm(diff)/np.linalg.norm(myobj.one.array()) < 1e-14)


    def test04_mult(self):
        """ overloaded * operator """
        myobj = LumpedMatrixSolver(self.V)
        myobj.set_operator(self.M)
        self.uMdiag.vector().axpy(1.0, myobj*myobj.one)
        diff = (myobj.Mdiag - self.uMdiag.vector()).array()
        self.assertTrue(np.linalg.norm(diff)/np.linalg.norm(myobj.Mdiag.array()) < 1e-14)


    def test10(self):
        """ Create a lumped solver """
        myobj = LumpedMatrixSolverS(self.V)

    def test11_set(self):
        """ Set operator """
        myobj = LumpedMatrixSolverS(self.V)
        myobj.set_operator(self.M)

    def test11_entries(self):
        """ Lump matrix """
        myobj = LumpedMatrixSolverS(self.V)
        myobj.set_operator(self.M)
        Msum = np.dot(self.ones, self.M.array().dot(self.ones))
        err = abs(myobj.Mdiag.array().dot(self.ones) - \
        Msum) / Msum
        self.assertTrue(err < 1e-14, err)


    def test12(self):
        """ Invert lumped matrix """
        myobj = LumpedMatrixSolverS(self.V)
        myobj.set_operator(self.M)
        err = 0.0
        for ii in range(len(myobj.Mdiag.array())):
            err += abs(1./myobj.Mdiag[ii] - myobj.invMdiag[ii])
        self.assertTrue(err < ii*1e-16)


    def test13_basic(self):
        """ solve """
        myobj = LumpedMatrixSolverS(self.V)
        myobj.set_operator(self.M)
        myobj.solve(self.uMdiag.vector(), myobj.Mdiag)
        diff = myobj.one.array() - self.uMdiag.vector().array()
        self.assertTrue(np.linalg.norm(diff)/np.linalg.norm(myobj.one.array()) < 1e-14)


    def test14_mult(self):
        """ overloaded * operator """
        myobj = LumpedMatrixSolverS(self.V)
        myobj.set_operator(self.M)
        self.uMdiag.vector().axpy(1.0, myobj*myobj.one)
        diff = (myobj.Mdiag - self.uMdiag.vector()).array()
        self.assertTrue(np.linalg.norm(diff)/np.linalg.norm(myobj.Mdiag.array()) < 1e-14)
Example #54
def _test_time_stepping_2_sparse(callback_type, integrator_type):
    # Create mesh and define function space
    mesh = IntervalMesh(132, 0, 2*pi)
    V = FunctionSpace(mesh, "Lagrange", 1)

    # Define Dirichlet boundary (x = 0 or x = 2*pi)
    def boundary(x):
        return x[0] < 0 + DOLFIN_EPS or x[0] > 2*pi - 10*DOLFIN_EPS
        
    # Define time step
    dt = 0.01
    T = 1.

    # Define exact solution
    exact_solution_expression = Expression("sin(x[0]+t)", t=0, element=V.ufl_element())
    # ... and project it at the final time
    exact_solution_expression.t = T
    exact_solution = project(exact_solution_expression, V)

    # Define exact solution dot
    exact_solution_dot_expression = Expression("cos(x[0]+t)", t=0, element=V.ufl_element())
    # ... and project it at the final time
    exact_solution_dot_expression.t = T
    exact_solution_dot = project(exact_solution_dot_expression, V)

    # Define variational problem
    du = TrialFunction(V)
    du_dot = TrialFunction(V)
    v = TestFunction(V)
    u = Function(V)
    u_dot = Function(V)
    g = Expression("5./4.*sin(t+x[0])-3./4.*sin(3*(t+x[0]))+cos(t+x[0])", t=0., element=V.ufl_element())
    r_u = inner((1+u**2)*grad(u), grad(v))*dx
    j_u = derivative(r_u, u, du)
    r_u_dot = inner(u_dot, v)*dx
    j_u_dot = derivative(r_u_dot, u_dot, du_dot)
    r = r_u_dot + r_u - g*v*dx
    x = inner(du, v)*dx
    def bc(t):
        exact_solution_expression.t = t
        return [DirichletBC(V, exact_solution_expression, boundary)]

    # Assemble inner product matrix
    X = assemble(x)
    
    # Define callback function depending on callback type
    assert callback_type in ("form callbacks", "tensor callbacks")
    if callback_type == "form callbacks":
        def callback(arg):
            return arg
    elif callback_type == "tensor callbacks":
        def callback(arg):
            return assemble(arg)
            
    # Define problem wrapper
    class SparseProblemWrapper(TimeDependentProblem1Wrapper):
        # Residual and jacobian functions
        def residual_eval(self, t, solution, solution_dot):
            g.t = t
            return callback(r)
        def jacobian_eval(self, t, solution, solution_dot, solution_dot_coefficient):
            return callback(Constant(solution_dot_coefficient)*j_u_dot + j_u)
            
        # Define boundary condition
        def bc_eval(self, t):
            return bc(t)
            
        # Define initial condition
        def ic_eval(self):
            exact_solution_expression.t = 0.
            return project(exact_solution_expression, V)
            
        # Define custom monitor to plot the solution
        def monitor(self, t, solution, solution_dot):
            if matplotlib.get_backend() != "agg":
                plt.subplot(1, 2, 1).clear()
                plot(solution, title="u at t = " + str(t))
                plt.subplot(1, 2, 2).clear()
                plot(solution_dot, title="u_dot at t = " + str(t))
                plt.show(block=False)
                plt.pause(DOLFIN_EPS)
            else:
                print("||u|| at t = " + str(t) + ": " + str(solution.vector().norm("l2")))
                print("||u_dot|| at t = " + str(t) + ": " + str(solution_dot.vector().norm("l2")))
    
    # Solve the time dependent problem
    sparse_problem_wrapper = SparseProblemWrapper()
    (sparse_solution, sparse_solution_dot) = (u, u_dot)
    sparse_solver = SparseTimeStepping(sparse_problem_wrapper, sparse_solution, sparse_solution_dot)
    sparse_solver.set_parameters({
        "initial_time": 0.0,
        "time_step_size": dt,
        "final_time": T,
        "exact_final_time": "stepover",
        "integrator_type": integrator_type,
        "problem_type": "nonlinear",
        "snes_solver": {
            "linear_solver": "mumps",
            "maximum_iterations": 20,
            "report": True
        },
        "monitor": sparse_problem_wrapper.monitor,
        "report": True
    })
    all_sparse_solutions_time, all_sparse_solutions, all_sparse_solutions_dot = sparse_solver.solve()
    assert len(all_sparse_solutions_time) == int(T/dt + 1)
    assert len(all_sparse_solutions) == int(T/dt + 1)
    assert len(all_sparse_solutions_dot) == int(T/dt + 1)

    # Compute the error
    sparse_error = Function(V)
    sparse_error.vector().add_local(+ sparse_solution.vector().get_local())
    sparse_error.vector().add_local(- exact_solution.vector().get_local())
    sparse_error.vector().apply("")
    sparse_error_norm = sparse_error.vector().inner(X*sparse_error.vector())
    sparse_error_dot = Function(V)
    sparse_error_dot.vector().add_local(+ sparse_solution_dot.vector().get_local())
    sparse_error_dot.vector().add_local(- exact_solution_dot.vector().get_local())
    sparse_error_dot.vector().apply("")
    sparse_error_dot_norm = sparse_error_dot.vector().inner(X*sparse_error_dot.vector())
    print("SparseTimeStepping error (" + callback_type + ", " + integrator_type + "):", sparse_error_norm, sparse_error_dot_norm)
    assert isclose(sparse_error_norm, 0., atol=1.e-4)
    assert isclose(sparse_error_dot_norm, 0., atol=1.e-4)
    return ((sparse_error_norm, sparse_error_dot_norm), V, dt, T, u, u_dot, g, r, j_u, j_u_dot, X, exact_solution_expression, exact_solution, exact_solution_dot)
Example #55
    def _evaluateResidualEstimator(cls, mu, w, coeff_field, pde, f, quadrature_degree):
        """Evaluate the residual error according to EGSZ (5.7) which consists of volume terms (5.3) and jump terms (5.5).

            .. math:: \eta_{\mu,T}(w_N) &:= h_T || \overline{a}^{-1/2} (f\delta_{\mu,0} + \nabla\overline{a}\cdot\nabla w_{N,\mu}
                                + \sum_{m=1}^\infty \nabla a_m\cdot\nabla( \alpha^m_{\mu_m+1}\Pi_\mu^{\mu+e_m} w_{N,\mu+e_m}
                                - \alpha_{\mu_m}^m w_{N,\mu} + \alpha_{\mu_m-1}^m\Pi_\mu^{\mu-e_m} w_{N,\mu-e_m})) ||_{L^2(T)}\\
          \eta_{\mu,S}(w_N) &:= h_S^{-1/2} || \overline{a}^{-1/2} [(\overline{a}\nabla w_{N,\mu} + \sum_{m=1}^\infty a_m\nabla
                                  ( \alpha_{\mu_m+1}^m\Pi_\mu^{\mu+e_m} w_{N,\mu+e_m} - \alpha_{\mu_m}^m w_{N,\mu}
                                  + \alpha_{\mu_m-1}^m\Pi_\mu^{\mu-e_m} w_{N,\mu-e_m})\cdot\nu] ||_{L^2(S)}
        """
        # set quadrature degree
        quadrature_degree_old = parameters["form_compiler"]["quadrature_degree"]
        parameters["form_compiler"]["quadrature_degree"] = quadrature_degree
        logger.debug("residual quadrature order = " + str(quadrature_degree))
    
        # get pde residual terms
        r_T = pde.volume_residual
        r_E = pde.edge_residual
        r_Nb = pde.neumann_residual
        
        # get mean field of coefficient
        a0_f = coeff_field.mean_func

        # prepare some FEM variables
        V = w[mu]._fefunc.function_space()
        mesh = V.mesh()
        nu = FacetNormal(mesh)

        # initialise volume and edge residual with deterministic part
#        R_T = dot(nabla_grad(a0_f), nabla_grad(w[mu]._fefunc))
        R_T = r_T(a0_f, w[mu]._fefunc)
        if not mu:
            R_T = R_T + f
#        R_E = a0_f * dot(nabla_grad(w[mu]._fefunc), nu)
        R_E = r_E(a0_f, w[mu]._fefunc, nu)
        # get Neumann residual
        homogeneousNBC = (mu.order != 0)
        R_Nb = r_Nb(a0_f, w[mu]._fefunc, nu, mesh, homogeneous=homogeneousNBC)

        # iterate m
        Lambda = w.active_indices()
        maxm = w.max_order
        if len(coeff_field) < maxm:
            logger.warning("insufficient length of coefficient field for MultiVector (%i < %i)", len(coeff_field), maxm)
            maxm = len(coeff_field)
            #        assert coeff_field.length >= maxm        # ensure coeff_field expansion is sufficiently long
        for m in range(maxm):
            am_f, am_rv = coeff_field[m]

            # prepare polynomial coefficients
            beta = am_rv.orth_polys.get_beta(mu[m])

            # mu
            res = -beta[0] * w[mu]

            # mu+1
            mu1 = mu.inc(m)
            if mu1 in Lambda:
                w_mu1 = w.get_projection(mu1, mu)
                res += beta[1] * w_mu1

            # mu-1
            mu2 = mu.dec(m)
            if mu2 in Lambda:
                w_mu2 = w.get_projection(mu2, mu)
                res += beta[-1] * w_mu2

            # add volume contribution for m
#            r_t = dot(nabla_grad(am_f), nabla_grad(res._fefunc))
            R_T = R_T + r_T(am_f, res._fefunc)
            # add edge contribution for m
#            r_e = am_f * dot(nabla_grad(res._fefunc), nu)
            R_E = R_E + r_E(am_f, res._fefunc, nu)

        # prepare more FEM variables for residual assembly
        DG = FunctionSpace(mesh, "DG", 0)
        s = TestFunction(DG)
        h = CellSize(mesh)

        # scaling of residual terms and definition of residual form
        a0_s = a0_f[0] if isinstance(a0_f, tuple) else a0_f     # required for elasticity parameters
        res_form = (h ** 2 * (1 / a0_s) * dot(R_T, R_T) * s * dx
                    + avg(h) * dot(avg(R_E) / avg(a0_s), avg(R_E)) * 2 * avg(s) * dS)
        
        resT = h ** 2 * (1 / a0_s) * dot(R_T, R_T) * s * dx
        resE = 0 * s * dx + avg(h) * dot(avg(R_E) / avg(a0_s), avg(R_E)) * 2 * avg(s) * dS
        resNb = 0 * s * dx
        
        # add Neumann residuals
        if R_Nb is not None:
            for rj, dsj in R_Nb:
                res_form = res_form + h * (1 / a0_s) * dot(rj, rj) * s * dsj
                resNb += h * (1 / a0_s) * dot(rj, rj) * s * dsj

        # FEM evaluate residual on mesh
        eta = assemble(res_form)
        eta_indicator = np.array([sqrt(e) for e in eta])
        # map DG dofs to cell indices
        dofs = [DG.dofmap().cell_dofs(c.index())[0] for c in cells(mesh)]
        eta_indicator = eta_indicator[dofs]
        global_error = sqrt(sum(e for e in eta))

        # debug ---
        if False:
            etaT = assemble(resT)
            etaT_indicator = etaT #np.array([sqrt(e) for e in etaT])
            etaT = sqrt(sum(e for e in etaT))
            etaE = assemble(resE)
            etaE_indicator = etaE #np.array([sqrt(e) for e in etaE])
            etaE = sqrt(sum(e for e in etaE))
            etaNb = assemble(resNb)
            etaNb_indicator = etaNb #np.array([sqrt(e) for e in etaNb])
            etaNb = sqrt(sum(e for e in etaNb))
        
            print "==========RESIDUAL ESTIMATOR============"
            print "eta", eta
            print "eta_indicator", eta_indicator
            print "global =", global_error
            print "volume =", etaT
            print "edge =", etaE
            print "Neumann =", etaNb

            if False:        
                plot_indicators(((eta, "overall residual"), (etaT_indicator, "volume residual"), (etaE_indicator, "edge residual"), (etaNb_indicator, "Neumann residual")), mesh)
        # ---debug
        
        # restore quadrature degree
        parameters["form_compiler"]["quadrature_degree"] = quadrature_degree_old

        return (FlatVector(eta_indicator), global_error)
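# A common follow-up to the estimator above (not part of the original snippet) is to
# use the cell-wise indicators for adaptive mesh refinement. A minimal sketch,
# assuming a legacy DOLFIN version (CellFunction available) and a numpy array of
# per-cell indicators ordered by cell index, as returned above:
import numpy as np
from dolfin import CellFunction, cells, refine

def refine_by_indicators(mesh, indicators, fraction=0.5):
    """Refine all cells whose indicator exceeds `fraction` times the largest one."""
    markers = CellFunction("bool", mesh)
    markers.set_all(False)
    threshold = fraction * np.max(indicators)
    for c in cells(mesh):
        markers[c] = bool(indicators[c.index()] > threshold)
    return refine(mesh, markers)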
Example #56
def test_save_and_checkpoint_vector(tempdir, encoding, fe_degree, fe_family,
                                    mesh_tdim, mesh_n):
    if invalid_fe(fe_family, fe_degree):
        pytest.skip("Trivial finite element")

    filename = os.path.join(tempdir, "u2_checkpoint.xdmf")
    mesh = mesh_factory(mesh_tdim, mesh_n)
    FE = VectorElement(fe_family, mesh.ufl_cell(), fe_degree)
    V = FunctionSpace(mesh, FE)
    u_in = Function(V)
    u_out = Function(V)

    if has_petsc_complex:
        if mesh.geometry.dim == 1:

            def expr_eval(values, x):
                values[:, 0] = x[:, 0] + 1.0j * x[:, 0]

            u_out.interpolate(expr_eval)

        elif mesh.geometry.dim == 2:

            def expr_eval(values, x):
                values[:, 0] = 1.0j * x[:, 0] * x[:, 1]
                values[:, 1] = x[:, 0] + 1.0j * x[:, 0]

            u_out.interpolate(expr_eval)

        elif mesh.geometry.dim == 3:

            def expr_eval(values, x):
                values[:, 0] = x[:, 0] * x[:, 1]
                values[:, 1] = x[:, 0] + 1.0j * x[:, 0]
                values[:, 2] = x[:, 2]

            u_out.interpolate(expr_eval)
    else:
        if mesh.geometry.dim == 1:

            def expr_eval(values, x):
                values[:, 0] = x[:, 0]

            u_out.interpolate(expr_eval)

        elif mesh.geometry.dim == 2:

            def expr_eval(values, x):
                values[:, 0] = x[:, 0] * x[:, 1]
                values[:, 1] = x[:, 0]

            u_out.interpolate(expr_eval)

        elif mesh.geometry.dim == 3:

            def expr_eval(values, x):
                values[:, 0] = x[:, 0] * x[:, 1]
                values[:, 1] = x[:, 0]
                values[:, 2] = x[:, 2]

            u_out.interpolate(expr_eval)

    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as file:
        file.write_checkpoint(u_out, "u_out", 0)

    with XDMFFile(mesh.mpi_comm(), filename) as file:
        u_in = file.read_checkpoint(V, "u_out", 0)

    u_in.vector().axpy(-1.0, u_out.vector())
    assert u_in.vector().norm() < 1.0e-12
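# The fixtures and helpers used by this test (tempdir, encoding, fe_degree, fe_family,
# mesh_tdim, mesh_n, mesh_factory, invalid_fe) are defined elsewhere in the test
# module. A plausible sketch of the two helpers is given below; the exact skip
# criterion in invalid_fe is an assumption, not taken from the original module.
from dolfin import MPI, UnitIntervalMesh, UnitSquareMesh, UnitCubeMesh

def mesh_factory(tdim, n):
    # unit mesh of topological dimension tdim with n cells per direction
    if tdim == 1:
        return UnitIntervalMesh(MPI.comm_world, n)
    elif tdim == 2:
        return UnitSquareMesh(MPI.comm_world, n, n)
    elif tdim == 3:
        return UnitCubeMesh(MPI.comm_world, n, n, n)
    raise ValueError("unsupported topological dimension: {}".format(tdim))

def invalid_fe(fe_family, fe_degree):
    # e.g. a degree-0 Lagrange space is trivial and not worth checkpointing
    return fe_family == "CG" and fe_degree == 0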
Example #57
    def _evaluateGlobalMixedEstimator(cls, mu, w, coeff_field, pde, f, quadrature_degree, vectorspace_type='BDM'):
        """Evaluation of global mixed equilibrated estimator."""
        # set quadrature degree
#        quadrature_degree_old = parameters["form_compiler"]["quadrature_degree"]
#        parameters["form_compiler"]["quadrature_degree"] = quadrature_degree
#        logger.debug("residual quadrature order = " + str(quadrature_degree))

        # prepare numerical flux and f
        sigma_mu, f_mu = evaluate_numerical_flux(w, mu, coeff_field, f)

        # ###################
        # ## MIXED PROBLEM ##
        # ###################

        # get setup data for mixed problem
        V = w[mu]._fefunc.function_space()
        mesh = V.mesh()
        degree = element_degree(w[mu]._fefunc)

        # create function spaces
        DG0 = FunctionSpace(mesh, 'DG', 0)
        DG0_dofs = [DG0.dofmap().cell_dofs(c.index())[0] for c in cells(mesh)]
        RT = FunctionSpace(mesh, vectorspace_type, degree)
        W = RT * DG0

        # setup boundary conditions
#        bcs = pde.create_dirichlet_bcs(W.sub(1))

        # debug ===
        # from dolfin import DOLFIN_EPS, DirichletBC
        # def boundary(x):
        #     return x[0] < DOLFIN_EPS or x[0] > 1.0 + DOLFIN_EPS or x[1] < DOLFIN_EPS or x[1] > 1.0 + DOLFIN_EPS
        # bcs = [DirichletBC(W.sub(1), Constant(0.0), boundary)]
        # === debug

        # create trial and test functions
        (sigma, u) = TrialFunctions(W)
        (tau, v) = TestFunctions(W)

        # define variational form
        a_eq = (dot(sigma, tau) + div(tau) * u + div(sigma) * v) * dx
        L_eq = (- f_mu * v + dot(sigma_mu, tau)) * dx

        # compute solution
        w_eq = Function(W)
        solve(a_eq == L_eq, w_eq)
        (sigma_mixed, u_mixed) = w_eq.split()

        # #############################
        # ## EQUILIBRATION ESTIMATOR ##
        # #############################

        # evaluate error estimator
        dg0 = TestFunction(DG0)
        eta_mu = inner(sigma_mu, sigma_mu) * dg0 * dx
        eta_T = assemble(eta_mu, form_compiler_parameters={'quadrature_degree': quadrature_degree})
        eta_T = np.array([sqrt(e) for e in eta_T])

        # evaluate global error
        eta = sqrt(sum(i**2 for i in eta_T))
        # reorder array entries for local estimators
        eta_T = eta_T[DG0_dofs]

        # restore quadrature degree
#        parameters["form_compiler"]["quadrature_degree"] = quadrature_degree_old

        return eta, FlatVector(eta_T)
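# Remark: "W = RT * DG0" above relies on the function-space multiplication operator
# of older DOLFIN releases, which was later deprecated. In more recent legacy DOLFIN
# versions the same mixed space would typically be built from a MixedElement; a
# minimal sketch (the unit square mesh and BDM_1/DG_0 pair are placeholders chosen
# only for illustration):
from dolfin import UnitSquareMesh, FiniteElement, MixedElement, FunctionSpace
_mesh = UnitSquareMesh(8, 8)
_flux_element = FiniteElement("BDM", _mesh.ufl_cell(), 1)
_scalar_element = FiniteElement("DG", _mesh.ufl_cell(), 0)
_W = FunctionSpace(_mesh, MixedElement([_flux_element, _scalar_element]))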
Example #58
def run_with_params(Tb, mu_value, k_s, path, base_path):
    run_time_init = clock()

    mesh = BoxMesh(Point(0.0, 0.0, 0.0),
                   Point(mesh_width, mesh_width, mesh_height), nx, ny, nz)

    pbc = PeriodicBoundary()

    WE = VectorElement('CG', mesh.ufl_cell(), 2)
    SE = FiniteElement('CG', mesh.ufl_cell(), 1)
    WSSS = FunctionSpace(mesh,
                         MixedElement(WE, SE, SE, SE),
                         constrained_domain=pbc)
    # W = FunctionSpace(mesh, WE, constrained_domain=pbc)
    # S = FunctionSpace(mesh, SE, constrained_domain=pbc)
    W = WSSS.sub(0).collapse()
    S = WSSS.sub(1).collapse()

    temperature_vals = [27.0 + 273, Tb + 273, 1300.0 + 273, 1305.0 + 273]
    temp_prof = TemperatureProfile(temperature_vals, element=S.ufl_element())

    mu_a = mu_value  # this was taken from the Blankenbach paper, can change

    Ep = b / temp_prof.delta

    mu_bot = exp(-Ep *
                 (temp_prof.bottom * temp_prof.delta - 1573.0) + cc) * mu_a

    # TODO: verify exponentiation
    Ra = rho_0 * alpha * g * temp_prof.delta * h**3 / (kappa_0 * mu_a)
    w0 = rho_0 * alpha * g * temp_prof.delta * h**2 / mu_a
    tau = h / w0
    p0 = mu_a * w0 / h

    # Writing file containing constants for each mu, Tb, k (located in code_copy directory)

    const_str = 'mu_a = {}, Tb = {}, k = {} \n\nmu_bot = {}\nRa = {}\nw0 = {}\np0 = {}'.format(
        mu_a, Tb, k_s, mu_bot, Ra, w0, p0)
    const_str += '\n________________________________________\n\n'

    constfile = os.path.join(base_path, 'code_copy', 'constants.txt')
    logfile = os.path.join(base_path, 'code_copy', 'progress_log.txt')

    os.system("printf '" + const_str + "' >> " + constfile)

    # Continuing calculations

    slip_vx = 1.6E-09 / w0  # Non-dimensional
    slip_velocity = Constant((slip_vx, 0.0, 0.0))
    zero_slip = Constant((0.0, 0.0, 0.0))

    time_step = 3.0E11 / tau * 2

    dt = Constant(time_step)
    t_end = 3.0E15 / tau / 5.0  # Non-dimensional times

    u = Function(WSSS)

    # Instead of TrialFunctions, we use split(u) for our non-linear problem
    v, p, T, Tf = split(u)
    v_t, p_t, T_t, Tf_t = TestFunctions(WSSS)

    T0 = interpolate(temp_prof, S)

    mu_exp = Expression(
        'exp(-Ep * (T_val * dTemp - 1573.0) + cc * x[2] / mesh_height)',
        Ep=Ep,
        dTemp=temp_prof.delta,
        cc=cc,
        mesh_height=mesh_height,
        T_val=T0,
        element=S.ufl_element())

    Tf0 = interpolate(temp_prof, S)

    mu = Function(S)
    v0 = Function(W)

    v_theta = (1.0 - theta) * v0 + theta * v

    T_theta = (1.0 - theta) * T0 + theta * T

    Tf_theta = (1.0 - theta) * Tf0 + theta * Tf

    # TODO: Verify forms

    r_v = (inner(sym(grad(v_t)), 2.0 * mu * sym(grad(v))) - div(v_t) * p -
           T * v_t[2]) * dx

    r_p = p_t * div(v) * dx

    heat_transfer = Constant(k_s) * (Tf_theta - T_theta) * dt

    r_T = (
        T_t *
        ((T - T0) + dt * inner(v_theta, grad(T_theta)))  # TODO: Inner vs dot
        +
        (dt / Ra) * inner(grad(T_t), grad(T_theta)) - T_t * heat_transfer) * dx

    v_melt = Function(W)
    z_hat = Constant((0.0, 0.0, 1.0))

    # TODO: inner -> dot, take out Tf_t
    r_Tf = (Tf_t * ((Tf - Tf0) + dt * inner(v_melt, grad(Tf_theta))) +
            Tf_t * heat_transfer) * dx

    r = r_v + r_p + r_T + r_Tf

    bcv0 = DirichletBC(WSSS.sub(0), zero_slip, top)
    bcv1 = DirichletBC(WSSS.sub(0), slip_velocity, bottom)
    bcv2 = DirichletBC(WSSS.sub(0).sub(1), Constant(0.0), back)
    bcv3 = DirichletBC(WSSS.sub(0).sub(1), Constant(0.0), front)

    bcp0 = DirichletBC(WSSS.sub(1), Constant(0.0), bottom)
    bct0 = DirichletBC(WSSS.sub(2), Constant(temp_prof.surface), top)
    bct1 = DirichletBC(WSSS.sub(2), Constant(temp_prof.bottom), bottom)
    bctf1 = DirichletBC(WSSS.sub(3), Constant(temp_prof.bottom), bottom)

    bcs = [bcv0, bcv1, bcv2, bcv3, bcp0, bct0, bct1, bctf1]

    t = 0
    count = 0
    files = DefaultDictByKey(partial(create_xdmf, path))

    while t < t_end:
        mu.interpolate(mu_exp)
        rhosolid = rho_0 * (1.0 - alpha * (T0 * temp_prof.delta - 1573.0))
        deltarho = rhosolid - rho_melt
        # TODO: project (accuracy) vs interpolate
        assign(
            v_melt,
            project(
                v0 - darcy * (grad(p) * p0 / h - deltarho * z_hat * g) / w0,
                W))
        # TODO: Written out one step later?
        # v_melt.assign(v0 - darcy * (grad(p) * p0 / h - deltarho * yvec * g) / w0)
        # TODO: use nP after to avoid projection?

        solve(r == 0, u, bcs)
        nV, nP, nT, nTf = u.split()  # TODO: write with Tf, ... etc

        if count % output_every == 0:

            percent_complete, time_remaining = time_left(
                count, t_end / time_step,
                run_time_init)  # TODO: timestep vs dt

            # Update progress_log.txt (found in code_copy directory) and print progress to screen

            log_str = '{:.4f} Percent Completed, Time Left {:.2f} minutes'.format(
                percent_complete, time_remaining)

            log(log_str)

            os.system("printf '" + log_str + "\n' >> " + logfile)

            # Now write the data files in their respective directories

            # TODO: Make sure all writes are to the same function for each time step
            files['T_fluid'].write(nTf, t)
            files['p'].write(nP, t)
            files['v_solid'].write(nV, t)
            files['T_solid'].write(nT, t)
            files['mu'].write(mu, t)
            files['v_melt'].write(v_melt, t)
            files['gradp'].write(project(grad(nP), W), t)
            files['rho'].write(project(rhosolid, S), t)
            files['Tf_grad'].write(project(grad(Tf), W), t)
            files['advect'].write(project(dt * dot(v_melt, grad(nTf))), t)
            files['ht'].write(project(heat_transfer, S), t)

        assign(T0, nT)
        assign(v0, nV)
        assign(Tf0, nTf)

        t += time_step
        count += 1

    # Print progress to screen and update progress_log.txt with completed case

    log_str_complete = 'Case mu={}, Tb={}, k={} complete. Run time = {:.2f} minutes'.format(
        mu_a, Tb, k_s, (clock() - run_time_init) / 60.0)

    log(log_str_complete)

    os.system("printf '" + log_str_complete +
              "\n________________________________________\n\n' >> " + logfile)
Example #59
mesh_size = 10

# Create meshes and facet function
mesh = IntervalMesh(mesh_size, 0.0, length)
mesh_plot = IntervalMesh(8*mesh_size, 0.0, length)
boundary_parts = FacetFunction('size_t', mesh)
right = AutoSubDomain(lambda x: near(x[0], length))
left = AutoSubDomain(lambda x: near(x[0], 0.0))
right.mark(boundary_parts, 2)
left.mark(boundary_parts, 1)

# Create function spaces
V = FunctionSpace(mesh, 'Lagrange', 2)
# Vplot = FunctionSpace(mesh_plot, 'Lagrange', 1)
Vplot = V
Q = FunctionSpace(mesh, 'Lagrange', 1)

# BC conditions, nullspace
v_in_expr = Constant(v_in)
plt = plot(interpolate(v_in_expr, V), range_min=0., range_max=2*v_in, window_width=width, window_height=height)
plt.write_png('%s/correct' % dir)
# v_in_expr = Expression('(t<1.0)?t*v:v', v=Constant(v_in), t=0.0)
# v_in_expr = Expression('(t<1.0)?(1-cos(pi*t))*v*0.5:v', v=Constant(v_in), t=0.0)
bcp = DirichletBC(Q, Constant(0.0), boundary_parts, 2)
bcu = DirichletBC(V, v_in_expr, boundary_parts, 1)
foo = Function(Q)
null_vec = Vector(foo.vector())
Q.dofmap().set(null_vec, 1.0)
null_vec *= 1.0/null_vec.norm('l2')
# print(null_vec.array())
null_space = VectorSpaceBasis([null_vec])
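# How the pressure null space built above is typically used later in such a script
# (a hedged sketch, not part of the original excerpt): attach it to the assembled
# pressure operator and project the constant mode out of the right-hand side before
# solving. A_p and b_p are hypothetical names for the assembled pressure system; the
# Laplacian/zero right-hand side below is purely illustrative.
from dolfin import TrialFunction, TestFunction, Constant, assemble, inner, grad, dx, as_backend_type
p_trial, q_test = TrialFunction(Q), TestFunction(Q)
A_p = assemble(inner(grad(p_trial), grad(q_test))*dx)
b_p = assemble(Constant(0.0)*q_test*dx)
as_backend_type(A_p).set_nullspace(null_space)  # make the singular operator solvable
null_space.orthogonalize(b_p)                   # remove the constant mode from b_p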
Example #60
def StokesFunctionSpace(mesh, family, degree):
    stokes_element = StokesElement(family, mesh.ufl_cell(), degree)
    return FunctionSpace(mesh, stokes_element)
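# StokesElement is assumed to be provided elsewhere in the surrounding library. One
# plausible definition is an inf-sup stable Taylor-Hood pair (vector P_k velocity,
# scalar P_{k-1} pressure); this sketch is an assumption, not necessarily the
# library's actual implementation.
from dolfin import VectorElement, FiniteElement, MixedElement

def StokesElement(family, cell, degree):
    velocity_element = VectorElement(family, cell, degree)
    pressure_element = FiniteElement(family, cell, degree - 1)
    return MixedElement(velocity_element, pressure_element)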