Example #1
0
    def get_observations(self, pde=None, nref=0, init=None):
        """
        Get the observations at given locations and time points.

        Parameters:
            pde: forward model used to generate observations; a fresh
                TimeDependentAD is built when None.
            nref: number of uniform mesh refinements applied before
                (re)building the forward model.
            init: initial-condition vector; a default Gaussian-bump
                expression is interpolated when None.

        Returns a copy of the noise-perturbed observation vector self.d.
        Side effects: fills self.d and sets self.noise_variance.
        """
        # (Re)build the forward model on a refined mesh when needed; the two
        # original branches differed only in where the base mesh came from.
        if pde is None or nref > 0:
            mesh = self.Vh.mesh() if pde is None else pde.mesh
            for _ in range(nref):
                mesh = dl.refine(mesh)  # refine mesh to obtain observations
            pde = TimeDependentAD(mesh)
        # initial condition
        if init is None:
            true_init = dl.Expression(
                'min(0.5,exp(-100*(pow(x[0]-0.35,2) +  pow(x[1]-0.7,2))))',
                element=pde.Vh[STATE].ufl_element())
            init = dl.interpolate(true_init, pde.Vh[STATE]).vector()
        # prepare container for observations
        self.prep_container(pde.Vh[STATE])

        utrue = pde.generate_vector(STATE)
        x = [utrue, init, None]
        pde.solveFwd(x[STATE], x)
        self.observe(x, self.d)
        # noise level is relative to the largest observed value
        MAX = self.d.norm("linf", "linf")
        noise_std_dev = self.rel_noise * MAX
        parRandom.normal_perturb(noise_std_dev, self.d)
        self.noise_variance = noise_std_dev * noise_std_dev
        return self.d.copy()
Example #2
0
    def setupMeshes(cls, mesh, N, num_refine=0, randref=(1.0, 1.0)):
        """Create a set of N meshes based on provided mesh. Parameters
        num_refine>=0 and randref specify refinement
        adjustments. num_refine specifies the number of refinements
        per mesh, randref[0] specifies the probability that a given
        mesh is refined, and randref[1] specifies the probability that
        an element of the mesh is refined (if it is refined at all).
        """
        assert num_refine >= 0

        assert 0 < randref[0] <= 1.0
        assert 0 < randref[1] <= 1.0

        # create set of (refined) meshes
        meshes = []
        for _ in range(N):
            m = Mesh(mesh)
            for _ in range(num_refine):
                if randref[0] == 1.0 and randref[1] == 1.0:
                    # deterministic uniform refinement
                    m = refine(m)
                elif random() <= randref[0]:
                    cell_markers = CellFunction("bool", m)
                    cell_markers.set_all(False)
                    # BUG FIX: range() is not shuffleable on Python 3 --
                    # materialize the cell ids as a list before shuffling
                    cell_ids = list(range(m.num_cells()))
                    shuffle(cell_ids)
                    # refine the first randref[1]-fraction of the shuffled ids
                    num_ref_cells = int(ceil(m.num_cells() * randref[1]))
                    for cell_id in cell_ids[0:num_ref_cells]:
                        cell_markers[cell_id] = True
                    m = refine(m, cell_markers)
            meshes.append(m)
        return meshes
Example #3
0
 def refine_maxh(self, maxh, uniform=False):
     """Refine mesh of FEM basis such that maxh of mesh is smaller than given value.

     Returns (new_basis, prolongate, restrict, num_cells_refined).  If maxh is
     non-positive or the mesh is already fine enough, the basis itself is
     returned with its own projection as both transfer operators and count 0.
     """
     if maxh <= 0 or self.mesh.hmax() < maxh:            
         return self, self.project_onto, self.project_onto, 0
     ufl = self._fefs.ufl_element()
     mesh = self.mesh
     num_cells_refined = 0
     if uniform:
         while mesh.hmax() > maxh:
             num_cells_refined += mesh.num_cells()
             mesh = refine(mesh)         # NOTE: this global refine results in a red-refinement as opposed to bisection in the adaptive case
     else:
         # adaptive: only mark cells whose diameter still exceeds maxh
         while mesh.hmax() > maxh:
             cell_markers = CellFunction("bool", mesh)
             cell_markers.set_all(False)
             for c in cells(mesh):
                 if c.diameter() > maxh:
                     cell_markers[c.index()] = True
                     num_cells_refined += 1
             mesh = refine(mesh, cell_markers)
     # rebuild a function space of the same family/degree on the refined mesh,
     # vector-valued when the original space had sub-spaces
     if self._fefs.num_sub_spaces() > 1:
         new_fefs = VectorFunctionSpace(mesh, ufl.family(), ufl.degree())
     else:
         new_fefs = FunctionSpace(mesh, ufl.family(), ufl.degree())
     new_basis = FEniCSBasis(new_fefs)
     prolongate = new_basis.project_onto
     restrict = self.project_onto
     return new_basis, prolongate, restrict, num_cells_refined
Example #4
0
def generate_meshes(N=5, iterations=3, refinements=0.5):
    """Build two unit-square meshes and randomly refine both `iterations` times.

    refinements: fraction of cells (drawn randomly, possibly with duplicates)
        marked for refinement in each pass.
    Returns the four corner points of the unit square and the two meshes.
    """
    mesh1 = UnitSquare(N, N)
    mesh2 = UnitSquare(N, N)
    for ref in range(iterations):
        # BUG FIX: Python-2 print statement replaced by a 2/3-compatible
        # print() call with a single pre-formatted argument
        print("refinement {0}".format(ref + 1))
        info(mesh1)
        info(mesh2)
        cf1 = CellFunction("bool", mesh1)
        cf2 = CellFunction("bool", mesh2)
        cf1.set_all(False)
        cf2.set_all(False)
        # number of cells to mark; cast to int since round() returns a float
        m1 = int(round(cf1.size() * refinements))
        m2 = int(round(cf2.size() * refinements))
        mi1 = randint(0, cf1.size(), m1)
        mi2 = randint(0, cf2.size(), m2)
        for i in mi1:
            cf1[i] = True
        for i in mi2:
            cf2[i] = True
#        newmesh1 = adapt(mesh1, cf1)
        newmesh1 = refine(mesh1, cf1)
#        newmesh2 = adapt(mesh2, cf2)
        newmesh2 = refine(mesh2, cf2)
        mesh1 = newmesh1
        mesh2 = newmesh2
    return [(0., 0.), (1., 0.), (1., 1.), (0., 1.)], mesh1, mesh2
Example #5
0
 def refine_meshes(self, subdomain):
     """Refine both stored meshes inside the region marked by `subdomain`."""
     for attr in ("mesh1", "mesh2"):
         mesh = getattr(self, attr)
         marker = MeshFunction("bool", mesh, mesh.topology().dim())
         subdomain.mark(marker, True)
         setattr(self, attr, refine(mesh, marker))
def test1():
    """Compare interpolation errors incurred by transferring a function
    through the joint meshes of a uniformly and a randomly refined mesh."""
    # setup meshes
    P = 0.3      # probability that a cell of mesh2 is refined per pass
    ref1 = 4     # uniform refinement levels for mesh1
    ref2 = 14    # random refinement passes for mesh2
    mesh1 = UnitSquare(2, 2)
    mesh2 = UnitSquare(2, 2)
    # refinement loops
    for level in range(ref1):
        mesh1 = refine(mesh1)
    for level in range(ref2):
        # mark and refine
        markers = CellFunction("bool", mesh2)
        markers.set_all(False)
        # randomly refine mesh
        for i in range(mesh2.num_cells()):
            if random() <= P:
                markers[i] = True
        mesh2 = refine(mesh2, markers)

    # create joint meshes
    mesh1j, parents1 = create_joint_mesh([mesh2], mesh1)
    mesh2j, parents2 = create_joint_mesh([mesh1], mesh2)

    # evaluate errors  joint meshes
    ex1 = Expression("sin(2*A*x[0])*sin(2*A*x[1])", A=10)
    V1 = FunctionSpace(mesh1, "CG", 1)
    V2 = FunctionSpace(mesh2, "CG", 1)
    V1j = FunctionSpace(mesh1j, "CG", 1)
    V2j = FunctionSpace(mesh2j, "CG", 1)
    f1 = interpolate(ex1, V1)
    f2 = interpolate(ex1, V2)
    # interpolate on respective joint meshes
    f1j = interpolate(f1, V1j)
    f2j = interpolate(f2, V2j)
    f1j1 = interpolate(f1j, V1)
    f2j2 = interpolate(f2j, V2)
    # evaluate error with regard to original mesh
    e1 = Function(V1)
    e2 = Function(V2)
    e1.vector()[:] = f1.vector() - f1j1.vector()
    e2.vector()[:] = f2.vector() - f2j2.vector()
    # BUG FIX: Python-2 print statements replaced with print() calls
    # (same output on Python 3 as the original statements on Python 2)
    print("error on V1:", norm(e1, "L2"))
    print("error on V2:", norm(e2, "L2"))

    plot(f1j, title="f1j")
    plot(f2j, title="f2j")
    plot(mesh1, title="mesh1")
    plot(mesh2, title="mesh2")
    plot(mesh1j, title="joint mesh from mesh1")
    plot(mesh2j, title="joint mesh from mesh2", interactive=True)
Example #7
0
 def __init__(self, n=16, divide=1, threshold=12.3,
              left_side_num=3, left_side_denom=4):
     """ Store parameters, initialize data exports (latex, txt). """
     # left_side_num/denom are either height, assuming base is 1,
     #   or left and bottom side lengths
     self.n = n
     self.threshold = threshold / left_side_denom ** 2
     self.left_side = [0] * (n + 1)
     self.bottom_side = [0] * (n + 1)
     self.left_side_len = left_side_num * 1.0 / left_side_denom
     self.left_side_num = left_side_num
     self.left_side_denom = left_side_denom
     self.folder = "results"
     self.name = "domains_{}_{}_{}".format(n, divide, threshold)
     self.filename = self.folder + "/" + self.name + ".tex"
     self.matrixZname = self.folder + "/matrices_Z/" + self.name
     self.matrixQname = self.folder + "/matrices_Q/" + self.name
     self.genericname = self.folder + "/" + self.name
     self.textname = self.folder + "/" + self.name + ".txt"
     # truncate/create the text report file
     open(self.textname, 'w').close()
     self.latex = "cd " + self.folder + "; pdflatex --interaction=batchmode" \
         + " {0}.tex; rm {0}.aux {0}.log".format(self.name)
     self.shift = 0
     # common mesh, ready to cut/apply Dirichlet BC
     self.mesh = Triangle(self.left_side_num, left_side_denom)
     while self.mesh.size(2) < self.n * self.n:
         self.mesh = refine(self.mesh)
     for i in range(divide):
         self.mesh = refine(self.mesh)
     boundary = MeshFunction("size_t", self.mesh, 1)
     boundary.set_all(0)
     self.plotted = plot(
         boundary,
         prefix='animation/animation',
         scalarbar=False,
         window_width=1024,
         window_height=1024)
     # BUG FIX: Python-2 print statements replaced with print() calls
     # (identical console output under Python 3)
     print('Grid size: ', self.n ** 2)
     print('Mesh size: ', self.mesh.size(2))
     # setup solver
     self.solver = Solver(self.mesh, left_side_num, left_side_denom)
     self.mass = self.solver.B
     self.stiff = self.solver.A
     print(np.unique(self.stiff.data))
     self.save_matrices(0)
     # save dofs, one vector per line (was: print >>f, vec); `with` guarantees
     # the file is closed even on error
     with open(self.genericname + '_dofmap.txt', 'w') as f:
         for vec in self.solver.dofs:
             f.write("{0}\n".format(vec))
Example #8
0
def plot_indicators(indicators, mesh, refinements=1, interactive=True):
    """Plot DG0 error indicators interpolated onto a finer CG1 space.

    indicators: list of (eta, title) pairs where eta is a DG0 coefficient
        vector on the input mesh (a single bare pair is also accepted);
    refinements: number of uniform refinements applied before plotting.
    """
    # DG0 space lives on the ORIGINAL mesh; `mesh` is rebound below
    DG = FunctionSpace(mesh, 'DG', 0)
    for _ in range(refinements):
        mesh = refine(mesh)
    # NOTE(review): V is built on yet another refinement beyond the loop --
    # presumably intentional for smoother plots; confirm.
    V = FunctionSpace(refine(mesh), 'CG', 1)
    if len(indicators) == 1 and not isinstance(indicators[0], (list, tuple)):
        indicators = [indicators]
    for eta, title in indicators:
        e = Function(DG, eta)
        f = interpolate(e, V) 
        plot(f, title=title)
    if interactive:
        from dolfin import interactive
        interactive()
Example #9
0
def mesh(Lx=1., Ly=1., Lz=2., grid_spacing=1./16, **namespace):
    """Box mesh of [0,Lx]x[0,Ly]x[0,Lz] at half resolution, refined once."""
    nx = int(Lx / (2 * grid_spacing))
    ny = int(Ly / (2 * grid_spacing))
    nz = int(Lz / (2 * grid_spacing))
    box = df.BoxMesh(df.Point(0., 0., 0.), df.Point(Lx, Ly, Lz), nx, ny, nz)
    return df.refine(box)
Example #10
0
def _adaptive_mesh_refinement(dx, phi, mu, sigma, omega, conv, voltages):
    """One fixed-fraction adaptive refinement step driven by _error_estimator;
    currently a debugging stub that plots the refined mesh and exits."""
    from dolfin import cells, refine
    eta = _error_estimator(dx, phi, mu, sigma, omega, conv, voltages)
    mesh = phi.function_space().mesh()
    level = 0
    TOL = 1.0e-4
    # global error estimate: sqrt of the MPI-summed squared cell indicators
    E = sum([e * e for e in eta])
    E = sqrt(MPI.sum(E))
    info('Level {:d}: E = {:g} (TOL = {:g})'.format(level, E, TOL))
    # Mark cells for refinement
    REFINE_RATIO = 0.5  # refine the cells carrying the top half of indicators
    cell_markers = MeshFunction('bool', mesh, mesh.topology().dim())
    # threshold at the REFINE_RATIO quantile, maximized across MPI ranks
    eta_0 = sorted(eta, reverse=True)[int(len(eta) * REFINE_RATIO)]
    eta_0 = MPI.max(eta_0)
    for c in cells(mesh):
        cell_markers[c] = eta[c.index()] > eta_0
    # Refine mesh
    mesh = refine(mesh, cell_markers)
    # Plot mesh
    plot(mesh)
    interactive()
    # NOTE(review): hard exit -- everything below is unreachable scaffolding
    exit()
    # # Compute error indicators
    # K = array([c.volume() for c in cells(mesh)])
    # R = numpy.array([
    #     abs(source([c.midpoint().x(), c.midpoint().y()]))
    #     for c in cells(mesh)
    #     ])
    # gam = h*R*sqrt(K)
    return
Example #11
0
def mesh_fixup(mesh):
    """Refine cells which have all vertices on boundary and
    return a new mesh."""
    cf = CellFunction('bool', mesh)

    tdim = mesh.topology().dim()
    # need facet->cell connectivity to locate the cell behind a boundary facet
    mesh.init(tdim-1, tdim)

    for f in facets(mesh):
        # Boundary facet?
        # TODO: Here we could check supplied facet function or subdomain
        if not f.exterior():
            continue

        # Pick adjacent cell
        c = Cell(mesh, f.entities(tdim)[0])

        # Number of vertices on boundary
        # (a vertex counts as boundary if any facet incident to it is exterior)
        num_bad_vertices = sum(1 for v in vertices(c)
                               if any(fv.exterior() for fv in facets(v)))
        assert num_bad_vertices <= c.num_vertices()

        # Refine cell if all vertices are on boundary
        if num_bad_vertices == c.num_vertices():
            cf[c] = True

    return refine(mesh, cf)
def _adaptive_mesh_refinement(dx, phi, mu, sigma, omega, conv, voltages):
    """One fixed-fraction adaptive refinement step driven by _error_estimator;
    currently a debugging stub that plots the refined mesh and exits."""
    from dolfin import cells, refine
    eta = _error_estimator(dx, phi, mu, sigma, omega, conv, voltages)
    mesh = phi.function_space().mesh()
    level = 0
    TOL = 1.0e-4
    # global error estimate: sqrt of the MPI-summed squared cell indicators
    E = sum([e * e for e in eta])
    E = sqrt(MPI.sum(E))
    info('Level %d: E = %g (TOL = %g)' % (level, E, TOL))
    # Mark cells for refinement
    REFINE_RATIO = 0.5  # refine the cells carrying the top half of indicators
    cell_markers = MeshFunction('bool', mesh, mesh.topology().dim())
    # threshold at the REFINE_RATIO quantile, maximized across MPI ranks
    eta_0 = sorted(eta, reverse=True)[int(len(eta) * REFINE_RATIO)]
    eta_0 = MPI.max(eta_0)
    for c in cells(mesh):
        cell_markers[c] = eta[c.index()] > eta_0
    # Refine mesh
    mesh = refine(mesh, cell_markers)
    # Plot mesh
    plot(mesh)
    interactive()
    # NOTE(review): hard exit -- the commented-out code below is unreachable
    exit()
    ## Compute error indicators
    #K = array([c.volume() for c in cells(mesh)])
    #R = array([abs(source([c.midpoint().x(), c.midpoint().y()])) for c in cells(mesh)])
    #gam = h*R*sqrt(K)
    return
Example #13
0
    def apply_linear_refinement(self):
        r"""
        Refine the mesh linearly: slice all cells between a radius linear\_start
        and a radius linear\_stop in half, for a number of times specified by linear\_refine.

        """

        for _ in range(self.linear_refine):
            markers = d.MeshFunction("bool", self.mesh,
                                     self.mesh.topology().dim())
            markers.set_all(False)
            for cell in d.cells(self.mesh):
                # Slice a cell only if (1) it is still large enough to be
                # resolvable within machine precision and (2) its left vertex
                # lies in the user-specified range.
                left_vertex = cell.get_vertex_coordinates()[0]
                big_enough = cell.circumradius() > self.too_fine * d.DOLFIN_EPS
                in_range = self.linear_start < left_vertex < self.linear_stop
                if big_enough and in_range:
                    markers[cell] = True

            self.mesh = d.refine(self.mesh, markers)

        # how many points were added by linear refinement (if any)? Store the info
        self.linear_cells = len(self.mesh.coordinates()) - 1 - self.num_cells
Example #14
0
def mesh(Lx=1, Ly=5, grid_spacing=1./16, rad_init=0.75, **namespace):
    """Rectangle mesh on [0,Lx]x[0,Ly], locally refined three times inside
    shrinking annular bands (one circular, two elliptic) around the origin
    and near the bottom of the domain."""
    m = df.RectangleMesh(df.Point(0., 0.), df.Point(Lx, Ly),
                         int(Lx/(1*grid_spacing)),
                         int(Ly/(1*grid_spacing)))

    origin = df.Point(0.0, 0.0)
    for level in range(3):
        markers = df.MeshFunction("bool", m, 2)
        # band bounds tighten towards rad_init as the level increases
        k_p = 1.6 - 0.2 * level
        k_m = 0.4 + 0.2 * level
        rad_x = 0.75 * rad_init
        rad_y = 1.25 * rad_init
        for cell in df.cells(m):
            p = cell.midpoint()
            x, y = p.x(), p.y()
            radius = p.distance(origin)
            ell_a = (x / rad_x) ** 2 + (y / rad_y) ** 2
            ell_b = (x / rad_y) ** 2 + (y / rad_x) ** 2
            markers[cell] = (
                (k_m * rad_init < radius < k_p * rad_init)
                or (k_m ** 2 < ell_a < k_p ** 2)
                or (k_m ** 2 < ell_b < k_p ** 2)
                or y < 0.5 - level * 0.2
            )
        m = df.refine(m, markers)
    return m
Example #15
0
    def __init__(
        self,
        boundary_elements,
        mesh_name="mesh",
        mesh_folder="Mesh",
        refinement_level=0,
    ):
        """Read the mesh built from boundary elements and optionally refine it.

        boundary_elements: forwarded to the parent class and to MeshBuilder.
        refinement_level: number of uniform refinements (0 = none); the
            boundary parts are adapted to each refined mesh.
        """
        super().__init__(boundary_elements, mesh_name, mesh_folder)
        mesh_builder = MeshBuilder(boundary_elements=boundary_elements,
                                   name=self.mesh_name)

        with self.mesh_reader(mesh_builder.xdmf_mesh) as file:
            file.read(self.mesh)

        self.facet_xdmf_mesh = mesh_builder.facet_xdmf_mesh
        self.physical_label = mesh_builder.physical_label

        if refinement_level > 0:
            # parent-facet bookkeeping is required so the boundary markers can
            # be adapted to the refined meshes below
            dolf.parameters[
                "refinement_algorithm"] = "plaza_with_parent_facets"
            for _ in range(refinement_level):
                cf = dolf.MeshFunction("bool", self.mesh, True)
                cf.set_all(True)  # uniform refinement: mark every cell
                self.mesh = dolf.refine(self.mesh, cf)
                self._boundary_parts = dolf.adapt(self.boundary_parts,
                                                  self.mesh)
            logging.info(f"Refinement level: {refinement_level}, "
                         f"nof cells: {len(list(dolf.cells(self.mesh)))}")
Example #16
0
    def __init__(self, cparams, dtype_u, dtype_f):
        """
        Initialization routine

        Args:
            cparams: custom parameters for the example
            dtype_u: particle data type (will be passed parent class)
            dtype_f: acceleration data type (will be passed parent class)
        """

        # define the Dirichlet boundary
        def Boundary(x, on_boundary):
            return on_boundary

        # these parameters will be used later, so assert their existence
        assert 'c_nvars' in cparams
        assert 'nu' in cparams
        assert 't0' in cparams
        assert 'family' in cparams
        assert 'order' in cparams
        assert 'refinements' in cparams

        # add parameters as attributes for further reference
        for k,v in cparams.items():
            setattr(self,k,v)

        df.set_log_level(df.WARNING)

        df.parameters["form_compiler"]["optimize"]     = True
        df.parameters["form_compiler"]["cpp_optimize"] = True

        # set mesh and refinement (for multilevel)
        mesh = df.UnitIntervalMesh(self.c_nvars)
        # mesh = df.UnitSquareMesh(self.c_nvars[0],self.c_nvars[1])
        for i in range(self.refinements):
            mesh = df.refine(mesh)

        # self.mesh = mesh
        # define function space for future reference
        self.V = df.FunctionSpace(mesh, self.family, self.order)
        tmp = df.Function(self.V)
        print('DoFs on this level:',len(tmp.vector().array()))

        # invoke super init, passing number of dofs, dtype_u and dtype_f
        super(fenics_heat,self).__init__(self.V,dtype_u,dtype_f)

        # time-dependent forcing term (nu enters as coefficient b)
        self.g = df.Expression('-sin(a*x[0]) * (sin(t) - b*a*a*cos(t))',a=np.pi,b=self.nu,t=self.t0,degree=self.order)

        # rhs in weak form
        self.w = df.Function(self.V)
        v = df.TestFunction(self.V)
        self.a_K = -self.nu*df.inner(df.nabla_grad(self.w), df.nabla_grad(v))*df.dx + self.g*v*df.dx

        # mass matrix
        u = df.TrialFunction(self.V)
        a_M = u*v*df.dx
        self.M = df.assemble(a_M)

        # homogeneous Dirichlet BC on the whole boundary
        self.bc = df.DirichletBC(self.V, df.Constant(0.0), Boundary)
 def refine_basic(self, mesh, perc, weights):
     """Refine the `perc` fraction of entities carrying the largest weights."""
     count = int(np.ceil(len(weights) * perc))
     markers = dol.MeshFunction('bool', mesh, dim=1)
     # indices of the `count` heaviest entries (stable for ties)
     order = sorted(range(len(weights)), key=lambda i: weights[i], reverse=True)
     for idx in order[:count]:
         markers[idx] = True
     return dol.refine(mesh, markers)
def refineMesh(m, l, u):
    """Refine the 1-D cells of `m` whose midpoints lie strictly inside (l, u)."""
    markers = MeshFunction('bool', m, 1)
    markers.set_all(False)
    pts = m.coordinates()
    mids = (pts[:-1] + pts[1:]) * 0.5
    inside = (mids > l) & (mids < u)
    markers.array()[np.where(inside)[0]] = True
    return refine(m, markers)
Example #19
0
    def __init__(self, problem_params, dtype_u=fenics_mesh, dtype_f=rhs_fenics_mesh):
        """
        Initialization routine

        Args:
            problem_params (dict): custom parameters for the example
            dtype_u: FEniCS mesh data type (will be passed to parent class)
            dtype_f: FEniCS mesh data data type with implicit and explicit parts (will be passed to parent class)
        """

        # define the Dirichlet boundary
        # def Boundary(x, on_boundary):
        #     return on_boundary

        # these parameters will be used later, so assert their existence
        essential_keys = ['c_nvars', 't0', 'family', 'order', 'refinements', 'nu']
        for key in essential_keys:
            if key not in problem_params:
                msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys()))
                raise ParameterError(msg)

        # set logger level for FFC and dolfin
        logging.getLogger('FFC').setLevel(logging.WARNING)
        logging.getLogger('UFL').setLevel(logging.WARNING)

        # set solver and form parameters
        df.parameters["form_compiler"]["optimize"] = True
        df.parameters["form_compiler"]["cpp_optimize"] = True
        df.parameters['allow_extrapolation'] = True

        # set mesh and refinement (for multilevel)
        mesh = df.UnitIntervalMesh(problem_params['c_nvars'])
        for i in range(problem_params['refinements']):
            mesh = df.refine(mesh)

        # define function space for future reference
        self.V = df.FunctionSpace(mesh, problem_params['family'], problem_params['order'])
        tmp = df.Function(self.V)
        print('DoFs on this level:', len(tmp.vector()[:]))

        # invoke super init, passing number of dofs, dtype_u and dtype_f
        super(fenics_heat, self).__init__(self.V, dtype_u, dtype_f, problem_params)

        # Stiffness term (Laplace)
        u = df.TrialFunction(self.V)
        v = df.TestFunction(self.V)
        a_K = -1.0 * df.inner(df.nabla_grad(u), self.params.nu * df.nabla_grad(v)) * df.dx

        # Mass term
        a_M = u * v * df.dx

        self.M = df.assemble(a_M)
        self.K = df.assemble(a_K)

        # set forcing term as expression
        # (nu enters both the diffusion form above and the forcing term here)
        self.g = df.Expression('-cos(a*x[0]) * (sin(t) - b*a*a*cos(t))', a=np.pi, b=self.params.nu, t=self.params.t0,
                               degree=self.params.order)
Example #20
0
def refine_perimeter(mesh):
    """Refine largest boundary triangles."""
    mesh.init(1, 2)
    boundary_cells = [c for c in cells(mesh)
                      if any(f.exterior() for f in facets(c))]
    marker = CellFunction('bool', mesh, False)
    # threshold: three quarters of the largest boundary-cell diameter
    threshold = 0.75 * max(c.diameter() for c in boundary_cells)
    for c in boundary_cells:
        marker[c] = c.diameter() > threshold
    return refine(mesh, marker)
Example #21
0
def setup_vector(mesh, pde, degree=1, maxh=None):
    """Wrap a fresh Function on (a refined copy of) `mesh` in a FEniCSVector."""
    if maxh is not None:
        # uniformly refine until the largest cell diameter drops below maxh
        while mesh.hmax() > maxh:
            mesh = refine(mesh)
    fs = pde.function_space(mesh, degree=degree)
    return FEniCSVector(Function(fs))
Example #22
0
def refine_mesh_upto(mesh, size, edge=False):
    """ Refine mesh to at most given size, using one of two methods. """
    dim = mesh.topology().dim()
    if mesh.size(dim) > size:
        return mesh
    while True:
        if edge:
            # refine via an all-True cell marker
            flags = CellFunction("bool", mesh, True)
            candidate = refine(mesh, flags)
        else:
            # FEniCS 1.5 and 1.6 have a bug which prevents uniform refinement
            candidate = refine(mesh)
        # stop before exceeding the requested size
        if candidate.size(dim) > size:
            return mesh
        mesh = candidate
Example #23
0
def refine_perimeter(mesh):
    """Refine largest boundary triangles."""
    mesh.init(1, 2)
    # cells touching the boundary = cells with at least one exterior facet
    perimeter = [c for c in cells(mesh)
                 if any(f.exterior() for f in facets(c))]
    marker = CellFunction('bool', mesh, False)
    cutoff = 0.75 * max(c.diameter() for c in perimeter)
    for c in perimeter:
        if c.diameter() > cutoff:
            marker[c] = True
    return refine(mesh, marker)
Example #24
0
def refine_cylinder(mesh):
    'Refine mesh by cutting cells around the cylinder.'
    h = mesh.hmin()
    center = Point(c_x, c_y)
    markers = CellFunction('bool', mesh, False)
    for cell in cells(mesh):
        # mark cells whose midpoint lies within one cell-width of the cylinder
        if cell.midpoint().distance(center) < r + h:
            markers[cell] = True
    return refine(mesh, markers)
Example #25
0
def refine_cylinder(mesh):
    'Refine mesh by cutting cells around the cylinder.'
    cutoff = r + mesh.hmin()  # cylinder radius padded by one minimal cell width
    center = Point(c_x, c_y)
    cell_f = CellFunction('bool', mesh, False)
    for c in cells(mesh):
        cell_f[c] = c.midpoint().distance(center) < cutoff
    return refine(mesh, cell_f)
Example #26
0
def refine_mesh_upto(mesh, size, edge=False):
    """ Refine mesh to at most given size, using one of two methods. """
    dim = mesh.topology().dim()
    if mesh.size(dim) > size:
        return mesh
    if edge:
        # marker-based refinement with every cell flagged
        def step(m):
            flags = CellFunction("bool", m, True)
            return refine(m, flags)
    else:
        # FEniCS 1.5 and 1.6 have a bug which prevents uniform refinement
        step = refine
    while True:
        finer = step(mesh)
        if finer.size(dim) > size:
            return mesh
        mesh = finer
def minimize_multistage(rf, coarse_mesh, levels):
    ''' Implements the MG/Opt multistage approach; a multigrid algorithm with a
    V-cycle template for traversing the grids.

    rf: reduced functional on the coarsest level
    coarse_mesh: coarsest-level mesh
    levels: total number of grid levels (>= 1)
    '''
    # Create the meshes, one uniform refinement per additional level
    meshes = [coarse_mesh]
    for l in range(levels - 1):
        meshes.append(refine(meshes[-1]))

    # Create multiple approximations of the reduced functional.
    # BUG FIX: the original called rfs.append() with no argument, which raises
    # TypeError unconditionally.  Reuse the given functional per level as a
    # placeholder until level-specific approximations are implemented.
    rfs = [rf]
    for l in range(levels - 1):
        rfs.append(rf)  # TODO: build a level-specific approximation on meshes[l+1]
Example #28
0
def minimize_multistage(rf, coarse_mesh, levels):
    ''' Implements the MG/Opt multistage approach; a multigrid algorithm with a
    V-cycle template for traversing the grids.

    rf: reduced functional on the coarsest level
    coarse_mesh: coarsest-level mesh
    levels: total number of grid levels (>= 1)
    '''
    # Create the meshes, one uniform refinement per additional level
    meshes = [coarse_mesh]
    for l in range(levels - 1):
        meshes.append(refine(meshes[-1]))

    # Create multiple approximations of the reduced functional.
    # BUG FIX: the original called rfs.append() with no argument, which raises
    # TypeError unconditionally.  Reuse the given functional per level as a
    # placeholder until level-specific approximations are implemented.
    rfs = [rf]
    for l in range(levels - 1):
        rfs.append(rf)  # TODO: build a level-specific approximation on meshes[l+1]
Example #29
0
def generate_footing_square(Nelements, length, refinements=0):
    """Generate a square footing mesh: a square refined twice in the upper
    central band plus `refinements` extra uniform refinements.

    Returns (mesh, markers, LEFT, RIGHT, TOP, BOTTOM, NONE) where the last
    five values are the facet-marker ids of the four sides and of unmarked
    facets respectively.
    """
    from dolfin import UnitSquareMesh, SubDomain, MeshFunction, Measure, near, refine, cells
    import numpy as np
    # Start from square
    mesh = generate_square(Nelements, length, 0)[0]

    def refine_mesh(mesh):
        # Refine on top
        cell_markers = MeshFunction('bool', mesh, mesh.topology().dim())
        cell_markers.set_all(False)
        for c in cells(mesh):
            verts = np.reshape(c.get_vertex_coordinates(), (3, 2))
            verts_x = verts[:, 0]
            verts_y = verts[:, 1]
            newval = verts_y.min() > 2 * length / 3 and verts_x.min() > length / \
                8 and verts_x.max() < 7 / 8 * length
            cell_markers[c] = newval

        # Redefine markers on new mesh
        return refine(mesh, cell_markers)

    mesh = refine_mesh(refine_mesh(mesh))

    for i in range(refinements):
        mesh = refine(mesh)

    class Left(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[0], 0.0) and on_boundary

    class Right(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[0], length) and on_boundary

    class Top(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[1], length) and on_boundary

    class Bottom(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[1], 0.0) and on_boundary

    left, right, top, bottom = Left(), Right(), Top(), Bottom()
    # BUG FIX: NONE was undefined at the return statement below (NameError);
    # it is the default id assigned by markers.set_all(0).
    NONE, LEFT, RIGHT, TOP, BOTTOM = 0, 1, 2, 3, 4  # Set numbering
    markers = MeshFunction("size_t", mesh, 1)
    markers.set_all(NONE)

    boundaries = (left, right, top, bottom)
    def_names = (LEFT, RIGHT, TOP, BOTTOM)
    for side, num in zip(boundaries, def_names):
        side.mark(markers, num)
    return mesh, markers, LEFT, RIGHT, TOP, BOTTOM, NONE
Example #30
0
    def refine(self):
        """Iteratively refine the mesh at the configured regions or facets.

        Runs refinement_info['max_iter'] marking/refinement passes, adapting
        the stored cell and facet functions to each refined mesh, and
        optionally saves the result to HDF5 when refinement_info['save_mesh']
        is True.  Raises if neither 'regions' nor 'facets' is configured.
        """
        for n in range(self.refinement_info['max_iter']):
            self.logger.debug("Entering refinement step {}".format(n))
            # marker dimension: cells for 'regions', facets for 'facets'
            if 'regions' in self.refinement_info:
                cell_markers = d.MeshFunction('bool', self.mesh,
                                              self.dimension)
            elif 'facets' in self.refinement_info:
                cell_markers = d.MeshFunction('bool', self.mesh,
                                              self.dimension - 1)
            else:
                raise Exception(
                    "You must provide facets or regions where the mesh should be refined."
                )
            cell_markers.set_all(False)
            # mark entities whose subdomain/facet id matches a requested name
            if 'regions' in self.refinement_info:
                hlp = np.asarray(self.cells.array(), dtype=np.int32)
                for i in range(hlp.size):
                    if any(hlp[i] == self.subdomaininfo[a]
                           for a in self.refinement_info['regions']):
                        cell_markers[i] = True
            elif 'facets' in self.refinement_info:
                hlp = np.asarray(self.facets.array(), dtype=np.int32)
                for i in range(hlp.size):
                    if any(hlp[i] == self.facetinfo[a]
                           for a in self.refinement_info['facets']):
                        cell_markers[i] = True

            self.mesh = d.refine(self.mesh, cell_markers)
            # carry the facet/cell markers over to the refined mesh
            self.facets = d.adapt(self.facets, self.mesh)
            self.cells = d.adapt(self.cells, self.mesh)
        if 'regions' in self.refinement_info:
            self.logger.info("refined mesh {} times at {}".format(
                self.refinement_info['max_iter'],
                self.refinement_info['regions']))
        else:
            self.logger.info("refined mesh {} times at {}".format(
                self.refinement_info['max_iter'],
                self.refinement_info['facets']))
        try:
            # KeyError here means 'save_mesh' was not configured at all
            meshsave = self.refinement_info['save_mesh']
            self.logger.info('Will save mesh.')
            if meshsave is True:
                hdfout = d.HDF5File(self.mesh.mpi_comm(),
                                    self.meshname + "_refined.h5", "w")
                hdfout.write(self.mesh, "/mesh")
                hdfout.write(self.cells, "/subdomains")
                hdfout.write(self.facets, "/facets")
            else:
                self.logger.info("Mesh won't be saved")
        except KeyError:
            self.logger.info("Mesh won't be saved")
Example #31
0
    def refine_mesh(mesh):
        """Refine the band of cells in the upper central part of the square."""
        flags = MeshFunction('bool', mesh, mesh.topology().dim())
        flags.set_all(False)
        for c in cells(mesh):
            coords = np.reshape(c.get_vertex_coordinates(), (3, 2))
            xs = coords[:, 0]
            ys = coords[:, 1]
            # cell lies fully above 2/3 height and between 1/8 and 7/8 width
            in_band = (ys.min() > 2 * length / 3
                       and xs.min() > length / 8
                       and xs.max() < 7 / 8 * length)
            flags[c] = in_band

        # markers must be rebuilt on the refined mesh by the caller
        return refine(mesh, flags)
Example #32
0
    def refine(self, refine_domains):  # todo implement
        """Refine mesh should be overwritten in developed model class as default behaviour is to do nothing.

        :param n_refine: Number of times to refine the mesh
        :param refine_domains: Name of domains to refine
        """
        markers = df.MeshFunction('bool', self.mesh, 2, self.mesh.domains())
        markers.set_all(False)

        # translate domain names to ids and flag every matching cell
        ids = [self.model_geometry.domain_names[name] for name in refine_domains]
        markers.set_values(np.isin(self.domains.array(), ids))

        self.mesh = df.refine(self.mesh, marker=markers)

        self.structural_compilation()
Example #33
0
def refine_boundary_layers(mesh, s, d, x0, x1):
    """Refine cells whose midpoint lies within s*hmax of a domain boundary.

    d lists the coordinate directions to inspect; x0/x1 hold the lower and
    upper domain bounds for each listed direction.
    """
    from dolfin import CellFunction, cells, refine, DOLFIN_EPS

    h = mesh.hmax()
    markers = CellFunction('bool', mesh, mesh.topology().dim())
    markers.set_all(False)

    for cell in cells(mesh):
        mid = cell.midpoint()
        for i, direction in enumerate(d):
            near_upper = mid[direction] > (x1[i] - s * h - DOLFIN_EPS)
            near_lower = mid[direction] < (s * h + x0[i] + DOLFIN_EPS)
            if near_upper or near_lower:
                markers[cell] = True

    return refine(mesh, markers)
Example #34
0
def l_panel_mesh(lx, ly, refinement=5, show=False):
    """
    Creates something like
     ly +---------+
        |    ^y   |
        |    |    |
        |    0->--+
        |    | x
        |    |
    -ly +----+
       -lx       lx
        out of triangles

    Parameters
    ----------
    lx, ly : float
        Half extents of the L-panel in x and y.
    refinement : int
        Number of uniform refinements of the coarse 6-cell mesh.
    show : bool
        If True, plot the mesh with matplotlib.
    """
    mesh = df.Mesh()

    e = df.MeshEditor()
    e.open(mesh, "triangle", 2, 2)
    e.init_vertices(8)
    e.add_vertex(0, [0, 0])
    e.add_vertex(1, [lx, 0])
    e.add_vertex(2, [lx, ly])
    e.add_vertex(3, [0, ly])
    e.add_vertex(4, [-lx, ly])
    e.add_vertex(5, [-lx, 0])
    e.add_vertex(6, [-lx, -ly])
    e.add_vertex(7, [0, -ly])

    e.init_cells(6)
    e.add_cell(0, [0, 1, 3])
    e.add_cell(1, [1, 2, 3])
    e.add_cell(2, [0, 3, 5])
    e.add_cell(3, [5, 3, 4])
    e.add_cell(4, [0, 5, 7])
    e.add_cell(5, [7, 5, 6])

    # BUG FIX: the original wrote ``e.close`` (attribute access, no call),
    # so the MeshEditor was never closed and the mesh was left unfinalized.
    e.close()
    mesh.order()

    for _ in range(refinement):
        mesh = df.refine(mesh)

    if show:
        df.plot(mesh)
        import matplotlib.pyplot as plt

        plt.show()

    return mesh
Example #35
0
    def addMesh(self, mesh=None):
        """
        Keep fully transformed mesh.

        This breaks pickling.

        If ``mesh`` is None, rebuild it from ``self.pickleMesh``, refine
        and transform it, and record its final entity count; otherwise
        adopt the given mesh as-is. For degree > 1 an extra uniform
        refinement is applied (presumably so higher-order data can live
        on a finer mesh — TODO confirm downstream use).
        """
        if mesh is None:
            self.mesh = build_mesh(*self.pickleMesh)
            self.mesh = self.refineMesh()
            self.mesh = transform_mesh(self.mesh, self.transformList)
            # number of entities of topological dimension self.dim
            self.finalsize = self.mesh.size(self.dim)
        else:
            self.mesh = mesh
        # one extra uniform refinement for higher-degree elements
        self.extraRefine = self.deg > 1
        if self.extraRefine:
            self.mesh = refine(self.mesh)
Example #36
0
    def addMesh(self, mesh=None):
        """
        Keep fully transformed mesh.

        This breaks pickling.

        If ``mesh`` is None, rebuild it from ``self.pickleMesh``, refine
        and transform it, and record its final entity count; otherwise
        adopt the given mesh as-is. For degree > 1 an extra uniform
        refinement is applied (presumably so higher-order data can live
        on a finer mesh — TODO confirm downstream use).
        """
        if mesh is None:
            self.mesh = build_mesh(*self.pickleMesh)
            self.mesh = self.refineMesh()
            self.mesh = transform_mesh(self.mesh, self.transformList)
            # number of entities of topological dimension self.dim
            self.finalsize = self.mesh.size(self.dim)
        else:
            self.mesh = mesh
        # one extra uniform refinement for higher-degree elements
        self.extraRefine = self.deg > 1
        if self.extraRefine:
            self.mesh = refine(self.mesh)
Example #37
0
def generate_cube(Nelements, length, refinements=0):
    """Create a cube mesh of side ``length`` with numbered face markers.

    Returns ``(mesh, markers, XP, XM, YP, YM, ZP, ZM)`` where ``markers``
    is a facet MeshFunction carrying ids 1..6 for the +x, -x, +y, -y,
    +z and -z faces, respectively.
    """
    from dolfin import UnitCubeMesh, SubDomain, MeshFunction, Measure, near, refine

    mesh = UnitCubeMesh(Nelements, Nelements, Nelements)
    for _ in range(refinements):
        mesh = refine(mesh)
    mesh.coordinates()[:] *= length

    def _plane(axis, value):
        # Boundary subdomain for the plane x[axis] == value.
        class _Plane(SubDomain):
            def inside(self, x, on_boundary):
                return near(x[axis], value) and on_boundary
        return _Plane()

    XP, XM, YP, YM, ZP, ZM = 1, 2, 3, 4, 5, 6  # face numbering

    markers = MeshFunction("size_t", mesh, 2)
    markers.set_all(0)

    faces = (_plane(0, length), _plane(0, 0.0),
             _plane(1, length), _plane(1, 0.0),
             _plane(2, length), _plane(2, 0.0))
    for side, num in zip(faces, (XP, XM, YP, YM, ZP, ZM)):
        side.mark(markers, num)

    return mesh, markers, XP, XM, YP, YM, ZP, ZM
Example #38
0
def refine_mesh(mesh):
    """Refine the mesh in slabs around the plane z = 0.

    For each half-width ``r`` in the list, cells whose midpoint satisfies
    |z| < r are marked and the mesh is refined once; the mesh is smoothed
    at the end. (Also fixes the original's ``\"\"\"\"`` docstring typo.)
    """
    for r in [2.5]:  # [20, 15, 10, 8]:
        print("Refining ...")
        # BUG FIX: refinement markers passed to df.refine must be defined
        # on cells (topological dimension), not on facets (dim - 1) as in
        # the original, which dolfin rejects/misinterprets.
        cell_markers = df.MeshFunction("bool",
                                       mesh,
                                       dim=mesh.topology().dim())
        cell_markers.set_all(False)
        for cell in df.cells(mesh):
            # p = np.sum(np.array(cell.midpoint()[:])**2)
            if np.abs(cell.midpoint()[2]) < r:
                cell_markers[cell] = True
        mesh = df.refine(mesh, cell_markers)

        print(mesh.num_cells())
    mesh.smooth()
    return mesh
def spherical_shell(dim, radii, n_refinements=0):
    """Generate a 2D annulus / 3D spherical-shell mesh using mshr.

    ``radii`` is ``(inner, outer)``. Returns ``(mesh, facet_marker)``
    with the inner/outer boundaries tagged using
    ``SphericalAnnulusBoundaryMarkers``.
    """
    assert isinstance(dim, int)
    assert dim in (2, 3)

    assert isinstance(radii, (list, tuple)) and len(radii) == 2
    ri, ro = radii
    assert isinstance(ri, float) and ri > 0.
    assert isinstance(ro, float) and ro > 0.
    assert ri < ro

    assert isinstance(n_refinements, int) and n_refinements >= 0

    # domain definition and coarse mesh generation
    if dim == 2:
        center = dlfn.Point(0., 0.)
        domain = Circle(center, ro) - Circle(center, ri)
        mesh = generate_mesh(domain, 75)
    else:
        center = dlfn.Point(0., 0., 0.)
        domain = Sphere(center, ro) - Sphere(center, ri)
        mesh = generate_mesh(domain, 15)

    # uniform refinements
    for _ in range(n_refinements):
        mesh = dlfn.refine(mesh)

    # facet function for boundary ids, zero-initialized
    facet_marker = dlfn.MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
    facet_marker.set_all(0)

    # tag the inner and outer boundaries
    boundary_ids = SphericalAnnulusBoundaryMarkers
    CircularBoundary(mesh=mesh, radius=ri).mark(
        facet_marker, boundary_ids.interior_boundary.value)
    CircularBoundary(mesh=mesh, radius=ro).mark(
        facet_marker, boundary_ids.exterior_boundary.value)

    return mesh, facet_marker
Example #40
0
def mesh(Lx=1., Ly=1., grid_spacing=1. / 16, refine_depth=3, **namespace):
    """Rectangle mesh on [0, Lx] x [0, Ly], graded toward the bottom wall.

    Refinement pass k (k = 1..refine_depth) bisects every cell whose mean
    vertex height is below 1/2**k. Extra keyword arguments are ignored.
    """
    m = df.RectangleMesh(df.Point(0., 0.), df.Point(Lx, Ly),
                         int(Lx / grid_spacing), int(Ly / grid_spacing))

    for level in range(1, refine_depth + 1):
        markers = df.MeshFunction("bool", m, m.topology().dim())
        markers.set_all(False)
        threshold = 1. / 2 ** level
        for cell in df.cells(m):
            heights = [vert.x(1) for vert in df.vertices(cell)]
            if np.mean(heights) < threshold:
                markers[cell] = True
        m = df.refine(m, markers)

    return m
Example #41
0
    def refine(self, cell_ids=None):
        """Refine mesh of basis uniformly or wrt cells, returns (new_basis,prolongate,restrict)."""
        mesh = self._fefs.mesh()
        markers = CellFunction("bool", mesh)
        if cell_ids is None:
            # uniform refinement: mark every cell
            markers.set_all(True)
        else:
            markers.set_all(False)
            for cid in cell_ids:
                markers[cid] = True
        refined_mesh = refine(mesh, markers)
        element = self._fefs.ufl_element()
        # rebuild the (vector) function space on the refined mesh
        if self._fefs.num_sub_spaces() > 1:
            space = VectorFunctionSpace(refined_mesh, element.family(), element.degree())
        else:
            space = FunctionSpace(refined_mesh, element.family(), element.degree())
        new_basis = FEniCSBasis(space)
        return new_basis, new_basis.project_onto, self.project_onto
Example #42
0
def refine_vo_fun(mes_ho):
    '''
    Refine cells whose volume exceeds five times the mean cell volume.

    Python 2 / legacy dolfin code (``print`` statement, ``CellFunction``).

    :param mes_ho: dolfin mesh to refine.
    :return: refined mesh.
    '''

    # Boolean cell markers, all False initially.
    markers = do.CellFunction('bool', mes_ho)
    markers.set_all(False)

    avg_cell_volume = np.mean([cell.volume() for cell in do.cells(mes_ho)])

    for cell in do.cells(mes_ho):
        if cell.volume() > 5. * avg_cell_volume:
            # mark huge cells (> 5x the average volume)
            markers[cell] = True

    mes_ho_refined = do.refine(mes_ho, markers)

    print 'mean(cell_volume) = ', avg_cell_volume

    return mes_ho_refined
Example #43
0
def get_projection_basis(mesh0, mesh_refinements=None, maxh=None, degree=1, sub_spaces=None, family='CG'):
    """Build a FEniCSBasis on a refined mesh or one bounded by ``maxh``.

    Exactly one of ``mesh_refinements`` (number of uniform refinements of
    ``mesh0``) or ``maxh`` (maximal cell size) must be given; a nonzero
    ``sub_spaces`` selects a vector function space.
    """
    if mesh_refinements is not None:
        mesh = mesh0
        for _ in range(mesh_refinements):
            mesh = refine(mesh)
        if sub_spaces:
            V = VectorFunctionSpace(mesh, family, degree)
            assert V.num_sub_spaces() == sub_spaces
        else:
            V = FunctionSpace(mesh, family, degree)
        return FEniCSBasis(V)

    assert maxh is not None
    if sub_spaces:
        V = VectorFunctionSpace(mesh0, family, degree)
        assert V.num_sub_spaces() == sub_spaces
    else:
        V = FunctionSpace(mesh0, family, degree)
    # refine until all cells are below maxh; keep only the basis
    return FEniCSBasis(V).refine_maxh(maxh, True)[0]
Example #44
0
def refine_mesh(mesh, size, edge=False):
    """ Refine mesh to at least given size, using one of two methods.

    Python 2 / legacy FEniCS code (``print`` statements).

    :param mesh: dolfin mesh to refine.
    :param size: minimum number of entities of top dimension.
    :param edge: if True, refine via an all-True CellFunction (workaround);
        otherwise call plain uniform refine.
    """
    dim = mesh.topology().dim()
    if not edge:
        # FEniCS 1.5 and 1.6 have a bug which prevents uniform refinement
        while mesh.size(dim) < size:
            mesh = refine(mesh)
    else:
        # Refine based on MeshFunction
        while mesh.size(dim) < size:
            # debug output comparing plain vs marker-based refinement sizes
            print refine(mesh).size(dim)
            full = CellFunction("bool", mesh, True)
            print refine(mesh, full).size(dim)
            mesh = refine(mesh, full)
    return mesh
Example #45
0
def refine_mesh(mesh, size, edge=False):
    """ Refine mesh to at least given size, using one of two methods.

    Python 2 / legacy FEniCS code (``print`` statements).

    :param mesh: dolfin mesh to refine.
    :param size: minimum number of entities of top dimension.
    :param edge: if True, refine via an all-True CellFunction (workaround);
        otherwise call plain uniform refine.
    """
    dim = mesh.topology().dim()
    if not edge:
        # FEniCS 1.5 and 1.6 have a bug which prevents uniform refinement
        while mesh.size(dim) < size:
            mesh = refine(mesh)
    else:
        # Refine based on MeshFunction
        while mesh.size(dim) < size:
            # debug output comparing plain vs marker-based refinement sizes
            print refine(mesh).size(dim)
            full = CellFunction("bool", mesh, True)
            print refine(mesh, full).size(dim)
            mesh = refine(mesh, full)
    return mesh
Example #46
0
def _compute_refinement_undefined_value():
    '''super convoluted way to get (size_t)-1'''

    mesh = dolfin.UnitSquareMesh(2, 2)
    facet_dim = mesh.geometry().dim() - 1
    ff = dolfin.MeshFunction("size_t", mesh, facet_dim, 42)

    # Temporarily switch the refinement algorithm so parent-facet data
    # is available for adapting the facet function.
    saved_algorithm = dolfin.parameters["refinement_algorithm"]
    try:
        dolfin.parameters["refinement_algorithm"] = 'plaza_with_parent_facets'
        refined = dolfin.refine(mesh)
    finally:
        dolfin.parameters["refinement_algorithm"] = saved_algorithm

    # mesh2 = mesh.child() ## <-- DOLFIN2018
    adapted = dolfin.adapt(ff, refined)
    # Facets created by refinement receive a value absent before adaption;
    # that single new value is dolfin's "undefined" marker.
    new_values = frozenset(adapted.array()) - frozenset(ff.array())
    assert len(new_values) == 1
    return next(iter(new_values))
Example #47
0
def generate_square(Nelements, length, refinements=0):
    """Create a square mesh of side ``length`` with numbered side markers.

    Returns ``(mesh, markers, LEFT, RIGHT, TOP, BOTTOM, NONE)`` where
    ``markers`` is a facet MeshFunction with ids 1..4 for the left, right,
    top and bottom sides and ``NONE`` (99) marks an empty boundary.
    """
    from dolfin import UnitSquareMesh, SubDomain, MeshFunction, Measure, near, refine

    mesh = UnitSquareMesh(Nelements, Nelements)
    for _ in range(refinements):
        mesh = refine(mesh)
    mesh.coordinates()[:] *= length

    def _edge(axis, value):
        # Boundary subdomain for the line x[axis] == value.
        class _Edge(SubDomain):
            def inside(self, x, on_boundary):
                return near(x[axis], value) and on_boundary
        return _Edge()

    LEFT, RIGHT, TOP, BOTTOM = 1, 2, 3, 4  # side numbering
    NONE = 99  # marker for empty boundary

    markers = MeshFunction("size_t", mesh, 1)
    markers.set_all(0)

    edges = (_edge(0, 0.0), _edge(0, length), _edge(1, length), _edge(1, 0.0))
    for side, num in zip(edges, (LEFT, RIGHT, TOP, BOTTOM)):
        side.mark(markers, num)

    return mesh, markers, LEFT, RIGHT, TOP, BOTTOM, NONE
def hyper_simplex(dim, n_refinements=0):
    """Unit interval (1D) or unit triangle (2D) mesh with boundary ids.

    Returns ``(mesh, facet_marker)`` tagged via HyperSimplexBoundaryMarkers.
    """
    assert isinstance(dim, int)
    assert dim <= 2, "This method is only implemented in 1D and 2D."
    assert isinstance(n_refinements, int) and n_refinements >= 0

    # mesh generation and refinement
    if dim == 1:
        # NOTE(review): n_refinements is used directly as the cell count
        # here, mirroring the original — confirm this is intended.
        mesh = dlfn.UnitIntervalMesh(n_refinements)
    elif dim == 2:
        mesh = dlfn.UnitTriangleMesh.create()
        for _ in range(n_refinements):
            mesh = dlfn.refine(mesh)

    # facet function for boundary ids
    facet_marker = dlfn.MeshFunction("size_t", mesh, dim - 1)
    facet_marker.set_all(0)

    ids = HyperSimplexBoundaryMarkers
    if dim == 1:
        left = dlfn.CompiledSubDomain("near(x[0], 0.0) && on_boundary")
        right = dlfn.CompiledSubDomain("near(x[0], 1.0) && on_boundary")
        left.mark(facet_marker, ids.left.value)
        right.mark(facet_marker, ids.right.value)
    elif dim == 2:
        whole = dlfn.CompiledSubDomain("on_boundary")
        left = dlfn.CompiledSubDomain("near(x[0], 0.0) && on_boundary")
        bottom = dlfn.CompiledSubDomain("near(x[1], 0.0) && on_boundary")
        # mark the whole boundary as the diagonal first, then overwrite
        # the left and bottom edges with their own ids
        whole.mark(facet_marker, ids.diagonal.value)
        left.mark(facet_marker, ids.left.value)
        bottom.mark(facet_marker, ids.bottom.value)

    return mesh, facet_marker
Example #49
0
def refine_bo_fun(mes_ho, boundary_name):
    '''
    Refine cells touching a given cylindrical boundary.

    Python 2 / legacy dolfin code (``print`` statement, ``xrange``).
    Marks cells having a facet midpoint within a tolerance of the outer
    radius R_ex (boundary_name containing 'outer') or the inner radius
    R_in of the first inner cylinder (boundary_name containing 'inner').

    :param mes_ho: dolfin mesh to refine.
    :param boundary_name: string selecting 'outer' or 'inner' boundary.
    :return: refined mesh.
    '''
    geo_params_d = geo_fun()[1]

    #center of the cylinder base
    x_c = geo_params_d['x_0']
    y_c = geo_params_d['y_0']
    #z_c = geo_params_d['z_0']

    # centers of the inner cylinder(s); only the first is used below
    x_c_l = [geo_params_d['x_0_{}'.format(itera)] for itera in xrange(1)]
    y_c_l = [geo_params_d['y_0_{}'.format(itera)] for itera in xrange(1)]
    #z_c_l = [geo_params_d['z_0_{}'.format(itera)] for itera in xrange(4)]

    R_in = geo_params_d['R_in']
    R_ex = geo_params_d['R_ex']

    markers = do.CellFunction('bool', mes_ho)
    markers.set_all(False)

    for cell in do.cells(mes_ho):
        for facet in do.facets(cell):
            if 'outer' in boundary_name:
                # distance of facet midpoint from center vs outer radius
                if abs(((facet.midpoint()[0] - x_c)**2. +
                        (facet.midpoint()[1] - y_c)**2.)**.5 - R_ex) < 5e-2:
                    #mark cells with facet midpoints close to the outer boundary
                    markers[cell] = True
                    print 'refinement close to the outer boundary'
            elif 'inner' in boundary_name:
                # distance of facet midpoint from first inner center vs R_in
                if abs(((facet.midpoint()[0] - x_c_l[0]) ** 2. + \
                        (facet.midpoint()[1] - y_c_l[0]) ** 2.) ** .5 - R_in) < 1e-4:
                    #mark cells with facet midpoints close to the inner boundary
                    markers[cell] = True
                    print 'refinement close to the inner boundary'

    mes_ho_refined = do.refine(mes_ho, markers)

    return mes_ho_refined
Example #50
0
    def get_projection_error_function_old(self, mu_src, mu_dest, reference_degree, refine_mesh=0):
        """Construct projection error function by projecting mu_src vector to mu_dest space of dest_degree.
        From this, the projection of mu_src onto the mu_dest space, then to the mu_dest space of dest_degree is subtracted.
        If refine_mesh > 0, the destination mesh is refined uniformly n times.

        Returns (error_vector, sum_up) where sum_up aggregates per-cell
        values back onto the coarse cells when the mesh was refined.
        """
        # TODO: If refine_mesh is True, the destination space of mu_dest is ensured to include the space of mu_src by mesh refinement
        # TODO: proper description
        # TODO: separation of fenics specific code
        from dolfin import refine, FunctionSpace, VectorFunctionSpace
        from spuq.fem.fenics.fenics_basis import FEniCSBasis
        if not refine_mesh:
            # reference space: same mesh, higher degree
            w_reference = self.get_projection(mu_src, mu_dest, reference_degree)
            w_dest = self.get_projection(mu_src, mu_dest)
            w_dest = w_reference.basis.project_onto(w_dest)
            sum_up = lambda vals: vals
        else:
            # uniformly refine destination mesh
            # NOTE: the cell_marker based refinement used in FEniCSBasis is a bisection of elements
            # while refine(mesh) carries out a red-refinement of all cells (split into 4)
#            basis_src = self[mu_src].basis
            basis_dest = self[mu_dest].basis
            mesh_reference = basis_dest.mesh
            for _ in range(refine_mesh):
                mesh_reference = refine(mesh_reference)
#            print "multi_vector::get_projection_error_function"
#            print type(basis_src._fefs), type(basis_dest._fefs)
#            print basis_src._fefs.num_sub_spaces(), basis_dest._fefs.num_sub_spaces()
#            if isinstance(basis_dest, VectorFunctionSpace):
            if basis_dest._fefs.num_sub_spaces() > 0:
                fs_reference = VectorFunctionSpace(mesh_reference, basis_dest._fefs.ufl_element().family(), reference_degree)
            else:
                fs_reference = FunctionSpace(mesh_reference, basis_dest._fefs.ufl_element().family(), reference_degree)
            basis_reference = FEniCSBasis(fs_reference, basis_dest._ptype)
            # project both vectors to reference space
            w_reference = basis_reference.project_onto(self[mu_src])
            w_dest = self.get_projection(mu_src, mu_dest)
            w_dest = basis_reference.project_onto(w_dest)
            # NOTE(review): integer division here relies on Python 2
            # semantics (len(vals) / 4 ** refine_mesh) — confirm if ported.
            sum_up = lambda vals: np.array([sum(vals[i * 4:(i + 1) * 4]) for i in range(len(vals) / 4 ** refine_mesh)])
        return w_dest - w_reference, sum_up
Example #51
0
 def adaptive(self, mesh, eigv, eigf):
     """Refine mesh based on residual errors.

     Python 2 code (``print`` statements). Computes a per-cell residual
     error indicator for each (eigenvalue, eigenfunction) pair and refines
     the worst ``fraction`` of cells.

     :param mesh: dolfin mesh to refine.
     :param eigv: iterable of eigenvalues.
     :param eigf: iterable of corresponding eigenfunctions.
     :return: refined mesh.
     """
     fraction = 0.1
     C = FunctionSpace(mesh, "DG", 0)  # constants on triangles
     w = TestFunction(C)
     h = CellSize(mesh)
     n = FacetNormal(mesh)
     marker = CellFunction("bool", mesh)
     print len(marker)
     indicators = np.zeros(len(marker))
     for e, u in zip(eigv, eigf):
         # jump term on interior facets + boundary residual term
         errform = avg(h) * jump(grad(u), n) ** 2 * avg(w) * dS \
             + h * (inner(grad(u), n) - Constant(e) * u) ** 2 * w * ds
         if self.degree > 1:
             # volume residual only meaningful for higher-degree elements
             errform += h ** 2 * div(grad(u)) ** 2 * w * dx
         indicators[:] += assemble(errform).array()  # errors for each cell
     print "Residual error: ", sqrt(sum(indicators) / len(eigv))
     # threshold separating the worst `fraction` of cells
     cutoff = sorted(
         indicators, reverse=True)[
         int(len(indicators) * fraction) - 1]
     marker.array()[:] = indicators > cutoff  # mark worst errors
     mesh = refine(mesh, marker)
     return mesh
Example #52
0
    def solve(self):
        """ Find eigenvalues for transformed mesh.

        Assembles weighted stiffness/mass forms with Robin, Steklov and
        Dirichlet boundary parts, solves the generalized eigenvalue
        problem with SLEPc and returns (eigenvalues, eigenfunctions)
        interpolated on a CG1 space, or None if nothing converged.
        """
        self.progress("Building mesh.")
        # build transformed mesh
        mesh = self.refineMesh()
        # dim = mesh.topology().dim()
        if self.bcLast:
            # transform first, then read boundary conditions
            mesh = transform_mesh(mesh, self.transformList)
            Robin, Steklov, shift, bcs = get_bc_parts(mesh, self.bcList)
        else:
            Robin, Steklov, shift, bcs = get_bc_parts(mesh, self.bcList)
            mesh = transform_mesh(mesh, self.transformList)
            # boundary conditions computed on non-transformed mesh
            # copy the values to transformed mesh
            fun = FacetFunction("size_t", mesh, shift)
            fun.array()[:] = bcs.array()[:]
            bcs = fun
        ds = Measure('ds', domain=mesh, subdomain_data=bcs)
        V = FunctionSpace(mesh, self.method, self.deg)
        u = TrialFunction(V)
        v = TestFunction(V)
        self.progress("Assembling matrices.")
        # weight functions for the top (stiffness) and bottom (mass) forms
        wTop = Expression(self.wTop, degree=self.deg)
        wBottom = Expression(self.wBottom, degree=self.deg)

        #
        # build stiffness matrix form
        #
        s = dot(grad(u), grad(v))*wTop*dx
        # add Robin parts
        for bc in Robin:
            s += Constant(bc.parValue)*u*v*wTop*ds(bc.value+shift)

        #
        # build mass matrix form
        #
        if len(Steklov) > 0:
            m = 0
            for bc in Steklov:
                m += Constant(bc.parValue)*u*v*wBottom*ds(bc.value+shift)
        else:
            m = u*v*wBottom*dx

        # assemble
        # if USE_EIGEN:
        #     S, M = EigenMatrix(), EigenMatrix()
            # tempv = EigenVector()
        # else:
        S, M = PETScMatrix(), PETScMatrix()
        # tempv = PETScVector()

        if not np.any(bcs.array() == shift+1):
            # no Dirichlet parts
            assemble(s, tensor=S)
            assemble(m, tensor=M)
        else:
            #
            # with EIGEN we could
            #   apply Dirichlet condition symmetrically
            #   completely remove rows and columns
            #
            # Dirichlet parts are marked with shift+1
            #
            # temp = Constant(0)*v*dx
            bc = DirichletBC(V, Constant(0.0), bcs, shift+1)
            # assemble_system(s, temp, bc, A_tensor=S, b_tensor=tempv)
            # assemble_system(m, temp, bc, A_tensor=M, b_tensor=tempv)
            assemble(s, tensor=S)
            bc.apply(S)
            assemble(m, tensor=M)
            # bc.zero(M)

        # if USE_EIGEN:
        #    M = M.sparray()
        #    M.eliminate_zeros()
        #    print M.shape
        #    indices = M.indptr[:-1] - M.indptr[1:] < 0
        #    M = M[indices, :].tocsc()[:, indices]
        #    S = S.sparray()[indices, :].tocsc()[:, indices]
        #    print M.shape
        #
        # solve the eigenvalue problem
        #
        self.progress("Solving eigenvalue problem.")
        eigensolver = SLEPcEigenSolver(S, M)
        eigensolver.parameters["problem_type"] = "gen_hermitian"
        eigensolver.parameters["solver"] = "krylov-schur"
        if self.target is not None:
            # look for eigenvalues near the requested target
            eigensolver.parameters["spectrum"] = "target real"
            eigensolver.parameters["spectral_shift"] = self.target
        else:
            eigensolver.parameters["spectrum"] = "smallest magnitude"
            eigensolver.parameters["spectral_shift"] = -0.01
        eigensolver.parameters["spectral_transform"] = "shift-and-invert"
        eigensolver.solve(self.number)
        self.progress("Generating eigenfunctions.")
        if eigensolver.get_number_converged() == 0:
            return None
        eigf = []
        eigv = []
        # interpolate on a refined CG1 space for higher-degree solutions
        if self.deg > 1:
            mesh = refine(mesh)
        W = FunctionSpace(mesh, 'CG', 1)
        for i in range(eigensolver.get_number_converged()):
            # get_eigenpair returns (r, c, rx, cx); keep real parts only
            pair = eigensolver.get_eigenpair(i)[::2]
            eigv.append(pair[0])
            u = Function(V)
            u.vector()[:] = pair[1]
            eigf.append(interpolate(u, W))
        return eigv, eigf
Example #53
0
    def __init__(self, cparams, dtype_u, dtype_f):
        """
        Initialization routine

        Sets up a periodic 1D FEniCS function space and assembles the
        mass, diffusion-stiffness and advection matrices for an
        advection-diffusion problem.

        Args:
            cparams: custom parameters for the example (must contain
                c_nvars, nu, t0, family, order, refinements; mu is also
                read below — presumably required as well, TODO confirm)
            dtype_u: particle data type (will be passed parent class)
            dtype_f: acceleration data type (will be passed parent class)
        """

        # define the Dirichlet boundary
        # def Boundary(x, on_boundary):
        #     return on_boundary

        # Sub domain for Periodic boundary condition
        class PeriodicBoundary(df.SubDomain):

            # Left boundary is "target domain" G
            def inside(self, x, on_boundary):
                return bool(x[0] < df.DOLFIN_EPS and x[0] > -df.DOLFIN_EPS and on_boundary)

            # Map right boundary (H) to left boundary (G)
            def map(self, x, y):
                y[0] = x[0] - 1.0

        # these parameters will be used later, so assert their existence
        assert 'c_nvars' in cparams
        assert 'nu' in cparams
        assert 't0' in cparams
        assert 'family' in cparams
        assert 'order' in cparams
        assert 'refinements' in cparams

        # add parameters as attributes for further reference
        for k,v in cparams.items():
            setattr(self,k,v)

        df.set_log_level(df.WARNING)

        # set mesh and refinement (for multilevel)
        mesh = df.UnitIntervalMesh(self.c_nvars)
        # mesh = df.UnitSquareMesh(self.c_nvars[0],self.c_nvars[1])
        for i in range(self.refinements):
            mesh = df.refine(mesh)

        # define function space for future reference
        self.V = df.FunctionSpace(mesh, self.family, self.order, constrained_domain=PeriodicBoundary())
        tmp = df.Function(self.V)
        print('DoFs on this level:',len(tmp.vector().array()))

        # invoke super init, passing number of dofs, dtype_u and dtype_f
        super(fenics_adv_diff_1d,self).__init__(self.V,dtype_u,dtype_f)

        u = df.TrialFunction(self.V)
        v = df.TestFunction(self.V)

        # Stiffness term (diffusion)
        a_K = -1.0*df.inner(df.nabla_grad(u), self.nu*df.nabla_grad(v))*df.dx

        # Stiffness term (advection)
        a_G = df.inner(self.mu*df.nabla_grad(u)[0], v)*df.dx

        # Mass term
        a_M = u*v*df.dx

        # pre-assembled matrices used by the time stepper
        self.M = df.assemble(a_M)
        self.K = df.assemble(a_K)
        self.G = df.assemble(a_G)
Example #54
0
    def __init__(self, cparams, dtype_u, dtype_f):
        """
        Initialization routine

        Sets up a 1D mixed (two-component) FEniCS space for the
        Gray-Scott reaction-diffusion system and builds its weak form
        and mass matrix.

        Args:
            cparams: custom parameters for the example (must contain
                c_nvars, t0, family, order, refinements; Du, Dv, A and B
                are also read below — presumably required too, TODO confirm)
            dtype_u: particle data type (will be passed parent class)
            dtype_f: acceleration data type (will be passed parent class)
        """

        # define the Dirichlet boundary
        def Boundary(x, on_boundary):
            return on_boundary

        # these parameters will be used later, so assert their existence
        assert 'c_nvars' in cparams
        assert 't0' in cparams
        assert 'family' in cparams
        assert 'order' in cparams
        assert 'refinements' in cparams

        # add parameters as attributes for further reference
        for k,v in cparams.items():
            setattr(self,k,v)

        df.set_log_level(df.WARNING)

        df.parameters["form_compiler"]["optimize"]     = True
        df.parameters["form_compiler"]["cpp_optimize"] = True

        # set mesh and refinement (for multilevel)
        # mesh = df.UnitIntervalMesh(self.c_nvars)
        # mesh = df.UnitSquareMesh(self.c_nvars[0],self.c_nvars[1])
        mesh = df.IntervalMesh(self.c_nvars,0,100)
        # mesh = df.RectangleMesh(0.0,0.0,2.0,2.0,self.c_nvars[0],self.c_nvars[1])
        for i in range(self.refinements):
            mesh = df.refine(mesh)

        # self.mesh = mesh
        # define function space for future reference
        V = df.FunctionSpace(mesh, self.family, self.order)
        # mixed space V x V for the two species (legacy dolfin V*V syntax)
        self.V = V*V

        # invoke super init, passing number of dofs, dtype_u and dtype_f
        super(fenics_grayscott,self).__init__(self.V,dtype_u,dtype_f)

        # rhs in weak form
        self.w = df.Function(self.V)
        q1,q2 = df.TestFunctions(self.V)

        self.w1,self.w2 = df.split(self.w)

        # Gray-Scott reaction terms: feed rate A, kill rate B
        self.F1 = (-self.Du*df.inner(df.nabla_grad(self.w1), df.nabla_grad(q1)) - self.w1*(self.w2**2)*q1 + self.A*(1-self.w1)*q1)*df.dx
        self.F2 = (-self.Dv*df.inner(df.nabla_grad(self.w2), df.nabla_grad(q2)) + self.w1*(self.w2**2)*q2 - self.B*    self.w2*q2)*df.dx
        self.F = self.F1+self.F2

        # mass matrix (block-diagonal over the two components)
        u1,u2 = df.TrialFunctions(self.V)
        a_M = u1*q1*df.dx
        M1 = df.assemble(a_M)
        a_M = u2*q2*df.dx
        M2 = df.assemble(a_M)
        self.M = M1+M2
Example #55
0
def run_MC(opts, conf):
    # propagate config values
    _G = globals()
    for sec in conf.keys():
        if sec == "LOGGING":
            continue
        secconf = conf[sec]
        for key, val in secconf.iteritems():
            print "CONF_" + key + "= secconf['" + key + "'] =", secconf[key]
            _G["CONF_" + key] = secconf[key]

    # setup logging
    _G["LOG_LEVEL"] = eval("logging." + conf["LOGGING"]["level"])
    print "LOG_LEVEL = logging." + conf["LOGGING"]["level"]    
    setup_logging(LOG_LEVEL, logfile=CONF_experiment_name + "_MC-P{0}".format(CONF_FEM_degree))
    
    # determine path of this module
    path = os.path.dirname(__file__)


    # ============================================================
    # PART A: Setup Problem
    # ============================================================
    
    # get boundaries
    mesh0, boundaries, dim = SampleDomain.setupDomain(CONF_domain, initial_mesh_N=CONF_initial_mesh_N)

    # define coefficient field
    coeff_types = ("EF-square-cos", "EF-square-sin", "monomials", "constant")
    from itertools import count
    if CONF_mu is not None:
        muparam = (CONF_mu, (0 for _ in count()))
    else:
        muparam = None 
    coeff_field = SampleProblem.setupCF(coeff_types[CONF_coeff_type], decayexp=CONF_decay_exp, gamma=CONF_gamma,
                                    freqscale=CONF_freq_scale, freqskip=CONF_freq_skip, rvtype="uniform", scale=CONF_coeff_scale, secondparam=muparam)
    
    # setup boundary conditions and pde
#    initial_mesh_N = CONF_initial_mesh_N
    pde, Dirichlet_boundary, uD, Neumann_boundary, g, f = SampleProblem.setupPDE(CONF_boundary_type, CONF_domain, CONF_problem_type, boundaries, coeff_field)
    
    # define multioperator
    A = MultiOperator(coeff_field, pde.assemble_operator, pde.assemble_operator_inner_dofs)

    
    # ============================================================
    # PART B: Import Solution
    # ============================================================
    import pickle
    PATH_SOLUTION = os.path.join(opts.basedir, CONF_experiment_name)
    FILE_SOLUTION = 'SFEM2-SOLUTIONS-P{0}.pkl'.format(CONF_FEM_degree)
    FILE_STATS = 'SIM2-STATS-P{0}.pkl'.format(CONF_FEM_degree)

    print "LOADING solutions from %s" % os.path.join(PATH_SOLUTION, FILE_SOLUTION)
    logger.info("LOADING solutions from %s" % os.path.join(PATH_SOLUTION, FILE_SOLUTION))
    # load solutions
    with open(os.path.join(PATH_SOLUTION, FILE_SOLUTION), 'rb') as fin:
        w_history = pickle.load(fin)
    # load simulation data
    logger.info("LOADING statistics from %s" % os.path.join(PATH_SOLUTION, FILE_STATS))
    with open(os.path.join(PATH_SOLUTION, FILE_STATS), 'rb') as fin:
        sim_stats = pickle.load(fin)

    logger.info("active indices of w after initialisation: %s", w_history[-1].active_indices())

    
    # ============================================================
    # PART C: MC Error Sampling
    # ============================================================

    # determine reference setting
    ref_mesh, ref_Lambda = generate_reference_setup(PATH_SOLUTION)

    MC_N = CONF_N
    MC_HMAX = CONF_maxh
    if CONF_runs > 0:
        # determine reference mesh
        w = w_history[-1]
        # ref_mesh = w.basis.basis.mesh
        for _ in range(CONF_ref_mesh_refine):
            ref_mesh = refine(ref_mesh)
        # TODO: the following association with the sampling order does not make too much sense...
        ref_maxm = CONF_sampling_order if CONF_sampling_order > 0 else max(len(mu) for mu in ref_Lambda) + CONF_sampling_order_increase
        stored_rv_samples = []
        for i, w in enumerate(w_history):
#            if i == 0:
#                continue

            # memory usage info
            import resource
            logger.info("\n======================================\nMEMORY USED: " + str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) + "\n======================================\n")
            logger.info("================>>> MC error sampling for w[%i] (of %i) on %i cells with maxm %i <<<================" % (i, len(w_history), ref_mesh.num_cells(), ref_maxm))

            MC_start = 0
            old_stats = sim_stats[i]
            if opts.continueMC:
                try:
                    MC_start = sim_stats[i]["MC-N"]
                    logger.info("CONTINUING MC of %s for solution (iteration) %s of %s", PATH_SOLUTION, i, len(w_history))
                except:
                    logger.info("STARTING MC of %s for solution (iteration) %s of %s", PATH_SOLUTION, i, len(w_history))
            if MC_start <= 0:
                    sim_stats[i]["MC-N"] = 0
                    sim_stats[i]["MC-ERROR-L2"] = 0
                    sim_stats[i]["MC-ERROR-H1A"] = 0
#                     sim_stats[i]["MC-ERROR-L2_a0"] = 0
#                     sim_stats[i]["MC-ERROR-H1_a0"] = 0
            
            MC_RUNS = max(CONF_runs - MC_start, 0)
            if MC_RUNS > 0:
                logger.info("STARTING %s MC RUNS", MC_RUNS)
#                L2err, H1err, L2err_a0, H1err_a0, N = sample_error_mc(w, pde, A, coeff_field, mesh0, ref_maxm, MC_RUNS, MC_N, MC_HMAX)
                L2err, H1err, L2err_a0, H1err_a0, N = sample_error_mc(w, pde, A, coeff_field, ref_mesh, ref_maxm, MC_RUNS, MC_N, MC_HMAX, stored_rv_samples, CONF_quadrature_degree)
                # combine current and previous results
                sim_stats[i]["MC-N"] = N + old_stats["MC-N"]
                sim_stats[i]["MC-ERROR-L2"] = (L2err * N + old_stats["MC-ERROR-L2"]) / sim_stats[i]["MC-N"]
                sim_stats[i]["MC-ERROR-H1A"] = (H1err * N + old_stats["MC-ERROR-H1A"]) / sim_stats[i]["MC-N"]
#                 sim_stats[i]["MC-ERROR-L2_a0"] = (L2err_a0 * N + old_stats["MC-ERRORL2_a0"]) / sim_stats[i]["MC-N"]
#                 sim_stats[i]["MC-ERROR-H1A_a0"] = (H1err_a0 * N + old_stats["MC-ERROR-H1A_a0"]) / sim_stats[i]["MC-N"]
                print "MC-ERROR-H1A (N:%i) = %f" % (sim_stats[i]["MC-N"], sim_stats[i]["MC-ERROR-H1A"])
            else:
                logger.info("SKIPPING MC RUN since sufficiently many samples are available")
    
    # ============================================================
    # PART D: Export Updated Data and Plotting
    # ============================================================
    # save updated data
    if opts.saveData:
        # save updated statistics
        print "SAVING statistics into %s" % os.path.join(PATH_SOLUTION, FILE_STATS)
        print sim_stats[-1].keys()
        logger.info("SAVING statistics into %s" % os.path.join(PATH_SOLUTION, FILE_STATS))
        with open(os.path.join(PATH_SOLUTION, FILE_STATS), 'wb') as fout:
            pickle.dump(sim_stats, fout)
    
    # plot residuals
    if opts.plotEstimator and len(sim_stats) > 1:
        try:
            from matplotlib.pyplot import figure, show, legend
            
            X = [s["DOFS"] for s in sim_stats]
            err_L2 = [s["MC-ERROR-L2"] for s in sim_stats]
            err_H1A = [s["MC-ERROR-H1A"] for s in sim_stats]
            err_est = [s["ERROR-EST"] for s in sim_stats]
            err_res = [s["ERROR-RES"] for s in sim_stats]
            err_tail = [s["ERROR-TAIL"] for s in sim_stats]
            mi = [s["MI"] for s in sim_stats]
            num_mi = [len(m) for m in mi]
            eff_H1A = [est / err for est, err in zip(err_est, err_H1A)]
            
            # --------
            # figure 1
            # --------
            fig1 = figure()
            fig1.suptitle("residual estimator")
            ax = fig1.add_subplot(111)
            if REFINEMENT["TAIL"]:
                ax.loglog(X, num_mi, '--y+', label='active mi')
            ax.loglog(X, eff_H1A, '--yo', label='efficiency')
            ax.loglog(X, err_L2, '-.b>', label='L2 error')
            ax.loglog(X, err_H1A, '-.r>', label='H1A error')
            ax.loglog(X, err_est, '-g<', label='error estimator')
            ax.loglog(X, err_res, '-.cx', label='residual')
            ax.loglog(X, err_tail, '-.m>', label='tail')
            legend(loc='upper right')

            print "error L2", err_L2
            print "error H1A", err_H1A
            print "EST", err_est
            print "RES", err_res
            print "TAIL", err_tail
            
            show()  # this invalidates the figure instances...
        except:
            import traceback
            print traceback.format_exc()
            logger.info("skipped plotting since matplotlib is not available...")
Example #56
0
    # steady Navier-Stokes residual in weak form:
    # (2/Re)*viscous term + convection - pressure gradient + continuity
    F = ( (2./Re)*dl.inner(strain(v),strain(v_test))+ dl.inner (dl.nabla_grad(v)*v, v_test)
           - (q * dl.div(v_test)) + ( dl.div(v) * q_test) ) * dl.dx

    # solve the nonlinear system with Newton's method
    dl.solve(F == 0, vq, bcs, solver_parameters={"newton_solver":
                                         {"relative_tolerance":1e-4, "maximum_iterations":100,
                                           "linear_solver":"default"}})

    # return only the velocity component of the mixed solution
    return v



if __name__ == "__main__":
    # quiet FEniCS logging and fix the random seed for reproducibility
    dl.set_log_active(False)
    np.random.seed(1)
    sep = "\n"+"#"*80+"\n"
    # load the base mesh from file and refine it once
    mesh = dl.refine( dl.Mesh("ad_20.xml") )

    # MPI rank/size so that console output is printed only once (rank 0)
    rank = dl.MPI.rank(mesh.mpi_comm())
    nproc = dl.MPI.size(mesh.mpi_comm())

    if rank == 0:
        print( sep, "Set up the mesh and finite element spaces.\n","Compute wind velocity", sep )
    Vh = dl.FunctionSpace(mesh, "Lagrange", 2)
    ndofs = Vh.dim()
    if rank == 0:
        print( "Number of dofs: {0}".format( ndofs ) )

    if rank == 0:
        print( sep, "Set up Prior Information and model", sep )

    # Gaussian-bump initial condition centred at (0.35, 0.7), capped at 0.5
    ic_expr = dl.Expression('min(0.5,exp(-100*(pow(x[0]-0.35,2) +  pow(x[1]-0.7,2))))', element=Vh.ufl_element())
Example #57
0
def _refine_and_track(mesh, PM, cf=None):
    """Refine ``mesh`` (by markers ``cf``, or uniformly when cf is None),
    append the resulting parent->children cell map to ``PM`` and return the
    refined mesh."""
    newmesh = refine(mesh, cf) if cf is not None else refine(mesh)
    # determine parent cell association map from the refinement data
    pc = newmesh.data().array("parent_cell", newmesh.topology().dim())
    pmap = defaultdict(list)
    for i, cid in enumerate(pc):
        pmap[cid].append(i)
    PM.append(pmap)
    return newmesh


def create_joint_mesh(meshes, destmesh=None, additional_refine=0):
    """Build a common refinement of all given meshes.

    Starting from ``destmesh`` (or, when None, the mesh in ``meshes`` with
    the most cells, which is then removed from the list), cells of the
    destination mesh are refined until none is coarser than the matching
    cell of any source mesh. Afterwards ``additional_refine`` uniform
    refinements are carried out per source mesh.

    Returns:
        (destmesh, parents): the joint mesh, and a dict mapping each cell
        index of the initial destination mesh to the list of final cell
        indices descending from it.
    """
    if destmesh is None:
        # start with the finest mesh (most cells) to avoid (most) refinements
        numcells = [m.num_cells() for m in meshes]
        mind = numcells.index(max(numcells))
        destmesh = meshes.pop(mind)

    try:
        # feature test: FEniCS < 1.2 provides Mesh.closest_cell directly
        # (narrowed from a bare except, which also swallowed KeyboardInterrupt)
        destmesh.closest_cell(Point(0, 0))
        bbt = None
    except Exception:
        # FEniCS >= 1.2 uses a bounding box tree for point queries
        bbt = destmesh.bounding_box_tree()

    # setup parent cells: initially, every cell is its own descendant
    parents = {}
    for c in cells(destmesh):
        parents[c.index()] = [c.index()]
    PM = []     # one parent->children map per refinement pass

    # refinement loop for destmesh
    for m in meshes:
        # loop until all cells of destmesh are finer than the respective
        # cells in the set of meshes
        while True:
            cf = CellFunction("bool", destmesh)
            cf.set_all(False)
            rc = 0      # counter for number of marked cells
            # get cell sizes of current mesh
            h = [c.diameter() for c in cells(destmesh)]
            # mark destination cells that are coarser (=larger) than the
            # source cell located at the same midpoint
            for c in cells(m):
                p = c.midpoint()
                if bbt is not None:
                    # FEniCS > 1.2
                    cid = bbt.compute_closest_entity(p)[0]
                else:
                    # FEniCS < 1.2
                    cid = destmesh.closest_cell(p)
                if h[cid] > c.diameter():
                    cf[cid] = True
                    rc += 1
            # carry out refinement if any cells are marked
            if rc:
                destmesh = _refine_and_track(destmesh, PM, cf)
            else:
                break

        # carry out additional uniform refinements
        for _ in range(additional_refine):
            destmesh = _refine_and_track(destmesh, PM)

    # determine association to parent cells by composing all refinement maps
    # (dict.items() works identically on Python 2 and 3, unlike iteritems())
    for level in range(len(PM)):
        for parentid, childids in parents.items():
            newchildids = []
            for cid in childids:
                # distinct name: the original shadowed ``cid`` here
                for childcid in PM[level][cid]:
                    newchildids.append(childcid)
            parents[parentid] = newchildids

    return destmesh, parents
    def _pressure_poisson(self, p1, p0,
                          mu, ui,
                          u,
                          p_bcs=None,
                          rotational_form=False,
                          tol=1.0e-10,
                          verbose=True
                          ):
        r'''Solve the pressure Poisson equation (axisymmetric/cylindrical)

            -1/r \div(r \nabla (p1-p0)) = -1/r \div(r u),

        subject to boundary conditions, for

            \nabla p = u.

        (Raw docstring on purpose: in the original non-raw string,
        ``\nabla`` was mangled by the ``\n`` escape into a newline.)

        Parameters:
            p1: pressure Function solved for (updated in place).
            p0: previous pressure; may be falsy, in which case p1 is solved
                for directly.
            mu: viscosity, used only in the rotational form.
            ui: intermediate velocity, used only in the rotational form.
            u: (tentative) velocity field driving the right-hand side.
            p_bcs: optional pressure Dirichlet BCs; if absent, a consistent
                pure-Neumann system is solved with CG + AMG.
            rotational_form: use the rotational variant of the splitting.
            tol: relative tolerance for the Krylov solver.
            verbose: monitor Krylov convergence.
        '''
        # radial coordinate r = x[0] for the cylindrical weak forms
        r = Expression('x[0]', degree=1, domain=self.W.mesh())

        Q = p1.function_space()

        p = TrialFunction(Q)
        q = TestFunction(Q)
        a2 = dot(r * grad(p), grad(q)) * 2 * pi * dx
        # The boundary conditions
        #     n.(p1-p0) = 0
        # are implicitly included.
        #
        # L2 = -div(r*u) * q * 2*pi*dx
        # cylindrical divergence: div(u) = 1/r d(r*u_r)/dr + d(u_z)/dz
        div_u = 1/r * (r * u[0]).dx(0) + u[1].dx(1)
        L2 = -div_u * q * 2*pi*r*dx
        if p0:
            L2 += r * dot(grad(p0), grad(q)) * 2*pi*dx

        # In the Cartesian variant of the rotational form, one makes use of the
        # fact that
        #
        #     curl(curl(u)) = grad(div(u)) - div(grad(u)).
        #
        # The same equation holds true in cylindrical form. Hence, to get the
        # rotational form of the splitting scheme, we need to
        #
        # rotational form
        if rotational_form:
            # If there is no dependence of the angular coordinate, what is
            # div(grad(div(u))) in Cartesian coordinates becomes
            #
            #     1/r div(r * grad(1/r div(r*u)))
            #
            # in cylindrical coordinates (div and grad are in cylindrical
            # coordinates). Unfortunately, we cannot write it down that
            # compactly since u_phi is in the game.
            # When using P2 elements, this value will be 0 anyways.
            div_ui = 1/r * (r * ui[0]).dx(0) + ui[1].dx(1)
            grad_div_ui = as_vector((div_ui.dx(0), div_ui.dx(1)))
            L2 -= r * mu * dot(grad_div_ui, grad(q)) * 2*pi*dx
            #div_grad_div_ui = 1/r * (r * grad_div_ui[0]).dx(0) \
            #    + (grad_div_ui[1]).dx(1)
            #L2 += mu * div_grad_div_ui * q * 2*pi*r*dx
            #n = FacetNormal(Q.mesh())
            #L2 -= mu * (n[0] * grad_div_ui[0] + n[1] * grad_div_ui[1]) \
            #    * q * 2*pi*r*ds

        if p_bcs:
            # with Dirichlet pressure BCs the system is regular; solve with
            # preconditioned Krylov iterations directly
            solve(
                a2 == L2, p1,
                bcs=p_bcs,
                solver_parameters={
                    'linear_solver': 'iterative',
                    'symmetric': True,
                    'preconditioner': 'amg',
                    'krylov_solver': {'relative_tolerance': tol,
                                      'absolute_tolerance': 0.0,
                                      'maximum_iterations': 100,
                                      'monitor_convergence': verbose}
                    }
                )
        else:
            # If we're dealing with a pure Neumann problem here (which is the
            # default case), this doesn't hurt CG if the system is consistent,
            # cf. :cite:`vdV03`. And indeed it is consistent if and only if
            #
            #   \int_\Gamma r n.u = 0.
            #
            # This makes clear that for incompressible Navier-Stokes, one
            # either needs to make sure that inflow and outflow always add up
            # to 0, or one has to specify pressure boundary conditions.
            #
            # If the right-hand side is very small, round-off errors may impair
            # the consistency of the system. Make sure the system we are
            # solving remains consistent.
            A = assemble(a2)
            b = assemble(L2)
            # Assert that the system is indeed consistent.
            e = Function(Q)
            e.interpolate(Constant(1.0))
            evec = e.vector()
            evec /= norm(evec)
            alpha = b.inner(evec)
            normB = norm(b)
            # Assume that in every component of the vector, a round-off error
            # of the magnitude DOLFIN_EPS is present. This leads to the
            # criterion
            #    |<b,e>| / (||b||*||e||) < DOLFIN_EPS
            # as a check whether to consider the system consistent up to
            # round-off error.
            #
            # TODO think about condition here
            #if abs(alpha) > normB * DOLFIN_EPS:
            if abs(alpha) > normB * 1.0e-12:
                divu = 1 / r * (r * u[0]).dx(0) + u[1].dx(1)
                adivu = assemble(((r * u[0]).dx(0) + u[1].dx(1)) * 2 * pi * dx)
                info('\int 1/r * div(r*u) * 2*pi*r  =  %e' % adivu)
                n = FacetNormal(Q.mesh())
                boundary_integral = assemble((n[0] * u[0] + n[1] * u[1])
                                             * 2 * pi * r * ds)
                info('\int_Gamma n.u * 2*pi*r = %e' % boundary_integral)
                message = ('System not consistent! '
                           '<b,e> = %g, ||b|| = %g, <b,e>/||b|| = %e.') \
                           % (alpha, normB, alpha / normB)
                info(message)
                # Plot the stuff, and project it to a finer mesh with linear
                # elements for the purpose.
                plot(divu, title='div(u_tentative)')
                #Vp = FunctionSpace(Q.mesh(), 'CG', 2)
                #Wp = MixedFunctionSpace([Vp, Vp])
                #up = project(u, Wp)
                fine_mesh = Q.mesh()
                for k in range(1):
                    fine_mesh = refine(fine_mesh)
                V = FunctionSpace(fine_mesh, 'CG', 1)
                W = V * V
                #uplot = Function(W)
                #uplot.interpolate(u)
                uplot = project(u, W)
                plot(uplot[0], title='u_tentative[0]')
                plot(uplot[1], title='u_tentative[1]')
                #plot(u, title='u_tentative')
                interactive()
                exit()
                # NOTE(review): unreachable -- exit() above terminates the
                # process before this raise fires; debugging leftover to fix.
                raise RuntimeError(message)
            # Project out the roundoff error.
            b -= alpha * evec

            #
            # In principle, the ILU preconditioner isn't advised here since it
            # might destroy the semidefiniteness needed for CG.
            #
            # The system is consistent, but the matrix has an eigenvalue 0.
            # This does not harm the convergence of CG, but when
            # preconditioning one has to make sure that the preconditioner
            # preserves the kernel.  ILU might destroy this (and the
            # semidefiniteness). With AMG, the coarse grid solves cannot be LU
            # then, so try Jacobi here.
            # <http://lists.mcs.anl.gov/pipermail/petsc-users/2012-February/012139.html>
            #
            prec = PETScPreconditioner('hypre_amg')
            from dolfin import PETScOptions
            PETScOptions.set('pc_hypre_boomeramg_relax_type_coarse', 'jacobi')
            solver = PETScKrylovSolver('cg', prec)
            solver.parameters['absolute_tolerance'] = 0.0
            solver.parameters['relative_tolerance'] = tol
            solver.parameters['maximum_iterations'] = 100
            solver.parameters['monitor_convergence'] = verbose
            # Create solver and solve system
            A_petsc = as_backend_type(A)
            b_petsc = as_backend_type(b)
            p1_petsc = as_backend_type(p1.vector())
            solver.set_operator(A_petsc)
            solver.solve(p1_petsc, b_petsc)
            # This would be the stump for Epetra:
            #solve(A, p.vector(), b, 'cg', 'ml_amg')
        return
Example #59
0
def test_estimator_refinement():
    # define source term
    f = Constant("1.0")
    #    f = Expression("10.*exp(-(pow(x[0] - 0.6, 2) + pow(x[1] - 0.4, 2)) / 0.02)", degree=3)

    # set default vector for new indices
    mesh0 = refine(Mesh(lshape_xml))
    fs0 = FunctionSpace(mesh0, "CG", 1)
    B = FEniCSBasis(fs0)
    u0 = Function(fs0)
    diffcoeff = Constant("1.0")
    pde = FEMPoisson()
    fem_A = pde.assemble_lhs(diffcoeff, B)
    fem_b = pde.assemble_rhs(f, B)
    solve(fem_A, u0.vector(), fem_b)
    vec0 = FEniCSVector(u0)

    # setup solution multi vector
    mis = [Multiindex([0]),
           Multiindex([1]),
           Multiindex([0, 1]),
           Multiindex([0, 2])]
    N = len(mis)

    #    meshes = [UnitSquare(i + 3, 3 + N - i) for i in range(N)]
    meshes = [refine(Mesh(lshape_xml)) for _ in range(N)]
    fss = [FunctionSpace(mesh, "CG", 1) for mesh in meshes]

    # solve Poisson problem
    w = MultiVectorWithProjection()
    for i, mi in enumerate(mis):
        B = FEniCSBasis(fss[i])
        u = Function(fss[i])
        pde = FEMPoisson()
        fem_A = pde.assemble_lhs(diffcoeff, B)
        fem_b = pde.assemble_rhs(f, B)
        solve(fem_A, u.vector(), fem_b)
        w[mi] = FEniCSVector(u)
        #        plot(w[mi]._fefunc)

    # define coefficient field
    a0 = Expression("1.0", element=FiniteElement('Lagrange', ufl.triangle, 1))
    #    a = [Expression('2.+sin(2.*pi*I*x[0]+x[1]) + 10.*exp(-pow(I*(x[0] - 0.6)*(x[1] - 0.3), 2) / 0.02)', I=i, degree=3,
    a = (Expression('A*cos(pi*I*x[0])*cos(pi*I*x[1])', A=1 / i ** 2, I=i, degree=2,
        element=FiniteElement('Lagrange', ufl.triangle, 1)) for i in count())
    rvs = (NormalRV(mu=0.5) for _ in count())
    coeff_field = ParametricCoefficientField(a, rvs, a0=a0)

    # refinement loop
    # ===============
    refinements = 3

    for refinement in range(refinements):
        print "*****************************"
        print "REFINEMENT LOOP iteration ", refinement + 1
        print "*****************************"

        # evaluate residual and projection error estimates
        # ================================================
        maxh = 1 / 10
        resind, reserr = ResidualEstimator.evaluateResidualEstimator(w, coeff_field, f)
        projind, projerr = ResidualEstimator.evaluateProjectionError(w, coeff_field, maxh)

        # testing -->
        projglobal, _ = ResidualEstimator.evaluateProjectionError(w, coeff_field, maxh, local=False)
        for mu, val in projglobal.iteritems():
            print "GLOBAL Projection Error for", mu, "=", val
            # <-- testing

        # ==============
        # MARK algorithm
        # ==============

        # setup marking sets
        mesh_markers = defaultdict(set)

        # residual marking
        # ================
        theta_eta = 0.8
        global_res = sum([res[1] for res in reserr.items()])
        allresind = list()
        for mu, resmu in resind.iteritems():
            allresind = allresind + [(resmu.coeffs[i], i, mu) for i in range(len(resmu.coeffs))]
        allresind = sorted(allresind, key=itemgetter(1))
        # TODO: check that indexing and cell ids are consistent (it would be safer to always work with cell indices) 
        marked_res = 0
        for res in allresind:
            if marked_res >= theta_eta * global_res:
                break
            mesh_markers[res[2]].add(res[1])
            marked_res += res[0]

        print "RES MARKED elements:\n", [(mu, len(cell_ids)) for mu, cell_ids in mesh_markers.iteritems()]

        # projection marking
        # ==================
        theta_zeta = 0.8
        min_zeta = 1e-10
        max_zeta = max([max(projind[mu].coeffs) for mu in projind.active_indices()])
        print "max_zeta =", max_zeta
        if max_zeta >= min_zeta:
            for mu, vec in projind.iteritems():
                indmu = [i for i, p in enumerate(vec.coeffs) if p >= theta_zeta * max_zeta]
                mesh_markers[mu] = mesh_markers[mu].union(set(indmu))
                print "PROJ MARKING", len(indmu), "elements in", mu

            print "FINAL MARKED elements:\n", [(mu, len(cell_ids)) for mu, cell_ids in mesh_markers.iteritems()]
        else:
            print "NO PROJECTION MARKING due to very small projection error!"

        # new multiindex activation
        # =========================
        # determine possible new indices
        theta_delta = 0.9
        maxm = 10
        a0_f = coeff_field.mean_func
        Ldelta = {}
        Delta = w.active_indices()
        deltaN = int(ceil(0.1 * len(Delta)))               # max number new multiindices
        for mu in Delta:
            norm_w = norm(w[mu].coeffs, 'L2')
            for m in count():
                mu1 = mu.inc(m)
                if mu1 not in Delta:
                    if m > maxm or m >= coeff_field.length:  # or len(Ldelta) >= deltaN
                        break
                    am_f, am_rv = coeff_field[m]
                    beta = am_rv.orth_polys.get_beta(1)
                    # determine ||a_m/\overline{a}||_{L\infty(D)} (approximately)
                    f = Function(w[mu]._fefunc.function_space())
                    f.interpolate(a0_f)
                    min_a0 = min(f.vector().array())
                    f.interpolate(am_f)
                    max_am = max(f.vector().array())
                    ainfty = max_am / min_a0
                    assert isinstance(ainfty, float)

                    #                    print "A***", beta[1], ainfty, norm_w
                    #                    print "B***", beta[1] * ainfty * norm_w
                    #                    print "C***", theta_delta, max_zeta
                    #                    print "D***", theta_delta * max_zeta
                    #                    print "E***", bool(beta[1] * ainfty * norm_w >= theta_delta * max_zeta)

                    if beta[1] * ainfty * norm_w >= theta_delta * max_zeta:
                        val1 = beta[1] * ainfty * norm_w
                        if mu1 not in Ldelta.keys() or (mu1 in Ldelta.keys() and Ldelta[mu1] < val1):
                            Ldelta[mu1] = val1

        print "POSSIBLE NEW MULTIINDICES ", sorted(Ldelta.iteritems(), key=itemgetter(1), reverse=True)
        Ldelta = sorted(Ldelta.iteritems(), key=itemgetter(1), reverse=True)[:min(len(Ldelta), deltaN)]
        # add new multiindices to solution vector
        for mu, _ in Ldelta:
            w[mu] = vec0
        print "SELECTED NEW MULTIINDICES ", Ldelta

        # create new refined (and enlarged) multi vector
        # ==============================================
        for mu, cell_ids in mesh_markers.iteritems():
            vec = w[mu].refine(cell_ids, with_prolongation=False)
            fs = vec._fefunc.function_space()
            B = FEniCSBasis(fs)
            u = Function(fs)
            pde = FEMPoisson()
            fem_A = pde.assemble_lhs(diffcoeff, B)
            fem_b = pde.assemble_rhs(f, B)
            solve(fem_A, vec.coeffs, fem_b)
            w[mu] = vec
Example #60
0
    def __init__(self, cparams, dtype_u, dtype_f):
        """
        Initialization routine

        Sets up a periodically-constrained function space on a (refined)
        unit square and preassembles the mass and stiffness matrices.

        Args:
            cparams: custom parameters for the example
            dtype_u: particle data type (will be passed parent class)
            dtype_f: acceleration data type (will be passed parent class)
        """

        # define the Dirichlet boundary
        # def Boundary(x, on_boundary):
        #     return on_boundary

        # Sub domain for Periodic boundary condition
        class PeriodicBoundary(df.SubDomain):

            # Left boundary is "target domain" G
            def inside(self, x, on_boundary):
                # return True if on left or bottom boundary AND NOT on one of the two corners (0, 1) and (1, 0)
                return bool((df.near(x[0], 0) or df.near(x[1], 0)) and
                        (not ((df.near(x[0], 0) and df.near(x[1], 1)) or
                                (df.near(x[0], 1) and df.near(x[1], 0)))) and on_boundary)

            # map right/top boundary points back onto the left/bottom
            # "target domain"
            def map(self, x, y):
                if df.near(x[0], 1) and df.near(x[1], 1):
                    y[0] = x[0] - 1.
                    y[1] = x[1] - 1.
                elif df.near(x[0], 1):
                    y[0] = x[0] - 1.
                    y[1] = x[1]
                else:   # near(x[1], 1)
                    y[0] = x[0]
                    y[1] = x[1] - 1.

        # these parameters will be used later, so assert their existence
        assert 'c_nvars' in cparams
        assert 'nu' in cparams
        assert 't0' in cparams
        assert 'family' in cparams
        assert 'order' in cparams
        assert 'refinements' in cparams

        # add parameters as attributes for further reference
        for k,v in cparams.items():
            setattr(self,k,v)

        # reduce FEniCS log noise to warnings only
        df.set_log_level(df.WARNING)

        # set mesh and refinement (for multilevel)
        # mesh = df.UnitIntervalMesh(self.c_nvars)
        mesh = df.UnitSquareMesh(self.c_nvars[0],self.c_nvars[1])
        for i in range(self.refinements):
            mesh = df.refine(mesh)

        # keep a copy of the mesh for later reference
        self.mesh = df.Mesh(mesh)

        self.bc = PeriodicBoundary()

        # define function space for future reference
        # (constrained_domain enforces the periodic boundary conditions)
        self.V = df.FunctionSpace(mesh, self.family, self.order, constrained_domain=self.bc)
        tmp = df.Function(self.V)
        print('DoFs on this level:',len(tmp.vector().array()))

        # invoke super init, passing number of dofs, dtype_u and dtype_f
        super(fenics_vortex_2d,self).__init__(self.V,dtype_u,dtype_f)

        w = df.TrialFunction(self.V)
        v = df.TestFunction(self.V)

        # Stiffness term (diffusion)
        a_K = df.inner(df.nabla_grad(w), df.nabla_grad(v))*df.dx

        # Mass term
        a_M = w*v*df.dx

        # preassemble mass and stiffness matrices once; reused every step
        self.M = df.assemble(a_M)
        self.K = df.assemble(a_K)