Example No. 1
def test_demag_2d(plot=False):
    mesh = df.UnitSquareMesh(4, 4)

    Ms = 1.0
    S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
    m0 = df.Expression(("0", "0", "1"), degree=1)

    m = Field(S3, m0)

    h = 0.001

    demag = Demag2D(thickness=h)

    demag.setup(m, Ms)
    print(demag.compute_field())

    f0 = demag.compute_field()
    m.set_with_numpy_array_debug(f0)

    print(demag.m.probe(0., 0., 0))
    print(demag.m.probe(1., 0., 0))
    print(demag.m.probe(0., 1., 0))
    print(demag.m.probe(1., 1., 0))
    print('=' * 50)

    print(demag.m.probe(0., 0., h))
    print(demag.m.probe(1., 0., h))
    print(demag.m.probe(0., 1., h))
    print(demag.m.probe(1., 1., h))

    if plot:
        df.plot(m.f)
        df.interactive()
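
A note on the example above: df.interactive() only exists in legacy DOLFIN builds with the VTK plotting backend. Below is a minimal sketch of the same kind of quick-look plot with the matplotlib backend (available in legacy DOLFIN 2016.2 and later), where plt.show() takes over the role of df.interactive(); the mesh and scalar field are illustrative stand-ins, not the demag field from the test.

import dolfin as df
import matplotlib.pyplot as plt

mesh = df.UnitSquareMesh(4, 4)
V = df.FunctionSpace(mesh, "Lagrange", 1)
u = df.interpolate(df.Expression("x[0] * x[1]", degree=2), V)

plt.figure()
df.plot(mesh)   # mesh wireframe, drawn with matplotlib
plt.figure()
df.plot(u)      # pseudocolour plot of the scalar field
plt.show()      # blocks, as df.interactive() used to
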
Example No. 2
def compute_noweb(pool):
    # Load pool into DOLFIN's parameters data structure
    from parampool.utils import set_dolfin_prm
    import dolfin
    pool.traverse(set_dolfin_prm, user_data=dolfin.parameters)
    # Load user's parameters
    Nx = pool.get_value('Nx')
    Ny = pool.get_value('Ny')
    degree = pool.get_value('element degree')
    f_str = pool.get_value('f')
    u0_str = pool.get_value('u0')
    f = dolfin.Expression(f_str)
    u0 = dolfin.Expression(u0_str)

    from poisson_solver import solver
    u = solver(f, u0, Nx, Ny, degree)
    #dolfin.plot(u, title='Solution', interactive=True)

    from poisson_iterative import gradient
    grad_u = gradient(u)
    grad_u_x, grad_u_y = grad_u.split(deepcopy=True)

    # Make VTK file, offer for download
    vtkfile = dolfin.File('poisson2D.pvd')
    vtkfile << u
    vtkfile << grad_u

    dolfin.plot(u)
    dolfin.plot(u.function_space().mesh())
    dolfin.plot(grad_u)
    dolfin.interactive()
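
One caveat for the example above: in later legacy DOLFIN releases, dolfin.Expression built from a C++ string expects a degree (or element) argument. A hedged sketch with hypothetical stand-ins for f_str and u0_str:

import dolfin

f_str = "-6.0"                            # hypothetical value of pool.get_value('f')
u0_str = "1 + x[0]*x[0] + 2*x[1]*x[1]"    # hypothetical value of pool.get_value('u0')

# degree sets the interpolation order used when the expression enters a form;
# degree=2 matches the quadratic u0 above
f = dolfin.Expression(f_str, degree=0)
u0 = dolfin.Expression(u0_str, degree=2)
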
Example No. 3
def example3(Ms):
    x0 = y0 = z0 = 0
    x1 = 500
    y1 = 10
    z1 = 500
    nx = 50
    ny = 1
    nz = 1
    mesh = df.Box(x0, y0, z0, x1, y1, z1, nx, ny, nz)

    sim = Sim(mesh, Ms, unit_length=1e-9)
    sim.alpha = 0.01
    sim.set_m((1, 0, 0.1))

    H_app = Zeeman((0, 0, 5e5))
    sim.add(H_app)

    exch = fe.Exchange(13.0e-12)
    sim.add(exch)

    demag = Demag(solver="FK")
    sim.add(demag)

    llg = sim.llg
    max_time = 1 * np.pi / (llg.gamma * 1e5)
    ts = np.linspace(0, max_time, num=100)

    for t in ts:
        print(t)
        sim.run_until(t)

        df.plot(llg._m)

    df.interactive()
Example No. 4
def _adaptive_mesh_refinement(dx, phi, mu, sigma, omega, conv, voltages):
    from dolfin import cells, refine
    eta = _error_estimator(dx, phi, mu, sigma, omega, conv, voltages)
    mesh = phi.function_space().mesh()
    level = 0
    TOL = 1.0e-4
    E = sum([e * e for e in eta])
    E = sqrt(MPI.sum(E))
    info('Level %d: E = %g (TOL = %g)' % (level, E, TOL))
    # Mark cells for refinement
    REFINE_RATIO = 0.5
    cell_markers = MeshFunction('bool', mesh, mesh.topology().dim())
    eta_0 = sorted(eta, reverse=True)[int(len(eta) * REFINE_RATIO)]
    eta_0 = MPI.max(eta_0)
    for c in cells(mesh):
        cell_markers[c] = eta[c.index()] > eta_0
    # Refine mesh
    mesh = refine(mesh, cell_markers)
    # Plot mesh
    plot(mesh)
    interactive()
    exit()
    ## Compute error indicators
    #K = array([c.volume() for c in cells(mesh)])
    #R = array([abs(source([c.midpoint().x(), c.midpoint().y()])) for c in cells(mesh)])
    #gam = h*R*sqrt(K)
    return
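
The marking strategy above is a fixed-fraction rule: sort the cell indicators, take the value at the REFINE_RATIO quantile as a threshold, and refine every cell whose indicator exceeds it. Below is a self-contained sketch of just that step on a toy mesh, assuming serial legacy DOLFIN (the MPI reductions used above are omitted) and a made-up per-cell indicator.

import numpy as np
from dolfin import UnitSquareMesh, MeshFunction, cells, refine

mesh = UnitSquareMesh(8, 8)
# stand-in indicator: grows towards the upper-right corner
eta = np.array([c.midpoint().x() + c.midpoint().y() for c in cells(mesh)])

REFINE_RATIO = 0.5
eta_0 = sorted(eta, reverse=True)[int(len(eta) * REFINE_RATIO)]

cell_markers = MeshFunction('bool', mesh, mesh.topology().dim())
cell_markers.set_all(False)
for c in cells(mesh):
    cell_markers[c] = eta[c.index()] > eta_0

refined = refine(mesh, cell_markers)
print(mesh.num_cells(), '->', refined.num_cells())
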
Example No. 5
def compute_noweb(pool):
    # Load pool into DOLFIN's parameters data structure
    from parampool.utils import set_dolfin_prm
    import dolfin
    pool.traverse(set_dolfin_prm, user_data=dolfin.parameters)
    # Load user's parameters
    Nx = pool.get_value('Nx')
    Ny = pool.get_value('Ny')
    degree = pool.get_value('element degree')
    f_str = pool.get_value('f')
    u0_str = pool.get_value('u0')
    f = dolfin.Expression(f_str)
    u0 = dolfin.Expression(u0_str)

    from poisson_solver import solver
    u = solver(f, u0, Nx, Ny, degree)
    #dolfin.plot(u, title='Solution', interactive=True)

    from poisson_iterative import gradient
    grad_u = gradient(u)
    grad_u_x, grad_u_y = grad_u.split(deepcopy=True)

    # Make VTK file, offer for download
    vtkfile = dolfin.File('poisson2D.pvd')
    vtkfile << u
    vtkfile << grad_u

    dolfin.plot(u)
    dolfin.plot(u.function_space().mesh())
    dolfin.plot(grad_u)
    dolfin.interactive()
Example No. 6
def checktheboundarycoordinates(bcsd, femp, plot=False):
    g1 = dolfin.Constant((0, 0))
    for bc in bcsd:
        bcrl = dolfin.DirichletBC(femp['V'], g1, bc())
        bcdict = bcrl.get_boundary_values()
        print(list(bcdict.keys()))

    bcinds = list(bcdict.keys())

    V = femp['V']

    cylmesh = femp['V'].mesh()
    if plot:
        dolfin.plot(cylmesh)
        dolfin.interactive(True)

    gdim = cylmesh.geometry().dim()
    dofmap = V.dofmap()

    # Get coordinates as len(dofs) x gdim array
    dofs_x = dofmap.tabulate_all_coordinates(cylmesh).reshape((-1, gdim))

    # for dof, dof_x in zip(dofs, dofs_x):
    #     print dof, ':', dof_x
    xcenter = 0.2
    ycenter = 0.2
    for bcind in bcinds:
        dofx = dofs_x[bcind, :]
        dx = dofx[0] - xcenter
        dy = dofx[1] - ycenter
        r = dolfin.sqrt(dx*dx + dy*dy)
        print(bcind, ':', dofx, r)
Example No. 7
 def plot_soln(self, backend='matplotlib', SAVE=False):
     """
     Function to plot solutions of the PDE.
     """
     #         parameters["plotting_backend"]=backend
     # title settings
     if not hasattr(self, 'titles'):
         self.titles = ['Potential Function', 'Lagrange Multiplier']
     if not hasattr(self, 'sols'):
         self.sols = ['fwd', 'adj', 'fwd2', 'adj2']
     if not hasattr(self, 'sub_titles'):
         self.sub_titles = [
             'forward', 'adjoint', '2nd forward', '2nd adjoint'
         ]
     if SAVE:
         self._check_folder()
     if backend == 'matplotlib':
         import matplotlib.pyplot as plt
         self._plot_mpl(SAVE=SAVE)
         plt.show()
     elif backend == 'vtk':
         self._plot_vtk(SAVE=SAVE)
         df.interactive()
     else:
         raise Exception(backend + ' not found!')
Example No. 8
 def plot_soln(self, backend='matplotlib', SAVE=False):
     """
     Function to plot solutions of the PDE.
     """
     #         parameters["plotting_backend"]=backend
     # title settings
     if not hasattr(self, 'titles'):
         self.titles = ['Potential Function', 'Lagrange Multiplier']
     if not hasattr(self, 'sols'):
         self.sols = ['fwd', 'adj', 'fwd2', 'adj2']
     if not hasattr(self, 'sub_titles'):
         self.sub_titles = [
             'forward', 'adjoint', '2nd forward', '2nd adjoint'
         ]
     if SAVE:
         import os
         if not hasattr(self, 'savepath'):
             cwd = os.getcwd()
             self.savepath = os.path.join(cwd, 'result')
             if not os.path.exists(self.savepath):
                 print('Save path does not exist; created one.')
                 os.makedirs(self.savepath)
     if backend == 'matplotlib':
         import matplotlib.pyplot as plt
         self._plot_mpl(SAVE=SAVE)
         plt.show()
     elif backend == 'vtk':
         self._plot_vtk(SAVE=SAVE)
         df.interactive()
     else:
         raise Exception(backend + ' not found!')
Example No. 9
def checktheboundarycoordinates(bcsd, femp, plot=False):
    g1 = dolfin.Constant((0, 0))
    for bc in bcsd:
        bcrl = dolfin.DirichletBC(femp['V'], g1, bc())
        bcdict = bcrl.get_boundary_values()
        print(list(bcdict.keys()))

    bcinds = list(bcdict.keys())

    V = femp['V']

    cylmesh = femp['V'].mesh()
    if plot:
        dolfin.plot(cylmesh)
        dolfin.interactive(True)

    gdim = cylmesh.geometry().dim()
    dofmap = V.dofmap()

    # Get coordinates as len(dofs) x gdim array
    dofs_x = dofmap.tabulate_all_coordinates(cylmesh).reshape((-1, gdim))

    # for dof, dof_x in zip(dofs, dofs_x):
    #     print dof, ':', dof_x
    xcenter = 0.2
    ycenter = 0.2
    for bcind in bcinds:
        dofx = dofs_x[bcind, :]
        dx = dofx[0] - xcenter
        dy = dofx[1] - ycenter
        r = dolfin.sqrt(dx*dx + dy*dy)
        print(bcind, ':', dofx, r)
Example No. 10
 def plot(self, sub=False):
     geo = self.create_geometry() if not hasattr(self, "geo") else self.geo
     if hasattr(geo, "subdomains"):
         dolfin.plot(geo.subdomains)
         dolfin.plot(geo.boundaries)
     else:
         dolfin.plot(geo.mesh)
     dolfin.interactive()
Example No. 11
def method(Lx=1.,
           Ly=1.,
           scale=0.75,
           dx=0.02,
           do_plot=True,
           polygon="dolphin",
           center=(0.5, 0.5),
           **kwargs):
    edges = np.loadtxt(os.path.join(MESHES_DIR, polygon + ".edges"),
                       dtype=int).tolist()
    nodes = np.loadtxt(os.path.join(MESHES_DIR, polygon + ".nodes"))

    nodes[:, 0] -= 0.5 * np.max(nodes[:, 0])
    nodes[:, 1] -= 0.5 * np.max(nodes[:, 1])
    nodes[:, :] *= scale
    nodes[:, 0] += center[0] * Lx
    nodes[:, 1] += center[1] * Ly

    nodes = nodes.tolist()

    x_min, x_max = 0., Lx
    y_min, y_max = 0., Ly

    corner_pts = [(x_min, y_min), (x_max, y_min), (x_max, y_max),
                  (x_min, y_max)]

    outer_nodes, outer_edges = make_polygon(corner_pts, dx, len(nodes))
    nodes.extend(outer_nodes)
    edges.extend(outer_edges)

    plot_edges(nodes, edges)

    mi = tri.MeshInfo()
    mi.set_points(nodes)
    mi.set_facets(edges)
    mi.set_holes([(center[0] * Lx, center[1] * Ly)])

    max_area = 0.5 * dx**2

    mesh = tri.build(mi,
                     max_volume=max_area,
                     min_angle=25,
                     allow_boundary_steiner=False)

    coords = np.array(mesh.points)
    faces = np.array(mesh.elements)

    if do_plot:
        plot_faces(coords, faces)

    mesh = numpy_to_dolfin(coords, faces)

    if do_plot:
        df.plot(mesh)
        df.interactive()

    mesh_path = os.path.join(MESHES_DIR, polygon + "_dx" + str(dx))
    store_mesh_HDF5(mesh, mesh_path)
Example No. 12
def method(ts, time=None, step=0, **kwargs):
    """ Plot at given time/step using dolfin. """
    info_cyan("Plotting at given timestep using Dolfin.")
    step, time = get_step_and_info(ts, time)
    f = ts.functions()
    for field in ts.fields:
        ts.update(f[field], field, step)
        df.plot(f[field])
    df.interactive()
Example No. 13
    def debug_plot(self):
        ro = self.robjs

        for k, v in self.facet_regions.items():
            dolfin.plot(self.debug_fvs_to_mf(v, signed=False), title=k)

        dolfin.plot(ro['cf'], title='cf')

        dolfin.interactive()
Example No. 14
def check_input_opa(NU, femp=None):

    if femp is None:
        # from dolfin_navier_scipy.problem_setups import cyl_fems
        # femp = cyl_fems(2)
        from dolfin_navier_scipy.problem_setups import drivcav_fems

        femp = drivcav_fems(20)

    V = femp["V"]
    Q = femp["Q"]

    cdcoo = femp["cdcoo"]

    # get the system matrices
    stokesmats = dts.get_stokessysmats(V, Q)

    # check the B
    B1, Mu = cou.get_inp_opa(cdcoo=cdcoo, V=V, NU=NU, xcomp=0)
    B2, Mu = cou.get_inp_opa(cdcoo=cdcoo, V=V, NU=NU, xcomp=1)

    # get the rhs expression of Bu
    Bu1 = spsla.spsolve(
        stokesmats["M"],
        B1 * np.vstack([np.linspace(0, 1, NU).reshape((NU, 1)), np.linspace(0, 1, NU).reshape((NU, 1))]),
    )

    Bu2 = spsla.spsolve(
        stokesmats["M"],
        B2 * np.vstack([np.linspace(0, 1, NU).reshape((NU, 1)), np.linspace(0, 1, NU).reshape((NU, 1))]),
    )
    Bu3 = spsla.spsolve(stokesmats["M"], B1 * np.vstack([1 * np.ones((NU, 1)), 0.2 * np.ones((NU, 1))]))

    bu1 = dolfin.Function(V)
    bu1.vector().set_local(Bu1)
    bu1.vector()[2] = 1  # for scaling and illustration purposes

    bu2 = dolfin.Function(V)
    bu2.vector().set_local(Bu2)
    bu2.vector()[2] = 1  # for scaling and illustration purposes

    bu3 = dolfin.Function(V)
    bu3.vector().set_local(Bu3)
    bu3.vector()[2] = 1  # for scaling and illustration purposes

    plt.figure(11)
    dolfin.plot(bu1, title="plot of Bu - extending in x")
    plt.figure(12)
    dolfin.plot(bu2, title="plot of Bu - extending in y")
    plt.figure(13)
    dolfin.plot(bu3, title="plot of Bu - extending in y")
    # dolfin.plot(V.mesh())

    dolfin.interactive()
    plt.show(block=False)
Example No. 15
        def visualize(self, U, discretization, title="", legend=None, filename=None):
            """Visualize the provided data.

            Parameters
            ----------
            U
                |VectorArray| of the data to visualize (length must be 1). Alternatively,
                a tuple of |VectorArrays| which will be visualized in separate windows.
                If `filename` is specified, only one |VectorArray| may be provided which,
                however, is allowed to contain multiple vectors which will be interpreted
                as a time series.
            discretization
                Filled in :meth:`pymor.discretizations.DiscretizationBase.visualize` (ignored).
            title
                Title of the plot.
            legend
                Description of the data that is plotted. If `U` is a tuple of |VectorArrays|,
                `legend` has to be a tuple of the same length.
            filename
                If specified, write the data to that file. `filename` needs to have an extension
                supported by FEniCS (e.g. `.pvd`).
            """
            if filename:
                assert not isinstance(U, tuple)
                assert U in self.space
                f = df.File(filename)
                function = df.Function(self.function_space)
                if legend:
                    function.rename(legend, legend)
                for u in U._list:
                    function.vector()[:] = u.impl
                    f << function
            else:
                assert (
                    U in self.space
                    and len(U) == 1
                    or (isinstance(U, tuple) and all(u in self.space for u in U) and all(len(u) == 1 for u in U))
                )
                if not isinstance(U, tuple):
                    U = (U,)
                if isinstance(legend, str):
                    legend = (legend,)
                assert legend is None or len(legend) == len(U)

                for i, u in enumerate(U):
                    function = df.Function(self.function_space)
                    function.vector()[:] = u._list[0].impl
                    if legend:
                        tit = title + " -- " if title else ""
                        tit += legend[i]
                    else:
                        tit = title
                    df.plot(function, interactive=False, title=tit)
                df.interactive()
Example No. 16
def _compute_time_errors(problem, method, mesh_sizes, Dt, plot_error=False):
    mesh_generator, solution, ProblemClass, cell_type = problem()
    # Translate data into FEniCS expressions.
    fenics_sol = Expression(smp.printing.ccode(solution['value']),
                            degree=solution['degree'],
                            t=0.0,
                            cell=cell_type
                            )
    # Compute the problem
    errors = {'theta': numpy.empty((len(mesh_sizes), len(Dt)))}
    # Create initial state.
    # Deepcopy the expression into theta0. Specify the cell to allow for
    # more involved operations with it (e.g., grad()).
    theta0 = Expression(fenics_sol.cppcode,
                        degree=solution['degree'],
                        t=0.0,
                        cell=cell_type
                        )
    for k, mesh_size in enumerate(mesh_sizes):
        mesh = mesh_generator(mesh_size)
        V = FunctionSpace(mesh, 'CG', 1)
        theta_approx = Function(V)
        theta0p = project(theta0, V)
        stepper = method(ProblemClass(V))
        if plot_error:
            error = Function(V)
        for j, dt in enumerate(Dt):
            # TODO We are facing a little bit of a problem here, being the
            # fact that the time stepper only accepts elements from V as u0.
            # In principle, though, this isn't necessary or required. We
            # could allow for arbitrary expressions here, but then the API
            # would need changing for problem.lhs(t, u).
            # Think about this.
            stepper.step(theta_approx, theta0p,
                         0.0, dt,
                         tol=1.0e-12,
                         verbose=False
                         )
            fenics_sol.t = dt
            #
            # NOTE
            # When using errornorm(), it is quite likely to see a good part
            # of the error being due to the spatial discretization.  Some
            # analyses "get rid" of this effect by (sometimes implicitly)
            # projecting the exact solution onto the discrete function
            # space.
            errors['theta'][k][j] = errornorm(fenics_sol, theta_approx)
            if plot_error:
                error.assign(project(fenics_sol - theta_approx, V))
                plot(error, title='error (dt=%e)' % dt)
                interactive()
    return errors, stepper.name, stepper.order
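
The errors array returned above is usually post-processed into an observed convergence order with a log-log fit over the time steps. A short sketch with hypothetical numbers in place of a row of errors['theta']:

import numpy

Dt = numpy.array([1.0e-2, 5.0e-3, 2.5e-3])     # hypothetical time steps
errs = numpy.array([2.0e-3, 5.2e-4, 1.3e-4])   # hypothetical errornorm values

# slope of log(err) versus log(dt) ~ observed order of the time stepper
order = numpy.polyfit(numpy.log(Dt), numpy.log(errs), 1)[0]
print('observed order: %.2f' % order)
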
Example No. 17
def show_fenics_mesh(fname):
    # boundary layer, quad element is not supported
    from dolfin import Mesh, MeshFunction, plot, interactive  # TODO: FEniCSx may have changed this API
    mesh = Mesh(fname+".xml")
    if os.path.exists(fname+"_physical_region.xml"):
        subdomains = MeshFunction("size_t", mesh, fname+"_physical_region.xml")
        plot(subdomains)
    if os.path.exists(fname+"_facet_region.xml"):
        boundaries = MeshFunction("size_t", mesh, fname+"_facet_region.xml")
        plot(boundaries)

    plot(mesh)
    interactive()  # FIXME: this event loop may conflict with FreeCAD GUI's
Example No. 18
def plot_indicators(indicators, mesh, refinements=1, interactive=True):
    DG = FunctionSpace(mesh, 'DG', 0)
    for _ in range(refinements):
        mesh = refine(mesh)
    V = FunctionSpace(refine(mesh), 'CG', 1)
    if len(indicators) == 1 and not isinstance(indicators[0], (list, tuple)):
        indicators = [indicators]
    for eta, title in indicators:
        e = Function(DG, eta)
        f = interpolate(e, V) 
        plot(f, title=title)
    if interactive:
        from dolfin import interactive
        interactive()
Example No. 19
def method(Lx=4.,
           Ly=4.,
           rad=0.2,
           R=0.3,
           N=24,
           n_segments=40,
           res=80,
           do_plot=False,
           **kwargs):
    """ Porous mesh. Not really done or useful. """
    info("Generating porous mesh")

    # x = np.random.rand(N, 2)

    diam2 = 4 * R**2

    pts = np.zeros((N, 2))
    for i in range(N):
        while True:
            pt = (np.random.rand(2) - 0.5) * np.array([Lx - 2 * R, Ly - 2 * R])
            if i == 0:
                break
            dist = pts[:i, :] - np.outer(np.ones(i), pt)
            dist2 = dist[:, 0]**2 + dist[:, 1]**2
            if all(dist2 > diam2):
                break
        pts[i, :] = pt

    rect = mshr.Rectangle(df.Point(-Lx / 2, -Ly / 2), df.Point(Lx / 2, Ly / 2))
    domain = rect
    for i in range(N):
        domain -= mshr.Circle(df.Point(pts[i, 0], pts[i, 1]),
                              rad,
                              segments=n_segments)

    mesh = mshr.generate_mesh(domain, res)

    store_mesh_HDF5(
        mesh,
        os.path.join(
            MESHES_DIR,
            "porous_Lx{Lx}_Ly{Ly}_rad{rad}_R{R}_N{N}_res{res}.h5".format(
                Lx=Lx, Ly=Ly, rad=rad, R=R, N=N, res=res)))

    if do_plot:
        df.plot(mesh)
        df.interactive()
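
The mshr part of the generator above (rectangle minus circles, then generate_mesh) also works standalone; a minimal sketch assuming the mshr package that ships with legacy FEniCS and a single hole at the centre:

import dolfin as df
import mshr

Lx, Ly, rad = 4.0, 4.0, 0.2
domain = mshr.Rectangle(df.Point(-Lx / 2, -Ly / 2), df.Point(Lx / 2, Ly / 2))
domain -= mshr.Circle(df.Point(0.0, 0.0), rad, segments=40)  # one circular hole
mesh = mshr.generate_mesh(domain, 40)                        # resolution parameter
print(mesh.num_cells())
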
Example No. 20
def plot(args):
    from dolfin import plot, interactive, Mesh, Function, VectorFunctionSpace

    # Plot the mesh
    mesh = Mesh(args.xml)
    plot(mesh, title="Mesh")
    interactive()

    # Plot velocities
    if args.velocity is not None:
        G = VectorFunctionSpace(mesh, "DG", 0)
        base_file = args.velocity[:-4] + "_{}.xml"

        for i, u in enumerate(fvcom_reader.velocity):
            g = Function(G, base_file.format(i))
            plot(g, title="Velocity time={}".format(i))
            interactive()
Example No. 21
def plot(args):
    from dolfin import plot, interactive, Mesh, Function, VectorFunctionSpace

    # Plot the mesh
    mesh = Mesh(args.xml)
    plot(mesh, title="Mesh")
    interactive()

    # Plot velocities
    if args.velocity is not None:
        G = VectorFunctionSpace(mesh, "DG", 0)
        base_file = args.velocity[:-4] + "_{}.xml"

        for i, u in enumerate(fvcom_reader.velocity):
            g = Function(G, base_file.format(i))
            plot(g, title="Velocity time={}".format(i))
            interactive()
Example No. 22
def example2(Ms):
    x0 = y0 = z0 = 0
    x1 = 500
    y1 = 10
    z1 = 100
    nx = 50
    ny = 1
    nz = 1
    mesh = df.Box(x0, y0, z0, x1, y1, z1, nx, ny, nz)

    S1 = df.FunctionSpace(mesh, "Lagrange", 1)
    S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)

    llb = LLB(S1, S3)

    llb.alpha = 0.01
    llb.beta = 0.0
    llb.M0 = Ms
    llb.set_M((Ms, 0, 0))
    llb.set_up_solver(jacobian=False)
    llb.chi = 1e-4

    H_app = Zeeman((0, 0, 5e5))
    H_app.setup(S3, llb._M, Ms=1)
    llb.interactions.append(H_app)

    exchange = Exchange(13.0e-12, 1e-2)
    exchange.chi = 1e-4
    exchange.setup(S3, llb._M, Ms, unit_length=1e-9)
    llb.interactions.append(exchange)

    demag = Demag("FK")
    demag.setup(S3, llb._M, Ms=1)
    llb.interactions.append(demag)

    max_time = 1 * np.pi / (llb.gamma * 1e5)
    ts = np.linspace(0, max_time, num=100)

    for t in ts:
        print(t)
        llb.run_until(t)

        df.plot(llb._M)

    df.interactive()
Example No. 23
def example1(Ms=8.6e5):
    x0 = y0 = z0 = 0
    x1 = y1 = z1 = 10
    nx = ny = nz = 1
    mesh = df.Box(x0, x1, y0, y1, z0, z1, nx, ny, nz)

    S1 = df.FunctionSpace(mesh, "Lagrange", 1)
    S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
    vis = df.Function(S3)

    llb = LLB(S1, S3)

    llb.alpha = 0.01
    llb.beta = 0.0
    llb.M0 = Ms
    llb.set_M((Ms, 0, 0))
    llb.set_up_solver(jacobian=False)
    llb.chi = 1e-4

    H_app = Zeeman((0, 0, 1e5))
    H_app.setup(S3, llb._M, Ms=1)
    llb.interactions.append(H_app)

    exchange = Exchange(13.0e-12, 1e-2)
    exchange.chi = 1e-4
    exchange.setup(S3, llb._M, Ms, unit_length=1e-9)

    llb.interactions.append(exchange)

    max_time = 2 * np.pi / (llb.gamma * 1e5)
    ts = np.linspace(0, max_time, num=100)

    mlist = []
    Ms_average = []
    for t in ts:
        llb.run_until(t)
        mlist.append(llb.M)
        vis.vector()[:] = mlist[-1]
        Ms_average.append(llb.M_average)
        df.plot(vis)
        time.sleep(0.00)
    print('llb times', llb.call_field_times)
    save_plot(ts, Ms_average, 'Ms_%g-time.png' % Ms)
    df.interactive()
Example No. 24
def main():
    def round_trip_connect(start, end):
      result = []
      for i in range(start, end):
        result.append((i, i+1))
      result.append((end, start))
      return result

    corners, mesh1, mesh2 = generate_meshes(2, 1, 0.3)
    points = get_vertices(corners, mesh1, mesh2)
    print("points", np.array(points))

    info = triangle.MeshInfo()
    info.set_points(points)
    info.set_facets(round_trip_connect(0, len(corners)-1))

    mesh = triangle.build(info, allow_volume_steiner=False, allow_boundary_steiner=False, min_angle=60)

    if False:
        print("vertices:")
        for i, p in enumerate(mesh.points):
            print(i, p)
        print("point numbers in triangles:")
        for i, t in enumerate(mesh.elements):
            print(i, t)

    finemesh = Mesh()
    ME = MeshEditor()
    ME.open(finemesh,2,2)
    ME.init_vertices(len(mesh.points))
    ME.init_cells(len(mesh.elements))
    for i,v in enumerate(mesh.points):
        ME.add_vertex(i,v[0],v[1])
    for i,c in enumerate(mesh.elements):
        ME.add_cell(i,c[0],c[1],c[2])
    ME.close()

    triangle.write_gnuplot_mesh("triangles.dat", mesh)

    plot(mesh1)
    plot(mesh2)
    plot(finemesh)
    interactive()
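
The MeshEditor calls above use the old positional signature. A minimal sketch of the same "points and triangles to DOLFIN mesh" conversion with the newer editor API (legacy DOLFIN 2017.2 and later take a cell-type string and list-like vertex/cell arguments); the two-triangle square is toy data:

from dolfin import Mesh, MeshEditor

points = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]   # toy vertices
triangles = [(0, 1, 2), (0, 2, 3)]                          # toy cells

mesh = Mesh()
editor = MeshEditor()
editor.open(mesh, "triangle", 2, 2)   # cell type, topological dim, geometric dim
editor.init_vertices(len(points))
editor.init_cells(len(triangles))
for i, (x, y) in enumerate(points):
    editor.add_vertex(i, [x, y])
for i, tri in enumerate(triangles):
    editor.add_cell(i, list(tri))
editor.close()
print(mesh.num_cells())               # -> 2
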
Example No. 25
def problem_whirl_cylindrical():
    '''Pistol Pete's example from Teodora I. Mitkova's text
    "Finite-Elemente-Methoden fur die Stokes-Gleichungen", adapted for
    cylindrical Navier-Stokes.
    '''
    alpha = 1.0

    def mesh_generator(n):
        #return UnitSquareMesh(n, n, 'left/right')
        return RectangleMesh(alpha, 0.0,
                             1.0 + alpha, 1.0,
                             n, n, 'left/right')
    cell_type = triangle
    x = smp.DeferredVector('x')
    # Note that the exact solution is indeed div-free.
    x0 = x[0] - alpha
    x1 = x[1]
    u = (x0 ** 2 * (1 - x0) ** 2 * 2 * x1 * (1 - x1) * (2 * x1 - 1) / x[0],
         x1 ** 2 * (1 - x1) ** 2 * 2 * x0 * (1 - x0) * (1 - 2 * x0) / x[0],
         0
         )
    p = x0 * (1 - x0) * x1 * (1 - x1)
    solution = {'u': {'value': u, 'degree': numpy.infty},
                'p': {'value': p, 'degree': 4}
                }
    plot_solution = False
    if plot_solution:
        sol_u = Expression((smp.printing.ccode(u[0]),
                            smp.printing.ccode(u[1])),
                           degree=numpy.infty,
                           t=0.0,
                           cell=cell_type,
                           )
        plot(sol_u,
             mesh=mesh_generator(20)
             )
        interactive()
    f = {'value': _get_navier_stokes_rhs_cylindrical(u, p),
         'degree': numpy.infty
         }
    mu = 1.0
    rho = 1.0
    return mesh_generator, solution, f, mu, rho, cell_type
Example No. 26
def _error_estimator(dx, phi, mu, sigma, omega, conv, voltages):
    '''Simple error estimator from

        A posteriori error estimation and adaptive mesh-refinement techniques;
        R. Verfürth;
        Journal of Computational and Applied Mathematics;
        Volume 50, Issues 1-3, 20 May 1994, Pages 67-83;
        <https://www.sciencedirect.com/science/article/pii/0377042794902909>.

    The strong PDE is

        - div(1/(mu r) grad(rphi)) + <u, 1/r grad(rphi)> + i sigma omega phi
      = sigma v_k / (2 pi r).
    '''
    from dolfin import cells
    mesh = phi.function_space().mesh()
    # Assemble the cell-wise residual in DG space
    DG = FunctionSpace(mesh, 'DG', 0)
    # get residual in DG
    v = TestFunction(DG)
    R = _residual_strong(dx, v, phi, mu, sigma, omega, conv, voltages)
    r_r = assemble(R[0])
    r_i = assemble(R[1])
    r = r_r * r_r + r_i * r_i
    visualize = True
    if visualize:
        # Plot the cell-wise residual
        u = TrialFunction(DG)
        a = zero() * dx(0)
        subdomain_indices = mu.keys()
        for i in subdomain_indices:
            a += u * v * dx(i)
        A = assemble(a)
        R2 = Function(DG)
        solve(A, R2.vector(), r)
        plot(R2, title='||R||^2')
        interactive()
    K = r.array()
    info('%r' % K)
    h = numpy.array([c.diameter() for c in cells(mesh)])
    eta = h * numpy.sqrt(K)
    return eta
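
Stripped of the subdomain and complex-valued machinery, the estimator above is eta_T = h_T * ||R||_T, with the strong residual assembled cell-wise against a DG0 test function. A self-contained sketch for a plain Poisson problem, assuming legacy DOLFIN:

import numpy
from dolfin import (UnitSquareMesh, FunctionSpace, TestFunction, Expression,
                    assemble, cells, dx, div, grad, project)

mesh = UnitSquareMesh(16, 16)
V = FunctionSpace(mesh, 'CG', 2)
u = project(Expression('sin(pi*x[0])*sin(pi*x[1])', degree=4), V)
f = Expression('2*pi*pi*sin(pi*x[0])*sin(pi*x[1])', degree=4)

DG = FunctionSpace(mesh, 'DG', 0)
v = TestFunction(DG)
# cell-wise integral of the squared strong residual of -div(grad(u)) = f
r = assemble((div(grad(u)) + f) ** 2 * v * dx)

h = numpy.array([c.h() for c in cells(mesh)])   # c.diameter() in older releases
eta = h * numpy.sqrt(r.get_local())             # r.array() in older releases
print(eta.max())
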
Example No. 27
def relax(mesh):

    Ms = 8.6e5
    sim = Simulation(mesh, Ms, pbc='2d',unit_length=1e-9)
    sim.set_m(m_init_fun)

    sim.add(Exchange(1.3e-11))
    sim.add(DMI(D = 4e-3))
    sim.add(Zeeman((0,0,0.45*Ms)))

    sim.alpha = 0.5

    ts = np.linspace(0, 1e-9, 101)
    for t in ts:
        sim.run_until(t)
        p = df.plot(sim.llg._m)
    sim.save_vtk()

    df.plot(sim.llg._m).write_png("vortex")
    df.interactive()
Example No. 28
def example1_sundials(Ms):
    x0 = y0 = z0 = 0
    x1 = y1 = z1 = 10
    nx = ny = nz = 1
    mesh = df.Box(x0, x1, y0, y1, z0, z1, nx, ny, nz)

    S1 = df.FunctionSpace(mesh, "Lagrange", 1)
    S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
    vis = df.Function(S3)

    llb = LLB(S1, S3)
    llb.alpha = 0.00
    llb.set_m((1, 1, 1))
    llb.Ms = Ms
    H_app = Zeeman((0, 0, 1e5))
    H_app.setup(S3, llb._m, Ms=Ms)
    llb.interactions.append(H_app)
    exchange = BaryakhtarExchange(13.0e-12, 1e-2)
    exchange.setup(S3, llb._m, llb._Ms)
    llb.interactions.append(exchange)

    integrator = llg_integrator(llb, llb.M, abstol=1e-10, reltol=1e-6)

    max_time = 2 * np.pi / (llb.gamma * 1e5)
    ts = np.linspace(0, max_time, num=50)

    mlist = []
    Ms_average = []
    for t in ts:
        integrator.advance_time(t)
        mlist.append(integrator.m.copy())
        llb.M = mlist[-1]
        vis.vector()[:] = mlist[-1]
        Ms_average.append(llb.M_average)
        df.plot(vis)
        time.sleep(0.0)
    print(llb.count)
    save_plot(ts, Ms_average, 'Ms_%g-time-sundials.png' % Ms)
    df.interactive()
Example No. 29
def example1(Ms=8.6e5):
    x0 = y0 = z0 = 0
    x1 = y1 = z1 = 10
    nx = ny = nz = 1
    mesh = df.Box(x0, x1, y0, y1, z0, z1, nx, ny, nz)

    S1 = df.FunctionSpace(mesh, "Lagrange", 1)
    S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
    vis = df.Function(S3)

    llb = LLB(S1, S3, rtol=1e-6, atol=1e-10)
    llb.Ms = Ms
    llb.alpha = 0.0
    llb.set_m((1, 1, 1))
    H_app = Zeeman((0, 0, 1e5))
    H_app.setup(S3, llb._m, Ms=Ms)
    llb.interactions.append(H_app)
    exchange = BaryakhtarExchange(13.0e-12, 1e-5)
    exchange.setup(S3, llb._m, llb._Ms)
    llb.interactions.append(exchange)

    max_time = 2 * np.pi / (llb.gamma * 1e5)
    ts = np.linspace(0, max_time, num=100)

    mlist = []
    Ms_average = []
    for t in ts:
        llb.run_until(t)
        mlist.append(llb.M)
        vis.vector()[:] = mlist[-1]
        Ms_average.append(llb.M_average)
        df.plot(vis)
        time.sleep(0.00)
    print(llb.count)
    save_plot(ts, Ms_average, 'Ms_%g-time.png' % Ms)
    df.interactive()
Example No. 30
def compute_time_errors(problem, method, mesh_sizes, Dt):

    mesh_generator, solution, f, mu, rho, cell_type = problem()

    # Translate data into FEniCS expressions.
    u = solution['u']
    sol_u = Expression((ccode(u['value'][0]), ccode(u['value'][1])),
                       degree=_truncate_degree(u['degree']),
                       t=0.0)

    p = solution['p']
    sol_p = Expression(ccode(p['value']),
                       degree=_truncate_degree(p['degree']),
                       t=0.0)

    # Deep-copy expression to be able to provide f0, f1 for the Dirichlet-
    # boundary conditions later on.
    fenics_rhs0 = Expression((ccode(f['value'][0]), ccode(f['value'][1])),
                             degree=_truncate_degree(f['degree']),
                             t=0.0,
                             mu=mu,
                             rho=rho)
    fenics_rhs1 = Expression(fenics_rhs0.cppcode,
                             element=fenics_rhs0.ufl_element(),
                             **fenics_rhs0.user_parameters)
    # Create initial states.
    p = Expression(sol_p.cppcode,
                   degree=_truncate_degree(solution['p']['degree']),
                   t=0.0,
                   cell=cell_type)

    # Compute the problem
    errors = {
        'u': numpy.empty((len(mesh_sizes), len(Dt))),
        'p': numpy.empty((len(mesh_sizes), len(Dt)))
    }
    for k, mesh_size in enumerate(mesh_sizes):
        print()
        print()
        print('Computing for mesh size %r...' % mesh_size)
        mesh = mesh_generator(mesh_size)
        mesh_area = assemble(1.0 * dx(mesh))
        W = VectorFunctionSpace(mesh, 'CG', 2)
        P = FunctionSpace(mesh, 'CG', 1)
        u1 = Function(W)
        p1 = Function(P)
        err_p = Function(P)
        divu1 = Function(P)
        for j, dt in enumerate(Dt):
            # Prepare previous states for multistepping.
            u_1 = Expression(sol_u.cppcode,
                             degree=_truncate_degree(solution['u']['degree']),
                             t=-dt,
                             cell=cell_type)
            u_1 = project(u_1, W)
            u0 = Expression(
                sol_u.cppcode,
                degree=_truncate_degree(solution['u']['degree']),
                t=0.0,
                # t=0.5*dt,
                cell=cell_type)
            u0 = project(u0, W)
            sol_u.t = dt
            u_bcs = [DirichletBC(W, sol_u, 'on_boundary')]
            sol_p.t = dt
            p0 = project(p, P)
            p_bcs = []
            fenics_rhs0.t = 0.0
            fenics_rhs1.t = dt
            u1, p1 = method.step(Constant(dt), {
                -1: u_1,
                0: u0
            },
                                 p0,
                                 u_bcs=u_bcs,
                                 p_bcs=p_bcs,
                                 rho=Constant(rho),
                                 mu=Constant(mu),
                                 f={
                                     0: fenics_rhs0,
                                     1: fenics_rhs1
                                 },
                                 verbose=False,
                                 tol=1.0e-10)

            # plot(sol_u, mesh=mesh, title='u_sol')
            # plot(sol_p, mesh=mesh, title='p_sol')
            # plot(u1, title='u')
            # plot(p1, title='p')
            # from dolfin import div
            # plot(div(u1), title='div(u)')
            # plot(p1 - sol_p, title='p_h - p')
            # interactive()

            sol_u.t = dt
            sol_p.t = dt
            errors['u'][k][j] = errornorm(sol_u, u1)
            # The pressure is only determined up to a constant which makes
            # it a bit harder to define what the error is. For our
            # purposes, choose an alpha_0\in\R such that
            #
            #    alpha0 = argmin ||e - alpha||^2
            #
            # with  e := sol_p - p.
            # This alpha0 is unique and explicitly given by
            #
            #     alpha0 = 1/(2|Omega|) \int (e + e*)
            #            = 1/|Omega| \int Re(e),
            #
            # i.e., the mean error in \Omega.
            alpha = (+assemble(sol_p * dx(mesh)) - assemble(p1 * dx(mesh)))
            alpha /= mesh_area
            # We would like to perform
            #     p1 += alpha.
            # To avoid creating a temporary function every time, assume
            # that p1 lives in a function space where the coefficients
            # represent actual function values. This is true for CG
            # elements, for example. In that case, we can just add any
            # number to the vector of p1.
            p1.vector()[:] += alpha
            errors['p'][k][j] = errornorm(sol_p, p1)

            show_plots = False
            if show_plots:
                plot(p1, title='p1', mesh=mesh)
                plot(sol_p, title='sol_p', mesh=mesh)
                err_p.vector()[:] = p1.vector()
                sol_interp = interpolate(sol_p, P)
                err_p.vector()[:] -= sol_interp.vector()
                # plot(sol_p - p1, title='p1 - sol_p', mesh=mesh)
                plot(err_p, title='p1 - sol_p', mesh=mesh)
                # r = Expression('x[0]', degree=1, cell=triangle)
                # divu1 = 1 / r * (r * u1[0]).dx(0) + u1[1].dx(1)
                divu1.assign(project(u1[0].dx(0) + u1[1].dx(1), P))
                plot(divu1, title='div(u1)')
                interactive()
    return errors
Example No. 31
#df.plot(mesh)
#
#df.interactive()

Ms = 0.86e6  # saturation magnetisation        A/m
A = 13.0e-12  # exchange coupling strength      J/m

init = (0, 0.1, 1)

sim = Simulation(mesh, Ms, unit_length=1e-9)
sim.add(Demag())
sim.add(Exchange(A))
sim.set_m(init)

f = df.File(os.path.join(MODULE_DIR,
                         'cubes.pvd'))  # save more data for paraview

ns = 1e-9
dt = 0.01 * ns
v = df.plot(sim.llg._m)
for time in np.arange(0, 150.5 * dt, dt):
    print("time=", time, "m=", sim.llg.m_average)
    sim.run_until(time)
    v.update(sim.llg._m)

    f << sim.llg._m

df.interactive()
Example No. 32
    def _pressure_poisson(self, p1, p0,
                          mu, ui,
                          u,
                          p_bcs=None,
                          rotational_form=False,
                          tol=1.0e-10,
                          verbose=True
                          ):
        '''Solve the pressure Poisson equation
            -1/r \div(r \nabla (p1-p0)) = -1/r div(r*u),
            boundary conditions,
        for
            \nabla p = u.
        '''
        r = Expression('x[0]', degree=1, domain=self.W.mesh())

        Q = p1.function_space()

        p = TrialFunction(Q)
        q = TestFunction(Q)
        a2 = dot(r * grad(p), grad(q)) * 2 * pi * dx
        # The boundary conditions
        #     n.(p1-p0) = 0
        # are implicitly included.
        #
        # L2 = -div(r*u) * q * 2*pi*dx
        div_u = 1/r * (r * u[0]).dx(0) + u[1].dx(1)
        L2 = -div_u * q * 2*pi*r*dx
        if p0:
            L2 += r * dot(grad(p0), grad(q)) * 2*pi*dx

        # In the Cartesian variant of the rotational form, one makes use of the
        # fact that
        #
        #     curl(curl(u)) = grad(div(u)) - div(grad(u)).
        #
        # The same equation holds true in cylindrical form. Hence, to get the
        # rotational form of the splitting scheme, we need to
        #
        # rotational form
        if rotational_form:
            # If there is no dependence of the angular coordinate, what is
            # div(grad(div(u))) in Cartesian coordinates becomes
            #
            #     1/r div(r * grad(1/r div(r*u)))
            #
            # in cylindrical coordinates (div and grad are in cylindrical
            # coordinates). Unfortunately, we cannot write it down that
            # compactly since u_phi is in the game.
            # When using P2 elements, this value will be 0 anyways.
            div_ui = 1/r * (r * ui[0]).dx(0) + ui[1].dx(1)
            grad_div_ui = as_vector((div_ui.dx(0), div_ui.dx(1)))
            L2 -= r * mu * dot(grad_div_ui, grad(q)) * 2*pi*dx
            #div_grad_div_ui = 1/r * (r * grad_div_ui[0]).dx(0) \
            #    + (grad_div_ui[1]).dx(1)
            #L2 += mu * div_grad_div_ui * q * 2*pi*r*dx
            #n = FacetNormal(Q.mesh())
            #L2 -= mu * (n[0] * grad_div_ui[0] + n[1] * grad_div_ui[1]) \
            #    * q * 2*pi*r*ds

        if p_bcs:
            solve(
                a2 == L2, p1,
                bcs=p_bcs,
                solver_parameters={
                    'linear_solver': 'iterative',
                    'symmetric': True,
                    'preconditioner': 'amg',
                    'krylov_solver': {'relative_tolerance': tol,
                                      'absolute_tolerance': 0.0,
                                      'maximum_iterations': 100,
                                      'monitor_convergence': verbose}
                    }
                )
        else:
            # If we're dealing with a pure Neumann problem here (which is the
            # default case), this doesn't hurt CG if the system is consistent,
            # cf. :cite:`vdV03`. And indeed it is consistent if and only if
            #
            #   \int_\Gamma r n.u = 0.
            #
            # This makes clear that for incompressible Navier-Stokes, one
            # either needs to make sure that inflow and outflow always add up
            # to 0, or one has to specify pressure boundary conditions.
            #
            # If the right-hand side is very small, round-off errors may impair
            # the consistency of the system. Make sure the system we are
            # solving remains consistent.
            A = assemble(a2)
            b = assemble(L2)
            # Assert that the system is indeed consistent.
            e = Function(Q)
            e.interpolate(Constant(1.0))
            evec = e.vector()
            evec /= norm(evec)
            alpha = b.inner(evec)
            normB = norm(b)
            # Assume that in every component of the vector, a round-off error
            # of the magnitude DOLFIN_EPS is present. This leads to the
            # criterion
            #    |<b,e>| / (||b||*||e||) < DOLFIN_EPS
            # as a check whether to consider the system consistent up to
            # round-off error.
            #
            # TODO think about condition here
            #if abs(alpha) > normB * DOLFIN_EPS:
            if abs(alpha) > normB * 1.0e-12:
                divu = 1 / r * (r * u[0]).dx(0) + u[1].dx(1)
                adivu = assemble(((r * u[0]).dx(0) + u[1].dx(1)) * 2 * pi * dx)
                info('\int 1/r * div(r*u) * 2*pi*r  =  %e' % adivu)
                n = FacetNormal(Q.mesh())
                boundary_integral = assemble((n[0] * u[0] + n[1] * u[1])
                                             * 2 * pi * r * ds)
                info('\int_Gamma n.u * 2*pi*r = %e' % boundary_integral)
                message = ('System not consistent! '
                           '<b,e> = %g, ||b|| = %g, <b,e>/||b|| = %e.') \
                           % (alpha, normB, alpha / normB)
                info(message)
                # Plot the stuff, and project it to a finer mesh with linear
                # elements for the purpose.
                plot(divu, title='div(u_tentative)')
                #Vp = FunctionSpace(Q.mesh(), 'CG', 2)
                #Wp = MixedFunctionSpace([Vp, Vp])
                #up = project(u, Wp)
                fine_mesh = Q.mesh()
                for k in range(1):
                    fine_mesh = refine(fine_mesh)
                V = FunctionSpace(fine_mesh, 'CG', 1)
                W = V * V
                #uplot = Function(W)
                #uplot.interpolate(u)
                uplot = project(u, W)
                plot(uplot[0], title='u_tentative[0]')
                plot(uplot[1], title='u_tentative[1]')
                #plot(u, title='u_tentative')
                interactive()
                exit()
                raise RuntimeError(message)
            # Project out the roundoff error.
            b -= alpha * evec

            #
            # In principle, the ILU preconditioner isn't advised here since it
            # might destroy the semidefiniteness needed for CG.
            #
            # The system is consistent, but the matrix has an eigenvalue 0.
            # This does not harm the convergence of CG, but when
            # preconditioning one has to make sure that the preconditioner
            # preserves the kernel.  ILU might destroy this (and the
            # semidefiniteness). With AMG, the coarse grid solves cannot be LU
            # then, so try Jacobi here.
            # <http://lists.mcs.anl.gov/pipermail/petsc-users/2012-February/012139.html>
            #
            prec = PETScPreconditioner('hypre_amg')
            from dolfin import PETScOptions
            PETScOptions.set('pc_hypre_boomeramg_relax_type_coarse', 'jacobi')
            solver = PETScKrylovSolver('cg', prec)
            solver.parameters['absolute_tolerance'] = 0.0
            solver.parameters['relative_tolerance'] = tol
            solver.parameters['maximum_iterations'] = 100
            solver.parameters['monitor_convergence'] = verbose
            # Create solver and solve system
            A_petsc = as_backend_type(A)
            b_petsc = as_backend_type(b)
            p1_petsc = as_backend_type(p1.vector())
            solver.set_operator(A_petsc)
            solver.solve(p1_petsc, b_petsc)
            # This would be the stump for Epetra:
            #solve(A, p.vector(), b, 'cg', 'ml_amg')
        return
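
The consistency fix in the pure-Neumann branch above (remove the component of the right-hand side along the constant vector before the CG solve) also works in isolation. A minimal sketch of just that projection, assuming legacy DOLFIN:

from dolfin import (UnitSquareMesh, FunctionSpace, TrialFunction, TestFunction,
                    Function, Constant, assemble, dot, grad, dx, norm)

mesh = UnitSquareMesh(16, 16)
Q = FunctionSpace(mesh, 'CG', 1)
p, q = TrialFunction(Q), TestFunction(Q)

A = assemble(dot(grad(p), grad(q)) * dx)   # singular: constants lie in the kernel
b = assemble(Constant(1.0e-14) * q * dx)   # almost, but not exactly, consistent

e = Function(Q)
e.interpolate(Constant(1.0))
evec = e.vector()
evec /= norm(evec)                         # normalised constant vector

alpha = b.inner(evec)
b -= alpha * evec                          # project out the inconsistent component
print(abs(b.inner(evec)))                  # now ~ machine precision
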
Example No. 33
def problem_coscos():
    """cosine example.
    """
    def mesh_generator(n):
        mesh = UnitSquareMesh(n, n, "left/right")
        dim = mesh.topology().dim()
        domains = MeshFunction("size_t", mesh, dim)
        domains.set_all(0)
        dx = Measure("dx", subdomain_data=domains)
        boundaries = MeshFunction("size_t", mesh, dim - 1)
        boundaries.set_all(0)
        ds = Measure("ds", subdomain_data=boundaries)
        return mesh, dx, ds

    x = sympy.DeferredVector("x")

    # Choose the solution, the parameters specifically, such that the boundary
    # conditions are fulfilled exactly, namely:
    #
    #    sol(x) = 0   for x[0] == 0, and
    #    dot(n, grad(sol)) = 0    everywhere else.
    #
    alpha = 2 * pi
    r1 = 1.0
    beta = numpy.cos(alpha * r1) - r1 * alpha * numpy.sin(alpha * r1)

    solution = {
        "value": (
            beta * (1.0 - sympy.cos(alpha * x[0])),
            beta * (1.0 - sympy.cos(alpha * x[0]))
            # beta * sympy.sin(alpha * x[0]),
            # beta * sympy.sin(alpha * x[0])
        ),
        "degree":
        MAX_DEGREE,
    }

    # Produce a matching right-hand side.
    phi = solution["value"]
    mu = 1.0
    sigma = 1.0
    omega = 1.0
    rhs_sympy = (
        -sympy.diff(1 / (mu * x[0]) * sympy.diff(x[0] * phi[0], x[0]), x[0]) -
        sympy.diff(1 / (mu * x[0]) * sympy.diff(x[0] * phi[0], x[1]), x[1]) -
        omega * sigma * phi[1],
        -sympy.diff(1 / (mu * x[0]) * sympy.diff(x[0] * phi[1], x[0]), x[0]) -
        sympy.diff(1 / (mu * x[0]) * sympy.diff(x[0] * phi[1], x[1]), x[1]) +
        omega * sigma * phi[0],
    )

    rhs_sympy = (sympy.simplify(rhs_sympy[0]), sympy.simplify(rhs_sympy[1]))

    # The rhs expressions contain terms like 1/x[0]. If naively evaluated, this
    # will result in NaNs, even for points where not x[0]==0. This is because,
    # by default, expressions get interpolated to polynomials.
    # See
    # <https://fenicsproject.org/qa/12796/1-x-near-boundary-nans-where-there-shouldnt-be-nans>,
    # <https://bitbucket.org/fenics-project/dolfin/issues/831/some-problems-with-quadrature-expressions>.
    # for a workaround.
    Q = FiniteElement("Quadrature",
                      triangle,
                      degree=MAX_DEGREE,
                      quad_scheme="default")
    rhs = {
        "value": (
            Expression(helpers.ccode(rhs_sympy[0]), element=Q),
            Expression(helpers.ccode(rhs_sympy[1]), element=Q),
        ),
        "degree":
        MAX_DEGREE,
    }

    # Show the solution and the right-hand side.
    show = False
    if show:
        from dolfin import plot, interactive

        n = 50
        mesh, _, _ = mesh_generator(n)
        plot(Expression(helpers.ccode(phi[0])), mesh=mesh, title="phi.real")
        plot(Expression(helpers.ccode(phi[1])), mesh=mesh, title="phi.imag")
        plot(rhs[0], mesh=mesh, title="f.real")
        plot(rhs[1], mesh=mesh, title="f.imag")
        interactive()
    return mesh_generator, solution, rhs, triangle
Example No. 34
def run_and_calculate_error(N, dt, tmax, polydeg_u, polydeg_p, modifier=None):
    """
    Run Ocellaris and return L2 & H1 errors in the last time step
    """
    say(N, dt, tmax, polydeg_u, polydeg_p)

    # Setup and run simulation
    sim = Simulation()
    sim.input.read_yaml('kovasznay.inp')

    sim.input.set_value('mesh/Nx', N)
    sim.input.set_value('mesh/Ny', N)
    sim.input.set_value('time/dt', dt)
    sim.input.set_value('time/tmax', tmax)
    sim.input.set_value('solver/polynomial_degree_velocity', polydeg_u)
    sim.input.set_value('solver/polynomial_degree_pressure', polydeg_p)
    sim.input.set_value('output/stdout_enabled', False)

    if modifier:
        modifier(sim)  # Running regression tests, modify some input params

    say('Running ...')
    try:
        t1 = time.time()
        setup_simulation(sim)
        run_simulation(sim)
        duration = time.time() - t1
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        raise
        import traceback

        traceback.print_exc()
        return [1e10] * 6 + [1, dt, time.time() - t1]
    say('DONE')
    tmax_warning = ' <------ NON CONVERGENCE!!' if sim.time > tmax - dt / 2 else ''

    # Interpolate the analytical solution to the same function space
    Vu = sim.data['Vu']
    Vp = sim.data['Vp']
    lambda_ = sim.input.get_value('user_code/constants/LAMBDA',
                                  required_type='float')
    u0e = dolfin.Expression(
        sim.input.get_value('boundary_conditions/0/u/cpp_code/0'),
        LAMBDA=lambda_,
        degree=polydeg_u)
    u1e = dolfin.Expression(
        sim.input.get_value('boundary_conditions/0/u/cpp_code/1'),
        LAMBDA=lambda_,
        degree=polydeg_u)
    pe = dolfin.Expression(
        '-0.5*exp(LAMBDA*2*x[0]) + 1/(4*LAMBDA)*(exp(2*LAMBDA) - 1.0)',
        LAMBDA=lambda_,
        degree=polydeg_p,
    )
    u0a = dolfin.project(u0e, Vu)
    u1a = dolfin.project(u1e, Vu)
    pa = dolfin.project(pe, Vp)

    # Correct pa (we want to be spot on, not close)
    int_pa = dolfin.assemble(pa * dolfin.dx)
    vol = dolfin.assemble(dolfin.Constant(1.0) * dolfin.dx(domain=Vp.mesh()))
    pa.vector()[:] -= int_pa / vol

    # Calculate L2 errors
    err_u0 = calc_err(sim.data['u0'], u0a)
    err_u1 = calc_err(sim.data['u1'], u1a)
    err_p = calc_err(sim.data['p'], pa)

    # Calculate H1 errors
    err_u0_H1 = calc_err(sim.data['u0'], u0a, 'H1')
    err_u1_H1 = calc_err(sim.data['u1'], u1a, 'H1')
    err_p_H1 = calc_err(sim.data['p'], pa, 'H1')

    say('Number of time steps:', sim.timestep, tmax_warning)
    loglines = sim.log.get_full_log().split('\n')
    say('Num inner iterations:',
        sum(1 if 'Inner iteration' in line else 0 for line in loglines))
    say('max(ui_new-ui_prev)',
        sim.reporting.get_report('max(ui_new-ui_prev)')[1][-1])
    int_p = dolfin.assemble(sim.data['p'] * dolfin.dx)
    say('p*dx', int_p)
    say('pa*dx', dolfin.assemble(pa * dolfin.dx(domain=Vp.mesh())))
    div_u_Vp = abs(
        dolfin.project(dolfin.div(sim.data['u']),
                       Vp).vector().get_local()).max()
    say('div(u)|Vp', div_u_Vp)
    div_u_Vu = abs(
        dolfin.project(dolfin.div(sim.data['u']),
                       Vu).vector().get_local()).max()
    say('div(u)|Vu', div_u_Vu)
    Vdg0 = dolfin.FunctionSpace(sim.data['mesh'], "DG", 0)
    div_u_DG0 = abs(
        dolfin.project(dolfin.div(sim.data['u']),
                       Vdg0).vector().get_local()).max()
    say('div(u)|DG0', div_u_DG0)
    Vdg1 = dolfin.FunctionSpace(sim.data['mesh'], "DG", 1)
    div_u_DG1 = abs(
        dolfin.project(dolfin.div(sim.data['u']),
                       Vdg1).vector().get_local()).max()
    say('div(u)|DG1', div_u_DG1)

    if False:
        # Plot the results
        for fa, name in ((u0a, 'u0'), (u1a, 'u1'), (pa, 'p')):
            p1 = dolfin.plot(sim.data[name] - fa,
                             title='%s_diff' % name,
                             key='%s_diff' % name)
            p2 = dolfin.plot(fa, title=name + ' analytical', key=name)
            p1.write_png('%g_%g_%s_diff' % (N, dt, name))
            p2.write_png('%g_%g_%s' % (N, dt, name))
        dolfin.interactive()

    from numpy import argmax

    for d in range(2):
        up = sim.data['up%d' % d]
        upp = sim.data['upp%d' % d]

        V = up.function_space()
        coords = V.tabulate_dof_coordinates().reshape((-1, 2))

        up.vector()[:] -= upp.vector()
        diff = abs(up.vector().get_local())
        i = argmax(diff)
        say('Max difference in %d direction is %.4e at %r' %
            (d, diff[i], coords[i]))

        if 'uppp%d' % d in sim.data:
            uppp = sim.data['uppp%d' % d]
            upp.vector()[:] -= uppp.vector()
            diffp = abs(upp.vector().get_local())
            ip = argmax(diffp)
            say('Prev max diff. in %d direction is %.4e at %r' %
                (d, diffp[ip], coords[ip]))

    if False and N == 24:
        # dolfin.plot(sim.data['u0'], title='u0')
        # dolfin.plot(sim.data['u1'], title='u1')
        # dolfin.plot(sim.data['p'], title='p')
        # dolfin.plot(u0a, title='u0a')
        # dolfin.plot(u1a, title='u1a')
        # dolfin.plot(pa, title='pa')
        plot_err(sim.data['u0'], u0a, title='u0a - u0')
        plot_err(sim.data['u1'], u1a, title='u1a - u1')
        plot_err(sim.data['p'], pa, 'pa - p')

        # plot_err(sim.data['u0'], u0a, title='u0a - u0')
        dolfin.plot(sim.data['up0'], title='up0 - upp0')
        dolfin.plot(sim.data['upp0'], title='upp0 - uppp0')

        # plot_err(sim.data['u1'], u1a, title='u1a - u1')
        dolfin.plot(sim.data['up1'], title='up1 - upp1')
        # dolfin.plot(sim.data['upp1'], title='upp1 - uppp1')

    hmin = sim.data['mesh'].hmin()
    return err_u0, err_u1, err_p, err_u0_H1, err_u1_H1, err_p_H1, hmin, dt, duration
Example No. 35
def solve_maxwell(V, dx,
                  Mu, Sigma,  # dictionaries
                  omega,
                  f_list,  # list of dictionaries
                  convections,  # dictionary
                  bcs=None,
                  tol=1.0e-12,
                  compute_residuals=True,
                  verbose=False
                  ):
    '''Solve the complex-valued time-harmonic Maxwell system in 2D cylindrical
    coordinates.

    :param V: function space for potentials
    :param dx: measure
    :param omega: current frequency
    :type omega: float
    :param f_list: list of right-hand sides
    :param convections: convection terms by subdomains
    :type convections: dictionary
    :param bcs: Dirichlet boundary conditions
    :param tol: solver tolerance
    :type tol: float
    :param verbose: solver verbosity
    :type verbose: boolean
    :rtype: list of functions
    '''
    # For the exact solution of the magnetic scalar potential, see
    # <http://www.physics.udel.edu/~jim/PHYS809_10F/Class_Notes/Class_26.pdf>.
    # Here, the value of \phi along the rotational axis is specified as
    #
    #    phi(z) = 2 pi I / c * (z/|z| - z/sqrt(z^2 + a^2))
    #
    # where 'a' is the radius of the coil. This expression contradicts what is
    # specified by [Chaboudez97]_ who claim that phi=0 is the natural value
    # at the symmetry axis.
    #
    # For more analytic expressions, see
    #
    #     Simple Analytic Expressions for the Magnetic Field of a Circular
    #     Current Loop;
    #     James Simpson, John Lane, Christopher Immer, and Robert Youngquist;
    #     <http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20010038494_2001057024.pdf>.
    #

    # Check if boundary conditions on phi are explicitly provided.
    if not bcs:
        # Create Dirichlet boundary conditions.
        # In the cylindrically symmetric formulation, the magnetic vector
        # potential is given by
        #
        #    A = e^{i omega t} phi(r,z) e_{theta}.
        #
        # It is natural to demand phi=0 along the symmetry axis r=0 to avoid
        # discontinuities there.
        # Also, this makes sure that the system is well-defined (see comment
        # below).
        #
        def xzero(x, on_boundary):
            return on_boundary and abs(x[0]) < DOLFIN_EPS
        bcs = DirichletBC(V * V, (0.0, 0.0), xzero)
        #
        # Concerning the boundary conditions for the rest of the system:
        # At the other boundaries, it is not uncommon (?) to set so-called
        # impedance boundary conditions; see, e.g.,
        #
        #    Chaboudez et al.,
        #    Numerical Modeling in Induction Heating for Axisymmetric
        #    Geometries,
        #    IEEE Transactions on Magnetics, vol. 33, no. 1, Jan 1997,
        #    <http://www.esi-group.com/products/casting/publications/Articles_PDF/InductionaxiIEEE97.pdf>.
        #
        # or
        #
        #    <ftp://ftp.math.ethz.ch/pub/sam-reports/reports/reports2010/2010-39.pdf>.
        #
        # TODO review those, references don't seem to be too accurate
        # Those translate into Robin-type boundary conditions (and are in fact
        # sometimes called that, cf.
        # https://en.wikipedia.org/wiki/Robin_boundary_condition).
        # The classical reference is
        #
        #     Impedance boundary conditions for imperfectly conducting
        #     surfaces,
        #     T.B.A. Senior,
        #     <http://link.springer.com/content/pdf/10.1007/BF02920074>.
        #
        #class OuterBoundary(SubDomain):
        #    def inside(self, x, on_boundary):
        #        return on_boundary and abs(x[0]) > DOLFIN_EPS
        #boundaries = FacetFunction('size_t', mesh)
        #boundaries.set_all(0)
        #outer_boundary = OuterBoundary()
        #outer_boundary.mark(boundaries, 1)
        #ds = Measure('ds')[boundaries]
        ##n = FacetNormal(mesh)
        ##a += - 1.0/Mu[i] * dot(grad(r*ur), n) * vr * ds(1) \
        ##     - 1.0/Mu[i] * dot(grad(r*ui), n) * vi * ds(1)
        ##L += - 1.0/Mu[i] * 1.0 * vr * ds(1) \
        ##     - 1.0/Mu[i] * 1.0 * vi * ds(1)
        ## This is -n.grad(r u) = u:
        #a += 1.0/Mu[i] * ur * vr * ds(1) \
        #   + 1.0/Mu[i] * ui * vi * ds(1)

    # Create the system matrix, preconditioner, and the right-hand sides.
    # For preconditioners, there are two approaches. The first one, described
    # in
    #
    #     Algebraic Multigrid for Complex Symmetric Systems;
    #     D. Lahaye, H. De Gersem, S. Vandewalle, and K. Hameyer;
    #     <https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=877730>
    #
    # doesn't work too well here.
    # The matrix P, created in _build_system(), provides a better alternative.
    # For more details, see documentation in _build_system().
    #
    A, P, b_list, M, W = _build_system(V, dx,
                                       Mu, Sigma,  # dictionaries
                                       omega,
                                       f_list,  # list of dicts
                                       convections,  # dict
                                       bcs
                                       )

    #from matplotlib import pyplot as pp
    #rows, cols, values = M.data()
    #from scipy.sparse import csr_matrix
    #M_matrix = csr_matrix((values, cols, rows))
    ##from matplotlib import pyplot as pp
    ###pp.spy(M_matrix, precision=1e-3, marker='.', markersize=5)
    ##pp.spy(M_matrix)
    ##pp.show()
    ## colormap
    #cmap = pp.cm.gray_r
    #M_dense = M_matrix.todense()
    #from matplotlib.colors import LogNorm
    #im = pp.imshow(abs(M_dense), cmap=cmap, interpolation='nearest', norm=LogNorm())
    ##im = pp.imshow(abs(M_dense), cmap=cmap, interpolation='nearest')
    ##im = pp.imshow(abs(A_r), cmap=cmap, interpolation='nearest')
    ##im = pp.imshow(abs(A_i), cmap=cmap, interpolation='nearest')
    #pp.colorbar()
    #pp.show()
    #exit()
    #print A
    #rows, cols, values = A.data()
    #from scipy.sparse import csr_matrix
    #A_matrix = csr_matrix((values, cols, rows))

    ###pp.spy(A_matrix, precision=1e-3, marker='.', markersize=5)
    ##pp.spy(A_matrix)
    ##pp.show()

    ## colormap
    #cmap = pp.cm.gray_r
    #A_dense = A_matrix.todense()
    ##A_r = A_dense[0::2][0::2]
    ##A_i = A_dense[1::2][0::2]
    #cmap.set_bad('r')
    ##im = pp.imshow(abs(A_dense), cmap=cmap, interpolation='nearest', norm=LogNorm())
    #im = pp.imshow(abs(A_dense), cmap=cmap, interpolation='nearest')
    ##im = pp.imshow(abs(A_r), cmap=cmap, interpolation='nearest')
    ##im = pp.imshow(abs(A_i), cmap=cmap, interpolation='nearest')
    #pp.colorbar()
    #pp.show()

    # prepare solver
    solver = KrylovSolver('gmres', 'amg')
    solver.set_operators(A, P)

    # The PDE for A has huge coefficients (order 10^8) all over. Hence, if
    # relative residual is set to 10^-6, the actual residual will still be of
    # the order 10^2. While this isn't too bad (after all the equations are
    # upscaled by a large factor), one can choose a very small relative
    # tolerance here to get a visually pleasing residual norm.
    solver.parameters['relative_tolerance'] = 1.0e-12
    solver.parameters['absolute_tolerance'] = 0.0
    solver.parameters['maximum_iterations'] = 100
    solver.parameters['report'] = verbose
    solver.parameters['monitor_convergence'] = verbose

    phi_list = []
    for k, b in enumerate(b_list):
        with Message('Computing coil ring %d/%d...' % (k + 1, len(b_list))):
            # Define goal functional for adaptivity.
            # Adaptivity not working for subdomains, cf.
            # https://bugs.launchpad.net/dolfin/+bug/872105.
            #(phi_r, phi_i) = split(phi)
            #M = (phi_r*phi_r + phi_i*phi_i) * dx(2)
            phi_list.append(Function(W))
            phi_list[-1].rename('phi%d' % k, 'phi%d' % k)
            solver.solve(phi_list[-1].vector(), b)

        ## Adaptive mesh refinement.
        #_adaptive_mesh_refinement(dx,
        #                          phi_list[-1],
        #                          Mu, Sigma, omega,
        #                          convections,
        #                          f_list[k]
        #                          )
        #exit()

        if compute_residuals:
            # Sanity check: Compute residuals.
            # This is a good check that we haven't mixed up the real and
            # imaginary parts in the formulation above.
            r_r, r_i = _build_residuals(V, dx, phi_list[-1],
                                        omega, Mu, Sigma,
                                        convections, voltages
                                        )

            def xzero(x, on_boundary):
                return on_boundary and abs(x[0]) < DOLFIN_EPS

            subdomain_indices = Mu.keys()

            # Solve an FEM problem to get the corresponding residual function
            # out.
            # This is exactly what we need here! :)
            u = TrialFunction(V)
            v = TestFunction(V)
            a = zero() * dx(0)
            for i in subdomain_indices:
                a += u * v * dx(i)

            # TODO don't hard code the boundary conditions like this
            R_r = Function(V)
            solve(a == r_r, R_r,
                  bcs=DirichletBC(V, 0.0, xzero)
                  )

            # TODO don't hard code the boundary conditions like this
            R_i = Function(V)
            solve(a == r_i, R_i,
                  bcs=DirichletBC(V, 0.0, xzero)
                  )

            nrm_r = norm(R_r)
            info('||r_r|| = %e' % nrm_r)
            nrm_i = norm(R_i)
            info('||r_i|| = %e' % nrm_i)
            res_norm = sqrt(nrm_r * nrm_r + nrm_i * nrm_i)
            info('||r|| = %e' % res_norm)

            plot(R_r, title='R_r')
            plot(R_i, title='R_i')
            interactive()
            #exit()
    return phi_list
Exemplo n.º 36
0
from cbcpost import *
from dolfin import set_log_level, WARNING, interactive
set_log_level(WARNING)

pp = PostProcessor(dict(casedir="../Basic/Results"))

pp.add_fields([
    SolutionField("Temperature", dict(plot=True)),
    Norm("Temperature", dict(save=True, plot=True)),
    TimeIntegral("Norm_Temperature", dict(save=True, start_time=0.0, end_time=6.0)),
])

replayer = Replay(pp)
replayer.replay()
interactive()
Exemplo n.º 37
0
        def visualize(self, U, discretization, title='', legend=None, filename=None, block=True,
                      separate_colorbars=True):
            """Visualize the provided data.

            Parameters
            ----------
            U
                |VectorArray| of the data to visualize (length must be 1). Alternatively,
                a tuple of |VectorArrays| which will be visualized in separate windows.
                If `filename` is specified, only one |VectorArray| may be provided which,
                however, is allowed to contain multiple vectors which will be interpreted
                as a time series.
            discretization
                Filled in by :meth:`pymor.discretizations.DiscretizationBase.visualize` (ignored).
            title
                Title of the plot.
            legend
                Description of the data that is plotted. If `U` is a tuple of |VectorArrays|,
                `legend` has to be a tuple of the same length.
            filename
                If specified, write the data to that file. `filename` needs to have an extension
                supported by FEniCS (e.g. `.pvd`).
            separate_colorbars
                If `True`, use separate colorbars for each subplot.
            block
                If `True`, block execution until the plot window is closed
                (non-blocking execution is currently unsupported).
            """
            if not block:
                raise NotImplementedError
            if filename:
                assert not isinstance(U, tuple)
                assert U in self.space
                f = df.File(filename)
                function = df.Function(self.function_space)
                if legend:
                    function.rename(legend, legend)
                for u in U._list:
                    function.vector()[:] = u.impl
                    f << function
            else:
                assert U in self.space and len(U) == 1 \
                    or (isinstance(U, tuple) and all(u in self.space for u in U) and all(len(u) == 1 for u in U))
                if not isinstance(U, tuple):
                    U = (U,)
                if isinstance(legend, str):
                    legend = (legend,)
                assert legend is None or len(legend) == len(U)

                if not separate_colorbars:
                    vmin = np.inf
                    vmax = -np.inf
                    for u in U:
                        vec = u._list[0].impl
                        vmin = min(vmin, vec.min())
                        vmax = max(vmax, vec.max())

                for i, u in enumerate(U):
                    function = df.Function(self.function_space)
                    function.vector()[:] = u._list[0].impl
                    if legend:
                        tit = title + ' -- ' if title else ''
                        tit += legend[i]
                    else:
                        tit = title
                    if separate_colorbars:
                        df.plot(function, interactive=False, title=tit)
                    else:
                        df.plot(function, interactive=False, title=tit,
                                range_min=vmin, range_max=vmax)
                df.interactive()
Exemplo n.º 38
0
def compute_time_errors(problem, MethodClass, mesh_sizes, Dt):

    mesh_generator, solution, f, mu, rho, cell_type = problem()
    # Translate data into FEniCS expressions.
    sol_u = Expression((smp.printing.ccode(solution['u']['value'][0]),
                        smp.printing.ccode(solution['u']['value'][1])
                        ),
                       degree=_truncate_degree(solution['u']['degree']),
                       t=0.0,
                       cell=cell_type
                       )
    sol_p = Expression(smp.printing.ccode(solution['p']['value']),
                       degree=_truncate_degree(solution['p']['degree']),
                       t=0.0,
                       cell=cell_type
                       )

    fenics_rhs0 = Expression((smp.printing.ccode(f['value'][0]),
                              smp.printing.ccode(f['value'][1])
                              ),
                             degree=_truncate_degree(f['degree']),
                             t=0.0,
                             mu=mu, rho=rho,
                             cell=cell_type
                             )
    # Deep-copy expression to be able to provide f0, f1 for the Dirichlet-
    # boundary conditions later on.
    fenics_rhs1 = Expression(fenics_rhs0.cppcode,
                             degree=_truncate_degree(f['degree']),
                             t=0.0,
                             mu=mu, rho=rho,
                             cell=cell_type
                             )
    # Create initial states.
    p0 = Expression(
        sol_p.cppcode,
        degree=_truncate_degree(solution['p']['degree']),
        t=0.0,
        cell=cell_type
        )

    # Compute the problem
    errors = {'u': numpy.empty((len(mesh_sizes), len(Dt))),
              'p': numpy.empty((len(mesh_sizes), len(Dt)))
              }
    for k, mesh_size in enumerate(mesh_sizes):
        info('')
        info('')
        with Message('Computing for mesh size %r...' % mesh_size):
            mesh = mesh_generator(mesh_size)
            mesh_area = assemble(1.0 * dx(mesh))
            W = VectorFunctionSpace(mesh, 'CG', 2)
            P = FunctionSpace(mesh, 'CG', 1)
            method = MethodClass(W, P,
                                 rho, mu,
                                 theta=1.0,
                                 #theta=0.5,
                                 stabilization=None
                                 #stabilization='SUPG'
                                 )
            u1 = Function(W)
            p1 = Function(P)
            err_p = Function(P)
            divu1 = Function(P)
            for j, dt in enumerate(Dt):
                # Prepare previous states for multistepping.
                u = [Expression(
                    sol_u.cppcode,
                    degree=_truncate_degree(solution['u']['degree']),
                    t=0.0,
                    cell=cell_type
                    ),
                    # Expression(
                    #sol_u.cppcode,
                    #degree=_truncate_degree(solution['u']['degree']),
                    #t=0.5*dt,
                    #cell=cell_type
                    #)
                    ]
                sol_u.t = dt
                u_bcs = [DirichletBC(W, sol_u, 'on_boundary')]
                sol_p.t = dt
                #p_bcs = [DirichletBC(P, sol_p, 'on_boundary')]
                p_bcs = []
                fenics_rhs0.t = 0.0
                fenics_rhs1.t = dt
                method.step(dt,
                            u1, p1,
                            u, p0,
                            u_bcs=u_bcs, p_bcs=p_bcs,
                            f0=fenics_rhs0, f1=fenics_rhs1,
                            verbose=False,
                            tol=1.0e-10
                            )
                sol_u.t = dt
                sol_p.t = dt
                errors['u'][k][j] = errornorm(sol_u, u1)
                # The pressure is only determined up to a constant which makes
                # it a bit harder to define what the error is. For our
                # purposes, choose an alpha_0\in\R such that
                #
                #    alpha0 = argmin ||e - alpha||^2
                #
                # with  e := sol_p - p.
                # This alpha0 is unique and explicitly given by
                #
                #     alpha0 = 1/(2|Omega|) \int (e + e*)
                #            = 1/|Omega| \int Re(e),
                #
                # i.e., the mean error in \Omega.
                alpha = assemble(sol_p * dx(mesh)) \
                    - assemble(p1 * dx(mesh))
                alpha /= mesh_area
                # We would like to perform
                #     p1 += alpha.
                # To avoid creating a temporary function every time, assume
                # that p1 lives in a function space where the coefficients
                # represent actual function values. This is true for CG
                # elements, for example. In that case, we can just add any
                # number to the vector of p1.
                p1.vector()[:] += alpha
                errors['p'][k][j] = errornorm(sol_p, p1)

                show_plots = False
                if show_plots:
                    plot(p1, title='p1', mesh=mesh)
                    plot(sol_p, title='sol_p', mesh=mesh)
                    err_p.vector()[:] = p1.vector()
                    sol_interp = interpolate(sol_p, P)
                    err_p.vector()[:] -= sol_interp.vector()
                    #plot(sol_p - p1, title='p1 - sol_p', mesh=mesh)
                    plot(err_p, title='p1 - sol_p', mesh=mesh)
                    #r = Expression('x[0]', degree=1, cell=triangle)
                    #divu1 = 1 / r * (r * u1[0]).dx(0) + u1[1].dx(1)
                    divu1.assign(project(u1[0].dx(0) + u1[1].dx(1), P))
                    plot(divu1, title='div(u1)')
                    interactive()
    return errors
Exemplo n.º 39
0
def main():
    L = 10.0
    H = 10.0

    mesh = df.UnitSquare(10,10,'left')
    mesh.coordinates()[:,0] *= L
    mesh.coordinates()[:,1] *= H

    U = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=2)
    U_x, U_y = U.split()
    u = df.TrialFunction(U)
    v = df.TestFunction(U)

    E = 2.0E11
    nu = 0.3

    lmbda = nu*E/((1.0 + nu)*(1.0 - 2.0*nu))
    mu = E/(2.0*(1.0 + nu))

    # Elastic modulus (plane-strain stiffness in Voigt notation)
    C_numpy = np.array([[lmbda + 2.0*mu, lmbda, 0.0],
                        [lmbda, lmbda + 2.0*mu, 0.0],
                        [0.0, 0.0, mu ]])
    C = df.as_matrix(C_numpy)

    from dolfin import dot, dx, grad, inner, ds

    def eps(u):
        """ Returns a vector of strains of size (3,1) in the Voigt notation
        layout {eps_xx, eps_yy, gamma_xy} where gamma_xy = 2*eps_xy"""
        return df.as_vector([u[i].dx(i) for i in range(2)] +
                            [u[i].dx(j) + u[j].dx(i) for (i,j) in [(0,1)]])

    a = inner(eps(v), C*eps(u))*dx
    A = a

    # Dirichlet BC
    class LeftBoundary(df.SubDomain):
        def inside(self, x, on_boundary):
            tol = 1E-14
            return on_boundary and np.abs(x[0]) < tol

    class RightBoundary(df.SubDomain):
        def inside(self, x, on_boundary):
            tol = 1E-14
            return on_boundary and np.abs(x[0] - self.L) < tol

    class BottomBoundary(df.SubDomain):
        def inside(self, x, on_boundary):
            tol = 1E-14
            return on_boundary and np.abs(x[1]) < tol

    left_boundary = LeftBoundary()
    right_boundary = RightBoundary()
    right_boundary.L = L
    bottom_boundary = BottomBoundary()

    zero = df.Constant(0.0)
    bc_left_Ux = df.DirichletBC(U_x, zero, left_boundary)
    bc_bottom_Uy = df.DirichletBC(U_y, zero, bottom_boundary)
    bcs = [bc_left_Ux, bc_bottom_Uy]

    # Neumann BCs
    t = df.Constant(10000.0)
    boundary_parts = df.EdgeFunction("uint", mesh, 1)
    right_boundary.mark(boundary_parts, 0)
    # Attach the markers to the surface measure so that ds(0) integrates over
    # the right boundary only (all other edges were initialized to 1 above).
    ds = df.Measure('ds')[boundary_parts]
    l = inner(t, v[0])*ds(0)

    u_h = df.Function(U)
    problem = df.LinearVariationalProblem(A, l, u_h, bcs=bcs)
    solver = df.LinearVariationalSolver(problem)
    solver.parameters["linear_solver"] = "direct"
    solver.solve()

    u_x, u_y = u_h.split()

    stress = df.project(C*eps(u_h), df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3))

    df.plot(u_x)
    df.plot(u_y)
    df.plot(stress[0])
    df.interactive()
Exemplo n.º 40
0
        def visualize(self,
                      U,
                      d,
                      title='',
                      legend=None,
                      filename=None,
                      block=True,
                      separate_colorbars=True):
            """Visualize the provided data.

            Parameters
            ----------
            U
                |VectorArray| of the data to visualize (length must be 1). Alternatively,
                a tuple of |VectorArrays| which will be visualized in separate windows.
                If `filename` is specified, only one |VectorArray| may be provided which,
                however, is allowed to contain multiple vectors that will be interpreted
                as a time series.
            d
                Filled in by :meth:`pymor.discretizations.DiscretizationBase.visualize` (ignored).
            title
                Title of the plot.
            legend
                Description of the data that is plotted. If `U` is a tuple of |VectorArrays|,
                `legend` has to be a tuple of the same length.
            filename
                If specified, write the data to that file. `filename` needs to have an extension
                supported by FEniCS (e.g. `.pvd`).
            separate_colorbars
                If `True`, use separate colorbars for each subplot.
            block
                If `True`, block execution until the plot window is closed
                (non-blocking execution is currently unsupported).
            """
            if not block:
                raise NotImplementedError
            if filename:
                assert not isinstance(U, tuple)
                assert U in self.space
                f = df.File(filename)
                function = df.Function(self.space.V)
                if legend:
                    function.rename(legend, legend)
                for u in U._list:
                    function.vector()[:] = u.impl
                    f << function
            else:
                assert U in self.space and len(U) == 1 \
                    or (isinstance(U, tuple) and all(u in self.space for u in U) and all(len(u) == 1 for u in U))
                if not isinstance(U, tuple):
                    U = (U, )
                if isinstance(legend, str):
                    legend = (legend, )
                assert legend is None or len(legend) == len(U)

                if not separate_colorbars:
                    vmin = np.inf
                    vmax = -np.inf
                    for u in U:
                        vec = u._list[0].impl
                        vmin = min(vmin, vec.min())
                        vmax = max(vmax, vec.max())

                for i, u in enumerate(U):
                    function = df.Function(self.space.V)
                    function.vector()[:] = u._list[0].impl
                    if legend:
                        tit = title + ' -- ' if title else ''
                        tit += legend[i]
                    else:
                        tit = title
                    if separate_colorbars:
                        df.plot(function, interactive=False, title=tit)
                    else:
                        df.plot(function,
                                interactive=False,
                                title=tit,
                                range_min=vmin,
                                range_max=vmax)
                df.interactive()
Exemplo n.º 41
0
def run_SFEM(opts, conf):
    # propagate config values
    _G = globals()
    for sec in conf.keys():
        if sec == "LOGGING":
            continue
        secconf = conf[sec]
        for key, val in secconf.iteritems():
            print "CONF_" + key + "= secconf['" + key + "'] =", secconf[key]
            _G["CONF_" + key] = secconf[key]

    # setup logging
    _G["LOG_LEVEL"] = eval("logging." + conf["LOGGING"]["level"])
    exec "LOG_LEVEL = logging." + conf["LOGGING"]["level"]
    setup_logging(LOG_LEVEL, logfile=CONF_experiment_name + "_SFEM-P{0}".format(CONF_FEM_degree))
    
    # determine path of this module
    path = os.path.dirname(__file__)
    
    # ============================================================
    # PART A: Simulation Options
    # ============================================================
    
    # flags for residual and tail refinement 
    REFINEMENT = {"RES":CONF_refine_residual, "TAIL":CONF_refine_tail, "OSC":CONF_refine_osc}
    
    # ============================================================
    # PART B: Problem Setup
    # ============================================================
    
    # define initial multiindices
    mis = [Multiindex(mis) for mis in MultiindexSet.createCompleteOrderSet(CONF_initial_Lambda, 1)]
    
    # setup domain and meshes
    mesh0, boundaries, dim = SampleDomain.setupDomain(CONF_domain, initial_mesh_N=CONF_initial_mesh_N)
    #meshes = SampleProblem.setupMesh(mesh0, num_refine=10, randref=(0.4, 0.3))
    mesh0 = SampleProblem.setupMesh(mesh0, num_refine=0)
    
    # define coefficient field
    # NOTE: for proper treatment of corner points, see elasticity_residual_estimator
    coeff_types = ("EF-square-cos", "EF-square-sin", "monomials", "constant")
    from itertools import count
    if CONF_mu is not None:
        muparam = (CONF_mu, (0 for _ in count()))
    else:
        muparam = None 
    coeff_field = SampleProblem.setupCF(coeff_types[CONF_coeff_type], decayexp=CONF_decay_exp, gamma=CONF_gamma,
                                    freqscale=CONF_freq_scale, freqskip=CONF_freq_skip, rvtype="uniform", scale=CONF_coeff_scale, secondparam=muparam)

    # setup boundary conditions and pde
    pde, Dirichlet_boundary, uD, Neumann_boundary, g, f = SampleProblem.setupPDE(CONF_boundary_type, CONF_domain, CONF_problem_type, boundaries, coeff_field)

    # define multioperator
    A = MultiOperator(coeff_field, pde.assemble_operator, pde.assemble_operator_inner_dofs)

    # setup initial solution multivector
    w = SampleProblem.setupMultiVector(mis, pde, mesh0, CONF_FEM_degree)
    logger.info("active indices of w after initialisation: %s", w.active_indices())

    sim_stats = None
    w_history = []
    PATH_SOLUTION = os.path.join(opts.basedir, CONF_experiment_name)
    try:
        os.makedirs(PATH_SOLUTION)
    except:
        pass    
    FILE_SOLUTION = 'SFEM2-SOLUTIONS-P{0}.pkl'.format(CONF_FEM_degree)
    FILE_STATS = 'SIM2-STATS-P{0}.pkl'.format(CONF_FEM_degree)
    
    if opts.continueSFEM:
        try:
            logger.info("CONTINUING EXPERIMENT: loading previous data of %s...", CONF_experiment_name)
            import pickle
            logger.info("loading solutions from %s" % os.path.join(PATH_SOLUTION, FILE_SOLUTION))
            # load solutions
            with open(os.path.join(PATH_SOLUTION, FILE_SOLUTION), 'rb') as fin:
                w_history = pickle.load(fin)
            # convert to MultiVectorSharedBasis
            for i, mv in enumerate(w_history):
                w_history[i] = MultiVectorSharedBasis(multivector=w_history[i])
            # load simulation data
            logger.info("loading statistics from %s" % os.path.join(PATH_SOLUTION, FILE_STATS))
            with open(os.path.join(PATH_SOLUTION, FILE_STATS), 'rb') as fin:
                sim_stats = pickle.load(fin)
            logger.info("active indices of w after initialisation: %s", w_history[-1].active_indices())
            w0 = w_history[-1]
        except:
            logger.warn("FAILED LOADING EXPERIMENT %s --- STARTING NEW DATA", CONF_experiment_name)
            w0 = w    
    else:
        w0 = w

    
    # ============================================================
    # PART C: Adaptive Algorithm
    # ============================================================
    
    # refinement loop
    # ===============
    w, sim_stats = AdaptiveSolver(A, coeff_field, pde, mis, w0, mesh0, CONF_FEM_degree,
                        # marking parameters
                        rho=CONF_rho, # tail factor
                        theta_x=CONF_theta_x, # residual marking bulk parameter
                        theta_y=CONF_theta_y, # tail bound marking bulk parameter
                        maxh=CONF_maxh, # maximal mesh width for coefficient maximum norm evaluation
                        add_maxm=CONF_add_maxm, # maximal search length for new multiindices
                        # error estimator evaluation
                        estimator_type=CONF_estimator_type,
                        quadrature_degree=CONF_quadrature_degree,
                        # pcg solver
                        pcg_eps=CONF_pcg_eps, pcg_maxiter=CONF_pcg_maxiter,
                        # adaptive algorithm threshold
                        error_eps=CONF_error_eps,
                        # refinements
                        max_refinements=CONF_iterations, max_dof=CONF_max_dof,
                        do_refinement=REFINEMENT, do_uniform_refinement=CONF_uniform_refinement, refine_osc_factor=CONF_refine_osc_factor,
                        w_history=w_history,
                        sim_stats=sim_stats)
    
    from operator import itemgetter
    active_mi = [(mu, w[mu]._fefunc.function_space().mesh().num_cells()) for mu in w.active_indices()]
    active_mi = sorted(active_mi, key=itemgetter(1), reverse=True)
    logger.info("==== FINAL MESHES ====")
    for mu in active_mi:
        logger.info("--- %s has %s cells", mu[0], mu[1])
    print "ACTIVE MI:", active_mi
    print
    
    # memory usage info
    import resource
    logger.info("\n======================================\nMEMORY USED: " + str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) + "\n======================================\n")
    
    
    # ============================================================
    # PART D: Export of Solutions and Simulation Data
    # ============================================================
        
    # flag for final solution export
    if opts.saveData:
        import pickle
        try:
            os.makedirs(PATH_SOLUTION)
        except:
            pass
        logger.info("saving solutions into %s" % os.path.join(PATH_SOLUTION, FILE_SOLUTION))
        # save solutions
        with open(os.path.join(PATH_SOLUTION, FILE_SOLUTION), 'wb') as fout:
            pickle.dump(w_history, fout)
        # save simulation data
        sim_stats[0]["OPTS"] = opts
        sim_stats[0]["CONF"] = conf
        logger.info("saving statistics into %s" % os.path.join(PATH_SOLUTION, FILE_STATS))
        with open(os.path.join(PATH_SOLUTION, FILE_STATS), 'wb') as fout:
            pickle.dump(sim_stats, fout)


    # ============================================================
    # PART E: Plotting
    # ============================================================
    
    # plot residuals
    if opts.plotEstimator and len(sim_stats) > 1:
        try:
            from matplotlib.pyplot import figure, show, legend
            X = [s["DOFS"] for s in sim_stats]
            print "DOFS", X
            err_est = [s["ERROR-EST"] for s in sim_stats]
            err_res = [s["ERROR-RES"] for s in sim_stats]
            err_tail = [s["ERROR-TAIL"] for s in sim_stats]
            res_L2 = [s["RESIDUAL-L2"] for s in sim_stats]
            res_H1A = [s["RESIDUAL-H1A"] for s in sim_stats]
            mi = [s["MI"] for s in sim_stats]
            num_mi = [len(m) for m in mi]
            
            # --------
            # figure 1
            # --------
            fig1 = figure()
            fig1.suptitle("residual estimator")
            ax = fig1.add_subplot(111)
            if REFINEMENT["TAIL"]:
                ax.loglog(X, num_mi, '--y+', label='active mi')
            ax.loglog(X, err_est, '-g<', label='error estimator')
            ax.loglog(X, err_res, '-.cx', label='residual')
            ax.loglog(X, err_tail, '-.m>', label='tail')
            legend(loc='upper right')
            
            print "RESIDUAL L2", res_L2
            print "RESIDUAL H1A", res_H1A
            print "EST", err_est
            print "RES", err_res
            print "TAIL", err_tail
            
            show()  # this invalidates the figure instances...
        except:
            import traceback
            print traceback.format_exc()
            logger.info("skipped plotting since matplotlib is not available...")

    # plot final meshes
    if opts.plotMesh:
        w = w_history[-1]
        viz_mesh = plot(w.basis.basis.mesh, title="shared mesh", interactive=False)
        interactive()
    
    # plot sample solution
    if opts.plotSolution:
        w = w_history[-1]
        # get random field sample and evaluate solution (direct and parametric)
        RV_samples = coeff_field.sample_rvs()
        ref_maxm = w_history[-1].max_order
        sub_spaces = w[Multiindex()].basis.num_sub_spaces
        degree = w[Multiindex()].basis.degree
        maxh = min(w[Multiindex()].basis.minh / 4, CONF_maxh)
        maxh = w[Multiindex()].basis.minh
        projection_basis = get_projection_basis(mesh0, maxh=maxh, degree=degree, sub_spaces=sub_spaces)
        sample_sol_param = compute_parametric_sample_solution(RV_samples, coeff_field, w, projection_basis)
        sample_sol_direct = compute_direct_sample_solution(pde, RV_samples, coeff_field, A, ref_maxm, projection_basis)
        sol_variance = compute_solution_variance(coeff_field, w, projection_basis)
    
        # plot
        print sub_spaces
        if sub_spaces == 0:
            viz_p = plot(sample_sol_param._fefunc, title="parametric solution")
            viz_d = plot(sample_sol_direct._fefunc, title="direct solution")
            if ref_maxm > 0:
                viz_v = plot(sol_variance._fefunc, title="solution variance")
        else:
            mesh_param = sample_sol_param._fefunc.function_space().mesh()
            mesh_direct = sample_sol_direct._fefunc.function_space().mesh()
            wireframe = True
            viz_p = plot(sample_sol_param._fefunc, title="parametric solution", mode="displacement", mesh=mesh_param, wireframe=wireframe)#, rescale=False)
            viz_d = plot(sample_sol_direct._fefunc, title="direct solution", mode="displacement", mesh=mesh_direct, wireframe=wireframe)#, rescale=False)
        interactive()
Exemplo n.º 42
0
def teXXXst_fenics_vector():
#    quad_degree = 13
#    dolfin.parameters["form_compiler"]["quadrature_degree"] = quad_degree
    pi = 3.14159265358979323
    k1, k2 = 2, 3
    EV = pi * pi * (k1 * k1 + k2 * k2)
    N = 11
    degree = 1
    mesh = UnitSquare(N, N)
    fs = FunctionSpace(mesh, "CG", degree)
    ex = Expression("A*sin(k1*pi*x[0])*sin(k2*pi*x[1])", k1=k1, k2=k2, A=1.0)

    x = FEniCSVector(interpolate(ex, fs))
#    print "x.coeff", x.coeffs.array()

    ex.A = EV
    b_ex = assemble_rhs(ex, fs)
    bexg = interpolate(ex, fs)

#    print b_ex.array()
#    print b_ex.array() / (2 * pi * pi * x.coeffs.array())

    Afe = assemble_lhs(Expression('1'), fs)

    # apply discrete operator on (interpolated) x
    A = FEniCSOperator(Afe, x.basis)
    b = A * x

    # evaluate solution for eigenfunction rhs
    if False:
        b_num = Function(fs)
        solve(A, b_num.vector(), b_ex)
        bnv = A * b_num.vector()
        b3 = Function(fs, bnv / EV)

    np.set_printoptions(threshold='nan', suppress=True)
    print b.coeffs.array()
    print np.abs((b_ex.array() - b.coeffs.array()) / np.max(b_ex.array()))
    print np.max(np.abs((b_ex.array() - b.coeffs.array()) / np.max(b_ex.array())))
    #print b_ex.array() / (M * interpolate(ex1, fs).vector()).array()

#    #assert_array_almost_equal(b.coeffs, b_ex.coeffs)


    b2 = Function(fs, b_ex.copy())
    bg = Function(fs, b_ex.copy())
    b2g = Function(fs, b_ex.copy())
    G = assemble_gramian(x.basis)
    dolfin.solve(G, bg.vector(), b.coeffs)
    dolfin.solve(G, b2g.vector(), b2.vector())


#    # compute eigenpairs numerically
#    eigensolver = evaluate_evp(FEniCSBasis(fs))
#    # Extract largest (first) eigenpair
#    r, c, rx, cx = eigensolver.get_eigenpair(0)    
#    print "Largest eigenvalue: ", r    
#    # Initialize function and assign eigenvector
#    ef0 = Function(fs)
#    ef0.vector()[:] = rx

    if False:
        # export
        out_b = dolfin.File(__name__ + "_b.pvd", "compressed")
        out_b << b._fefunc
        out_b_ex = dolfin.File(__name__ + "_b_ex.pvd", "compressed")
        out_b_ex << b2
        out_b_num = dolfin.File(__name__ + "_b_num.pvd", "compressed")
        out_b_num << b_num


    #dolfin.plot(x._fefunc, title="interpolant x", rescale=False, axes=True, legend=True)
    dolfin.plot(bg, title="b", rescale=False, axes=True, legend=True)
    dolfin.plot(b2g, title="b_ex (ass/G)", rescale=False, axes=True, legend=True)
    dolfin.plot(bexg, title="b_ex (dir)", rescale=False, axes=True, legend=True)
    #dolfin.plot(b_num, title="b_num", rescale=False, axes=True, legend=True)
#    dolfin.plot(b3, title="M*b_num", rescale=False, axes=True, legend=True)
    #dolfin.plot(ef0, title="ef0", rescale=False, axes=True, legend=True)
    print dolfin.errornorm(u=b._fefunc, uh=b2) #, norm_type, degree, mesh)
    dolfin.interactive()
Exemplo n.º 43
0
    def _pressure_poisson(self,
                          p1, p0,
                          mu, ui,
                          divu,
                          p_bcs=None,
                          p_n=None,
                          rotational_form=False,
                          tol=1.0e-10,
                          verbose=True
                          ):
        '''Solve the pressure Poisson equation

            - \Delta phi = -div(u),
            boundary conditions,

        for

            \nabla p = u.
        '''
        P = p1.function_space()
        p = TrialFunction(P)
        q = TestFunction(P)

        a2 = dot(grad(p), grad(q)) * dx
        L2 = -divu * q * dx
        if p0:
            L2 += dot(grad(p0), grad(q)) * dx
        if p_n:
            n = FacetNormal(P.mesh())
            L2 += dot(n, p_n) * q * ds

        if rotational_form:
            L2 -= mu * dot(grad(div(ui)), grad(q)) * dx

        if p_bcs:
            solve(a2 == L2, p1,
                  bcs=p_bcs,
                  solver_parameters={
                      'linear_solver': 'iterative',
                      'symmetric': True,
                      'preconditioner': 'hypre_amg',
                      'krylov_solver': {'relative_tolerance': tol,
                                        'absolute_tolerance': 0.0,
                                        'maximum_iterations': 100,
                                        'monitor_convergence': verbose}
                  })
        else:
            # If we're dealing with a pure Neumann problem here (which is the
            # default case), this doesn't hurt CG if the system is consistent,
            # cf.
            #
            #    Iterative Krylov methods for large linear systems,
            #    Henk A. van der Vorst.
            #
            # And indeed, it is consistent: Note that
            #
            #    <1, rhs> = \sum_i 1 * \int div(u) v_i
            #             = 1 * \int div(u) \sum_i v_i
            #             = \int div(u).
            #
            # With the divergence theorem, we have
            #
            #    \int div(u) = \int_\Gamma n.u.
            #
            # The latter term is 0 iff inflow and outflow are exactly the same
            # at any given point in time. This corresponds with the
            # incompressibility of the liquid.
            #
            # In turn, this hints that penetrable boundaries require Dirichlet
            # conditions on the pressure.
            #
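            # As a hedged sanity check (the exception handler below does the
            # same thing when the solve fails), the compatibility condition can
            # be inspected directly:
            #
            #     info('int div(u) = %e' % assemble(divu * dx))
            #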
            A = assemble(a2)
            b = assemble(L2)
            #
            # In principle, the ILU preconditioner isn't advised here since it
            # might destroy the semidefiniteness needed for CG.
            #
            # The system is consistent, but the matrix has an eigenvalue 0.
            # This does not harm the convergence of CG, but when
            # preconditioning one has to take care that the preconditioner
            # preserves the kernel.  ILU might destroy this (and the
            # semidefiniteness). With AMG, the coarse grid solves cannot be LU
            # then, so try Jacobi here.
            # <http://lists.mcs.anl.gov/pipermail/petsc-users/2012-February/012139.html>
            #
            prec = PETScPreconditioner('hypre_amg')
            PETScOptions.set('pc_hypre_boomeramg_relax_type_coarse', 'jacobi')
            solver = PETScKrylovSolver('cg', prec)
            solver.parameters['absolute_tolerance'] = 0.0
            solver.parameters['relative_tolerance'] = tol
            solver.parameters['maximum_iterations'] = 100
            solver.parameters['monitor_convergence'] = verbose
            # Create solver and solve system
            A_petsc = as_backend_type(A)
            b_petsc = as_backend_type(b)
            p1_petsc = as_backend_type(p1.vector())
            solver.set_operator(A_petsc)
            try:
                solver.solve(p1_petsc, b_petsc)
            except RuntimeError as error:
                info('')
                # Check if the system is indeed consistent.
                #
                # If the right hand side is flawed (e.g., by round-off errors),
                # then it may have a component b1 in the direction of the null
                # space, orthogonal to the image of the operator:
                #
                #     b = b0 + b1.
                #
                # When starting with initial guess x0=0, the minimal achievable
                # relative tolerance is then
                #
                #    min_rel_tol = ||b1|| / ||b||.
                #
                # If ||b|| is very small, which is the case when ui is almost
                # divergence-free, then min_rel_tol may be larger than the
                # prescribed relative tolerance tol.
                #
                # Use this as a consistency check, i.e., bail out if
                #
                #     tol < min_rel_tol = ||b1|| / ||b||.
                #
                # For computing ||b1||, we use the fact that the null space is
                # one-dimensional, i.e.,  b1 = alpha e,  and
                #
                #     e.b = e.(b0 + b1) = e.b1 = alpha ||e||^2,
                #
                # so  alpha = e.b/||e||^2  and
                #
                #     ||b1|| = |alpha| ||e|| = e.b / ||e||
                #
                e = Function(P)
                e.interpolate(Constant(1.0))
                evec = e.vector()
                evec /= norm(evec)
                alpha = b.inner(evec)
                normB = norm(b)
                info('Linear system convergence failure.')
                info(error.message)
                message = ('Linear system not consistent! '
                           '<b,e> = %g, ||b|| = %g, <b,e>/||b|| = %e, tol = %e.') \
                           % (alpha, normB, alpha/normB, tol)
                info(message)
                if tol < abs(alpha) / normB:
                    info('\int div(u)  =  %e' % assemble(divu * dx))
                    #n = FacetNormal(Q.mesh())
                    #info('\int_Gamma n.u = %e' % assemble(dot(n, u)*ds))
                    #info('\int_Gamma u[0] = %e' % assemble(u[0]*ds))
                    #info('\int_Gamma u[1] = %e' % assemble(u[1]*ds))
                    ## Now plot the faulty u on a finer mesh (to resolve the
                    ## quadratic trial functions).
                    #fine_mesh = Q.mesh()
                    #for k in range(1):
                    #    fine_mesh = refine(fine_mesh)
                    #V1 = FunctionSpace(fine_mesh, 'CG', 1)
                    #W1 = V1*V1
                    #uplot = project(u, W1)
                    ##uplot = Function(W1)
                    ##uplot.interpolate(u)
                    #plot(uplot, title='u_tentative')
                    #plot(uplot[0], title='u_tentative[0]')
                    #plot(uplot[1], title='u_tentative[1]')
                    plot(divu, title='div(u_tentative)')
                    interactive()
                    exit()
                    raise RuntimeError(message)
                else:
                    exit()
                    raise RuntimeError('Linear system failed to converge.')
            except:
                exit()
        return
Exemplo n.º 44
0
        # check orthogonality
        dots = np.array([zi.dot(x) for zi in self.nullspace])
        print self.n_iters, dots,

        # Orthogonalize
        for dot, zi in zip(dots, self.nullspace):
            x -= dot*zi

        # check orthogonality
        dots = np.array([zi.dot(x) for zi in self.nullspace])
        print dots
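# The indented lines above are evidently the body of the CG callback used
# below; the class definition itself is not shown here. A hedged sketch of how
# such a nullspace-projecting callback could look (the constructor is an
# assumption; only the attribute names appear above):
#
#     class RangeProjector(object):
#         def __init__(self, nullspace):
#             # orthonormal nullspace vectors of the singular operator
#             self.nullspace = nullspace
#             self.n_iters = 0
#
#         def __call__(self, x):
#             self.n_iters += 1
#             # project the iterate onto the orthogonal complement of the
#             # nullspace
#             for zi in self.nullspace:
#                 x -= zi.dot(x) * zi
#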

x, info = la.cg(AA, bb, callback=RangeProjector(ZZ))

uh = Function(V)
copy_to_vector(uh.vector(), x)
plot(uh, title='callback')

# How do PAA, AAP, A compare
D0 = PAA - AAP
print '|PAA-AAP|', npla.norm(D0)

D1 = PAA - AA
print '|PAA-AA|', npla.norm(D1)

D2 = AA - AAP
print '|AA-AAP|', npla.norm(D2)

interactive()
Exemplo n.º 45
0
        np.savetxt("hmisfit/eigevalues.dat", d)
    
    
    if rank == 0:
        print( sep, "Generate samples from Prior and Posterior", sep)
    fid_prior = dl.File("samples/sample_prior.pvd")
    fid_post  = dl.File("samples/sample_post.pvd")
    nsamples = 50
    noise = dl.Vector()
    posterior.init_vector(noise,"noise")
    s_prior = dl.Function(Vh, name="sample_prior")
    s_post = dl.Function(Vh, name="sample_post")
    for i in range(nsamples):
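        # Draw a white-noise vector; posterior.sample() turns the same noise
        # into one prior sample and one posterior sample.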
        parRandom.normal(1., noise)
        posterior.sample(noise, s_prior.vector(), s_post.vector())
        fid_prior << s_prior
        fid_post << s_post
    
    if rank == 0:
        print( sep, "Visualize results", sep )
        plt.figure()
        plt.plot(range(0,k), d, 'b*', range(0,k), np.ones(k), '-r')
        plt.yscale('log')
        plt.show()
    
    if nproc == 1:
        dl.plot(vector2Function(m, Vh, name = "Initial Condition"))
        dl.interactive()

    
Exemplo n.º 46
0
import dolfin
import dolfin_navier_scipy.problem_setups as dnsps

N = 10
mesh = dolfin.UnitSquareMesh(N, N)

edgepoint = dolfin.Point(1., 1.)
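# The bounding box tree query below returns the index of the first mesh cell
# that collides with (contains) the given point.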
aaa = mesh.bounding_box_tree().compute_first_entity_collision(edgepoint)

femp, stokesmatsc, rhsd_vfrc, rhsd_stbc \
    = dnsps.get_sysmats(problem='cylinderwake', N=4, Re=2, scheme='CR')

mesh = femp['mesh']
edgepoint = dolfin.Point(2.5, .2)
aaac = mesh.bounding_box_tree().compute_first_entity_collision(edgepoint)
print 'Cell id at boundary: ', aaac
dolfin.plot(mesh)
dolfin.interactive(True)
Exemplo n.º 47
0
    est = np.array(est, dtype='float')

    gs = gridspec.GridSpec(3, 2, width_ratios=[7, 1])

    plt.subplot(gs[0])
    plt.plot(dofs, est[:, 0], 'o-', label='est')
    plt.loglog()
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)

    plt.subplot(gs[2])
    plt.plot(dofs, est[:, 1], 'o-', label='est')
    plt.loglog()
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)

    plt.subplot(gs[4])
    plt.plot(dofs, est[:, 2], 'o-', label='est')
    plt.loglog()
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)

    plt.tight_layout()
    mkdir_p(prefix)
    plt.savefig(prefix+'/convergence.pdf')
    if os.environ.get("DOLFIN_NOPLOT", "0") == "0":
        dolfin.info('Blocking matplotlib figure on rank 0. Close to continue...')
        plt.show()

if os.environ.get("DOLFIN_NOPLOT", "0") == "0":
    dolfin.interactive()

dolfin.list_timings(dolfin.TimingClear_keep, [dolfin.TimingType_wall])
Exemplo n.º 48
0
def method(Lx=6.,
           Ly=4.,
           Lx_inner=4.,
           num_obstacles=32,
           rad=0.2,
           R=0.3,
           dx=0.05,
           seed=121,
           do_plot=True,
           **kwargs):
    N = int(np.ceil(Lx / dx))

    x_min, x_max = -Lx / 2, Lx / 2
    y_min, y_max = -Ly / 2, Ly / 2

    y = np.linspace(y_min, y_max, N).flatten()

    pts = np.zeros((num_obstacles, 2))
    diam2 = 4 * R**2

    np.random.seed(seed)

    for i in range(num_obstacles):
        while True:
            pt = (np.random.rand(2) - 0.5) * np.array([Lx_inner, Ly])
            if i == 0:
                break
            dist = pts[:i, :] - np.outer(np.ones(i), pt)
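            # Wrap the y-distances (minimum-image convention) so that a
            # candidate point near the top/bottom also respects the periodic
            # copies of previously placed obstacles.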
            for j in range(len(dist)):
                if abs(dist[j, 1]) > Ly / 2:
                    dist[j, 1] = abs(dist[j, 1]) - Ly
            dist2 = dist[:, 0]**2 + dist[:, 1]**2
            if all(dist2 > diam2):
                break
        pts[i, :] = pt

    pts = pts[pts[:, 0].argsort(), :]

    obstacles = [tuple(row) for row in pts]

    line_segments_top = []
    line_segments_btm = []

    x_prev = x_min

    curve_segments_top = []
    curve_segments_btm = []

    interior_obstacles = []
    exterior_obstacles = []

    for x_c in obstacles:
        # Close to the top of the domain
        if x_c[1] > y_max - rad:
            # identify intersection
            theta = np.arcsin((y_max - x_c[1]) / rad)
            rx = rad * np.cos(theta)
            x_left = x_c[0] - rx
            x_right = x_c[0] + rx

            line_segments_top.append(
                line_points((x_prev, y_max), (x_left, y_max), dx))
            line_segments_btm.append(
                line_points((x_prev, y_min), (x_left, y_min), dx))
            curve_btm = rad_points((x_c[0], x_c[1] - Ly),
                                   rad,
                                   dx,
                                   theta_start=np.pi - theta,
                                   theta_stop=theta)[1:-1]
            curve_top = rad_points(x_c,
                                   rad,
                                   dx,
                                   theta_start=np.pi - theta,
                                   theta_stop=2 * np.pi + theta)[1:-1]
            curve_segments_btm.append(curve_btm)
            curve_segments_top.append(curve_top)

            x_prev = x_right

            exterior_obstacles.append(x_c)
            exterior_obstacles.append((x_c[0], x_c[1] - Ly))
        # Close to the bottom of the domain
        elif x_c[1] < y_min + rad:
            # identify intersection
            theta = np.arcsin((-y_min + x_c[1]) / rad)
            rx = rad * np.cos(theta)
            x_left = x_c[0] - rx
            x_right = x_c[0] + rx

            line_segments_top.append(
                line_points((x_prev, y_max), (x_left, y_max), dx))
            line_segments_btm.append(
                line_points((x_prev, y_min), (x_left, y_min), dx))
            curve_btm = rad_points(x_c,
                                   rad,
                                   dx,
                                   theta_start=np.pi + theta,
                                   theta_stop=-theta)[1:-1]
            curve_top = rad_points((x_c[0], x_c[1] + Ly),
                                   rad,
                                   dx,
                                   theta_start=np.pi + theta,
                                   theta_stop=2 * np.pi - theta)[1:-1]
            curve_segments_btm.append(curve_btm)
            curve_segments_top.append(curve_top)

            x_prev = x_right

            exterior_obstacles.append(x_c)
            exterior_obstacles.append((x_c[0], x_c[1] + Ly))
        else:
            interior_obstacles.append(x_c)

    line_segments_top.append(line_points((x_prev, y_max), (x_max, y_max), dx))
    line_segments_btm.append(line_points((x_prev, y_min), (x_max, y_min), dx))

    assert (len(line_segments_top) == len(curve_segments_top) + 1)
    assert (len(line_segments_btm) == len(curve_segments_btm) + 1)

    pts_top = line_segments_top[0]
    for i in range(len(curve_segments_top)):
        pts_top.extend(curve_segments_top[i])
        pts_top.extend(line_segments_top[i + 1])
    pts_top = pts_top[::-1]

    pts_btm = line_segments_btm[0]
    for i in range(len(curve_segments_btm)):
        pts_btm.extend(curve_segments_btm[i])
        pts_btm.extend(line_segments_btm[i + 1])

    y_side = y[1:-1]
    pts_right = zip(x_max * np.ones(N - 2), y_side)
    pts_left = zip(x_min * np.ones(N - 2), y_side[::-1])

    pts = pts_btm + pts_right + pts_top + pts_left
    edges = round_trip_connect(0, len(pts) - 1)

    for interior_obstacle in interior_obstacles:
        pts_obstacle = rad_points(interior_obstacle, rad, dx)[1:]
        edges_obstacle = round_trip_connect(len(pts),
                                            len(pts) + len(pts_obstacle) - 1)

        pts.extend(pts_obstacle)
        edges.extend(edges_obstacle)

    if do_plot:
        plot_edges(pts, edges)

    mi = tri.MeshInfo()
    mi.set_points(pts)
    mi.set_facets(edges)
    mi.set_holes(interior_obstacles)

    max_area = 0.5 * dx**2

    mesh = tri.build(mi,
                     max_volume=max_area,
                     min_angle=25,
                     allow_boundary_steiner=False)

    coords = np.array(mesh.points)
    faces = np.array(mesh.elements)

    # pp = [tuple(point) for point in mesh.points]
    # print "Number of points:", len(pp)
    # print "Number unique points:", len(set(pp))

    if do_plot:
        plot_faces(coords, faces)

    msh = numpy_to_dolfin(coords, faces)

    mesh_path = os.path.join(
        MESHES_DIR, "periodic_porous_Lx{}_Ly{}_rad{}_N{}_dx{}".format(
            Lx, Ly, rad, num_obstacles, dx))
    store_mesh_HDF5(msh, mesh_path)

    obstacles_path = os.path.join(
        MESHES_DIR, "periodic_porous_Lx{}_Ly{}_rad{}_N{}_dx{}.dat".format(
            Lx, Ly, rad, num_obstacles, dx))

    all_obstacles = np.vstack(
        (np.array(exterior_obstacles), np.array(interior_obstacles)))
    np.savetxt(
        obstacles_path,
        np.hstack((all_obstacles, np.ones((len(all_obstacles), 1)) * rad)))

    if do_plot:
        df.plot(msh)
        df.interactive()
Exemplo n.º 49
0
# <markdowncell>

# Write the output to a file. 

# <codecell>

file_H = dfn.File("../pvd/ShllwIce-VrblPrcp.pvd")
Hout = dfn.Function(V)

# <codecell>

t = 0
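# Time-stepping loop: write the current output function to file, solve for the
# next step, then copy the new solution into the 'old' function for the
# following iteration.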
while t <= T:
    file_H << Hout # write to file.
    solver.solve()
    H_o.vector()[:] = H_n.vector()
    print t
    t += dt

# <codecell>

dfn.plot(H_n)
dfn.interactive()

# <codecell>


# <codecell>


Exemplo n.º 50
0
    def __init__(self):

        GMSH_EPS = 1.0e-15

        # https://fenicsproject.org/qa/12891/initialize-mesh-from-vertices-connectivities-at-once
        points, cells, point_data, cell_data, _ = meshes.crucible_with_coils.generate(
        )

        # Convert the cell data to 'uint' so we can pick a size_t MeshFunction
        # below as usual.
        for k0 in cell_data:
            for k1 in cell_data[k0]:
                cell_data[k0][k1] = numpy.array(cell_data[k0][k1],
                                                dtype=numpy.dtype("uint"))

        with TemporaryDirectory() as temp_dir:
            tmp_filename = os.path.join(temp_dir, "test.xml")
            meshio.write_points_cells(
                tmp_filename,
                points,
                cells,
                cell_data=cell_data,
                file_format="dolfin-xml",
            )
            self.mesh = Mesh(tmp_filename)
            self.subdomains = MeshFunction(
                "size_t", self.mesh,
                os.path.join(temp_dir, "test_gmsh:physical.xml"))

        self.subdomain_materials = {
            1: my_materials.porcelain,
            2: materials.argon,
            3: materials.gallium_arsenide_solid,
            4: materials.gallium_arsenide_liquid,
            27: materials.air,
        }

        # coils
        for k in range(5, 27):
            self.subdomain_materials[k] = my_materials.ek90

        # Define the subdomains which together form a single coil.
        self.coil_domains = [
            [5, 6, 7, 8, 9],
            [10, 11, 12, 13, 14],
            [15, 16, 17, 18, 19],
            [20, 21, 22, 23],
            [24, 25, 26],
        ]

        self.wpi = 4

        self.submesh_workpiece = SubMesh(self.mesh, self.subdomains, self.wpi)

        # http://fenicsproject.org/qa/2026/submesh-workaround-for-parallel-computation
        # submesh_parallel_bug_fixed = False
        # if submesh_parallel_bug_fixed:
        #     submesh_workpiece = SubMesh(self.mesh, self.subdomains, self.wpi)
        # else:
        #     # To get the mesh in parallel, we need to read it in from a file.
        #     # Writing out can only happen in serial mode, though. :/
        #     base = os.path.join(current_path,
        #                         '../../meshes/2d/crucible-with-coils-submesh'
        #                         )
        #     filename = base + '.xml'
        #     if not os.path.isfile(filename):
        #         warnings.warn(
        #             'Submesh file \'{}\' does not exist. Creating... '.format(
        #             filename
        #             ))
        #         if MPI.size(mpi_comm_world()) > 1:
        #             raise RuntimeError(
        #                 'Can only write submesh in serial mode.'
        #                 )
        #         submesh_workpiece = \
        #             SubMesh(self.mesh, self.subdomains, self.wpi)
        #         output_stream = File(filename)
        #         output_stream << submesh_workpiece
        #     # Read the mesh
        #     submesh_workpiece = Mesh(filename)

        coords = self.submesh_workpiece.coordinates()
        ymin = min(coords[:, 1])
        ymax = max(coords[:, 1])

        # Find the top right point.
        k = numpy.argmax(numpy.sum(coords, 1))
        topright = coords[k, :]

        # Initialize mesh function for boundary domains
        class Left(SubDomain):
            def inside(self, x, on_boundary):
                # Explicitly exclude the lowest and the highest point of the
                # symmetry axis.
                # It is necessary for the consistency of the pressure-Poisson
                # system in the Navier-Stokes solver that the velocity is
                # exactly 0 at the boundary r>0. Hence, at the corner points
                # (r=0, melt-crucible, melt-crystal) we must enforce u=0
                # already and cannot have a component in z-direction.
                return (on_boundary and x[0] < GMSH_EPS
                        and x[1] < ymax - GMSH_EPS and x[1] > ymin + GMSH_EPS)

        class Crucible(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and (
                    (x[0] > GMSH_EPS and x[1] < ymax - GMSH_EPS) or
                    (x[0] > topright[0] - GMSH_EPS
                     and x[1] > topright[1] - GMSH_EPS) or
                    (x[0] < GMSH_EPS and x[1] < ymin + GMSH_EPS))

        # At the top right part (boundary melt--gas), slip is allowed, so only
        # n.u=0 is enforced. Very weirdly, the PPE is consistent if and only if
        # the end points of UpperRight are in UpperRight. This contrasts with
        # Left(), where the end points must NOT belong to Left(). Judging from
        # the experiments, these settings do the right thing.
        # TODO try to better understand the PPE system/dolfin's boundary
        # settings
        class Upper(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and x[1] > ymax - GMSH_EPS

        class UpperRight(SubDomain):
            def inside(self, x, on_boundary):
                return (on_boundary and x[1] > ymax - GMSH_EPS
                        and x[0] > 0.038 - GMSH_EPS)

        # The crystal boundary is taken to reach up to 0.038 where the
        # Dirichlet boundary data is about the melting point of the crystal,
        # 1511K. This setting gives pretty acceptable results when there is no
        # convection except the one induced by buoyancy. If there is any more
        # stirring going on, though, the end point of the crystal with its
        # fixed temperature of 1511K might be the hottest point globally. This
        # looks rather unphysical.
        # TODO check out alternatives
        class UpperLeft(SubDomain):
            def inside(self, x, on_boundary):
                return (on_boundary and x[1] > ymax - GMSH_EPS
                        and x[0] < 0.038 + GMSH_EPS)

        left = Left()
        crucible = Crucible()
        upper_left = UpperLeft()
        upper_right = UpperRight()

        self.wp_boundaries = MeshFunction(
            "size_t",
            self.submesh_workpiece,
            self.submesh_workpiece.topology().dim() - 1,
        )
        self.wp_boundaries.set_all(0)
        left.mark(self.wp_boundaries, 1)
        crucible.mark(self.wp_boundaries, 2)
        upper_right.mark(self.wp_boundaries, 3)
        upper_left.mark(self.wp_boundaries, 4)

        if DEBUG:
            from dolfin import plot, interactive

            plot(self.wp_boundaries, title="Boundaries")
            interactive()

        submesh_boundary_indices = {
            "left": 1,
            "crucible": 2,
            "upper right": 3,
            "upper left": 4,
        }

        # Boundary conditions for the velocity.
        #
        # [1] Incompressible flow and the finite element method; volume two;
        #     Isothermal Laminar Flow;
        #     P.M. Gresho, R.L. Sani;
        #
        # For the choice of function space, [1] says:
        #     "In 2D, the triangular elements P_2^+P_1 and P_2^+P_{-1} are very
        #      good [...]. [...] If you wish to avoid bubble functions on
        #      triangular elements, P_2P_1 is not bad, and P_2(P_1+P_0) is even
        #      better [...]."
        #
        # It turns out that adding the bubble space significantly hampers the
        # convergence of the Stokes solver and also considerably increases the
        # time it takes to construct the Jacobian matrix of the Navier--Stokes
        # problem if no optimization is applied.
        V_element = FiniteElement("CG", self.submesh_workpiece.ufl_cell(), 2)
        with_bubbles = False
        if with_bubbles:
            V_element += FiniteElement("B", self.submesh_workpiece.ufl_cell(),
                                       2)
        self.W_element = MixedElement(3 * [V_element])
        self.W = FunctionSpace(self.submesh_workpiece, self.W_element)

        rot0 = Expression(("0.0", "0.0", "-2*pi*x[0] * 5.0/60.0"), degree=1)
        # rot0 = (0.0, 0.0, 0.0)
        rot1 = Expression(("0.0", "0.0", "2*pi*x[0] * 5.0/60.0"), degree=1)
        self.u_bcs = [
            DirichletBC(self.W, rot0, crucible),
            DirichletBC(self.W.sub(0), 0.0, left),
            DirichletBC(self.W.sub(2), 0.0, left),
            # Make sure that u[2] is 0 at r=0.
            DirichletBC(self.W, rot1, upper_left),
            DirichletBC(self.W.sub(1), 0.0, upper_right),
        ]
        self.p_bcs = []

        self.P_element = FiniteElement("CG", self.submesh_workpiece.ufl_cell(),
                                       1)
        self.P = FunctionSpace(self.submesh_workpiece, self.P_element)

        self.Q_element = FiniteElement("CG", self.submesh_workpiece.ufl_cell(),
                                       2)
        self.Q = FunctionSpace(self.submesh_workpiece, self.Q_element)

        # Dirichlet.
        # This is a bit of a tough call since the boundary conditions need to
        # be read from a Tecplot file here.
        filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                "data/crucible-boundary.dat")
        data = tecplot_reader.read(filename)
        RZ = numpy.c_[data["ZONE T"]["node data"]["r"],
                      data["ZONE T"]["node data"]["z"]]
        T_vals = data["ZONE T"]["node data"]["temp. [K]"]

        class TecplotDirichletBC(Expression):
            def eval(self, value, x):
                # Find on which edge x sits, and raise exception if it doesn't.
                edge_found = False
                for edge in data["ZONE T"]["element data"]:
                    # Given a point X and an edge X0--X1,
                    #
                    #     (1 - theta) X0 + theta X1,
                    #
                    # the minimum distance is assumed for
                    #
                    #    argmin_theta ||(1-theta) X0  + theta X1 - X||^2
                    #    = <X1 - X0, X - X0> / ||X1 - X0||^2.
                    #
                    # If the distance is 0 and 0<=theta<=1, we found the edge.
                    #
                    # Note that edges are 1-based in Tecplot.
                    X0 = RZ[edge[0] - 1]
                    X1 = RZ[edge[1] - 1]
                    theta = numpy.dot(X1 - X0, x - X0) / numpy.dot(
                        X1 - X0, X1 - X0)
                    diff = (1.0 - theta) * X0 + theta * X1 - x
                    if (numpy.dot(diff, diff) < 1.0e-10 and 0.0 <= theta
                            and theta <= 1.0):
                        # Linear interpolation of the temperature value.
                        value[0] = (1.0 - theta) * T_vals[
                            edge[0] - 1] + theta * T_vals[edge[1] - 1]
                        edge_found = True
                        break
                # This class is supposed to be used for Dirichlet boundary
                # conditions. For some reason, FEniCS also evaluates
                # DirichletBC objects at coordinates which do not sit on the
                # boundary, see
                # <http://fenicsproject.org/qa/1033/dirichletbc-expressions-evaluated-away-from-the-boundary>.
                # The assigned values have no meaning though, so not assigning
                # values[0] here is okay.
                #
                # from matplotlib import pyplot as pp
                # pp.plot(x[0], x[1], 'xg')
                if not edge_found:
                    value[0] = 0.0
                    if False:
                        warnings.warn(
                            "Coordinate ({:e}, {:e}) doesn't sit on edge.".
                            format(x[0], x[1]))
                    # pp.plot(RZ[:, 0], RZ[:, 1], '.k')
                    # pp.plot(x[0], x[1], 'xr')
                    # pp.show()
                    # raise RuntimeError('Input coordinate '
                    #                    '{} is not on boundary.'.format(x))
                return

        tecplot_dbc = TecplotDirichletBC(degree=5)
        self.theta_bcs_d = [DirichletBC(self.Q, tecplot_dbc, upper_left)]
        self.theta_bcs_d_strict = [
            DirichletBC(self.Q, tecplot_dbc, upper_right),
            DirichletBC(self.Q, tecplot_dbc, crucible),
            DirichletBC(self.Q, tecplot_dbc, upper_left),
        ]

        # Neumann
        dTdr_vals = data["ZONE T"]["node data"]["dTempdx [K/m]"]
        dTdz_vals = data["ZONE T"]["node data"]["dTempdz [K/m]"]

        class TecplotNeumannBC(Expression):
            def eval(self, value, x):
                # Same problem as above: This expression is not only evaluated
                # at boundaries.
                for edge in data["ZONE T"]["element data"]:
                    X0 = RZ[edge[0] - 1]
                    X1 = RZ[edge[1] - 1]
                    theta = numpy.dot(X1 - X0, x - X0) / numpy.dot(
                        X1 - X0, X1 - X0)
                    dist = numpy.linalg.norm((1 - theta) * X0 + theta * X1 - x)
                    if dist < 1.0e-5 and 0.0 <= theta and theta <= 1.0:
                        value[0] = (1 - theta) * dTdr_vals[
                            edge[0] - 1] + theta * dTdr_vals[edge[1] - 1]
                        value[1] = (1 - theta) * dTdz_vals[
                            edge[0] - 1] + theta * dTdz_vals[edge[1] - 1]
                        break
                return

            def value_shape(self):
                return (2, )

        tecplot_nbc = TecplotNeumannBC(degree=5)
        n = FacetNormal(self.Q.mesh())
        self.theta_bcs_n = {
            submesh_boundary_indices["upper right"]: dot(n, tecplot_nbc),
            submesh_boundary_indices["crucible"]: dot(n, tecplot_nbc),
        }
        self.theta_bcs_r = {}

        # It seems that the boundary conditions from above are inconsistent in
        # that solving with Dirichlet overall and mixed Dirichlet-Neumann give
        # different results; the value *cannot* correspond to one solution.
        # From looking at the solutions, the pure Dirichlet setting appears
        # correct, so extract the Neumann values directly from that solution.

        # Pick fixed coefficients roughly at the temperature that we expect.
        # This could be made less magic by having the coefficients depend on
        # theta and solving the quasilinear equation.
        temp_estimate = 1550.0

        # Get material parameters
        wp_material = self.subdomain_materials[self.wpi]
        if isinstance(wp_material.specific_heat_capacity, float):
            cp = wp_material.specific_heat_capacity
        else:
            cp = wp_material.specific_heat_capacity(temp_estimate)
        if isinstance(wp_material.density, float):
            rho = wp_material.density
        else:
            rho = wp_material.density(temp_estimate)
        if isinstance(wp_material.thermal_conductivity, float):
            k = wp_material.thermal_conductivity
        else:
            k = wp_material.thermal_conductivity(temp_estimate)

        reference_problem = cyl_heat.Heat(
            self.Q,
            convection=None,
            kappa=k,
            rho=rho,
            cp=cp,
            source=Constant(0.0),
            dirichlet_bcs=self.theta_bcs_d_strict,
        )
        theta_reference = reference_problem.solve_stationary()
        theta_reference.rename("theta", "temperature (Dirichlet)")

        # Create equivalent boundary conditions from theta_ref. This
        # makes sure that the potentially expensive Expression evaluation in
        # theta_bcs_* is replaced by something reasonably cheap.
        self.theta_bcs_d = [
            DirichletBC(bc.function_space(), theta_reference,
                        bc.domain_args[0]) for bc in self.theta_bcs_d
        ]
        # Adapt Neumann conditions.
        n = FacetNormal(self.Q.mesh())
        self.theta_bcs_n = {
            k: dot(n, grad(theta_reference))
            # k: Constant(1000.0)
            for k in self.theta_bcs_n
        }

        if DEBUG:
            # Solve the heat equation with the mixed Dirichlet-Neumann
            # boundary conditions and compare it to the Dirichlet-only
            # solution.
            theta_new = Function(self.Q,
                                 name="temperature (Neumann + Dirichlet)")
            from dolfin import Measure

            ds_workpiece = Measure("ds", subdomain_data=self.wp_boundaries)

            heat = cyl_heat.Heat(
                self.Q,
                convection=None,
                kappa=k,
                rho=rho,
                cp=cp,
                source=Constant(0.0),
                dirichlet_bcs=self.theta_bcs_d,
                neumann_bcs=self.theta_bcs_n,
                robin_bcs=self.theta_bcs_r,
                my_ds=ds_workpiece,
            )
            theta_new = heat.solve_stationary()
            theta_new.rename("theta", "temperature (Neumann + Dirichlet)")

            from dolfin import plot, interactive, errornorm

            print("||theta_new - theta_ref|| = {:e}".format(
                errornorm(theta_new, theta_reference)))
            plot(theta_reference)
            plot(theta_new)
            plot(theta_reference - theta_new, title="theta_ref - theta_new")
            interactive()

        self.background_temp = 1400.0

        # self.omega = 2 * pi * 10.0e3
        self.omega = 2 * pi * 300.0

        return
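
# A minimal, self-contained sketch (plain numpy, independent of the classes
# above) of the point-to-segment projection used in TecplotDirichletBC: for a
# point x and an edge X0--X1, theta = <X1 - X0, x - X0> / ||X1 - X0||^2 is the
# parameter of the closest point on the line through X0 and X1, and x sits on
# the edge if the residual distance is ~0 and 0 <= theta <= 1.
import numpy

def project_onto_edge(x, X0, X1):
    d = X1 - X0
    theta = numpy.dot(d, x - X0) / numpy.dot(d, d)
    closest = (1.0 - theta) * X0 + theta * X1
    return theta, numpy.linalg.norm(closest - x)

# The midpoint of the unit segment has theta = 0.5 and distance 0.
theta, dist = project_onto_edge(numpy.array([0.5, 0.0]),
                                numpy.array([0.0, 0.0]),
                                numpy.array([1.0, 0.0]))
assert abs(theta - 0.5) < 1e-12 and dist < 1e-12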
Exemplo n.º 51
0
def test_marking():
    # PDE data
    # ========
    # define source term and diffusion coefficient
    # f = Expression("10.*exp(-(pow(x[0] - 0.6, 2) + pow(x[1] - 0.4, 2)) / 0.02)", degree=3)
    f = Constant(1.0)
    diffcoeff = Constant(1.0)

    # setup multivector
    #==================
    # solution evaluation function
    def eval_poisson(vec=None):
        if vec is None:
            # set default vector for new indices
            #    mesh0 = refine(Mesh(lshape_xml))
            mesh0 = UnitSquare(4, 4)
            fs = FunctionSpace(mesh0, "CG", 1)
            vec = FEniCSVector(Function(fs))
        pde = FEMPoisson()
        fem_A = pde.assemble_lhs(diffcoeff, vec.basis)
        fem_b = pde.assemble_rhs(f, vec.basis)
        solve(fem_A, vec.coeffs, fem_b)
        return vec

    # define active multiindices
    mis = [Multiindex([0]),
           Multiindex([1]),
           Multiindex([0, 1]),
           Multiindex([0, 2])]

    # setup initial multivector
    w = MultiVectorWithProjection()
    Marking.refine(w, {}, mis, eval_poisson)
    logger.info("active indices of after initialisation: %s", w.active_indices())

    # define coefficient field
    # ========================
    # define coefficient field
    a0 = Expression("1.0", element=FiniteElement('Lagrange', ufl.triangle, 1))
    #    a = [Expression('2.+sin(2.*pi*I*x[0]+x[1]) + 10.*exp(-pow(I*(x[0] - 0.6)*(x[1] - 0.3), 2) / 0.02)', I=i, degree=3,
    a = (Expression('A*cos(pi*I*x[0])*cos(pi*I*x[1])', A=1 / i ** 2, I=i, degree=2,
        element=FiniteElement('Lagrange', ufl.triangle, 1)) for i in count())
    rvs = (NormalRV(mu=0.5) for _ in count())
    coeff_field = ParametricCoefficientField(a, rvs, a0=a0)

    # refinement loop
    # ===============
    theta_eta = 0.3
    theta_zeta = 0.8
    min_zeta = 1e-10
    maxh = 1.0 / 10
    theta_delta = 0.8
    refinements = 1

    for refinement in range(refinements):
        logger.info("*****************************")
        logger.info("REFINEMENT LOOP iteration %i", refinement + 1)
        logger.info("*****************************")

        # evaluate residual and projection error estimates
        # ================================================
        mesh_markers_R, mesh_markers_P, new_multiindices = Marking.estimate_mark(w, coeff_field, f, theta_eta,
            theta_zeta, theta_delta, min_zeta, maxh)
        mesh_markers = mesh_markers_R.copy()
        mesh_markers.update(mesh_markers_P)
        Marking.refine(w, mesh_markers, new_multiindices.keys(), eval_poisson)

    # show refined meshes
    plot_meshes = False
    if plot_meshes:
        for mu, vec in w.iteritems():
            plot(vec.basis.mesh, title=str(mu), interactive=False, axes=True)
            plot(vec._fefunc)
        interactive()
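
# Hedged sketch (plain numpy, independent of the Marking class above; that
# theta_eta steers a bulk/Doerfler criterion of this kind is an assumption):
# mark the smallest set of cells whose squared error indicators account for at
# least a fraction theta of the total.
import numpy as np

def doerfler_mark(eta, theta):
    order = np.argsort(eta)[::-1]      # largest indicators first
    eta2 = eta[order] ** 2
    n_mark = int(np.searchsorted(np.cumsum(eta2), theta * eta2.sum())) + 1
    return order[:n_mark]

# With indicators (0.5, 0.1, 0.4, 0.2) and theta = 0.3 only the largest cell
# gets marked.
print(doerfler_mark(np.array([0.5, 0.1, 0.4, 0.2]), 0.3))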
Exemplo n.º 52
0
    def __init__(self):
        import os
        from dolfin import Mesh, MeshFunction, SubMesh, SubDomain, \
            FacetFunction, DirichletBC, dot, grad, FunctionSpace, \
            MixedFunctionSpace, Expression, FacetNormal, pi, Function, \
            Constant, TestFunction, MPI, mpi_comm_world, File
        import numpy
        import warnings

        from maelstrom import heat_cylindrical as cyl_heat
        from maelstrom import materials_database as md

        GMSH_EPS = 1.0e-15

        current_path = os.path.dirname(os.path.realpath(__file__))
        base = os.path.join(
            current_path,
            '../../meshes/2d/crucible-with-coils'
            )
        self.mesh = Mesh(base + '.xml')
        self.subdomains = MeshFunction('size_t',
                                       self.mesh,
                                       base + '_physical_region.xml'
                                       )

        self.subdomain_materials = {
            1: md.get_material('porcelain'),
            2: md.get_material('argon'),
            3: md.get_material('GaAs (solid)'),
            4: md.get_material('GaAs (liquid)'),
            27: md.get_material('air')
            }

        # coils
        for k in range(5, 27):
            self.subdomain_materials[k] = md.get_material('graphite EK90')

        # Define the subdomains which together form a single coil.
        self.coil_domains = [
            [5, 6, 7, 8, 9],
            [10, 11, 12, 13, 14],
            [15, 16, 17, 18, 19],
            [20, 21, 22, 23],
            [24, 25, 26]
            ]

        self.wpi = 4
        # http://fenicsproject.org/qa/2026/submesh-workaround-for-parallel-computation
        submesh_parallel_bug_fixed = False
        if submesh_parallel_bug_fixed:
            submesh_workpiece = SubMesh(self.mesh, self.subdomains, self.wpi)
        else:
            # To get the mesh in parallel, we need to read it in from a file.
            # Writing out can only happen in serial mode, though. :/
            base = os.path.join(current_path,
                                '../../meshes/2d/crucible-with-coils-submesh'
                                )
            filename = base + '.xml'
            if not os.path.isfile(filename):
                warnings.warn(
                    'Submesh file \'%s\' does not exist. Creating... '
                    % filename
                    )
                if MPI.size(mpi_comm_world()) > 1:
                    raise RuntimeError(
                        'Can only write submesh in serial mode.'
                        )
                submesh_workpiece = \
                    SubMesh(self.mesh, self.subdomains, self.wpi)
                output_stream = File(filename)
                output_stream << submesh_workpiece
            # Read the mesh
            submesh_workpiece = Mesh(base + '.xml')

        coords = submesh_workpiece.coordinates()
        ymin = min(coords[:, 1])
        ymax = max(coords[:, 1])

        # Find the top right point.
        k = numpy.argmax(numpy.sum(coords, 1))
        topright = coords[k, :]

        # Initialize mesh function for boundary domains
        class Left(SubDomain):
            def inside(self, x, on_boundary):
                # Explicitly exclude the lowest and the highest point of the
                # symmetry axis.
                # It is necessary for the consistency of the pressure-Poisson
                # system in the Navier-Stokes solver that the velocity is
                # exactly 0 at the boundary r>0. Hence, at the corner points
                # (r=0, melt-crucible, melt-crystal) we must enforce u=0
                # already and cannot have a component in z-direction.
                return on_boundary \
                    and x[0] < GMSH_EPS \
                    and x[1] < ymax - GMSH_EPS \
                    and x[1] > ymin + GMSH_EPS

        class Crucible(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary \
                    and ((x[0] > GMSH_EPS and x[1] < ymax - GMSH_EPS)
                         or (x[0] > topright[0] - GMSH_EPS
                             and x[1] > topright[1] - GMSH_EPS)
                         or (x[0] < GMSH_EPS and x[1] < ymin + GMSH_EPS)
                         )

        # At the top right part (boundary melt--gas), slip is allowed, so only
        # n.u=0 is enforced. Very weirdly, the PPE is consistent if and only if
        # the end points of UpperRight are in UpperRight. This contrasts with
        # Left(), where the end points must NOT belong to Left(). Judging from
        # the experiments, these settings do the right thing.
        # TODO try to better understand the PPE system/dolfin's boundary
        # settings
        class Upper(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary \
                    and x[1] > ymax - GMSH_EPS

        class UpperRight(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary \
                    and x[1] > ymax - GMSH_EPS \
                    and x[0] > 0.038 - GMSH_EPS

        # The crystal boundary is taken to reach up to 0.038 where the
        # Dirichlet boundary data is about the melting point of the crystal,
        # 1511K. This setting gives pretty acceptable results when there is no
        # convection except the one induced by buoyancy. If there is any more
        # stirring going on, though, the end point of the crystal with its
        # fixed temperature of 1511K might be the hottest point globally. This
        # looks rather unphysical.
        # TODO check out alternatives
        class UpperLeft(SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary \
                    and x[1] > ymax - GMSH_EPS \
                    and x[0] < 0.038 + GMSH_EPS

        left = Left()
        crucible = Crucible()
        upper_left = UpperLeft()
        upper_right = UpperRight()

        self.wp_boundaries = FacetFunction('size_t', submesh_workpiece)
        self.wp_boundaries.set_all(0)
        left.mark(self.wp_boundaries, 1)
        crucible.mark(self.wp_boundaries, 2)
        upper_right.mark(self.wp_boundaries, 3)
        upper_left.mark(self.wp_boundaries, 4)

        if DEBUG:
            from dolfin import plot, interactive
            plot(self.wp_boundaries, title='Boundaries')
            interactive()

        submesh_boundary_indices = {
            'left': 1,
            'crucible': 2,
            'upper right': 3,
            'upper left': 4
            }

        # Boundary conditions for the velocity.
        #
        # [1] Incompressible flow and the finite element method; volume two;
        #     Isothermal Laminar Flow;
        #     P.M. Gresho, R.L. Sani;
        #
        # For the choice of function space, [1] says:
        #     "In 2D, the triangular elements P_2^+P_1 and P_2^+P_{-1} are very
        #      good [...]. [...] If you wish to avoid bubble functions on
        #      triangular elements, P_2P_1 is not bad, and P_2(P_1+P_0) is even
        #      better [...]."
        #
        # It turns out that adding the bubble space significantly hampers the
        # convergence of the Stokes solver and also considerably increases the
        # time it takes to construct the Jacobian matrix of the Navier--Stokes
        # problem if no optimization is applied.
        V = FunctionSpace(submesh_workpiece, 'CG', 2)
        with_bubbles = False
        if with_bubbles:
            V += FunctionSpace(submesh_workpiece, 'B', 3)
        self.W = MixedFunctionSpace([V, V, V])

        self.u_bcs = [
            DirichletBC(self.W,
                        Expression(
                            ('0.0', '0.0', '-2*pi*x[0] * 5.0/60.0'),
                            degree=1
                            ),
                        #(0.0, 0.0, 0.0),
                        crucible),
            DirichletBC(self.W.sub(0), 0.0, left),
            DirichletBC(self.W.sub(2), 0.0, left),
            # Make sure that u[2] is 0 at r=0.
            DirichletBC(self.W,
                        Expression(
                            ('0.0', '0.0', '2*pi*x[0] * 5.0/60.0'),
                            degree=1
                            ),
                        upper_left),
            DirichletBC(self.W.sub(1), 0.0, upper_right),
            ]
        self.p_bcs = []

        self.P = FunctionSpace(submesh_workpiece, 'CG', 1)

        # Boundary conditions for heat equation.
        self.Q = FunctionSpace(submesh_workpiece, 'CG', 2)
        # Dirichlet.
        # This is a bit of a tough call since the boundary conditions need to
        # be read from a Tecplot file here.
        import tecplot_reader
        filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'data/crucible-boundary.dat'
            )
        data = tecplot_reader.read(filename)
        RZ = numpy.c_[data['ZONE T']['node data']['r'],
                      data['ZONE T']['node data']['z']
                      ]
        T_vals = data['ZONE T']['node data']['temp. [K]']

        class TecplotDirichletBC(Expression):
            # TODO specify degree
            def eval(self, value, x):
                # Find on which edge x sits, and raise exception if it doesn't.
                edge_found = False
                for edge in data['ZONE T']['element data']:
                    # Given a point X and an edge X0--X1,
                    #
                    #     (1 - theta) X0 + theta X1,
                    #
                    # the minimum distance is assumed for
                    #
                    #    argmin_theta ||(1-theta) X0  + theta X1 - X||^2
                    #    = <X1 - X0, X - X0> / ||X1 - X0||^2.
                    #
                    # If the distance is 0 and 0<=theta<=1, we found the edge.
                    #
                    # Note that edges are 1-based in Tecplot.
                    X0 = RZ[edge[0] - 1]
                    X1 = RZ[edge[1] - 1]
                    theta = numpy.dot(X1-X0, x-X0) / numpy.dot(X1-X0, X1-X0)
                    diff = (1.0-theta)*X0 + theta*X1 - x
                    if numpy.dot(diff, diff) < 1.0e-10 and \
                            0.0 <= theta and theta <= 1.0:
                        # Linear interpolation of the temperature value.
                        value[0] = (1.0-theta) * T_vals[edge[0]-1] \
                                 + theta       * T_vals[edge[1]-1]
                        edge_found = True
                        break
                # This class is supposed to be used for Dirichlet boundary
                # conditions. For some reason, FEniCS also evaluates
                # DirichletBC objects at coordinates which do not sit on the
                # boundary, see
                # <http://fenicsproject.org/qa/1033/dirichletbc-expressions-evaluated-away-from-the-boundary>.
                # The assigned values have no meaning though, so not assigning
                # values[0] here is okay.
                #
                #from matplotlib import pyplot as pp
                #pp.plot(x[0], x[1], 'xg')
                if not edge_found:
                    value[0] = 0.0
                    warnings.warn('Coordinate (%e, %e) doesn\'t sit on edge.'
                                  % (x[0], x[1]))
                    #pp.plot(RZ[:, 0], RZ[:, 1], '.k')
                    #pp.plot(x[0], x[1], 'xr')
                    #pp.show()
                    #raise RuntimeError('Input coordinate '
                    #                   '%r is not on boundary.' % x)
                return

        tecplot_dbc = TecplotDirichletBC()
        self.theta_bcs_d = [
            DirichletBC(self.Q, tecplot_dbc, upper_left)
            ]
        theta_bcs_d_strict = [
            DirichletBC(self.Q, tecplot_dbc, upper_right),
            DirichletBC(self.Q, tecplot_dbc, crucible),
            DirichletBC(self.Q, tecplot_dbc, upper_left)
            ]

        # Neumann
        dTdr_vals = data['ZONE T']['node data']['dTempdx [K/m]']
        dTdz_vals = data['ZONE T']['node data']['dTempdz [K/m]']

        class TecplotNeumannBC(Expression):
            # TODO specify degree
            def eval(self, value, x):
                # Same problem as above: This expression is not only evaluated
                # at boundaries.
                for edge in data['ZONE T']['element data']:
                    X0 = RZ[edge[0] - 1]
                    X1 = RZ[edge[1] - 1]
                    theta = numpy.dot(X1-X0, x-X0) / numpy.dot(X1-X0, X1-X0)
                    dist = numpy.linalg.norm((1-theta)*X0 + theta*X1 - x)
                    if dist < 1.0e-5 and 0.0 <= theta and theta <= 1.0:
                        value[0] = (1-theta) * dTdr_vals[edge[0]-1] \
                            + theta * dTdr_vals[edge[1]-1]
                        value[1] = (1-theta) * dTdz_vals[edge[0]-1] \
                            + theta * dTdz_vals[edge[1]-1]
                        break
                return

            def value_shape(self):
                return (2,)

        tecplot_nbc = TecplotNeumannBC()
        n = FacetNormal(self.Q.mesh())
        self.theta_bcs_n = {
            submesh_boundary_indices['upper right']: dot(n, tecplot_nbc),
            submesh_boundary_indices['crucible']: dot(n, tecplot_nbc)
            }
        self.theta_bcs_r = {}

        # It seems that the boundary conditions from above are inconsistent in
        # that solving with Dirichlet overall and mixed Dirichlet-Neumann give
        # different results; the value *cannot* correspond to one solution.
        # From looking at the solutions, the pure Dirichlet setting appears
        # correct, so extract the Neumann values directly from that solution.
        zeta = TestFunction(self.Q)

        theta_reference = Function(self.Q, name='temperature (Dirichlet)')
        theta_reference.vector()[:] = 0.0

        # Solve the *quasilinear* PDE (coefficients may depend on theta).
        # This is to avoid setting a fixed temperature for the coefficients.

        # Get material parameters
        wp_material = self.subdomain_materials[self.wpi]
        if isinstance(wp_material.specific_heat_capacity, float):
            cp = wp_material.specific_heat_capacity
        else:
            cp = wp_material.specific_heat_capacity(theta_reference)
        if isinstance(wp_material.density, float):
            rho = wp_material.density
        else:
            rho = wp_material.density(theta_reference)
        if isinstance(wp_material.thermal_conductivity, float):
            k = wp_material.thermal_conductivity
        else:
            k = wp_material.thermal_conductivity(theta_reference)

        reference_problem = cyl_heat.HeatCylindrical(
            self.Q, theta_reference,
            zeta,
            b=Constant((0.0, 0.0, 0.0)),
            kappa=k,
            rho=rho,
            cp=cp,
            source=Constant(0.0),
            dirichlet_bcs=theta_bcs_d_strict
            )

        from dolfin import solve
        solve(reference_problem.F0 == 0,
              theta_reference,
              bcs=theta_bcs_d_strict
              )

        # Create equivalent boundary conditions from theta_reference. This
        # makes sure that the potentially expensive Expression evaluation in
        # theta_bcs_* is replaced by something reasonably cheap.
        # (Note: avoid reusing the name `k` for the loop variables here; it
        # still holds the thermal conductivity that is needed again below.)
        for i, bc in enumerate(self.theta_bcs_d):
            self.theta_bcs_d[i] = DirichletBC(bc.function_space(),
                                              theta_reference,
                                              bc.domain_args[0]
                                              )
        # Adapt Neumann conditions.
        n = FacetNormal(self.Q.mesh())
        for key in self.theta_bcs_n:
            self.theta_bcs_n[key] = dot(n, grad(theta_reference))

        if DEBUG:
            # Solve the heat equation with the mixed Dirichlet-Neumann
            # boundary conditions and compare it to the Dirichlet-only
            # solution.
            theta_new = Function(
                self.Q,
                name='temperature (Neumann + Dirichlet)'
                )
            from dolfin import Measure
            ds_workpiece = Measure('ds')[self.wp_boundaries]
            problem_new = cyl_heat.HeatCylindrical(
                self.Q, theta_new,
                zeta,
                b=Constant((0.0, 0.0, 0.0)),
                kappa=k,
                rho=rho,
                cp=cp,
                source=Constant(0.0),
                dirichlet_bcs=self.theta_bcs_d,
                neumann_bcs=self.theta_bcs_n,
                ds=ds_workpiece
                )

            from dolfin import solve
            solve(problem_new.F0 == 0,
                  theta_new,
                  bcs=problem_new.dirichlet_bcs
                  )
            from dolfin import plot, interactive, errornorm
            print('||theta_new - theta_ref|| = %e'
                  % errornorm(theta_new, theta_reference)
                  )
            plot(theta_reference)
            plot(theta_new)
            plot(
                theta_reference - theta_new,
                title='theta_ref - theta_new'
                )
            interactive()

        #omega = 2 * pi * 10.0e3
        self.omega = 2 * pi * 300.0

        return
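
# Hedged aside (hypothetical helper, not part of the code above): the repeated
# float-vs-callable checks for specific_heat_capacity, density and
# thermal_conductivity could be collected in one small dispatch function.
def material_value(prop, theta):
    # Return the property directly if it is a plain number; otherwise evaluate
    # it at the given temperature.
    return prop if isinstance(prop, float) else prop(theta)

# e.g.  cp = material_value(wp_material.specific_heat_capacity, theta_reference)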
Exemplo n.º 53
0
    # Solution
    psi = df.Function(V)
    solver = df.PETScLUSolver(H)
    solver.parameters['symmetric'] = True

    solver.solve(psi.vector(), Psi0)

    q = psi.vector()

    # Do inverse iteration
    for k in range(5):
        Mq = M * q
        qHq = q.inner(H * q)
        qMq = q.inner(Mq)

        # Rayleigh quotient
        E = qHq / qMq
        print(E)

        q /= np.sqrt(qMq)

        solver.solve(q, Mq)

    Mq = M * q
    q /= np.sqrt(q.inner(Mq))

    psi.vector()[:] = q

    df.plot(psi, title="Ground State")
    df.interactive()
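
# A small dense-matrix analogue (plain numpy; a sketch, not the PETSc-based
# code above) of inverse iteration with the Rayleigh quotient: repeatedly solve
# H q_new = M q and normalise in the M-inner product, so that
# E = <q, H q> / <q, M q> converges to the smallest generalised eigenvalue of
# H x = E M x.
import numpy as np

def inverse_iteration(H, M, q, steps=20):
    for _ in range(steps):
        q = np.linalg.solve(H, M.dot(q))      # one inverse-iteration step
        q /= np.sqrt(q.dot(M.dot(q)))         # M-normalisation
    E = q.dot(H.dot(q)) / q.dot(M.dot(q))     # Rayleigh quotient
    return E, q

E, q = inverse_iteration(np.diag([1.0, 3.0, 5.0]), np.eye(3),
                         np.array([1.0, 1.0, 1.0]))
assert abs(E - 1.0) < 1e-8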
Exemplo n.º 54
0
# You should have received a copy of the GNU General Public License
# along with mshr.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import print_function
import dolfin
from mshr import *

dolfin.set_log_level(dolfin.TRACE)

# Define 2D geometry
domain =   Rectangle(dolfin.Point(0., 0.), dolfin.Point(5., 5.)) \
         - Rectangle(dolfin.Point(2., 1.25), dolfin.Point(3., 1.75)) \
         - Circle(dolfin.Point(1, 4), .25) \
         - Circle(dolfin.Point(4, 4), .25)
domain.set_subdomain(1, Rectangle(dolfin.Point(1., 1.), dolfin.Point(4., 3.)))
domain.set_subdomain(2, Rectangle(dolfin.Point(2., 2.), dolfin.Point(3., 4.)))

dolfin.info("\nVerbose output of 2D geometry:")
dolfin.info(domain, True)

# Generate and plot mesh
mesh2d = generate_mesh(domain, 45)
print(mesh2d)
dolfin.plot(mesh2d, "2D mesh")

# Convert subdomains to mesh function for plotting
mf = dolfin.MeshFunction("size_t", mesh2d, 2, mesh2d.domains())
dolfin.plot(mf, "Subdomains")

dolfin.interactive()
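
# Hedged follow-up (assumes a legacy DOLFIN build where SubMesh is available,
# as used elsewhere in these examples): the cell markers in `mf` can be used to
# pull one region out as its own mesh, e.g. subdomain 1 defined above.
submesh1 = dolfin.SubMesh(mesh2d, mf, 1)
print("subdomain 1 has {} cells".format(submesh1.num_cells()))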
Exemplo n.º 55
0
def method(L=6., H=2., R=0.3, n_segments=40, res=120, **kwargs):
    """
    Generates a barbell capillary with rounded edges.
    """
    info("Generating mesh of rounded barbell capillary")
    
    pt_1 = df.Point(0., 0.)
    pt_1star = df.Point(1., 0.)
    pt_1starstar = df.Point(L/(2*res), 0.)
    pt_2 = df.Point(L, H)
    pt_2star = df.Point(L-1., H)
    pt_2starstar = df.Point(L-L/(2*res), H)
    pt_3 = df.Point(1., H)
    pt_3star = df.Point(0, H)
    pt_3starstar = df.Point(L/(2*res), H)
    pt_4 = df.Point(L-1., 0)
    pt_4star = df.Point(L, 0)
    pt_4starstar = df.Point(L-L/(2*res), 0)
    pt_5 = df.Point(1., R)
    pt_6 = df.Point(1., H-R)
    pt_7 = df.Point(L-1., R)
    pt_8 = df.Point(L-1., H-R)
    pt_9 = df.Point(1.+2*R, R)
    pt_10 = df.Point(1.+2*R, H-R)
    pt_11 = df.Point(L-2*R-1, R)
    pt_12 = df.Point(L-2*R-1, H-R)
    pt_13 = df.Point(1.+2*R, H-2*R)
    pt_14 = df.Point(L-2*R-1, 2*R)

    inlet_polygon = [pt_1]
    inlet_polygon.append(pt_1starstar)
    add_vertical_boundary_vertices(inlet_polygon, L/res, H, res, 1)
    inlet_polygon.append(pt_3starstar)
    inlet_polygon.append(pt_3star)
    add_vertical_boundary_vertices(inlet_polygon, 0.0, H, res, -1)
    inlet_polygon.append(pt_1)

    outlet_polygon = [pt_4starstar]
    outlet_polygon.append(pt_4star)
    add_vertical_boundary_vertices(outlet_polygon, L, H, res, 1)
    outlet_polygon.append(pt_2)
    outlet_polygon.append(pt_2starstar)
    add_vertical_boundary_vertices(outlet_polygon, L-L/res, H, res, -1)
    outlet_polygon.append(pt_4starstar)

    inlet1 = mshr.Polygon(inlet_polygon)
    inlet2 = mshr.Rectangle(pt_1starstar, pt_3)
    outlet1 = mshr.Polygon(outlet_polygon)
    outlet2 = mshr.Rectangle(pt_4, pt_2starstar)
    channel = mshr.Rectangle(pt_5, pt_8)
    pos_cir_1 = mshr.Circle(pt_5, R, segments=n_segments)
    pos_cir_2 = mshr.Circle(pt_6, R, segments=n_segments)
    pos_cir_3 = mshr.Circle(pt_7, R, segments=n_segments)
    pos_cir_4 = mshr.Circle(pt_8, R, segments=n_segments)
    neg_cir_1 = mshr.Circle(pt_9, R, segments=n_segments)
    neg_cir_2 = mshr.Circle(pt_10, R, segments=n_segments)
    neg_cir_3 = mshr.Circle(pt_11, R, segments=n_segments)
    neg_cir_4 = mshr.Circle(pt_12, R, segments=n_segments)
    neg_reg_1 = mshr.Rectangle(pt_13, pt_12)
    neg_reg_2 = mshr.Rectangle(pt_9, pt_14)

    domain = inlet1 + inlet2 + outlet1 + outlet2 + \
        channel + pos_cir_1 + pos_cir_2 + pos_cir_3 + \
        pos_cir_4 - neg_cir_1 - neg_cir_2 - neg_cir_3 - \
        neg_cir_4 - neg_reg_1 - neg_reg_2

    mesh = mshr.generate_mesh(domain, res)

    mesh_path = os.path.join(MESHES_DIR,
                             "roundet_barbell_res" + str(res))
    store_mesh_HDF5(mesh, mesh_path)
    df.plot(mesh)
    df.interactive()
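
# Hedged sketch of what a helper like store_mesh_HDF5 might do (the real helper
# in this project may differ; assumes DOLFIN was built with HDF5 support): the
# mesh is written to an HDF5 file so it can be re-read, also in parallel.
def store_mesh_HDF5_sketch(mesh, mesh_path):
    h5f = df.HDF5File(mesh.mpi_comm(), mesh_path + ".h5", "w")
    h5f.write(mesh, "mesh")
    h5f.close()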
Exemplo n.º 56
0
    r, theta, z = cart_to_cyl(x)  # NB: check that cart_to_cyl can take non-tuple collections (see the hedged sketch at the end of this example).
    return d.near(r, bzo_radius)

## Testing some differing-boundary BCs:
bzo = d.DirichletBC(V, u_bzo, bzo_boundary)
ybco = d.DirichletBC(V, u_ybco, ybco_boundary)
##

# Finally, set up the PDE and solve it:
u = d.Function(V)
problem = d.LinearVariationalProblem(a, L, u, bcs=bc)
solver = d.LinearVariationalSolver(problem)
solver.parameters['symmetric'] = True
s = solver.solve()
#print u.vector()
d.plot(u)
d.interactive()

#######
## Now we'll try a more low-level (mathematically) approach
#######

#element = d.FiniteElement('Vector Lagrange', 'tetrahedron', 1)
#
#v = d.BasisFunction(element)
#U = d.BasisFunction(element)
#f = d.Function(element)
#
#E, nu = 10., .3
#
#mu = E / (2 * (1 + nu))
#lambda_const = E * nu / ((1 + nu) * (1 - 2 * nu))
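
# Hedged sketch (the real cart_to_cyl used above may differ) of a
# Cartesian-to-cylindrical conversion that accepts any length-3 sequence, not
# just tuples:
import numpy as np

def cart_to_cyl_sketch(x):
    r = np.hypot(x[0], x[1])
    theta = np.arctan2(x[1], x[0])
    return r, theta, x[2]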
Exemplo n.º 57
0
def run_SFEM(opts, conf):
    # propagate config values
    for sec in conf.keys():
        if sec == "LOGGING":
            continue
        secconf = conf[sec]
        for key, val in secconf.iteritems():
            print "CONF_" + key + "= secconf['" + key + "'] =", secconf[key]
            exec "CONF_" + key + "= secconf['" + key + "']"

    # setup logging
    print "LOG_LEVEL = logging." + conf["LOGGING"]["level"]
    exec "LOG_LEVEL = logging." + conf["LOGGING"]["level"]
    setup_logging(LOG_LEVEL, logfile=CONF_experiment_name + "_SFEM")
    
    # determine path of this module
    path = os.path.dirname(__file__)
    
    # ============================================================
    # PART A: Simulation Options
    # ============================================================
    
    # flags for residual, projection, new mi refinement 
    REFINEMENT = {"RES":CONF_refine_residual, "PROJ":CONF_refine_projection, "MI":CONF_refine_Lambda}

    
    # ============================================================
    # PART B: Problem Setup
    # ============================================================
    
    # define initial multiindices
    mis = [Multiindex(mis) for mis in MultiindexSet.createCompleteOrderSet(CONF_initial_Lambda, 1)]
    
    # setup domain and meshes
    mesh0, boundaries, dim = SampleDomain.setupDomain(CONF_domain, initial_mesh_N=CONF_initial_mesh_N)
    #meshes = SampleProblem.setupMeshes(mesh0, len(mis), num_refine=10, randref=(0.4, 0.3))
    meshes = SampleProblem.setupMeshes(mesh0, len(mis), num_refine=0)
    
    # define coefficient field
    # NOTE: for proper treatment of corner points, see elasticity_residual_estimator
    coeff_types = ("EF-square-cos", "EF-square-sin", "monomials", "constant")
    from itertools import count
    if CONF_mu is not None:
        muparam = (CONF_mu, (0 for _ in count()))
    else:
        muparam = None 
    coeff_field = SampleProblem.setupCF(coeff_types[CONF_coeff_type], decayexp=CONF_decay_exp, gamma=CONF_gamma,
                                    freqscale=CONF_freq_scale, freqskip=CONF_freq_skip, rvtype="uniform", scale=CONF_coeff_scale, secondparam=muparam)

    # setup boundary conditions and pde
    pde, Dirichlet_boundary, uD, Neumann_boundary, g, f = SampleProblem.setupPDE(CONF_boundary_type, CONF_domain, CONF_problem_type, boundaries, coeff_field)

    # define multioperator
    A = MultiOperator(coeff_field, pde.assemble_operator, pde.assemble_operator_inner_dofs, assembly_type=eval("ASSEMBLY_TYPE." + CONF_assembly_type))

    # setup initial solution multivector
    w = SampleProblem.setupMultiVector(dict([(mu, m) for mu, m in zip(mis, meshes)]), functools.partial(setup_vector, pde=pde, degree=CONF_FEM_degree))
    logger.info("active indices of w after initialisation: %s", w.active_indices())

    sim_stats = None
    w_history = []
    if opts.continueSFEM:
        try:
            logger.info("CONTINUIING EXPERIMENT: loading previous data of %s...", CONF_experiment_name)
            import pickle
            LOAD_SOLUTION = os.path.join(opts.basedir, CONF_experiment_name)
            logger.info("loading solutions from %s" % os.path.join(LOAD_SOLUTION, 'SFEM-SOLUTIONS.pkl'))
            # load solutions
            with open(os.path.join(LOAD_SOLUTION, 'SFEM-SOLUTIONS.pkl'), 'rb') as fin:
                w_history = pickle.load(fin)
            # convert to MultiVectorWithProjection
            for i, mv in enumerate(w_history):
                w_history[i] = MultiVectorWithProjection(cache_active=True, multivector=w_history[i])
            # load simulation data
            logger.info("loading statistics from %s" % os.path.join(LOAD_SOLUTION, 'SIM-STATS.pkl'))
            with open(os.path.join(LOAD_SOLUTION, 'SIM-STATS.pkl'), 'rb') as fin:
                sim_stats = pickle.load(fin)
            logger.info("active indices of w after initialisation: %s", w_history[-1].active_indices())
            w0 = w_history[-1]
        except:
            logger.warn("FAILED LOADING EXPERIMENT %s --- STARTING NEW DATA", CONF_experiment_name)
            w0 = w    
    else:
        w0 = w

    
    # ============================================================
    # PART C: Adaptive Algorithm
    # ============================================================
    
    # refinement loop
    # ===============
    w, sim_stats = AdaptiveSolver(A, coeff_field, pde, mis, w0, mesh0, CONF_FEM_degree, gamma=CONF_gamma, cQ=CONF_cQ, ceta=CONF_ceta,
                        # marking parameters
                        theta_eta=CONF_theta_eta, theta_zeta=CONF_theta_zeta, min_zeta=CONF_min_zeta,
                        maxh=CONF_maxh, newmi_add_maxm=CONF_newmi_add_maxm, theta_delta=CONF_theta_delta,
                        marking_strategy=CONF_marking_strategy, max_Lambda_frac=CONF_max_Lambda_frac,
                        # residual error evaluation
                        quadrature_degree=CONF_quadrature_degree,
                        # projection error evaluation
                        projection_degree_increase=CONF_projection_degree_increase, refine_projection_mesh=CONF_refine_projection_mesh,
                        # pcg solver
                        pcg_eps=CONF_pcg_eps, pcg_maxiter=CONF_pcg_maxiter,
                        # adaptive algorithm threshold
                        error_eps=CONF_error_eps,
                        # refinements
                        max_refinements=CONF_iterations, max_dof=CONF_max_dof,
                        do_refinement=REFINEMENT, do_uniform_refinement=CONF_uniform_refinement,
                        w_history=w_history,
                        sim_stats=sim_stats)
    
    from operator import itemgetter
    active_mi = [(mu, w[mu]._fefunc.function_space().mesh().num_cells()) for mu in w.active_indices()]
    active_mi = sorted(active_mi, key=itemgetter(1), reverse=True)
    logger.info("==== FINAL MESHES ====")
    for mu in active_mi:
        logger.info("--- %s has %s cells", mu[0], mu[1])
    print "ACTIVE MI:", active_mi
    print
    
    # memory usage info
    import resource
    logger.info("\n======================================\nMEMORY USED: " + str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) + "\n======================================\n")
    
    
    # ============================================================
    # PART D: Export of Solutions and Simulation Data
    # ============================================================
        
    # flag for final solution export
    if opts.saveData:
        import pickle
        SAVE_SOLUTION = os.path.join(opts.basedir, CONF_experiment_name)
        try:
            os.makedirs(SAVE_SOLUTION)
        except:
            pass
        logger.info("saving solutions into %s" % os.path.join(SAVE_SOLUTION, 'SFEM-SOLUTIONS.pkl'))
        # save solutions
        with open(os.path.join(SAVE_SOLUTION, 'SFEM-SOLUTIONS.pkl'), 'wb') as fout:
            pickle.dump(w_history, fout)
        # save simulation data
        sim_stats[0]["OPTS"] = opts
        sim_stats[0]["CONF"] = conf
        logger.info("saving statistics into %s" % os.path.join(SAVE_SOLUTION, 'SIM-STATS.pkl'))
        with open(os.path.join(SAVE_SOLUTION, 'SIM-STATS.pkl'), 'wb') as fout:
            pickle.dump(sim_stats, fout)
    
    
    # ============================================================
    # PART E: Plotting
    # ============================================================
    
    # plot residuals
    if opts.plotEstimator and len(sim_stats) > 1:
        try:
            from matplotlib.pyplot import figure, show, legend
            X = [s["DOFS"] for s in sim_stats]
            print "DOFS", X
            L2 = [s["L2"] for s in sim_stats]
            H1 = [s["H1"] for s in sim_stats]
            errest = [sqrt(s["EST"]) for s in sim_stats]
            res_part = [s["RES-PART"] for s in sim_stats]
            proj_part = [s["PROJ-PART"] for s in sim_stats]
            pcg_part = [s["PCG-PART"] for s in sim_stats]
            _reserrmu = [s["RES-mu"] for s in sim_stats]
            _projerrmu = [s["PROJ-mu"] for s in sim_stats]
            proj_max_zeta = [s["PROJ-MAX-ZETA"] for s in sim_stats]
            proj_max_inactive_zeta = [s["PROJ-MAX-INACTIVE-ZETA"] for s in sim_stats]
            try:
                proj_inactive_zeta = sorted([v for v in sim_stats[-2]["PROJ-INACTIVE-ZETA"].values()], reverse=True)
            except:
                proj_inactive_zeta = None
            mi = [s["MI"] for s in sim_stats]
            num_mi = [len(m) for m in mi]
            time_pcg = [s["TIME-PCG"] for s in sim_stats]
            time_estimator = [s["TIME-ESTIMATOR"] for s in sim_stats]
            time_inactive_mi = [s["TIME-INACTIVE-MI"] for s in sim_stats]
            time_marking = [s["TIME-MARKING"] for s in sim_stats]
            reserrmu = defaultdict(list)
            for rem in _reserrmu:
                for mu, v in rem:
                    reserrmu[mu].append(v)
            projerrmu = defaultdict(list)
            for pem in _projerrmu:
                for mu, v in pem:
                    projerrmu[mu].append(v)
            print "errest", errest
    
            # --------
            # figure 2
            # --------
            fig2 = figure()
            fig2.suptitle("error estimator")
            ax = fig2.add_subplot(111)
            ax.loglog(X, errest, '-g<', label='error estimator')
            legend(loc='upper right')
    
            # --------
            # figure 3a
            # --------
            if opts.plotEstimatorAll:
                max_mu_plotting = 7
                fig3 = figure()
                fig3.suptitle("residual contributions")
                ax = fig3.add_subplot(111)
                for i, muv in enumerate(reserrmu.iteritems()):
                    mu, v = muv
                    if i < max_mu_plotting:
                        mu, v = muv
                        ms = str(mu)
                        ms = ms[ms.find('=') + 1:-1]
                        ax.loglog(X[-len(v):], v, '-g<', label=ms)
                legend(loc='upper right')
    
            # --------
            # figure 3b
            # --------
            if opts.plotEstimatorAll:
                fig3b = figure()
                fig3b.suptitle("projection contributions")
                ax = fig3b.add_subplot(111)
                for i, muv in enumerate(projerrmu.iteritems()):
                    mu, v = muv
                    if max(v) > 1e-10 and i < max_mu_plotting:
                        ms = str(mu)
                        ms = ms[ms.find('=') + 1:-1]
                        ax.loglog(X[-len(v):], v, '-g<', label=ms)
                legend(loc='upper right')
    
            # --------
            # figure 4
            # --------
            if opts.plotEstimatorAll:
                fig4 = figure()
                fig4.suptitle("projection $\zeta$")
                ax = fig4.add_subplot(111)
                ax.loglog(X[1:], proj_max_zeta[1:], '-g<', label='max active $\zeta$')
                ax.loglog(X[1:], proj_max_inactive_zeta[1:], '-b^', label='max inactive $\zeta$')
                legend(loc='upper right')
    
            # --------
            # figure 5
            # --------
            fig5 = figure()
            fig5.suptitle("timings")
            ax = fig5.add_subplot(111)
            ax.loglog(X, time_pcg, '-g<', label='pcg')
            ax.loglog(X, time_estimator, '-b^', label='estimator')
            ax.loglog(X, time_inactive_mi, '-c+', label='inactive_mi')
            ax.loglog(X, time_marking, '-ro', label='marking')
            legend(loc='upper right')
                
            # --------
            # figure 6
            # --------
            if opts.plotEstimatorAll:
                fig6 = figure()
                fig6.suptitle("projection error")
                ax = fig6.add_subplot(111)
                ax.loglog(X[1:], proj_part[1:], '-.m>', label='projection part')
                legend(loc='upper right')
                
            # --------
            # figure 7
            # --------
            if opts.plotEstimatorAll and proj_inactive_zeta is not None:
                fig7 = figure()
                fig7.suptitle("inactive multiindex $\zeta$")
                ax = fig7.add_subplot(111)
                ax.loglog(range(len(proj_inactive_zeta)), proj_inactive_zeta, '-.m>', label='inactive $\zeta$')
                legend(loc='lower right')
                
            # --------
            # figure 1
            # --------
            fig1 = figure()
            fig1.suptitle("residual estimator")
            ax = fig1.add_subplot(111)
            if REFINEMENT["MI"]:
                ax.loglog(X, num_mi, '--y+', label='active mi')
            ax.loglog(X, errest, '-g<', label='error estimator')
            ax.loglog(X, res_part, '-.cx', label='residual part')
            ax.loglog(X[1:], proj_part[1:], '-.m>', label='projection part')
            ax.loglog(X, pcg_part, '-.b>', label='pcg part')
            legend(loc='upper right')
            
            show()  # this invalidates the figure instances...
        except:
            import traceback
            print traceback.format_exc()
            logger.info("skipped plotting since matplotlib is not available...")
    
    # plot final meshes
    if opts.plotMesh:
        USE_MAYAVI = Plotter.hasMayavi() and False
        w = w_history[-1]
        for mu, vec in w.iteritems():
            if USE_MAYAVI:
                # mesh
    #            Plotter.figure(bgcolor=(1, 1, 1))
    #            mesh = vec.basis.mesh
    #            Plotter.plotMesh(mesh.coordinates(), mesh.cells(), representation='mesh')
    #            Plotter.axes()
    #            Plotter.labels()
    #            Plotter.title(str(mu))
                # function
                Plotter.figure(bgcolor=(1, 1, 1))
                mesh = vec.basis.mesh
                Plotter.plotMesh(mesh.coordinates(), mesh.cells(), vec.coeffs)
                Plotter.axes()
                Plotter.labels()
                Plotter.title(str(mu))
            else:
                viz_mesh = plot(vec.basis.mesh, title="mesh " + str(mu), interactive=False)
#                if SAVE_SOLUTION != '':
#                    viz_mesh.write_png(SAVE_SOLUTION + '/mesh' + str(mu) + '.png')
#                    viz_mesh.write_ps(SAVE_SOLUTION + '/mesh' + str(mu), format='pdf')
    #            vec.plot(title=str(mu), interactive=False)
        if USE_MAYAVI:
            Plotter.show(stop=True)
            Plotter.close(allfig=True)
        else:
            interactive()
    
    # plot sample solution
    if opts.plotSolution:
        w = w_history[-1]
        # get random field sample and evaluate solution (direct and parametric)
        RV_samples = coeff_field.sample_rvs()
        ref_maxm = w_history[-1].max_order
        sub_spaces = w[Multiindex()].basis.num_sub_spaces
        degree = w[Multiindex()].basis.degree
        maxh = min(w[Multiindex()].basis.minh / 4, CONF_max_h)
        maxh = w[Multiindex()].basis.minh
        projection_basis = get_projection_basis(mesh0, maxh=maxh, degree=degree, sub_spaces=sub_spaces)
        sample_sol_param = compute_parametric_sample_solution(RV_samples, coeff_field, w, projection_basis)
        sample_sol_direct = compute_direct_sample_solution(pde, RV_samples, coeff_field, A, ref_maxm, projection_basis)
        sol_variance = compute_solution_variance(coeff_field, w, projection_basis)
    
        # plot
        print sub_spaces
        if sub_spaces == 0:
            viz_p = plot(sample_sol_param._fefunc, title="parametric solution")
            viz_d = plot(sample_sol_direct._fefunc, title="direct solution")
            if ref_maxm > 0:
                viz_v = plot(sol_variance._fefunc, title="solution variance")
    
            # debug---
            if not True:        
                for mu in w.active_indices():
                    for i, wi in enumerate(w_history):
                        if i == len(w_history) - 1 or True:
                            plot(wi[mu]._fefunc, title="parametric solution " + str(mu) + " iteration " + str(i))
    #                        plot(wi[mu]._fefunc.function_space().mesh(), title="parametric solution " + str(mu) + " iteration " + str(i), axes=True)
                    interactive()
            # ---debug
            
#            for mu in w.active_indices():
#                plot(w[mu]._fefunc, title="parametric solution " + str(mu))
        else:
            mesh_param = sample_sol_param._fefunc.function_space().mesh()
            mesh_direct = sample_sol_direct._fefunc.function_space().mesh()
            wireframe = True
            viz_p = plot(sample_sol_param._fefunc, title="parametric solution", mode="displacement", mesh=mesh_param, wireframe=wireframe)#, rescale=False)
            viz_d = plot(sample_sol_direct._fefunc, title="direct solution", mode="displacement", mesh=mesh_direct, wireframe=wireframe)#, rescale=False)
            
#            for mu in w.active_indices():
#                viz_p = plot(w[mu]._fefunc, title="parametric solution: " + str(mu), mode="displacement", mesh=mesh_param, wireframe=wireframe)
        interactive()


    if opts.plotFlux:
        w = w_history[-1]
        # get random field sample and evaluate solution (direct and parametric)
        RV_samples = coeff_field.sample_rvs()
        ref_maxm = w_history[-1].max_order
        sub_spaces = w[Multiindex()].basis.num_sub_spaces
        degree = w[Multiindex()].basis.degree
        maxh = min(w[Multiindex()].basis.minh / 4, CONF_max_h)
        maxh = w[Multiindex()].basis.minh
        projection_basis = get_projection_basis(mesh0, maxh=maxh, degree=degree, sub_spaces=sub_spaces)
        vec_projection_basis = get_projection_basis(mesh0, maxh=maxh, degree=degree, sub_spaces=2)
        sample_sol_param = compute_parametric_sample_solution(RV_samples, coeff_field, w, projection_basis)
        sample_sol_direct = compute_direct_sample_solution(pde, RV_samples, coeff_field, A, ref_maxm, projection_basis)
        sol_variance = compute_solution_variance(coeff_field, w, projection_basis)
    
        sol_param_flux = compute_solution_flux(pde, RV_samples, coeff_field, sample_sol_param, ref_maxm, projection_basis, vec_projection_basis)
        sol_direct_flux = compute_solution_flux(pde, RV_samples, coeff_field, sample_sol_direct, ref_maxm, projection_basis, vec_projection_basis)

        # plot
        if sub_spaces == 0:
            #viz_p = plot(sol_param_flux._fefunc, title="parametric solution flux")
            flux_x, flux_y = sol_param_flux._fefunc.split(deepcopy=True)
            viz_x = plot(flux_x, title="parametric solution flux x")
            viz_y = plot(flux_y, title="parametric solution flux y")

            flux_x, flux_y = sol_direct_flux._fefunc.split(deepcopy=True)
            viz_x = plot(flux_x, title="direct solution flux x")
            viz_y = plot(flux_y, title="direct solution flux y")

        else:
            raise NotImplementedError("flux plotting is only implemented for scalar (sub_spaces == 0) solutions")
        interactive()
Exemplo n.º 58
0
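        # export the pointwise variance fields (posterior, prior, correction) to a ParaView file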
        fid = dl.File("results/pointwise_variance.pvd")
        fid << vector2Function(post_pw_variance, Vh, name="Posterior")
        fid << vector2Function(pr_pw_variance, Vh, name="Prior")
        fid << vector2Function(corr_pw_variance, Vh, name="Correction")

    posterior.exportU(Vh, "hmisfit/evect.pvd")
    np.savetxt("hmisfit/eigenvalues.dat", d)

    print sep, "Generate samples from Prior and Posterior", sep
    fid_prior = dl.File("samples/sample_prior.pvd")
    fid_post = dl.File("samples/sample_post.pvd")
    nsamples = 500
    noise = dl.Vector()
    posterior.init_vector(noise, "noise")
    noise_size = noise.array().shape[0]
    s_prior = dl.Function(Vh, name="sample_prior")
    s_post = dl.Function(Vh, name="sample_post")
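    # map i.i.d. Gaussian noise to one prior sample and one posterior sample per iteration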
    for i in range(nsamples):
        noise.set_local(np.random.randn(noise_size))
        posterior.sample(noise, s_prior.vector(), s_post.vector())
        fid_prior << s_prior
        fid_post << s_post

    print sep, "Visualize results", sep
    plt.figure()
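    # decay of the k computed eigenvalues d, plotted against the constant reference value 1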
    plt.plot(range(0, k), d, 'b*', range(0, k), np.ones(k), '-r')
    plt.yscale('log')
    dl.plot(vector2Function(a, Vh, name="Initial Condition"))
    plt.show()
    dl.interactive()
def run_mc(w, err, pde):
    import time
    from dolfin import errornorm
    
    # create reference mesh and function space
    projection_basis = get_projection_basis(mesh0, maxh=min(w[Multiindex()].basis.minh / 4, MC_HMAX))
    logger.debug("hmin of mi[0] = %s, reference mesh = (%s, %s)", w[Multiindex()].basis.minh, projection_basis.minh, projection_basis.maxh)

    # get realization of coefficient field
    err_L2, err_H1 = 0, 0
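    # Monte Carlo loop: for each sampled coefficient realisation, compare the parametric sample
    # solution with a directly computed reference solution and average the L2/H1 errors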
    for i in range(MC_N):
        logger.info("---- MC Iteration %i/%i ----", i + 1 , MC_N)
        RV_samples = coeff_field.sample_rvs()
        logger.debug("-- RV_samples: %s", [RV_samples[j] for j in range(w.max_order)])
        t1 = time.time()
        sample_sol_param = compute_parametric_sample_solution(RV_samples, coeff_field, w, projection_basis)
        t2 = time.time()
        sample_sol_direct = compute_direct_sample_solution(pde, RV_samples, coeff_field, A, 2 * w.max_order, projection_basis)
        t3 = time.time()
        cerr_L2 = errornorm(sample_sol_param._fefunc, sample_sol_direct._fefunc, "L2")
        cerr_H1 = errornorm(sample_sol_param._fefunc, sample_sol_direct._fefunc, "H1")
        logger.debug("-- current error L2 = %s    H1 = %s", cerr_L2, cerr_H1)
        err_L2 += 1.0 / MC_N * cerr_L2
        err_H1 += 1.0 / MC_N * cerr_H1
        
        if i + 1 == MC_N:
            # error function
            errf = sample_sol_param - sample_sol_direct
            
            # deterministic part
            sample_sol_direct_a0 = compute_direct_sample_solution(pde, RV_samples, coeff_field, A, 0, projection_basis)
            L2_a0 = errornorm(sample_sol_param._fefunc, sample_sol_direct_a0._fefunc, "L2")
            H1_a0 = errornorm(sample_sol_param._fefunc, sample_sol_direct_a0._fefunc, "H1")
            logger.info("-- DETERMINISTIC error L2 = %s    H1 = %s", L2_a0, H1_a0)

            # stochastic part
            sample_sol_direct_am = sample_sol_direct - sample_sol_direct_a0
#            L2_am = errornorm(sample_sol_param._fefunc, sample_sol_direct_am._fefunc, "L2")
#            H1_am = errornorm(sample_sol_param._fefunc, sample_sol_direct_am._fefunc, "H1")
            logger.info("-- STOCHASTIC norm L2 = %s    H1 = %s", sample_sol_direct_am.norm("L2"), sample_sol_direct_am.norm("H1"))

            if MC_PLOT:
                sample_sol_param.plot(title="param")
                sample_sol_direct.plot(title="direct")
                errf.plot(title="|param-direct| error")
                sample_sol_direct_am.plot(title="direct stochastic part")
                fc = get_coeff_realisation(RV_samples, coeff_field, w.max_order, projection_basis)
                fc.plot(title="coeff")


                sol_variance = compute_solution_variance(coeff_field, w, projection_basis)
                sol_variance.plot(title="sol variance")
                interactive()

                #coeff_variance = compute_solution_variance(coeff_field, w0, proj_basis)
                #sol_variance.plot(title="variance")
            
        t4 = time.time()
        logger.info("TIMING: param: %s, direct %s, error %s", t2 - t1, t3 - t2, t4 - t3)

    logger.info("MC Error: L2: %s, H1: %s", err_L2, err_H1)
    err.append((err_L2, err_H1))
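
A minimal usage sketch for run_mc (assumptions: w_history, pde, coeff_field, A, mesh0, logger and
the MC_* constants are provided by the surrounding script, as in the code above; the wrapper name
and call pattern below are illustrative only):

def estimate_mc_error(w_history, pde):
    # sketch: collect one (err_L2, err_H1) tuple per call to run_mc
    err = []
    run_mc(w_history[-1], err, pde)
    err_L2, err_H1 = err[-1]
    logger.info("final MC error estimate: L2 = %s, H1 = %s", err_L2, err_H1)
    return err_L2, err_H1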