Exemple #1
0
def setup_logging(level, logfile=None, logfile_level=None, verbatim_filename=False):
    """Configure root, FEniCS, and spuq loggers; optionally log to a file.

    Args:
        level: log level for the root logger and the "spuq" logger.
        logfile: optional path of a log file to append to.
        logfile_level: level for the file handler (defaults to ``level``).
        verbatim_filename: when False, ``.conf``/``.py`` suffixes are
            stripped from ``logfile`` and ``.log`` is appended.
    """
    fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    logging.basicConfig(level=level, format=fmt)

    # quieten the FEniCS machinery (dolfin itself plus the form compilers)
    dolfin.set_log_active(True)
    dolfin.set_log_level(logging.WARNING)
    for noisy in ("FFC", "UFL"):
        logging.getLogger(noisy).setLevel(logging.WARNING)

    # explicitly set levels for certain loggers
    logging.getLogger("spuq").setLevel(level)
    logging.getLogger("spuq.application.egsz.multi_operator").setLevel(logging.WARNING)
    #logging.getLogger("spuq.application.egsz.marking").setLevel(logging.INFO)

    if not logfile:
        return

    # replace .py or .conf with .log, if not prohibited
    if not verbatim_filename:
        logfile = basename(logfile, ".conf")
        logfile = basename(logfile, ".py")
        logfile += ".log"

    # append to the log file (mode "w" would overwrite instead)
    handler = logging.FileHandler(logfile, mode="a")
    handler.setLevel(logfile_level if logfile_level else level)
    handler.setFormatter(logging.Formatter(fmt))
    logging.getLogger().addHandler(handler)
Exemple #2
0
def make_logger(name, level=parameters["log_level"]):
    """Return a logger for *name* that only emits on MPI rank 0.

    The same rank-0 filter is attached to the FFC and UFL loggers so
    that parallel runs do not print duplicate compiler messages.
    """

    def log_if_process0(record):
        # forward records only on the root MPI process
        return 1 if dolfin.MPI.rank(mpi_comm_world()) == 0 else 0

    # minimal object carrying the .filter callable expected by addFilter
    mpi_filt = Object()
    mpi_filt.filter = log_if_process0

    logger = logging.getLogger(name)
    logger.setLevel(level)

    console = logging.StreamHandler()
    console.setLevel(0)  # handler passes everything; the logger level filters
    console.setFormatter(
        logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    )
    logger.addHandler(console)
    logger.addFilter(mpi_filt)

    # quieten dolfin and rank-filter the form-compiler loggers
    dolfin.set_log_level(logging.WARNING)
    for noisy_name in ("FFC", "UFL"):
        noisy = logging.getLogger(noisy_name)
        noisy.setLevel(logging.WARNING)
        noisy.addFilter(mpi_filt)

    return logger
Exemple #3
0
def make_logger(name, level=logging.INFO):
    """Create a console logger for *name* that only emits on MPI rank 0.

    Args:
        name: logger name passed to ``logging.getLogger``.
        level: logging level for the returned logger (default: INFO).

    Returns:
        The configured ``logging.Logger`` instance.
    """
    import logging
    import dolfin

    # cheap stand-in object that only needs a .filter attribute
    mpi_filt = lambda: None

    def log_if_proc0(record):
        # pass records through only on the root MPI process
        if dolfin.mpi_comm_world().rank == 0:
            return 1
        else:
            return 0

    mpi_filt.filter = log_if_proc0

    logger = logging.getLogger(name)
    # BUG FIX: the original called logger.setLevel(log_level), but no name
    # `log_level` exists in this scope (NameError at runtime); the intended
    # value is the `level` parameter.
    logger.setLevel(level)

    ch = logging.StreamHandler()
    ch.setLevel(0)  # handler passes everything; the logger level filters

    formatter = logging.Formatter("%(message)s")
    ch.setFormatter(formatter)

    logger.addHandler(ch)
    logger.addFilter(mpi_filt)

    # silence dolfin's own logging entirely
    dolfin.set_log_active(False)
    dolfin.set_log_level(dolfin.WARNING)

    return logger
def setup_logging(level):
    """Configure file + console logging and return the module logger.

    The log file is named after this module: the trailing "py" of
    ``__file__`` is swapped for "log".
    """
    fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    logging.basicConfig(filename=__file__[:-2] + 'log', level=level,
                        format=fmt)

    # FEniCS logging: keep dolfin and the form compilers at WARNING
    from dolfin import (set_log_level, set_log_active, INFO, DEBUG, WARNING)
    set_log_active(True)
    set_log_level(WARNING)
    for noisy in ("FFC", "UFL"):
        logging.getLogger(noisy).setLevel(logging.WARNING)

    # module logger
    logger = logging.getLogger(__name__)
    logging.getLogger("spuq.application.egsz.multi_operator").disabled = True
    #logging.getLogger("spuq.application.egsz.marking").setLevel(logging.INFO)

    # mirror output on the console for this module and the spuq package
    console = logging.StreamHandler()
    console.setLevel(level)
    console.setFormatter(logging.Formatter(fmt))
    logger.addHandler(console)
    logging.getLogger("spuq").addHandler(console)
    return logger
Exemple #5
0
def main():
    "Generates the mesh"

    import mshr as m
    import dolfin as d
    import matplotlib.pyplot as plt

    d.set_log_level(13)  # PROGRESS

    inner_radius = 0.5
    outer_radius = 2.0
    resolution = 10

    inner_circle = m.Circle(d.Point(0.0, 0.0), inner_radius)
    outer_circle = m.Circle(d.Point(0.0, 0.0), outer_radius)

    # annulus: outer disc minus inner disc, with both registered as subdomains
    annulus = outer_circle - inner_circle
    annulus.set_subdomain(1, inner_circle)
    annulus.set_subdomain(2, outer_circle)

    mesh = m.generate_mesh(annulus, resolution)
    print("max edge length:", mesh.hmax())

    # write the mesh for ParaView
    d.File("mesh.pvd").write(mesh)

    # and show it on screen
    plt.figure()
    d.plot(mesh, title="Mesh")
    plt.show()
Exemple #6
0
    def __init__(self, cparams, dtype_u, dtype_f):
        """
        Initialization routine

        Sets up the 1D heat-equation problem: mesh (with optional
        refinements), function space, forcing term, weak-form RHS,
        mass matrix and homogeneous Dirichlet boundary conditions.

        Args:
            cparams: custom parameters for the example
            dtype_u: particle data type (will be passed parent class)
            dtype_f: acceleration data type (will be passed parent class)
        """

        # define the Dirichlet boundary (the entire domain boundary)
        def Boundary(x, on_boundary):
            return on_boundary

        # these parameters will be used later, so assert their existence
        assert 'c_nvars' in cparams
        assert 'nu' in cparams
        assert 't0' in cparams
        assert 'family' in cparams
        assert 'order' in cparams
        assert 'refinements' in cparams

        # add parameters as attributes for further reference
        for k,v in cparams.items():
            setattr(self,k,v)

        # keep dolfin quiet and let FFC optimize the generated code
        df.set_log_level(df.WARNING)

        df.parameters["form_compiler"]["optimize"]     = True
        df.parameters["form_compiler"]["cpp_optimize"] = True

        # set mesh and refinement (for multilevel)
        mesh = df.UnitIntervalMesh(self.c_nvars)
        # mesh = df.UnitSquareMesh(self.c_nvars[0],self.c_nvars[1])
        for i in range(self.refinements):
            mesh = df.refine(mesh)

        # self.mesh = mesh
        # define function space for future reference
        self.V = df.FunctionSpace(mesh, self.family, self.order)
        tmp = df.Function(self.V)
        print('DoFs on this level:',len(tmp.vector().array()))

        # invoke super init, passing number of dofs, dtype_u and dtype_f
        super(fenics_heat,self).__init__(self.V,dtype_u,dtype_f)

        # time-dependent forcing term; a/b/t are updated via Expression params
        self.g = df.Expression('-sin(a*x[0]) * (sin(t) - b*a*a*cos(t))',a=np.pi,b=self.nu,t=self.t0,degree=self.order)

        # rhs in weak form: diffusion term plus forcing
        self.w = df.Function(self.V)
        v = df.TestFunction(self.V)
        self.a_K = -self.nu*df.inner(df.nabla_grad(self.w), df.nabla_grad(v))*df.dx + self.g*v*df.dx

        # mass matrix (assembled once, reused for every solve)
        u = df.TrialFunction(self.V)
        a_M = u*v*df.dx
        self.M = df.assemble(a_M)

        # homogeneous Dirichlet BC on the whole boundary
        self.bc = df.DirichletBC(self.V, df.Constant(0.0), Boundary)
Exemple #7
0
    def setup(self):
        """
        Setup logging to file if requested in the simulation input
        """
        # Ensure that the output directory exist
        # (query a dummy file path just to learn the output directory)
        tmp = self.simulation.input.get_output_file_path('BOGUS', 'xxxxx')
        output_dir = os.path.split(tmp)[0]
        if output_dir and not os.path.exists(output_dir):
            os.makedirs(output_dir)

        # Read all logging-related options from the simulation input
        log_name = self.simulation.input.get_output_file_path('output/log_name', '.log')
        log_on_all_ranks = self.simulation.input.get_value(
            'output/log_on_all_ranks', False, 'bool'
        )
        log_enabled = self.simulation.input.get_value(
            'output/log_enabled', True, 'bool'
        )
        log_append_existing = self.simulation.input.get_value(
            'output/log_append_to_existing_file', True, 'bool'
        )
        stdout_on_all_ranks = self.simulation.input.get_value(
            'output/stdout_on_all_ranks', False, 'bool'
        )
        stdout_enabled = self.simulation.input.get_value(
            'output/stdout_enabled', True, 'bool'
        )
        self.show_memory_usage = self.simulation.input.get_value(
            'output/show_memory_usage', False, 'bool'
        )
        rank = self.simulation.rank

        # By default only rank 0 writes to stdout / the log file
        self.write_stdout = (rank == 0 or stdout_on_all_ranks) and stdout_enabled
        self.write_log = False
        if log_enabled:
            # give each rank its own log file when logging on all ranks
            if log_on_all_ranks and rank > 0:
                log_name = '%s.%d' % (log_name, self.simulation.rank)

            if rank == 0 or log_on_all_ranks:
                self.write_log = True
                self.log_file_name = log_name
                if log_append_existing:
                    # append mode; blank lines separate runs in the same file
                    self.log_file = open(self.log_file_name, 'at')
                    self.log_file.write('\n\n')
                else:
                    self.log_file = open(self.log_file_name, 'wt')

        # Set the Ocellaris log level
        log_level = self.simulation.input.get_value(
            'output/ocellaris_log_level', 'info'
        )
        self.simulation.log.set_log_level(self.AVAILABLE_LOG_LEVELS[log_level])

        # Set the Dolfin log level
        df_log_level = self.simulation.input.get_value(
            'output/dolfin_log_level', 'warning'
        )
        dolfin.set_log_level(self.AVAILABLE_LOG_LEVELS[df_log_level])
Exemple #8
0
def init_ffc():
    """Configure the FFC form compiler for optimized code generation."""
    import dolfin as _dolfin

    # optimization flags for the generated C++ code
    opt_flags = " ".join(["-O3", "-ffast-math"])  # , "-march=native"

    form_compiler = _dolfin.parameters["form_compiler"]
    form_compiler["quadrature_degree"] = 4
    form_compiler["representation"] = "uflacs"
    form_compiler["cpp_optimize"] = True
    form_compiler["cpp_optimize_flags"] = opt_flags
    _dolfin.set_log_level(_logging.WARNING)
Exemple #9
0
    def __init__(self, domain, **kwargs):
        """Store the domain and prepare the visualization output directory."""
        dolf.set_log_level(LOG_LEVEL)
        self.domain = domain
        self._visualization_files = None

        # optional experiment id becomes a "_<id>" suffix on the directory
        suffix = kwargs.get("experiment_id", "")
        if suffix:
            suffix = "_" + suffix
        self.vis_dir = "Visualization" + suffix + "/"
        if not os.path.exists(self.vis_dir):
            os.makedirs(self.vis_dir)
Exemple #10
0
    def setUp(self):
        """Build the 1D wave-equation test fixture.

        Assembles mesh, function space, initial condition, time-dependent
        Dirichlet boundary data and the weak forms used by the tests.
        """
        d.set_log_level(d.LogLevel.WARNING)
        N = 50            # number of mesh cells
        order = 2         # polynomial order of the function space
        tF = 0.10         # final time
        # Dirichlet boundary characteristic time
        tau = tF / 10.0
        # time step
        h = 0.001

        self.num_steps = round(tF / h)

        # Mesh and Function space
        mesh = d.UnitIntervalMesh(N)
        V = d.FunctionSpace(mesh, "P", order)
        w = d.TestFunction(V)
        v = d.TrialFunction(V)

        # Initial conditions chosen such that the wave travels to the right
        uInit = d.Expression(
            "((1./3 < x[0]) && (x[0] < 2./3)) ? 0.5*(1-cos(2*pi*3*(x[0]-1./3))) : 0.",
            degree=2,
        )

        u = d.interpolate(uInit, V)  # values

        # Dirichlet boundary on the right with its derivatives
        # (g is the prescribed value, dg its time derivative; both are
        # ramped over the interval [0, tau] and zero afterwards)
        g = d.Expression(
            "(t < total) ? 0.4*(1.-cos(2*pi*t/total))/2. : 0.",
            degree=2,
            t=0.0,
            total=tau,
        )
        dg = d.Expression(
            "(t < total) ? 0.4*(pi/total) * sin(2*pi*t/total) : 0.",
            degree=2,
            t=0.0,
            total=tau,
        )

        def updateBC(t):
            # advance the time parameter of both boundary expressions
            g.t = t
            dg.t = t

        def right(x, on_boundary):
            # right end of the unit interval (x == 1)
            return on_boundary and d.near(x[0], 1.0)

        bc0 = d.DirichletBC(V, g, right)
        bc1 = d.DirichletBC(V, dg, right)
        self.bc = [bc0, bc1]

        # weak forms: L1 stiffness contribution, L2 mass contribution
        L1 = -d.inner(d.grad(w), d.grad(u)) * d.dx
        L2 = w * v * d.dx
        self.parameters = (L1, L2, u, h, updateBC)
Exemple #11
0
def set_log_level(level):
    """Sets the log level.

    Forwards *level* to dolfin, mirrors dolfin's resulting level into
    the m3h3 parameter set, and logs the change at INFO.

    Parameters
    ----------
    level : int
        Log level that should be set.
    """
    df.set_log_level(level)
    # keep the m3h3 parameters in sync with dolfin's actual level
    m3h3.parameters.update({"log_level": df.get_log_level()})
    log(LogLevel.INFO, "Log level updated to {}".format(level))
Exemple #12
0
def set_solver():
    """Build and return a CG Krylov solver preconditioned with hypre AMG."""
    # alternative preconditioners: "hypre_euclid", "petsc_amg"
    solver = d.KrylovSolver("cg", "hypre_amg")

    params = solver.parameters
    params["maximum_iterations"] = 1000
    params["absolute_tolerance"] = 1E-8
    params["error_on_nonconvergence"] = True
    params["monitor_convergence"] = True
    # params["divergence_limit"] = 1E+6
    # params["nonzero_initial_guess"] = True

    # print the effective solver configuration and show iteration progress
    d.info(solver.parameters, verbose=True)
    d.set_log_level(d.PROGRESS)
    return solver
Exemple #13
0
def test_save_load():
    """Solve a problem, save the data, load it back and compare."""
    df.set_log_level(100)       # suppress dolfin logger

    # Setup solver
    solver = SubdomainSolver(N=32)

    # BUG FIX: a leftover debug line re-assigned tmpdirname to "foo",
    # which bypassed the temporary directory and leaked a "foo/" tree
    # into the working directory. The TemporaryDirectory is now used.
    with tempfile.TemporaryDirectory() as tmpdirname:
        casedir = Path(tmpdirname) / "test_pp_casedir"

        # Setup saver
        field_spec = FieldSpec(stride_timestep=1, save_as=("checkpoint", "hdf5"))
        saver_spec = SaverSpec(casedir=str(casedir))
        saver = Saver(saver_spec)

        saver.store_mesh(
            solver.mesh,
            cell_domains=solver.cell_function,
            facet_domains=solver.facet_function
        )
        saver.add_field(Field("u", field_spec))

        # Solver loop: store each solution and keep an in-memory copy
        time_func_dict = {}
        for timestep, (t, u) in enumerate(solver.solve(0, 100, 1.0)):
            saver.update(t, timestep, {"u": u})
            time_func_dict[timestep] = u.copy(True)
        saver.close()

        # Define loader
        loader_spec = LoaderSpec(casedir=str(casedir))
        loader = Loader(loader_spec)
        loaded_mesh = loader.load_mesh()
        loaded_cell_function = loader.load_mesh_function("cell_function")
        loaded_facet_function = loader.load_mesh_function("facet_function")

        # Compare mesh and meshfunctions
        assert np.sum(solver.mesh.coordinates() - loaded_mesh.coordinates()) == 0
        assert np.sum(solver.mesh.cells() - loaded_mesh.cells()) == 0
        assert np.sum(solver.cell_function.array() - loaded_cell_function.array()) == 0
        assert np.sum(solver.facet_function.array() - loaded_facet_function.array()) == 0

        # Compare functions and time checkpoint
        for timestep, (loaded_t, loaded_u) in enumerate(loader.load_checkpoint("u")):
            diff = np.sum(time_func_dict[timestep].vector().get_local() - loaded_u.vector().get_local())
            assert diff == 0, diff

        # Compare functions and time hdf5
        for timestep, (loaded_t, loaded_u) in enumerate(loader.load_field("u")):
            diff = np.sum(time_func_dict[timestep].vector().get_local() - loaded_u.vector().get_local())
            assert diff == 0, diff
Exemple #14
0
def test_mpset():
    """Exercise the Muflon parameter set: validation, add/assign,
    XML round trip through a file, and singleton identity."""
    #set_log_level(DEBUG)

    # Print parameters and their values
    #mpset.show()

    # Check that assignment out of range raises
    # FIXME: dolfin/parameter/Parameter.cpp is broken.
    #        It doesn't raise when assigning a value out of range;
    #        see 921c56cee4f50f016a07f49a5e90f6627c7317a6
    # with pytest.raises(RuntimeError):
    #     mpset["discretization"]["N"] = 1
    # with pytest.raises(RuntimeError):
    #     mpset["model"]["mobility"]["beta"] = 2.0
    with pytest.raises(RuntimeError):
        mpset["model"]["mobility"]["m"] = 0.0

    # Try to add parameter
    mpset.add("foo", "bar")
    assert mpset["foo"] == "bar"

    # Try direct access to a parameter
    mpset["foo"] = "bar_"
    assert mpset["foo"] == "bar_"

    # Try to write parameters to a file (rank 0 checks it exists)
    comm = mpi_comm_world()
    tempdir = "/tmp/pytest-of-fenics"
    fname = tempdir + "/foo.xml"
    mpset.write(comm, fname)
    if MPI.rank(comm) == 0:
        assert os.path.isfile(fname)
    MPI.barrier(comm)  # wait until the file is written

    # Change back value of parameter 'foo'
    mpset["foo"] = "bar"
    assert mpset["foo"] == "bar"

    # Try to read parameters back (the file holds the "bar_" value)
    mpset.read(fname)
    assert mpset["foo"] == "bar_"
    MPI.barrier(comm)  # wait until each process finishes reading
    if MPI.rank(comm) == 0:
        os.remove(fname)
    del fname

    # Check that every other call points to the same object
    assert id(MuflonParameterSet()) == id(mpset)

    # Cleanup
    set_log_level(INFO)
    mpset.refresh()
Exemple #15
0
def setup_general_parameters():
    """
    Parameters to speed up the compiler
    """

    # aggressive optimization flags for the generated C++ code
    cpp_flags = " ".join(["-O3", "-ffast-math", "-march=native"])

    form_compiler = dolfin.parameters["form_compiler"]
    form_compiler["quadrature_degree"] = 4
    form_compiler["representation"] = "uflacs"
    form_compiler["cpp_optimize"] = True
    form_compiler["cpp_optimize_flags"] = cpp_flags

    # dolfin.set_log_active(False)
    dolfin.set_log_level(logging.INFO)
def run_mms0():
    """Convergence study against a manufactured solution.

    Solves on a sequence of refined unit cubes, computes L2 error norms
    and convergence rates, and writes a log-log convergence plot.
    """
    df.set_log_level(20)  # INFO
    N = [2, 4, 8, 16, 32, 40, 50, 60, 70, 80]
    errors = []
    dxs = []

    rho = 0.7

    # load mms
    p_code, m_code, q_code = manufactured_solution(rho)

    for nx in N:
        mesh = df.UnitCubeMesh(nx, nx, nx)
        p_D = df.Expression(p_code, degree=2)
        m_D = df.Expression(m_code, degree=2)
        q_D = df.Expression(q_code, degree=2)

        # load perspect solution
        m = run_perspect(mesh, q_D, p_D, m_D, rho)

        e = df.errornorm(m_D, m, degree_rise=0)

        # NOTE(review): `n` is computed but never used, and the file below
        # stores `m`, not `n` — possibly leftover debug code; confirm intent.
        F = df.FunctionSpace(mesh, 'P', 2)
        n = df.interpolate(m_D, F)
        f = df.File("test.pvd")
        f << m
        errors.append(e)
        dxs.append(mesh.hmin())

    # convergence rates from consecutive error/mesh-size ratios
    errors = np.array(errors)
    dxs = np.array(dxs)
    conv_rates = np.log(errors[1:] / errors[0:-1]) / np.log(
        dxs[1:] / dxs[0:-1])

    print(errors)
    print(dxs)
    print(conv_rates)

    # log-log convergence plot saved to disk
    import matplotlib.pyplot as plt
    plt.loglog(dxs, errors)
    plt.loglog(dxs, errors, marker='o')
    plt.xlabel("dx")
    plt.ylabel("L2 errornorm")
    plt.grid(True, which="both")
    plt.savefig("mms0_convergence.png")
def make_logger(name, level=logging.INFO):
    """Create a console logger for *name* that only emits on MPI rank 0.

    Relies on a module-level `dolfin` import; only `logging` is
    (re-)imported locally.
    """
    import logging

    # cheap stand-in object that only needs a .filter attribute
    mpi_filt = lambda: None

    def log_if_proc0(record):
        # pass records through only on the root MPI process
        if dolfin.MPI.rank(dolfin.mpi_comm_world()) == 0:
            return 1
        else:
            return 0

    mpi_filt.filter = log_if_proc0

    logger = logging.getLogger(name)
    logger.setLevel(level)

    ch = logging.StreamHandler()
    ch.setLevel(0)  # handler passes everything; the logger level filters
    formatter = logging.Formatter(
        "%(message)s")  #'\n%(name)s - %(levelname)s - %(message)s\n'
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    logger.addFilter(mpi_filt)

    # silence dolfin's own logging entirely
    dolfin.set_log_active(False)
    dolfin.set_log_level(dolfin.WARNING)

    # ffc_logger = logging.getLogger('FFC')
    # ffc_logger.setLevel(DEBUG)
    # ffc_logger.addFilter(mpi_filt)

    # ufl_logger = logging.getLogger('UFL')
    # ufl_logger.setLevel(DEBUG)
    # ufl_logger.addFilter(mpi_filt)

    # from haosolver import logger as hao_logger
    # hao_logger.setLevel(DEBUG)

    return logger
Exemple #18
0
def make_logger(name, level=logging.INFO):
    """Return a logger named *name* that only prints on MPI rank 0."""

    def log_if_process0(record):
        # forward records only on the root MPI process
        return 1 if dolfin.MPI.rank(dolfin.mpi_comm_world()) == 0 else 0

    # minimal object carrying the .filter callable expected by addFilter
    mpi_filt = Object()
    mpi_filt.filter = log_if_process0

    logger = logging.getLogger(name)
    logger.setLevel(level)

    console = logging.StreamHandler()
    console.setLevel(0)  # handler passes everything; the logger level filters
    console.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(console)
    logger.addFilter(mpi_filt)

    # silence dolfin's own logging
    dolfin.set_log_active(False)
    dolfin.set_log_level(dolfin.WARNING)

    # rank-filter the form-compiler loggers the same way
    for noisy_name in ('FFC', 'UFL'):
        noisy = logging.getLogger(noisy_name)
        noisy.setLevel(dolfin.WARNING)
        noisy.addFilter(mpi_filt)

    return logger
from dolfin import set_log_level, WARNING, Expression, FunctionSpace, \
    DirichletBC, Function, errornorm, project, plot, interactive, triangle, \
    norm, UnitIntervalMesh, pi, inner, grad, dx, ds, dot, UnitSquareMesh, \
    FacetNormal, interval, RectangleMesh, TrialFunction, TestFunction, \
    assemble, lhs, rhs, MPI

import maelstrom.time_steppers as ts
import sympy as smp
import numpy
import itertools

import helpers

# Turn down the log level to only error messages.
# (WARNING keeps dolfin's otherwise verbose output terse; the
# alternatives below were used during debugging.)
set_log_level(WARNING)
#set_log_level(ERROR)
#set_log_level(0)


def test_generator():
    '''Test order of time discretization.
    '''
    # TODO add test for spatial order
    # Only the Cartesian cos*cos problem is currently enabled; the
    # commented entries are alternative test problems kept for reference.
    problems = [
        #problem_sinsin1d,
        #problem_sinsin,
        problem_coscos_cartesian,
        #problem_coscos_cylindrical,
        #problem_stefanboltzmann
        ]
Exemple #20
0
 def custom_apply(self_, *args, **kwargs):
     """Call the wrapped apply while silencing dolfin's progress output.

     BUG FIX: the original restored the PROGRESS level only on the
     success path, so an exception in original_apply left the log level
     stuck at ERROR; restore it in a finally block instead.
     """
     set_log_level(ERROR)
     try:
         original_apply(*args, **kwargs)
     finally:
         # restore verbosity even if the wrapped call raises
         set_log_level(PROGRESS)
Exemple #21
0
def project(*args, **kwargs):
    """Run dolfin's project while suppressing its progress logging.

    BUG FIX: the original restored the PROGRESS level only on the
    success path, so a failing projection left the log level stuck at
    ERROR; restore it in a finally block instead.

    Returns:
        The projected function returned by dolfin_project.
    """
    set_log_level(ERROR)
    try:
        return dolfin_project(*args, **kwargs)
    finally:
        # restore verbosity even if the projection raises
        set_log_level(PROGRESS)
Exemple #22
0
 def wrapped_f(*args, **kwargs):
     """Call f with a temporary log level, then restore the previous one.

     BUG FIX: the original restored the old level only on the success
     path, so an exception in f left the temporary level active;
     restore it in a finally block instead.
     """
     old_level = get_log_level()
     set_log_level(log_level)
     try:
         f(*args, **kwargs)
     finally:
         # restore the caller's log level even if f raises
         set_log_level(old_level)
def test_stokes_noflow(gamma, Re, nu_interp, postprocessor):
    """Solve the no-flow Stokes benchmark on a hierarchy of meshes.

    For each refinement level the mixed system is assembled and solved
    with MUMPS, error norms against a hydrostatic reference pressure are
    computed, and 1D cuts of the fields are handed to the postprocessor.
    Results of the last level are written as XDMF and pickled.

    Args:
        gamma: surface-tension-like parameter passed to the forms.
        Re: Reynolds number used to scale the viscosities.
        nu_interp: name suffix selecting the viscosity interpolation.
        postprocessor: result collector providing coefficients and plots.
    """
    #set_log_level(WARNING)

    basename = postprocessor.basename
    label = "{}_{}_gamma_{}_Re_{:.0e}".format(basename, nu_interp, gamma, Re)

    # non-dimensionalized viscosities derived from the densities and Re
    c = postprocessor.get_coefficients()
    c[r"\nu_1"] = c[r"\rho_1"] / Re
    c[r"\nu_2"] = c[r"r_visc"] * c[r"\nu_1"]
    c[r"\nu_1"] /= c[r"\rho_0"] * c[r"V_0"] * c[r"L_0"]
    c[r"\nu_2"] /= c[r"\rho_0"] * c[r"V_0"] * c[r"L_0"]

    cc = wrap_coeffs_as_constants(c)
    # NOTE(review): eval on a caller-supplied string; acceptable only
    # because nu_interp comes from the test parametrization, not users.
    nu = eval("nu_" + nu_interp)  # choose viscosity interpolation

    for level in range(1, 4):
        mesh, boundary_markers, pinpoint, periodic_bnd = create_domain(level)
        periodic_bnd = None
        W = create_mixed_space(mesh, periodic_boundary=periodic_bnd)
        bcs = create_bcs(W,
                         boundary_markers,
                         periodic_boundary=periodic_bnd,
                         pinpoint=pinpoint)

        phi = create_fixed_vfract(mesh, c)

        # Create forms
        a, L = create_forms(W, rho(phi, cc), nu(phi, cc), c[r"g_a"],
                            boundary_markers, gamma)

        # Solve problem with a direct solver (MUMPS)
        w = df.Function(W)
        A, b = df.assemble_system(a, L, bcs)
        solver = df.LUSolver("mumps")
        df.PETScOptions.set("fieldsplit_u_mat_mumps_icntl_14", 500)
        solver.set_operator(A)
        try:
            solver.solve(w.vector(), b)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; best-effort skip is kept
            # but only for ordinary exceptions.
            df.warning("Ooops! Something went wrong: {}".format(
                sys.exc_info()[0]))
            continue

        # Pre-process results
        v, p = w.split(True)
        v.rename("v", "velocity")
        p.rename("p", "pressure")

        # velocity divergence and D_22 strain component in a DG space
        V_dv = df.FunctionSpace(mesh, "DG",
                                W.sub(0).ufl_element().degree() - 1)
        div_v = df.project(df.div(v), V_dv)
        div_v.rename("div_v", "velocity-divergence")
        D_22 = df.project(v.sub(1).dx(1), V_dv)

        # hydrostatic reference pressure projected onto a CG4 space
        p_h = create_hydrostatic_pressure(mesh, cc)
        #p_ref = df.project(p_h, W.sub(1).ufl_element())
        p_ref = df.project(
            p_h,
            df.FunctionSpace(mesh, df.FiniteElement("CG", mesh.ufl_cell(), 4)))
        v_errL2, v_errH10, div_errL2, p_errL2 = compute_errornorms(
            v, div_v, p, p_ref)

        # piecewise interpolations need a DG space for the viscosity
        if nu_interp[:2] == "PW":
            V_nu = df.FunctionSpace(mesh, "DG", phi.ufl_element().degree())
        else:
            V_nu = phi.function_space()
        nu_0 = df.project(nu(phi, cc), V_nu)
        T_22 = df.project(2.0 * nu(phi, cc) * v.sub(1).dx(1), V_nu)

        # Save results
        make_cut = postprocessor._make_cut
        rs = dict(ndofs=W.dim(),
                  level=level,
                  h=mesh.hmin(),
                  r_dens=c[r"r_dens"],
                  r_visc=c[r"r_visc"],
                  gamma=gamma,
                  Re=Re,
                  nu_interp=nu_interp)
        rs[r"$v_2$"] = make_cut(v.sub(1))
        rs[r"$p$"] = make_cut(p)
        rs[r"$\phi$"] = make_cut(phi)
        rs[r"$D_{22}$"] = make_cut(D_22)
        rs[r"$T_{22}$"] = make_cut(T_22)
        rs[r"$\nu$"] = make_cut(nu_0)
        rs[r"$||\mathbf{v} - \mathbf{v}_h||_{L^2}$"] = v_errL2
        rs[r"$||\nabla (\mathbf{v} - \mathbf{v}_h)||_{L^2}$"] = v_errH10
        rs[r"$||\mathrm{div} \mathbf{v}_h||_{L^2}$"] = div_errL2
        rs[r"$||\mathbf{p} - \mathbf{p}_h||_{L^2}$"] = p_errL2
        print(label, level)

        # Send to posprocessor
        comm = mesh.mpi_comm()
        rank = df.MPI.rank(comm)
        postprocessor.add_result(rank, rs)

    # Plot results obtained in the last round
    outdir = os.path.join(postprocessor.outdir, "XDMFoutput")
    with df.XDMFFile(os.path.join(outdir, "v.xdmf")) as file:
        file.write(v, 0.0)
    with df.XDMFFile(os.path.join(outdir, "p.xdmf")) as file:
        file.write(p, 0.0)
    with df.XDMFFile(os.path.join(outdir, "phi.xdmf")) as file:
        file.write(phi, 0.0)
    with df.XDMFFile(os.path.join(outdir, "div_v.xdmf")) as file:
        file.write(div_v, 0.0)

    # Save results into a binary file
    filename = "results_{}.pickle".format(label)
    postprocessor.save_results(filename)

    # Flush plots as we now have data for all level values
    postprocessor.pop_items(["level", "h"])
    postprocessor.flush_plots()

    # Cleanup
    df.set_log_level(df.INFO)
    gc.collect()
Exemple #24
0
import dolfin as df
import numpy as np
import perspect
import pulse
import sys
import time
from geometry import Geometry, MarkerFunctions, Microstructure

# communicator shared by the whole parallel run
comm = df.MPI.comm_world
df.set_log_level(40)  # 40 == ERROR on dolfin's scale: hide info/warnings

# 3 x 1 x 0.1 box discretized with 30 x 10 x 1 hexahedral cells
mesh = df.BoxMesh(comm, df.Point(0, 0, 0), df.Point(3, 1, 0.1), 30, 10, 1)


class Base(df.SubDomain):
    """Boundary facets on the plane y == 0."""
    def inside(self, x, on_boundary):
        return on_boundary and df.near(x[1], 0)


class Endo(df.SubDomain):
    """Boundary facets on the plane x == 0."""
    def inside(self, x, on_boundary):
        return on_boundary and df.near(x[0], 0)


class Epi(df.SubDomain):
    """Boundary facets on the plane x == 1."""
    def inside(self, x, on_boundary):
        return on_boundary and df.near(x[0], 1.0)


# (marker id, topological dimension) per named region
markers = {'BASE': (10, 1), 'ENDO': (30, 1), 'EPI': (40, 1), 'NONE': (0, 2)}
# facet function used to tag boundary facets (dim = mesh dim - 1)
ffun = df.MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
import sys; sys.path.append('../')
import numpy as np
import matplotlib; matplotlib.use('agg')
import matplotlib.pyplot as plt
import os
import warnings
warnings.filterwarnings('ignore',category=FutureWarning)

from dolfin import set_log_level; set_log_level(40)

from tensorflow.keras import Sequential, Model, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import *
from tensorflow.keras.regularizers import l1_l2, l2, l1
from tensorflow.keras.callbacks import LearningRateScheduler

from deep_learning.generate_fin_dataset import gen_affine_avg_rom_dataset

def load_dataset_avg_rom(load_prev=True, tr_size=6000, v_size=500, genrand=False):
    '''
    Load dataset where the conductivity is parametrized as a FEniCS function
    and the QoI is the averaged temperature per subfin
    '''
    if os.path.isfile('../data/z_aff_avg_tr.npy') and load_prev:
        z_train = np.load('../data/z_aff_avg_tr.npy')
        errors_train =  np.load('../data/errors_aff_avg_tr.npy')
    else:
        (z_train, errors_train) = gen_affine_avg_rom_dataset(tr_size, genrand=genrand)

    if os.path.isfile('../data/z_aff_avg_eval.npy') and load_prev:
        z_val = np.load('../data/z_aff_avg_eval.npy')
Exemple #26
0
args, remaining = parser.parse_known_args()

# additional output
PETScOptions.set('ksp_view')  # shows info about used PETSc Solver and preconditioner
# if args.solver == 'ipcs1':
    # PETScOptions.set('log_summary')

# Parallel run initialization
comm = mpi_comm_world()
rank = MPI.rank(comm)
# parameters["std_out_all_processes"] = False   # print only rank==0 process output
# parameters["ghost_mode"] = "shared_facet"     # may be needed for operating DG elements in parallel

# allows output using info() for the main process only
if rank == 0 or args.out == 'all':
    set_log_level(INFO)
else:
    set_log_level(INFO + 1)  # one above INFO: info() output is suppressed

info('Running on %d processor(s).' % MPI.size(comm))

if MPI.size(comm) > 1 and args.problem == 'womersley_cylinder':
    info('Womersley cylinder problem is not runnable in parallel due to method of construction of analytic solution,'
         ' which is used to describe boundary conditions.')  # the change of mesh format would be also needed
    exit()

# dynamically import selected solver and problem files
# NOTE(review): exec on command-line-provided module names runs arbitrary
# code; acceptable only because args come from a trusted operator.
exec('from solvers.%s import Solver' % args.solver)
exec('from problems.%s import Problem' % args.problem)

# setup and parse problem- and solver-specific command-line arguments
import dolfin
import numpy as np
import scipy.sparse.linalg as spsla
from time_int_schemes import expand_vp_dolfunc, get_dtstr

import dolfin_navier_scipy.problem_setups as dnsps
import dolfin_navier_scipy.stokes_navier_utils as snu

from prob_defs import FempToProbParams

import matlibplots.conv_plot_utils as cpu

# 60 is above every standard DOLFIN log level -> silence all DOLFIN output
dolfin.set_log_level(60)

samplerate = 1

# experiment setup: discretization level N, Reynolds number, element scheme
# ('CR' = Crouzeix-Raviart -- TODO confirm), end time tE
N, Re, scheme, tE = 3, 60, 'CR', .2
# Ntslist = [2**x for x in range(6, 11)]
Ntslist = [2**x for x in range(8, 10)]  # numbers of time steps to sweep
Ntsref = 2048  # reference (fine) time resolution
tol = 2**(-16)
tolcor = True
method = 1

# data directories; the reference solution may live in a separate path
svdatapathref = 'data/'
svdatapath = 'data/'
# svdatapath = 'edithadata/'
# svdatapath = 'edithadata_scm/'  # with the scaled momentum eqn

femp, stokesmatsc, rhsd_vfrc, rhsd_stbc \
    = dnsps.get_sysmats(problem='cylinderwake', N=N, Re=Re,
Exemple #28
0
    def __init__(self, cparams, dtype_u, dtype_f):
        """
        Initialization routine

        Builds the FEniCS infrastructure for the 1D Gray-Scott
        reaction-diffusion system: mesh, mixed function space, weak-form
        right-hand side and the (component-wise summed) mass matrix.

        Args:
            cparams: custom parameters for the example
            dtype_u: particle data type (will be passed parent class)
            dtype_f: acceleration data type (will be passed parent class)
        """

        # define the Dirichlet boundary
        def Boundary(x, on_boundary):
            return on_boundary

        # these parameters will be used later, so assert their existence
        assert 'c_nvars' in cparams
        assert 't0' in cparams
        assert 'family' in cparams
        assert 'order' in cparams
        assert 'refinements' in cparams

        # add parameters as attributes for further reference
        for k,v in cparams.items():
            setattr(self,k,v)

        # silence FEniCS below WARNING to avoid per-assembly log spam
        df.set_log_level(df.WARNING)

        df.parameters["form_compiler"]["optimize"]     = True
        df.parameters["form_compiler"]["cpp_optimize"] = True

        # set mesh and refinement (for multilevel)
        # mesh = df.UnitIntervalMesh(self.c_nvars)
        # mesh = df.UnitSquareMesh(self.c_nvars[0],self.c_nvars[1])
        mesh = df.IntervalMesh(self.c_nvars,0,100)  # 1D domain [0, 100]
        # mesh = df.RectangleMesh(0.0,0.0,2.0,2.0,self.c_nvars[0],self.c_nvars[1])
        for i in range(self.refinements):
            mesh = df.refine(mesh)

        # self.mesh = mesh
        # define function space for future reference
        V = df.FunctionSpace(mesh, self.family, self.order)
        # mixed space for the two species (V*V is legacy DOLFIN product-space syntax)
        self.V = V*V

        # invoke super init, passing number of dofs, dtype_u and dtype_f
        super(fenics_grayscott,self).__init__(self.V,dtype_u,dtype_f)

        # rhs in weak form: diffusion plus Gray-Scott reaction terms
        # (-w1*w2^2 + feed A*(1-w1) for species 1; +w1*w2^2 - kill B*w2 for species 2)
        self.w = df.Function(self.V)
        q1,q2 = df.TestFunctions(self.V)

        self.w1,self.w2 = df.split(self.w)

        self.F1 = (-self.Du*df.inner(df.nabla_grad(self.w1), df.nabla_grad(q1)) - self.w1*(self.w2**2)*q1 + self.A*(1-self.w1)*q1)*df.dx
        self.F2 = (-self.Dv*df.inner(df.nabla_grad(self.w2), df.nabla_grad(q2)) + self.w1*(self.w2**2)*q2 - self.B*    self.w2*q2)*df.dx
        self.F = self.F1+self.F2

        # mass matrix, assembled per component and summed
        u1,u2 = df.TrialFunctions(self.V)
        a_M = u1*q1*df.dx
        M1 = df.assemble(a_M)
        a_M = u2*q2*df.dx
        M2 = df.assemble(a_M)
        self.M = M1+M2
Exemple #29
0
    def __init__(self, cparams, dtype_u, dtype_f):
        """
        Initialization routine

        Sets up the 1D advection-diffusion problem on a periodic unit
        interval: function space plus diffusion, advection and mass matrices.

        Args:
            cparams: custom parameters for the example
            dtype_u: particle data type (will be passed parent class)
            dtype_f: acceleration data type (will be passed parent class)
        """

        # define the Dirichlet boundary
        # def Boundary(x, on_boundary):
        #     return on_boundary

        # Sub domain for Periodic boundary condition
        class PeriodicBoundary(df.SubDomain):

            # Left boundary is "target domain" G
            def inside(self, x, on_boundary):
                return bool(x[0] < df.DOLFIN_EPS and x[0] > -df.DOLFIN_EPS and on_boundary)

            # Map right boundary (H) to left boundary (G)
            def map(self, x, y):
                y[0] = x[0] - 1.0

        # these parameters will be used later, so assert their existence
        assert 'c_nvars' in cparams
        assert 'nu' in cparams
        assert 't0' in cparams
        assert 'family' in cparams
        assert 'order' in cparams
        assert 'refinements' in cparams

        # add parameters as attributes for further reference
        for k,v in cparams.items():
            setattr(self,k,v)

        # silence FEniCS below WARNING to avoid per-assembly log spam
        df.set_log_level(df.WARNING)

        # set mesh and refinement (for multilevel)
        mesh = df.UnitIntervalMesh(self.c_nvars)
        # mesh = df.UnitSquareMesh(self.c_nvars[0],self.c_nvars[1])
        for i in range(self.refinements):
            mesh = df.refine(mesh)

        # define function space for future reference
        self.V = df.FunctionSpace(mesh, self.family, self.order, constrained_domain=PeriodicBoundary())
        tmp = df.Function(self.V)
        print('DoFs on this level:',len(tmp.vector().array()))

        # invoke super init, passing number of dofs, dtype_u and dtype_f
        super(fenics_adv_diff_1d,self).__init__(self.V,dtype_u,dtype_f)

        u = df.TrialFunction(self.V)
        v = df.TestFunction(self.V)

        # Stiffness term (diffusion), scaled by self.nu
        a_K = -1.0*df.inner(df.nabla_grad(u), self.nu*df.nabla_grad(v))*df.dx

        # Stiffness term (advection); [0] selects the x-derivative component
        a_G = df.inner(self.mu*df.nabla_grad(u)[0], v)*df.dx

        # Mass term
        a_M = u*v*df.dx

        # assemble once; these matrices are constant in time
        self.M = df.assemble(a_M)
        self.K = df.assemble(a_K)
        self.G = df.assemble(a_G)
Exemple #30
0
 def wrapped(*args, **kwargs):
     """Call the wrapped function with the DOLFIN log level temporarily set
     to ``log_level``, restoring the previous level afterwards.

     Returns whatever the wrapped function returns (the original version
     discarded it), and restores the log level even if the call raises.
     """
     old_level = get_log_level()
     set_log_level(log_level)
     try:
         return function(*args, **kwargs)
     finally:
         # restore unconditionally so an exception does not leave the
         # process-global log level altered
         set_log_level(old_level)
Exemple #31
0
__author__ = "Jan Blechta"
__version__ = "2017.1.0.dev0"
__license__ = 'LGPL v3'

# Avoid PETSc being initialized by DOLFIN, which sets some performance
# degrading parameters since e91b4100. This assumes that this module
# is imported before DOLFIN; otherwise the assertion may fail.
# TODO: Test whether it works!
from petsc4py import PETSc
from dolfin import SubSystemsManager
assert not SubSystemsManager.responsible_petsc()
del PETSc, SubSystemsManager  # keep the module namespace clean

# Reduce DOLFIN logging bloat in parallel: bump the threshold by one on all
# non-root ranks so messages at the current level print only on rank 0
from dolfin import set_log_level, get_log_level, MPI, mpi_comm_world
set_log_level(get_log_level()+(0 if MPI.rank(mpi_comm_world())==0 else 1))
del set_log_level, get_log_level

# Parse command-line options
# FIXME: Automatic parsing temporarily commented out as it disallows parsing
#        our own application specific parameters. Do we need it somewhere?
#from dolfin import parameters
#parameters.parse()

# Enable info_{green,red,blue} on rank 0
import ufl
ufl.set_level(ufl.INFO if MPI.rank(mpi_comm_world())==0 else ufl.INFO+1)
del ufl, MPI, mpi_comm_world


from dolfintape.flux_reconstructor import FluxReconstructor
Exemple #32
0
    def __init__(self, cparams, dtype_u, dtype_f):
        """
        Initialization routine

        Sets up the 2D vortex problem on a doubly-periodic unit square:
        function space plus stiffness (diffusion) and mass matrices.

        Args:
            cparams: custom parameters for the example
            dtype_u: particle data type (will be passed parent class)
            dtype_f: acceleration data type (will be passed parent class)
        """

        # define the Dirichlet boundary
        # def Boundary(x, on_boundary):
        #     return on_boundary

        # Sub domain for Periodic boundary condition (periodic in x and y)
        class PeriodicBoundary(df.SubDomain):

            # Left boundary is "target domain" G
            def inside(self, x, on_boundary):
                # return True if on left or bottom boundary AND NOT on one of the two corners (0, 1) and (1, 0)
                return bool((df.near(x[0], 0) or df.near(x[1], 0)) and
                        (not ((df.near(x[0], 0) and df.near(x[1], 1)) or
                                (df.near(x[0], 1) and df.near(x[1], 0)))) and on_boundary)

            # Map right/top boundaries back onto left/bottom ones
            def map(self, x, y):
                if df.near(x[0], 1) and df.near(x[1], 1):
                    # top-right corner maps to the origin
                    y[0] = x[0] - 1.
                    y[1] = x[1] - 1.
                elif df.near(x[0], 1):
                    y[0] = x[0] - 1.
                    y[1] = x[1]
                else:   # near(x[1], 1)
                    y[0] = x[0]
                    y[1] = x[1] - 1.

        # these parameters will be used later, so assert their existence
        assert 'c_nvars' in cparams
        assert 'nu' in cparams
        assert 't0' in cparams
        assert 'family' in cparams
        assert 'order' in cparams
        assert 'refinements' in cparams

        # add parameters as attributes for further reference
        for k,v in cparams.items():
            setattr(self,k,v)

        # silence FEniCS below WARNING to avoid per-assembly log spam
        df.set_log_level(df.WARNING)

        # set mesh and refinement (for multilevel)
        # mesh = df.UnitIntervalMesh(self.c_nvars)
        mesh = df.UnitSquareMesh(self.c_nvars[0],self.c_nvars[1])
        for i in range(self.refinements):
            mesh = df.refine(mesh)

        # keep a copy of the (refined) mesh for later use
        self.mesh = df.Mesh(mesh)

        self.bc = PeriodicBoundary()

        # define function space for future reference
        self.V = df.FunctionSpace(mesh, self.family, self.order, constrained_domain=self.bc)
        tmp = df.Function(self.V)
        print('DoFs on this level:',len(tmp.vector().array()))

        # invoke super init, passing number of dofs, dtype_u and dtype_f
        super(fenics_vortex_2d,self).__init__(self.V,dtype_u,dtype_f)

        w = df.TrialFunction(self.V)
        v = df.TestFunction(self.V)

        # Stiffness term (diffusion)
        a_K = df.inner(df.nabla_grad(w), df.nabla_grad(v))*df.dx

        # Mass term
        a_M = w*v*df.dx

        # assemble once; these matrices are constant in time
        self.M = df.assemble(a_M)
        self.K = df.assemble(a_K)
Exemple #33
0
def test_shear(scheme, nu_interp, postprocessor):
    """Run the stationary two-phase shear-flow benchmark for the given
    viscosity interpolation ``nu_interp`` and hand cut-line profiles of the
    solution fields to ``postprocessor``.

    Only the "SemiDecoupled" scheme is supported (asserted below). Results
    are pickled per ``(basename, nu_interp)`` label at the end.
    """
    #set_log_level(WARNING)
    assert scheme == "SemiDecoupled"

    dt = 0.0  # solve as the stationary problem

    # Read parameters
    scriptdir = os.path.dirname(os.path.realpath(__file__))
    prm_file = os.path.join(scriptdir, "interface-parameters.xml")
    mpset.read(prm_file)

    # Adjust parameters from the postprocessor's coefficient table
    c = postprocessor.get_coefficients()
    mpset["model"]["eps"] = c[r"\eps"]
    mpset["model"]["rho"]["1"] = c[r"\rho_1"]
    mpset["model"]["rho"]["2"] = c[r"\rho_2"]
    mpset["model"]["nu"]["1"] = c[r"\nu_1"]
    mpset["model"]["nu"]["2"] = c[r"\nu_2"]
    mpset["model"]["chq"]["L"] = c[r"L_0"]
    mpset["model"]["chq"]["V"] = c[r"V_0"]
    mpset["model"]["chq"]["rho"] = c[r"\rho_0"]
    mpset["model"]["mobility"]["M0"] = 1.0e+0
    mpset["model"]["sigma"]["12"] = 1.0e-0
    #mpset.show()

    cc = wrap_coeffs_as_constants(c)

    # Names and directories
    basename = postprocessor.basename
    label = "{}_{}".format(basename, nu_interp)
    outdir = postprocessor.outdir

    # NOTE(review): single refinement level only; widen the range for a
    # genuine convergence study
    for level in range(2, 3):
        # Prepare domain and discretization
        mesh, boundary_markers, pinpoint, periodic_bnd = create_domain(
            level, "crossed")
        del periodic_bnd  # periodic boundary unused in this test
        DS, div_v = create_discretization(scheme, mesh, div_projection=True)
        DS.parameters["PTL"] = 1
        DS.setup()

        # Prepare initial and boundary conditions
        load_initial_conditions(DS, c)
        bcs = create_bcs(DS, boundary_markers, pinpoint)  # for Dirichlet
        p_h = create_hydrostatic_pressure(mesh, cc)  # for Neumann

        # Force applied on the top plate; B toggles the exponential ramp-up
        # (disabled for the stationary dt == 0 case)
        B = 0.0 if dt == 0.0 else 1.0
        applied_force = df.Expression(
            ("A*(1.0 - B*exp(-alpha*t))", "0.0"),
            degree=DS.subspace("v", 0).ufl_element().degree(),
            t=0.0,
            alpha=1.0,
            A=1.0,
            B=B)

        # Prepare model
        model = ModelFactory.create("Incompressible", DS, bcs)
        model.parameters["THETA2"] = 0.0
        #model.parameters["rho"]["itype"] = "lin"
        #model.parameters["rho"]["trunc"] = "minmax"
        model.parameters["nu"]["itype"] = nu_interp
        model.parameters["nu"]["trunc"] = "minmax"
        #model.parameters["nu"]["trunc"] = "clamp_hard"
        #model.parameters["mobility"]["cut"] = True

        # Prepare external source term (gravity, nondimensionalized)
        g_a = c[r"g_a"]
        g_a /= mpset["model"]["chq"]["V"]**2.0 * mpset["model"]["chq"]["L"]
        f_src = df.Constant((0.0, -g_a), cell=mesh.ufl_cell(), name="f_src")
        model.load_sources(f_src)

        # Create forms
        forms = model.create_forms()

        # Add boundary integrals
        n = DS.facet_normal()
        ds = df.Measure("ds", subdomain_data=boundary_markers)
        test = DS.test_functions()

        forms["lin"]["rhs"] +=\
          df.inner(applied_force, test["v"]) * ds(3)     # driving force
        forms["lin"]["rhs"] -=\
          p_h * df.inner(n, test["v"]) * (ds(2) + ds(4)) # hydrostatic balance

        # Prepare solver
        solver = SolverFactory.create(model, forms, fix_p=False)

        # Prepare time-stepping algorithm
        comm = mesh.mpi_comm()
        pv = DS.primitive_vars_ctl()
        modulo_factor = 1
        xfields = list(zip(pv["phi"].split(), ("phi", )))
        xfields.append((pv["p"].dolfin_repr(), "p"))
        if scheme == "FullyDecoupled":
            xfields += list(zip(pv["v"].split(), ("v1", "v2")))
        else:
            xfields.append((pv["v"].dolfin_repr(), "v"))
        if div_v is not None:
            xfields.append((div_v, "div_v"))
        functionals = {"t": [], "E_kin": [], "Psi": [], "mean_p": []}
        hook = prepare_hook(model, applied_force, functionals, modulo_factor,
                            div_v)
        logfile = "log_{}.dat".format(label)
        TS = TimeSteppingFactory.create("ConstantTimeStep",
                                        comm,
                                        solver,
                                        hook=hook,
                                        logfile=logfile,
                                        xfields=xfields,
                                        outdir=outdir)
        TS.parameters["xdmf"]["folder"] = "XDMF_{}".format(label)
        TS.parameters["xdmf"]["modulo"] = modulo_factor
        TS.parameters["xdmf"]["flush"] = True
        TS.parameters["xdmf"]["iconds"] = True

        # Time-stepping
        with Timer("Time stepping") as tmr_tstepping:
            result = TS.run(0.0, 2.0, dt, OTD=1)

        # Pre-process results
        v = pv["v"].dolfin_repr()
        p = pv["p"].dolfin_repr()
        phi = pv["phi"].split()[0]

        # difference of phi between the last two time levels (diagnostic)
        w_diff = DS.solution_ctl()[0].copy(True)
        w0 = DS.solution_ptl(0)[0]
        w_diff.vector().axpy(-1.0, w0.vector())
        phi_diff = w_diff.split(True)[0]
        phi_diff.rename("phi_diff", "phi_tstep_difference")
        xdmfdir = \
          os.path.join(outdir, TS.parameters["xdmf"]["folder"], "phi_diff.xdmf")
        with df.XDMFFile(xdmfdir) as file:
            file.write(phi_diff, 0.0)

        # shear-rate component D_12 = 0.5 * dv_1/dy
        D_12 = df.project(0.5 * v.sub(0).dx(1), div_v.function_space())

        # harmonic interpolation is discontinuous -> project onto DG space
        if nu_interp in [
                "har",
        ]:
            deg = DS.subspace("phi", 0).ufl_element().degree()
            V_nu = df.FunctionSpace(mesh, "DG", deg)
        else:
            V_nu = DS.subspace("phi", 0, deepcopy=True)
        nu_0 = df.project(model.coeffs["nu"], V_nu)
        T_12 = df.project(model.coeffs["nu"] * v.sub(0).dx(1), V_nu)  # shear stress

        #p_ref = df.project(p_h, df.FunctionSpace(mesh, W.sub(1).ufl_element()))

        # Save results: 1D cuts of each field of interest
        make_cut = postprocessor._make_cut
        rs = dict(level=level,
                  r_dens=c[r"r_dens"],
                  r_visc=c[r"r_visc"],
                  nu_interp=nu_interp)
        rs[r"$v_1$"] = make_cut(v.sub(0))
        rs[r"$p$"] = make_cut(p)
        rs[r"$\phi$"] = make_cut(phi)
        rs[r"$D_{12}$"] = make_cut(D_12)
        rs[r"$T_{12}$"] = make_cut(T_12)
        rs[r"$\nu$"] = make_cut(nu_0)
        print(label, level)

        # Send to postprocessor
        comm = mesh.mpi_comm()
        rank = df.MPI.rank(comm)
        postprocessor.add_result(rank, rs)

    # Save results into a binary file
    filename = "results_{}.pickle".format(label)
    postprocessor.save_results(filename)

    # Flush plots as we now have data for all level values
    postprocessor.flush_plots()

    # Cleanup: restore default log verbosity and free FEniCS objects
    df.set_log_level(df.INFO)
    gc.collect()
def write_file(f, u, label, t):
    """Write function *u* as checkpoint *label* at time *t* into XDMF file *f*.

    DOLFIN logging is lowered to 40 (ERROR) during the write to suppress
    per-checkpoint log spam, then restored to 30 (WARNING).
    """
    df.set_log_level(40)
    try:
        f.write_checkpoint(u, label, t)
    finally:
        # restore logging even if the write fails, so one bad write does not
        # silence the rest of the run
        df.set_log_level(30)
Exemple #35
0
# mshr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with mshr. If not, see <http://www.gnu.org/licenses/>.
#

import dolfin
import pygmsh
# from mshr import Sphere, Cylinder, CSGCGALDomain3D, generate_mesh

# dolfin.set_log_level(dolfin.TRACE)
dolfin.set_log_active(True)
dolfin.set_log_level(4)  # numeric legacy level; presumably very verbose — TODO confirm

# Define 3D geometry
# sphere = Sphere(dolfin.Point(0, 0, 0), 0.5)
# cone = Cylinder(dolfin.Point(0, 0, 0), dolfin.Point(0, 0, -1), .35, .1)
# geometry = cone + sphere

# OpenCASCADE kernel with a uniform target mesh size of 0.1
geom = pygmsh.opencascade.Geometry(characteristic_length_min=0.1,
                                   characteristic_length_max=0.1)

# boolean union of a ball (radius 0.5) and a cylinder (radius 0.1) along +z
sphere_a = geom.add_ball([0., 0., 0.], 0.5)
cone = geom.add_cylinder([0., 0., 0.], [0., 0., 1.], .1)
figure = geom.boolean_union([sphere_a, cone])
mesh = pygmsh.generate_mesh(geom, verbose=True)

# Geometry surfaces can be saved to off files
Exemple #36
0
    else:
        dx = Measure("dx", domain=mesh)

# Optimization options for the form compiler
    parameters["krylov_solver"]["maximum_iterations"] = 300
    parameters["krylov_solver"]["relative_tolerance"] = 1.0e-10
    parameters["krylov_solver"]["absolute_tolerance"] = 1.0e-10

    # ================= MPI PARAMETERS  ================= #

    # MPI Parameters
    comm = MPI.comm_world
    rank = MPI.rank(comm)

    # Set log level for parallel
    set_log_level(LogLevel.ERROR)
    if rank == 0:
        set_log_level(LogLevel.PROGRESS)

    ff = MeshFunction("size_t", mesh, mesh.geometry().dim() - 1)
    Dirichlet(Lx, Ly, Lpml).mark(ff, 1)

    # Create function spaces
    VE = VectorElement("CG", mesh.ufl_cell(), 1, dim=2)
    TE = TensorElement("DG", mesh.ufl_cell(), 0, shape=(2, 2), symmetry=True)

    W = FunctionSpace(mesh, MixedElement([VE, TE]))
    F = FunctionSpace(mesh, "CG", 2)
    V = W.sub(0).collapse()
    M = W.sub(1).collapse()
import dolfin as df
import numpy as np
import scipy as sp
import scipy.sparse as sps

# self defined modules
import sys
sys.path.append( "../" )
from util.dolfin_gadget import get_dof_coords,vec2fun
from util.sparse_geeklet import *
from util.Eigen import *

# set to warn only once for the same warnings
import warnings
warnings.simplefilter('once')
df.set_log_level(df.ERROR)  # silence DOLFIN output below ERROR

def _get_sqrtm(A,m_name='K',output_petsc=True,SAVE=True):
    """
    Get the root of matrix A.
    """
    if output_petsc and df.has_petsc4py():
        from petsc4py import PETSc
        rtA_f=os.path.join(os.getcwd(),'rt'+m_name+'_petsc.dat')
        try:
            viewer = PETSc.Viewer().createBinary(rtA_f, 'r')
            rtA_petsc=df.PETScMatrix(PETSc.Mat().load(viewer))
            print('Read the root of '+{'K':'kernel','C':'covariance'}[m_name]+' successfully!')
        except:
            import scipy.linalg as spla
            rtA_sps = sps.csr_matrix(spla.sqrtm(A.array()).real)
import math
# from dolfin import *
from dolfin import ERROR, set_log_level, exp, near, File, Expression, tanh, \
                   Constant, SubDomain, VectorFunctionSpace, FunctionSpace, \
                   DirichletBC, Function, split, \
                   MixedFunctionSpace, TestFunctions, inner, sym, grad, div, dx, \
                   RectangleMesh, Point, solve, project, assign, interpolate


'''
This version of the code runs a swarm of simulations of various viscosities
and temperatures per viscosity. Since 5-29, it also includes an adiabatic
temperature variance at the LAB.
'''

set_log_level(ERROR)  # suppress all DOLFIN messages below ERROR

# Physical constants (presumably SI units — TODO confirm against notes)
rho_0 = 3300.
rhomelt = 2900.
darcy = 1e-13  # k over (mu*phi) in darcy
alpha = 2.5e-5
g = 9.81
# not actually used.. (in cian's notes he is using the non dimensional
# kappa in the equations, which here i have defined as 1 (as
# kappa/kappa_0) so i think i can get away with this)
kappa = 1.E-6

b = 12.7
cc = math.log(128)
#Ep  = 0.014057
theta = 0.5
import sys
sys.path.append('../')

import matplotlib
matplotlib.use('macosx')
import time
import numpy as np
import matplotlib.pyplot as plt
import dolfin as dl
dl.set_log_level(40)
from utils import nb

# Tensorflow related imports
from tensorflow.keras.optimizers import Adam

# Scipy imports
from scipy.optimize import minimize, Bounds

# ROMML imports
from fom.forward_solve_exp import Fin
from fom.thermal_fin import get_space
from rom.averaged_affine_ROM import AffineROMFin
from deep_learning.dl_model import load_parametric_model_avg, load_bn_model
from gaussian_field import make_cov_chol

# use randomized observations (vs. the averaged subfin QoI) — flag read later
randobs = True

resolution = 40  # mesh resolution for the thermal-fin function space
V = get_space(resolution)
chol = make_cov_chol(V, length=1.2)  # Cholesky factor of the Gaussian field covariance
Exemple #40
0
#

import numbers
import types
import pytest
from numpy import allclose as float_array_equal, array_equal as integer_array_equal, bmat, hstack as bvec, sort, unique, vstack
from dolfin import assemble, Constant, DOLFIN_EPS, dx, Expression, FiniteElement, Function, FunctionSpace, has_pybind11, inner, MixedElement, project as dolfin_project, set_log_level, SubDomain, TensorElement, TensorFunctionSpace, VectorElement, VectorFunctionSpace
if has_pybind11():
    # pybind11-based DOLFIN builds expose log levels under dolfin.cpp.log
    from dolfin.cpp.la import GenericMatrix, GenericVector
    from dolfin.cpp.log import LogLevel
    ERROR = LogLevel.ERROR
    PROGRESS = LogLevel.PROGRESS
else:
    # legacy (SWIG) builds expose the constants at the top level
    from dolfin import ERROR, GenericMatrix, GenericVector, PROGRESS
from multiphenics import assign, block_assemble, block_assign, BlockDirichletBC, BlockFunction, block_split, BlockTestFunction, BlockTrialFunction, DirichletBC
set_log_level(PROGRESS)  # verbose logging for the test suite

# ================ PYTEST HELPER ================ #
def pytest_mark_slow_for_cartesian_product(generator_1, generator_2):
    for i in generator_1():
        for j in generator_2():
            slow = False
            if hasattr(i, "mark"):
                assert i.name == "slow"
                assert len(i.args) is 1
                i = i.args[0]
                slow = True
            if hasattr(j, "mark"):
                assert j.name == "slow"
                assert len(j.args) is 1
                j = j.args[0]
def read_file(f, u, label, i):
    """Read checkpoint *label* number *i* from XDMF file *f* into *u*.

    DOLFIN logging is lowered to 40 (ERROR) during the read to suppress
    per-checkpoint log spam, then restored to 30 (WARNING).

    Returns:
        The populated function *u* (same object, for call-chaining).
    """
    df.set_log_level(40)
    try:
        f.read_checkpoint(u, label, i)
    finally:
        # restore logging even if the read fails, so one bad read does not
        # silence the rest of the run
        df.set_log_level(30)
    return u
Exemple #42
0
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# 
# mshr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# 
# You should have received a copy of the GNU General Public License
# along with mshr.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import print_function
import dolfin
from mshr import *

dolfin.set_log_level(dolfin.TRACE)  # maximum verbosity from mesh generation

# Define 2D geometry: 5x5 square minus a rectangular slot and two circular holes
domain =   Rectangle(dolfin.Point(0., 0.), dolfin.Point(5., 5.)) \
         - Rectangle(dolfin.Point(2., 1.25), dolfin.Point(3., 1.75)) \
         - Circle(dolfin.Point(1, 4), .25) \
         - Circle(dolfin.Point(4, 4), .25)
# mark two (overlapping) rectangular subdomains for cell markers
domain.set_subdomain(1, Rectangle(dolfin.Point(1., 1.), dolfin.Point(4., 3.)))
domain.set_subdomain(2, Rectangle(dolfin.Point(2., 2.), dolfin.Point(3., 4.)))

dolfin.info("\nVerbose output of 2D geometry:")
dolfin.info(domain, True)

# Generate and plot mesh (45 = mesh resolution)
mesh2d = generate_mesh(domain, 45)
print(mesh2d)
Exemple #43
0
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mshr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mshr.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import print_function
import dolfin
from mshr import *

dolfin.set_log_level(dolfin.TRACE)  # maximum verbosity from mesh generation

# Define 2D geometry: 5x5 square minus a rectangular slot and two circular holes
domain =   Rectangle(dolfin.Point(0., 0.), dolfin.Point(5., 5.)) \
         - Rectangle(dolfin.Point(2., 1.25), dolfin.Point(3., 1.75)) \
         - Circle(dolfin.Point(1, 4), .25) \
         - Circle(dolfin.Point(4, 4), .25)
# mark two (overlapping) rectangular subdomains for cell markers
domain.set_subdomain(1, Rectangle(dolfin.Point(1., 1.), dolfin.Point(4., 3.)))
domain.set_subdomain(2, Rectangle(dolfin.Point(2., 2.), dolfin.Point(3., 4.)))

dolfin.info("\nVerbose output of 2D geometry:")
dolfin.info(domain, True)

# Generate and plot mesh (45 = mesh resolution)
mesh2d = generate_mesh(domain, 45)
print(mesh2d)
Exemple #44
0
from time import clock
from os import makedirs
import math
# from dolfin import *
from dolfin import ERROR, set_log_level, exp, near, File, Expression, tanh, \
                   Constant, SubDomain, VectorFunctionSpace, FunctionSpace, \
                   DirichletBC, Function, split, \
                   MixedFunctionSpace, TestFunctions, inner, sym, grad, div, dx, \
                   RectangleMesh, Point, solve, project, assign, interpolate
'''
This version of the code runs a swarm of simulations of various viscosities
and temperatures per viscosity. Since 5-29, it also includes an adiabatic
temperature variance at the LAB.
'''

set_log_level(ERROR)  # suppress all DOLFIN messages below ERROR

# Physical constants (presumably SI units — TODO confirm against notes)
rho_0 = 3300.
rhomelt = 2900.
darcy = 1e-13  # k over (mu*phi) in darcy
alpha = 2.5e-5
g = 9.81
# not actually used.. (in cian's notes he is using the non dimensional
# kappa in the equations, which here i have defined as 1 (as
# kappa/kappa_0) so i think i can get away with this)
kappa = 1.E-6

b = 12.7
cc = math.log(128)
#Ep  = 0.014057
theta = 0.5