Example #1
                           me,
                           ne,
                           vthe,
                           vd,
                           npc,
                           ext_bnd,
                           pdf=pdf,
                           pdf_max=1 + A)
        # species.append_raw(e, mp, ne, vthi, vd, npc, ext_bnd)

        pop = Population(mesh, bnd)
        load_particles(pop, species)

        V1 = df.FunctionSpace(mesh,
                              'CG',
                              1,
                              constrained_domain=PeriodicBoundary(
                                  Ld, periodic))
        dv_inv = voronoi_volume_approx(V1)
        rho1 = distribute(V1, pop, dv_inv)
        # rhoe = df.project(df.Expression('-0.5*sin(2*PI*x[0]/6.28)',degree=3,PI=np.pi),V1)
        rhoe = df.Expression('-1.0-0.5*sin(2*PI*x[0]/6.28)',
                             degree=3,
                             PI=np.pi)
        errors1.append(df.errornorm(rhoe, rho1))

        V2 = df.FunctionSpace(mesh,
                              'DG',
                              0,
                              constrained_domain=PeriodicBoundary(
                                  Ld, periodic))
Example #2
def _discretize_fenics():

    # assemble system matrices - FEniCS code
    ########################################

    import dolfin as df

    mesh = df.UnitSquareMesh(GRID_INTERVALS, GRID_INTERVALS, 'crossed')
    V = df.FunctionSpace(mesh, 'Lagrange', FENICS_ORDER)
    u = df.TrialFunction(V)
    v = df.TestFunction(V)

    diffusion = df.Expression(
        '(lower0 <= x[0]) * (open0 ? (x[0] < upper0) : (x[0] <= upper0)) *'
        '(lower1 <= x[1]) * (open1 ? (x[1] < upper1) : (x[1] <= upper1))',
        lower0=0.,
        upper0=0.,
        open0=0,
        lower1=0.,
        upper1=0.,
        open1=0,
        element=df.FunctionSpace(mesh, 'DG', 0).ufl_element())

    def assemble_matrix(x, y, nx, ny):
        diffusion.user_parameters['lower0'] = x / nx
        diffusion.user_parameters['lower1'] = y / ny
        diffusion.user_parameters['upper0'] = (x + 1) / nx
        diffusion.user_parameters['upper1'] = (y + 1) / ny
        diffusion.user_parameters['open0'] = (x + 1 == nx)
        diffusion.user_parameters['open1'] = (y + 1 == ny)
        return df.assemble(
            df.inner(diffusion * df.nabla_grad(u), df.nabla_grad(v)) * df.dx)

    mats = [
        assemble_matrix(x, y, XBLOCKS, YBLOCKS) for x in range(XBLOCKS)
        for y in range(YBLOCKS)
    ]
    mat0 = mats[0].copy()
    mat0.zero()
    h1_mat = df.assemble(df.inner(df.nabla_grad(u), df.nabla_grad(v)) * df.dx)

    f = df.Constant(1.) * v * df.dx
    F = df.assemble(f)

    bc = df.DirichletBC(V, 0., df.DomainBoundary())
    for m in mats:
        bc.zero(m)
    bc.apply(mat0)
    bc.apply(h1_mat)
    bc.apply(F)

    # wrap everything as a pyMOR model
    ##################################

    # FEniCS wrappers
    from pymor.bindings.fenics import FenicsVectorSpace, FenicsMatrixOperator, FenicsVisualizer

    # define parameter functionals (same as in pymor.analyticalproblems.thermalblock)
    parameter_functionals = [
        ProjectionParameterFunctional(component_name='diffusion',
                                      component_shape=(YBLOCKS, XBLOCKS),
                                      index=(YBLOCKS - y - 1, x))
        for x in range(XBLOCKS) for y in range(YBLOCKS)
    ]

    # wrap operators
    ops = [FenicsMatrixOperator(mat0, V, V)
           ] + [FenicsMatrixOperator(m, V, V) for m in mats]
    op = LincombOperator(ops, [1.] + parameter_functionals)
    rhs = VectorOperator(FenicsVectorSpace(V).make_array([F]))
    h1_product = FenicsMatrixOperator(h1_mat, V, V, name='h1_0_semi')

    # build model
    visualizer = FenicsVisualizer(FenicsVectorSpace(V))
    parameter_space = CubicParameterSpace(op.parameter_type, 0.1, 1.)
    fom = StationaryModel(op,
                          rhs,
                          products={'h1_0_semi': h1_product},
                          parameter_space=parameter_space,
                          visualizer=visualizer)

    return fom
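
# A minimal usage sketch for the model assembled above (assuming the legacy pyMOR API this
# example targets; not part of the original snippet): sample one diffusion parameter,
# solve the full-order model, and visualize the result.
fom = _discretize_fenics()
mu = fom.parameter_space.sample_randomly(1)[0]   # one random thermal-block parameter
U = fom.solve(mu)                                # high-dimensional FEniCS solution
fom.visualize(U, title='thermal block solution')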
    def __init__(self,
                 problem_params,
                 dtype_u=fenics_mesh,
                 dtype_f=fenics_mesh):
        """
        Initialization routine

        Args:
            problem_params: custom parameters for the example
            dtype_u: FEniCS mesh data type (will be passed to parent class)
            dtype_f: FEniCS mesh data type (will be passed to parent class)
        """

        # define the Dirichlet boundary
        def Boundary(x, on_boundary):
            return on_boundary

        # these parameters will be used later, so assert their existence
        essential_keys = [
            'c_nvars', 't0', 'family', 'order', 'refinements', 'Du', 'Dv', 'A',
            'B'
        ]
        for key in essential_keys:
            if key not in problem_params:
                msg = 'need %s to instantiate problem, only got %s' % (
                    key, str(problem_params.keys()))
                raise ParameterError(msg)
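
        # For illustration, a hypothetical problem_params dict covering the required keys
        # (the values are assumptions, not documented defaults):
        # problem_params = {'c_nvars': 256, 't0': 0.0, 'family': 'CG', 'order': 4,
        #                   'refinements': 1, 'Du': 1.0, 'Dv': 0.01, 'A': 0.09, 'B': 0.086}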

        # set logger level for FFC and dolfin
        df.set_log_level(df.WARNING)
        logging.getLogger('FFC').setLevel(logging.WARNING)

        # set solver and form parameters
        df.parameters["form_compiler"]["optimize"] = True
        df.parameters["form_compiler"]["cpp_optimize"] = True

        # set mesh and refinement (for multilevel)
        mesh = df.IntervalMesh(problem_params['c_nvars'], 0, 100)
        for i in range(problem_params['refinements']):
            mesh = df.refine(mesh)

        # define function space for future reference
        V = df.FunctionSpace(mesh, problem_params['family'],
                             problem_params['order'])
        self.V = V * V

        # invoke super init, passing number of dofs, dtype_u and dtype_f
        super(fenics_grayscott, self).__init__(self.V, dtype_u, dtype_f,
                                               problem_params)

        # rhs in weak form
        self.w = df.Function(self.V)
        q1, q2 = df.TestFunctions(self.V)

        self.w1, self.w2 = df.split(self.w)

        self.F1 = (-self.params.Du * df.inner(df.nabla_grad(self.w1),
                                              df.nabla_grad(q1)) - self.w1 *
                   (self.w2**2) * q1 + self.params.A *
                   (1 - self.w1) * q1) * df.dx
        self.F2 = (-self.params.Dv * df.inner(df.nabla_grad(self.w2),
                                              df.nabla_grad(q2)) + self.w1 *
                   (self.w2**2) * q2 - self.params.B * self.w2 * q2) * df.dx
        self.F = self.F1 + self.F2

        # mass matrix
        u1, u2 = df.TrialFunctions(self.V)
        a_M = u1 * q1 * df.dx
        M1 = df.assemble(a_M)
        a_M = u2 * q2 * df.dx
        M2 = df.assemble(a_M)
        self.M = M1 + M2
Example #4
    def test_superposition(self):
        radius = 0.125
        mesh_resolution = 101
        mesh = generate_mesh_with_cicular_subdomain(mesh_resolution, radius,
                                                    False)

        degree = 1
        P1 = dl.FiniteElement('Lagrange', mesh.ufl_cell(), degree)
        element = dl.MixedElement([P1, P1])
        function_space = dl.FunctionSpace(mesh, element)

        frequency = 400 * 3
        omega = 2. * np.pi * frequency
        sound_speed = np.array([343.4, 6320, 60])
        gamma = 8.4e-4
        speaker_amplitude = .1

        kappas = omega / sound_speed
        cr1, cr2 = 0.5, 0.5 - radius / np.sqrt(8)
        kappa = dl.Expression(
            '((x[0]-c1)*(x[0]-c1)+(x[1]-c1)*(x[1]-c1) >= r*r + tol) ? k_0 : ((x[0]-c2)*(x[0]-c2)+(x[1]-c2)*(x[1]-c2)>=r*r/4+tol ? k_1 : k_2)',
            degree=0,
            tol=1e-14,
            k_0=kappas[0],
            k_1=kappas[1],
            k_2=kappas[2],
            r=radius,
            c1=cr1,
            c2=cr2)

        forcing = [dl.Constant(0), dl.Constant(0)]

        alpha = kappa * dl.Constant(gamma)
        beta = dl.Constant(1.204 * omega * speaker_amplitude)
        bndry_obj = get_2d_square_mesh_boundary_segments()

        def get_boundary_conditions():
            boundary_conditions = [[
                'neumann', bndry_obj[ii], [dl.Constant(0), beta]
            ] for ii in [1, 4, 7, 10]]
            boundary_conditions += [[
                'robin', bndry_obj[ii], [dl.Constant(0),
                                         dl.Constant(0)],
                [dl.Constant(0), alpha]
            ] for ii in [0, 2, 3, 5, 6, 8, 9, 11]]
            tmp = [None for ii in range(len(boundary_conditions))]
            for ii, jj in enumerate([1, 4, 7, 10]):
                tmp[jj] = boundary_conditions[ii]
            for ii, jj in enumerate([0, 2, 3, 5, 6, 8, 9, 11]):
                tmp[jj] = boundary_conditions[ii + 4]
            boundary_conditions = tmp
            return boundary_conditions

        boundary_conditions = get_boundary_conditions()
        sol = run_model(kappa, forcing, function_space, boundary_conditions)

        sols = []
        for jj in [1, 4, 7, 10]:
            boundary_conditions = get_boundary_conditions()
            for kk in range(12):
                if jj != kk:
                    boundary_conditions[kk][2][1] = dl.Constant(0)
            pii = run_model(kappa, forcing, function_space,
                            boundary_conditions)
            sols.append(pii)

        # for jj in [0,2,3,5,6,8,9,11]:
        #     boundary_conditions = get_boundary_conditions()
        #     for kk in range(12):
        #         if jj!=kk:
        #             boundary_conditions[kk][2][1]=dl.Constant(0)
        #     pii=run_model(kappa,forcing,function_space,boundary_conditions)
        #     sols.append(pii)

        boundary_conditions = get_boundary_conditions()
        for kk in range(12):
            if kk not in [0, 2, 3, 5, 6, 8, 9, 11]:
                boundary_conditions[kk][2][1] = dl.Constant(0)
        pii = run_model(kappa, forcing, function_space, boundary_conditions)
        sols.append(pii)

        superposition_sol = sols[0]
        for ii in range(1, len(sols)):
            superposition_sol += sols[ii]
        superposition_sol = dl.project(superposition_sol, function_space)

        pr, pi = sol.split()
        pr_super, pi_super = superposition_sol.split()
        # print('error',dl.errornorm(pr_super,pr))
        # print('error',dl.errornorm(pi_super,pi))
        assert dl.errornorm(pr_super, pr) < 1e-10
        assert dl.errornorm(pi_super, pi) < 1e-10
    def __init__(
        self,
        time: df.Constant,
        mesh: df.Mesh,
        intracellular_conductivity: Dict[int, df.Expression],
        extracellular_conductivity: Dict[int, df.Expression],
        cell_function: df.MeshFunction,
        cell_tags: CellTags,
        interface_function: df.MeshFunction,
        interface_tags: InterfaceTags,
        parameters: CoupledBidomainParameters,
        neumann_boundary_condition: Dict[int, df.Expression] = None,
        v_prev: df.Function = None,
        surface_to_volume_factor: Union[float, df.Constant] = None,
        membrane_capacitance: Union[float, df.Constant] = None,
    ) -> None:
        self._time = time
        self._mesh = mesh
        self._parameters = parameters

        # Strip none from cell tags
        cell_tags = set(cell_tags) - {None}

        if surface_to_volume_factor is None:
            surface_to_volume_factor = df.Constant(1)

        if membrane_capacitance is None:
            membrane_capacitance = df.Constant(1)

        # Set Chi*Cm
        self._chi_cm = df.Constant(surface_to_volume_factor)*df.Constant(membrane_capacitance)

        if set(intracellular_conductivity.keys()) != set(extracellular_conductivity.keys()):
            raise ValueError("Intracellular and extracellular conductivities do not have matching keys.")
        if not set(cell_tags) == set(intracellular_conductivity.keys()):
            raise ValueError("Cell tags do not match conductivity keys")
        self._intracellular_conductivity = intracellular_conductivity
        self._extracellular_conductivity = extracellular_conductivity

        # Check cell tags
        _cell_function_tags = set(cell_function.array())
        if set(cell_tags) != _cell_function_tags:       # If not equal
            msg = "Mismatching cell tags. Expected {}, got {}"
            raise ValueError(msg.format(set(cell_tags), _cell_function_tags))
        self._cell_tags = set(cell_tags)
        self._cell_function = cell_function

        restrict_tags = self._parameters.restrict_tags
        if not set(restrict_tags) <= self._cell_tags:
            msg = "restrict tags ({}) is not a subset of cell tags ({})"
            raise ValueError(msg.format(set(restrict_tags), self._cell_tags))
        self._restrict_tags = set(restrict_tags)

        # Check interface tags
        _interface_function_tags = {*set(interface_function.array()), None}
        if not set(interface_tags) <= _interface_function_tags:     # if not subset of
            msg = "Mismatching interface tags. Expected {}, got {}"
            raise ValueError(msg.format(set(interface_tags), _interface_function_tags))
        self._interface_function = interface_function
        self._interface_tags = interface_tags

        # Set up function spaces
        self._transmembrane_function_space = df.FunctionSpace(self._mesh, "CG", 1)
        transmembrane_element = df.FiniteElement("CG", self._mesh.ufl_cell(), 1)
        extracellular_element = df.FiniteElement("CG", self._mesh.ufl_cell(), 1)

        if neumann_boundary_condition is None:
            self._neumann_bc: Dict[int, df.Expression] = dict()
        else:
            self._neumann_bc = neumann_boundary_condition

        if self._parameters.linear_solver_type == "direct":
            lagrange_element = df.FiniteElement("R", self._mesh.ufl_cell(), 0)
            mixed_element = df.MixedElement((transmembrane_element, extracellular_element, lagrange_element))
        else:
            mixed_element = df.MixedElement((transmembrane_element, extracellular_element))
        self._VUR = df.FunctionSpace(mesh, mixed_element)    # TODO: rename to something sensible

        # Set-up solution fields:
        if v_prev is None:
            self._merger = df.FunctionAssigner(self._transmembrane_function_space, self._VUR.sub(0))
            self._v_prev = df.Function(self._transmembrane_function_space, name="v_prev")
        else:
            self._merger = None
            self._v_prev = v_prev
        self._vur = df.Function(self._VUR, name="vur")        # TODO: Give sensible name

        # For normalising rhs_vector. TODO: Unsure about this. Check the nullspace from cbcbeat
        self._extracellular_dofs = np.asarray(self._VUR.sub(1).dofmap().dofs())

        # Mark first timestep
        self._timestep: df.Constant = None
Example #6
    def setUp(self):
        self.mesh = dolfin.UnitCube(3, 3, 3)
        self.V = dolfin.FunctionSpace(self.mesh, "Nedelec 1st kind H(curl)", 2)
        self.u = dolfin.interpolate(dolfin.Expression(('0', '0', '2*x[2]')),
                                    self.V)
        self.DUT = VoltageAlongLine(self.u)
Example #7
def _inputsort(obj):
    u = None
    mesh = None
    if not utils.isSequence(obj):
        obj = [obj]

    for ob in obj:
        inputtype = str(type(ob))
        #printc('inputtype is', inputtype, c=2)

        if "vtk" in inputtype:  # skip vtk objects, will be added later
            continue

        if "dolfin" in inputtype:
            if "MeshFunction" in inputtype:
                mesh = ob.mesh()

                import dolfin
                V = dolfin.FunctionSpace(mesh, "CG", 1)
                u = dolfin.Function(V)
                #print(mesh.cells())
                #print(len(mesh.cells()), len(mesh.coordinates()), len(ob.array()))
                #print(mesh.num_cells())
                #print(u.vector()[:])

                v2d = dolfin.vertex_to_dof_map(V)
                u.vector()[v2d] = ob.array()


#                r = ob.dim()
#                    if r == 0:
#                        V = dolfin.FunctionSpace(mesh, "CG", 1)
#                    elif r == 1:
#                        V = dolfin.VectorFunctionSpace(mesh, "CG", 1, dim=r)
#                    else:
#                        V = dolfin.TensorFunctionSpace(mesh, "CG", 1, shape=(r,r))
#                except:
#                    printc('~times Sorry could not deal with your MeshFunction', c=1)
#                    return None
#                tdim = mesh.topology().dim()
#                d = ob.dim()
#                if tdim == 2 and d == 2:
#                    import matplotlib.tri as tri
#                    xy = mesh.coordinates()
#                    mh = buildPolyData(xy, mesh.cells())
#                    show(mh)
#                    print( tri.Triangulation(xy[:, 0], xy[:, 1], mesh.cells()) )
#                    exit()

            elif "Function" in inputtype or "Expression" in inputtype:
                u = ob
            elif "Mesh" in inputtype:
                mesh = ob

        if "str" in inputtype:
            import dolfin
            mesh = dolfin.Mesh(ob)

    if u and not mesh and hasattr(u, "function_space"):
        V = u.function_space()
        if V:
            mesh = V.mesh()
    if u and not mesh and hasattr(u, "mesh"):
        mesh = u.mesh()

    if not mesh:
        printc("~times Error: dolfin mesh is not defined.", c=1)
        raise RuntimeError()

    #printc('------------------------------------')
    #printc('mesh.topology dim=', mesh.topology().dim())
    #printc('mesh.geometry dim=', mesh.geometry().dim())
    #if u: printc('u.value_rank()', u.value_rank())
    return (mesh, u)
Example #8
def test_material(unitcube_geometry, Material, active_model, isochoric):

    compressible_model = "incompressible"

    if active_model == "active_stress":
        active_value = 20.0
        activation = dolfin.Constant(1.0)
        T_ref = active_value

        def dirichlet_bc(W):
            V = W if W.sub(0).num_sub_spaces() == 0 else W.sub(0)
            return dolfin.DirichletBC(V, dolfin.Constant((0.0, 0.0, 0.0)),
                                      fixed)

    else:
        activation = dolfin.Constant(0.0)
        active_value = 0.0
        T_ref = 1.0

        def dirichlet_bc(W):
            V = W if W.sub(0).num_sub_spaces() == 0 else W.sub(0)
            return dolfin.DirichletBC(V.sub(0), dolfin.Constant(0.0), fixed,
                                      'pointwise')

    neumann_bc = NeumannBC(traction=dolfin.Constant(-active_value),
                           marker=free_marker)

    bcs = BoundaryConditions(dirichlet=(dirichlet_bc, ),
                             neumann=(neumann_bc, ))

    matparams = Material.default_parameters()

    material = Material(activation=activation,
                        parameters=matparams,
                        T_ref=T_ref,
                        isochoric=isochoric,
                        compressible_model=compressible_model,
                        active_model=active_model)

    assert material.is_isochoric == isochoric

    problem = MechanicsProblem(unitcube_geometry, material, bcs)
    problem.solve()

    u, p = problem.state.split(deepcopy=True)

    print(material.name)

    if active_model == 'active_strain':

        tol = 1e-4

        if not isochoric:
            if material.name in [
                    "guccione", "linear_elastic", "saint_venant_kirchhoff"
            ]:
                assert all(abs(p.vector().get_local()) < tol)
            elif material.name == "holzapfel_ogden":

                assert all(
                    abs(p.vector().get_local() -
                        material.parameters["a"]) < tol)
            elif material.name == "neo_hookean":
                assert all(
                    abs(p.vector().get_local() -
                        material.parameters["mu"]) < tol)
            else:
                raise TypeError("Unkown material {}".format(material.name))

        else:
            assert all(abs(p.vector().get_local()) < tol)

    else:

        F = kinematics.DeformationGradient(u)
        T = material.CauchyStress(F, p)

        V_dg = dolfin.FunctionSpace(unitcube_geometry.mesh, "DG", 1)

        # Fiber on current geometry
        f = F * unitcube_geometry.f0

        # Fiber stress
        Tf = dolfin.inner(T * f / f**2, f)
        Tf_dg = dolfin.project(Tf, V_dg)

        tol = 1e-10

        assert all(abs(Tf_dg.vector().get_local() - active_value) < tol)
        assert all(abs(u.vector().get_local()) < tol)

        if not isochoric:
            if material.name in [
                    "guccione", "linear_elastic", "saint_venant_kirchhoff"
            ]:
                assert all(abs(p.vector().get_local()) < tol)
            elif material.name == "holzapfel_ogden":

                assert all(
                    abs(p.vector().get_local() -
                        material.parameters["a"]) < tol)
            elif material.name == "neo_hookean":
                assert all(
                    abs(p.vector().get_local() -
                        material.parameters["mu"]) < tol)
            else:
                raise TypeError("Unkown material {}".format(material.name))

        else:
            assert all(abs(p.vector().get_local()) < tol)
Example #9
    def Rsolver(self):
        return self.prior.Rsolver

    def applyRaa(self, da, out):
        self.Raa.mult(da, out)

if __name__ == "__main__":
    dl.set_log_active(False)
    sep = "\n" + "#" * 80 + "\n"
    print(sep, "Set up the mesh and finite element spaces", sep)
    ndim = 2
    nx = 64
    ny = 64
    mesh = dl.UnitSquareMesh(nx, ny)
    Vh2 = dl.FunctionSpace(mesh, 'Lagrange', 2)
    Vh1 = dl.FunctionSpace(mesh, 'Lagrange', 1)
    Vh = [Vh2, Vh1, Vh2]
    print "Number of dofs: STATE={0}, PARAMETER={1}, ADJOINT={2}".format(
        Vh[STATE].dim(), Vh[PARAMETER].dim(), Vh[ADJOINT].dim())

    print(sep, "Set up the location of observation, Prior Information, and model", sep)
    ntargets = 300
    np.random.seed(seed=1)
    targets = np.random.uniform(0.1, 0.9, [ntargets, ndim])
    print "Number of observation points: {0}".format(ntargets)

    gamma = .1
    delta = .5

    anis_diff = dl.Expression(code_AnisTensor2D)
Example #10
        def visualize(self,
                      U,
                      m,
                      title='',
                      legend=None,
                      filename=None,
                      block=True,
                      separate_colorbars=True):
            """Visualize the provided data.

            Parameters
            ----------
            U
                |VectorArray| of the data to visualize (length must be 1). Alternatively,
                a tuple of |VectorArrays| which will be visualized in separate windows.
                If `filename` is specified, only one |VectorArray| may be provided which,
                however, is allowed to contain multiple vectors that will be interpreted
                as a time series.
            m
                Filled in by :meth:`pymor.models.interface.Model.visualize` (ignored).
            title
                Title of the plot.
            legend
                Description of the data that is plotted. If `U` is a tuple of |VectorArrays|,
                `legend` has to be a tuple of the same length.
            filename
                If specified, write the data to that file. `filename` needs to have an extension
                supported by FEniCS (e.g. `.pvd`).
            separate_colorbars
                If `True`, use separate colorbars for each subplot.
            block
                If `True`, block execution until the plot window is closed.
            """
            if filename:
                assert not isinstance(U, tuple)
                assert U in self.space
                f = df.File(filename)
                coarse_function = df.Function(self.space.V)
                if self.mesh_refinements:
                    mesh = self.space.V.mesh()
                    for _ in range(self.mesh_refinements):
                        mesh = df.refine(mesh)
                    V_fine = df.FunctionSpace(mesh, self.space.V.ufl_element())
                    function = df.Function(V_fine)
                else:
                    function = coarse_function
                if legend:
                    function.rename(legend, legend)
                for u in U._list:
                    if u.imag_part is not None:
                        raise NotImplementedError
                    coarse_function.vector()[:] = u.real_part.impl
                    if self.mesh_refinements:
                        function.vector()[:] = df.interpolate(
                            coarse_function, V_fine).vector()
                    f << function
            else:
                from matplotlib import pyplot as plt

                assert U in self.space and len(U) == 1 \
                    or (isinstance(U, tuple) and all(u in self.space for u in U) and all(len(u) == 1 for u in U))
                if not isinstance(U, tuple):
                    U = (U, )
                if isinstance(legend, str):
                    legend = (legend, )
                assert legend is None or len(legend) == len(U)

                if not separate_colorbars:
                    vmin = np.inf
                    vmax = -np.inf
                    for u in U:
                        vec = u._list[0].real_part.impl
                        vmin = min(vmin, vec.min())
                        vmax = max(vmax, vec.max())

                for i, u in enumerate(U):
                    if u._list[0].imag_part is not None:
                        raise NotImplementedError
                    function = df.Function(self.space.V)
                    function.vector()[:] = u._list[0].real_part.impl
                    if legend:
                        tit = title + ' -- ' if title else ''
                        tit += legend[i]
                    else:
                        tit = title
                    if separate_colorbars:
                        plt.figure()
                        df.plot(function, title=tit)
                    else:
                        plt.figure()
                        df.plot(function,
                                title=tit,
                                range_min=vmin,
                                range_max=vmax)
                plt.show(block=block)
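
A hypothetical call of this visualizer (a sketch; `viz`, `U_a` and `U_b` are assumed names for a FenicsVisualizer instance and pyMOR solution arrays):
# viz.visualize(U_a, None, title='pressure', filename='pressure.pvd')                # write a ParaView file
# viz.visualize((U_a, U_b), None, legend=('real', 'imag'), separate_colorbars=True)  # two matplotlib windows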
Example #11
def _discretize_fenics():

    # assemble system matrices - FEniCS code
    ########################################

    import dolfin as df

    # discrete function space
    mesh = df.UnitSquareMesh(GRID_INTERVALS, GRID_INTERVALS, 'crossed')
    V = df.FunctionSpace(mesh, 'Lagrange', FENICS_ORDER)
    u = df.TrialFunction(V)
    v = df.TestFunction(V)

    # data functions
    bottom_diffusion = df.Expression(
        '(x[0] > 0.45) * (x[0] < 0.55) * (x[1] < 0.7) * 1.',
        element=df.FunctionSpace(mesh, 'DG', 0).ufl_element())
    top_diffusion = df.Expression(
        '(x[0] > 0.35) * (x[0] < 0.40) * (x[1] > 0.3) * 1. +' +
        '(x[0] > 0.60) * (x[0] < 0.65) * (x[1] > 0.3) * 1.',
        element=df.FunctionSpace(mesh, 'DG', 0).ufl_element())
    initial_data = df.Expression(
        '(x[0] > 0.45) * (x[0] < 0.55) * (x[1] < 0.7) * 10.',
        element=df.FunctionSpace(mesh, 'DG', 0).ufl_element())
    neumann_data = df.Expression('(x[0] > 0.45) * (x[0] < 0.55) * 1000.',
                                 element=df.FunctionSpace(mesh, 'DG',
                                                          0).ufl_element())

    # assemble matrices and vectors
    l2_mat = df.assemble(df.inner(u, v) * df.dx)
    l2_0_mat = l2_mat.copy()
    h1_mat = df.assemble(df.inner(df.nabla_grad(u), df.nabla_grad(v)) * df.dx)
    h1_0_mat = h1_mat.copy()
    mat0 = h1_mat.copy()
    mat0.zero()
    bottom_mat = df.assemble(bottom_diffusion *
                             df.inner(df.nabla_grad(u), df.nabla_grad(v)) *
                             df.dx)
    top_mat = df.assemble(top_diffusion *
                          df.inner(df.nabla_grad(u), df.nabla_grad(v)) * df.dx)
    u0 = df.project(initial_data, V).vector()
    f = df.assemble(neumann_data * v * df.ds)

    # boundary treatment
    def dirichlet_boundary(x, on_boundary):
        tol = 1e-14
        return on_boundary and (abs(x[0]) < tol or abs(x[0] - 1) < tol
                                or abs(x[1] - 1) < tol)

    bc = df.DirichletBC(V, df.Constant(0.), dirichlet_boundary)
    bc.apply(l2_0_mat)
    bc.apply(h1_0_mat)
    bc.apply(mat0)
    bc.zero(bottom_mat)
    bc.zero(top_mat)
    bc.apply(f)
    bc.apply(u0)

    # wrap everything as a pyMOR discretization
    ###########################################

    from pymor.bindings.fenics import FenicsVectorSpace, FenicsMatrixOperator, FenicsVisualizer

    d = InstationaryDiscretization(
        T=1.,
        initial_data=FenicsVectorSpace(V).make_array([u0]),
        operator=LincombOperator([
            FenicsMatrixOperator(mat0, V, V),
            FenicsMatrixOperator(h1_0_mat, V, V),
            FenicsMatrixOperator(bottom_mat, V, V),
            FenicsMatrixOperator(top_mat, V, V)
        ], [
            1., 1., 100. - 1.,
            ExpressionParameterFunctional('top - 1.', {'top': 0})
        ]),
        rhs=VectorFunctional(FenicsVectorSpace(V).make_array([f])),
        mass=FenicsMatrixOperator(l2_0_mat, V, V, name='l2'),
        products={
            'l2': FenicsMatrixOperator(l2_mat, V, V, name='l2'),
            'l2_0': FenicsMatrixOperator(l2_0_mat, V, V, name='l2_0'),
            'h1': FenicsMatrixOperator(h1_mat, V, V, name='h1'),
            'h1_0_semi': FenicsMatrixOperator(h1_0_mat, V, V, name='h1_0_semi')
        },
        time_stepper=ImplicitEulerTimeStepper(nt=NT),
        parameter_space=CubicParameterSpace({'top': 0},
                                            minimum=1,
                                            maximum=100.),
        visualizer=FenicsVisualizer(FenicsVectorSpace(V)))

    return d
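
# A minimal usage sketch (assuming the legacy pyMOR InstationaryDiscretization API used above;
# not part of the original snippet): solve the time-dependent problem for one value of the
# 'top' parameter and visualize the trajectory.
d = _discretize_fenics()
U = d.solve({'top': 10.})        # trajectory computed with NT implicit Euler steps
d.visualize(U, title='top = 10')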
Example #12
# Define the traction boundary
sub_domains = df.MeshFunction('size_t', mesh, mesh.topology().dim() - 1)
upper_edge = TractionBoundary()
upper_edge.mark(sub_domains, 6)
dss = df.Measure('ds')(subdomain_data=sub_domains)
f = df.Constant((0, -1. / 4 ))

'''
3. Setup the PDE problem
'''
# PDE problem
pde_problem = PDEProblem(mesh)

# Add input to the PDE problem:
# name = 'density', function = density_function (function is the solution vector here)
density_function_space = df.FunctionSpace(mesh, 'DG', 0)
density_function = df.Function(density_function_space)
pde_problem.add_input('density', density_function)

# Add states to the PDE problem (line 58):
# name = 'displacements', function = displacements_function (function is the solution vector here)
# residual_form = get_residual_form(u, v, rho_e) from atomics.pdes.thermo_mechanical_uniform_temp
# *inputs = density (can be multiple, here 'density' is the only input)

displacements_function_space = df.VectorFunctionSpace(mesh, 'Lagrange', 1)
displacements_function = df.Function(displacements_function_space)
v = df.TestFunction(displacements_function_space)
method='SIMP'
residual_form = get_residual_form(
    displacements_function, 
    v, 
        ocean[f] = 2

#########################################################
#################  FUNCTION SPACES  #####################
#########################################################

# Define finite element function spaces.  Here we use CG1 for
# velocity computations, DG0 (aka finite volume) for mass cons,
# and "Real" (aka constant) elements for the length ODE

E_Q = df.FiniteElement("CG", mesh.ufl_cell(), 1)
E_dg = df.FiniteElement("DG", mesh.ufl_cell(), 0)
E_R = df.FiniteElement("R", mesh.ufl_cell(), 0)
E_V = df.MixedElement(E_Q, E_Q, E_Q, E_Q, E_dg, E_R)

Q = df.FunctionSpace(mesh, E_Q)
Q_dg = df.FunctionSpace(mesh, E_dg)
Q_R = df.FunctionSpace(mesh, E_R)
V = df.FunctionSpace(mesh, E_V)

# For moving data between vector functions and scalar functions
assigner_inv = df.FunctionAssigner([Q, Q, Q, Q, Q_dg, Q_R], V)
assigner = df.FunctionAssigner(V, [Q, Q, Q, Q, Q_dg, Q_R])

#########################################################
#################  FUNCTIONS  ###########################
#########################################################

# The zero function
ze = df.Function(Q)
Example #14
    # test
    adif.test(1e-8)
    # obtain MAP
    map_v = adif.get_MAP(rand_init=False)
    fig=dl.plot(vector2Function(map_v,adif.pde.Vh[PARAMETER]))
    plt.colorbar(fig)
#     plt.show()
    plt.savefig(os.path.join(os.getcwd(),'properties/map.png'),bbox_inches='tight')
    # conversion
    v = adif.prior.sample()
    im = adif.vec2img(v)
    v1 = adif.img2vec(im)
    fig,axes = plt.subplots(nrows=1,ncols=3,sharex=True,sharey=False,figsize=(16,5))
    sub_figs=[None]*3
    plt.axes(axes.flat[0])
    sub_figs[0]=dl.plot(vector2Function(v,adif.pde.Vh[STATE]))
    axes.flat[0].axis('equal')
    axes.flat[0].set_title(r'Original Image')
    plt.axes(axes.flat[1])
    sub_figs[1]=plt.imshow(im,origin='lower',extent=[0,1,0,1])
    axes.flat[1].axis('equal')
    axes.flat[1].set_title(r'Transformed Image')
    plt.axes(axes.flat[2])
#     sub_figs[2]=dl.plot(vector2Function(v1,adif.pde.Vh[STATE]))
    sub_figs[2]=dl.plot(vector2Function(v1,dl.FunctionSpace(adif.mesh,'Lagrange',1)))
    axes.flat[2].axis('equal')
    axes.flat[2].set_title(r'Reconstructed Image')
    from util.common_colorbar import common_colorbar
    fig=common_colorbar(fig,axes,sub_figs)
#     plt.show()
    plt.savefig(os.path.join(os.getcwd(),'properties/conversion.png'),bbox_inches='tight')
Example #15
lam = c0 / freq
l = lam / 4  # Dipole length
# l = 2.3*lam                               # Dipole length
I = 1.0  # Dipole current
source_direction = np.array([0, 0, 1.])  # Source orientation
source_centre = np.array([0, 0, 0.])  # Position of the source
source_endpoints = np.array(
    [-source_direction * l / 2, source_direction * l / 2]) + source_centre

## Discretisation settings
order = 2
domain_size = np.array([lam] * 3) / 2
max_edge_len = lam / 3
mesh = get_centred_cube(domain_size, max_edge_len)
## Implementation
V = dolfin.FunctionSpace(mesh, "Nedelec 1st kind H(curl)", order)
dipole_source = FillamentCurrentSource()
dipole_source.no_integration_points = 20
dipole_source.set_function_space(V)
dipole_source.set_source_endpoints(source_endpoints)
dipole_source.set_value(I)
dofnos, rhs_contribs = dipole_source.get_contribution()
sortind = np.argsort(dofnos)
dofnos = dofnos[sortind].copy()
rhs_contribs = rhs_contribs[sortind].copy()

pickle.dump(
    dict(order=order,
         I=I,
         source_endpoints=source_endpoints,
         domain_size=domain_size,
    def test_constrained_newton_energy_solver(self):
        L, nelem = 1, 201

        mesh = dl.IntervalMesh(nelem, -L, L)
        Vh = dl.FunctionSpace(mesh, "CG", 2)

        forcing = dl.Constant(1)

        dirichlet_bcs = [dl.DirichletBC(Vh, dl.Constant(0.0), on_any_boundary)]
        bc0 = dl.DirichletBC(Vh, dl.Constant(0.0), on_any_boundary)

        uh = dl.TrialFunction(Vh)
        vh = dl.TestFunction(Vh)
        F = dl.inner((1 + uh**2) * dl.grad(uh),
                     dl.grad(vh)) * dl.dx - forcing * vh * dl.dx
        F += uh * vh * dl.inner(dl.nabla_grad(uh), dl.nabla_grad(uh)) * dl.dx
        u = dl.Function(Vh)
        F = dl.action(F, u)
        parameters = {"symmetric": True,
                      "newton_solver": {"relative_tolerance": 1e-12,
                                        "report": True,
                                        "linear_solver": "cg",
                                        "preconditioner": "petsc_amg"}}
        dl.solve(F == 0, u, dirichlet_bcs, solver_parameters=parameters)
        #dl.plot(uh)
        #plt.show()


        F = 0.5*(1+uh**2)*dl.inner(dl.nabla_grad(uh),dl.nabla_grad(uh))*dl.dx-\
            forcing*uh*dl.dx
        #F = dl.inner((1+uh**2)*dl.grad(uh),dl.grad(vh))*dl.dx-forcing*vh*dl.dx
        u_newton = dl.Function(Vh)
        F = dl.action(F, u_newton)
        constrained_newton_energy_solve(F,
                                        u_newton,
                                        dirichlet_bcs=dirichlet_bcs,
                                        bc0=bc0,
                                        linear_solver='PETScLU',
                                        opts=dict())


        F = 0.5*(1+uh**2)*dl.inner(dl.nabla_grad(uh),dl.nabla_grad(uh))*dl.dx-\
             forcing*uh*dl.dx
        u_grad = dl.Function(Vh)
        F = dl.action(F, u_grad)
        grad = dl.derivative(F, u_grad)
        print(F)
        print(grad)
        parameters = {"symmetric": True,
                      "newton_solver": {"relative_tolerance": 1e-12,
                                        "report": True,
                                        "linear_solver": "cg",
                                        "preconditioner": "petsc_amg"}}
        dl.solve(grad == 0,
                 u_grad,
                 dirichlet_bcs,
                 solver_parameters=parameters)
        error1 = dl.errornorm(u_grad, u_newton, mesh=mesh)
        #print(error)
        error2 = dl.errornorm(u, u_newton, mesh=mesh)
        #print(error)
        #dl.plot(u)
        #dl.plot(u_newton)
        #dl.plot(u_grad)
        #plt.show()
        assert error1 < 1e-15
        assert error2 < 1e-15
Example #17
    try:
        dl.set_log_active(False)
    except:
        pass

    # set initial day of simulation
    date = "2020-07-15"

    # load mesh
    mesh_path = args.mesh_path
    mesh_fname = args.mesh_file
    mesh = dl.Mesh(mesh_path + mesh_fname + ".xml")

    # FE space
    FE_polynomial = 1
    Vu = dl.FunctionSpace(mesh, "Lagrange", FE_polynomial)

    # read COVID-19 data
    data_path = args.data_path
    infected_total_state = np.loadtxt(
        data_path + 'covid_11August2020/infected_total_state.txt')
    deceased_state = np.loadtxt(data_path +
                                'covid_11August2020/deceased_state.txt')

    # save data
    d0 = datetime.strptime("2020-07-15", "%Y-%m-%d")
    d1 = datetime.strptime(date, "%Y-%m-%d")
    day_index = abs((d1 - d0)).days

    # Define elements: P1 and real number
    P1 = dl.FiniteElement("Lagrange", mesh.ufl_cell(), 1)
Example #18
def simulate_FEM():

    import dolfin as df
    df.parameters['allow_extrapolation'] = False

    # Define mesh
    mesh = df.Mesh(join(mesh_folder, "{}.xml".format(mesh_name)))
    subdomains = df.MeshFunction("size_t", mesh, join(mesh_folder,
                          "{}_physical_region.xml".format(mesh_name)))
    boundaries = df.MeshFunction("size_t", mesh, join(mesh_folder,
                          "{}_facet_region.xml".format(mesh_name)))

    print("Number of cells in mesh: ", mesh.num_cells())

    np.save(join(out_folder, "mesh_coordinates.npy"), mesh.coordinates())

    sigma_vec = df.Constant(sigma)

    V = df.FunctionSpace(mesh, "CG", 2)
    v = df.TestFunction(V)
    u = df.TrialFunction(V)

    ds = df.Measure("ds", domain=mesh, subdomain_data=boundaries)
    dx = df.Measure("dx", domain=mesh, subdomain_data=subdomains)

    a = df.inner(sigma_vec * df.grad(u), df.grad(v)) * dx(1)

    # This corresponds to Neumann boundary conditions zero, i.e.
    # all outer boundaries are insulating.
    L = df.Constant(0) * v * dx

    # Define Dirichlet boundary conditions outer cylinder boundaries (ground)
    bcs = [df.DirichletBC(V, 0.0, boundaries, 1)]

    for t_idx in range(num_tsteps):

        f_name = join(out_folder, "phi_xz_t_vec_{}.npy".format(t_idx))
        # if os.path.isfile(f_name):
        #     print("skipping ", f_name)
        #     continue

        print("Time step {} of {}".format(t_idx, num_tsteps))
        phi = df.Function(V)
        A = df.assemble(a)
        b = df.assemble(L)

        [bc.apply(A, b) for bc in bcs]

        # Adding point sources from neural simulation
        for s_idx, s_pos in enumerate(source_pos):

            point = df.Point(s_pos[0], s_pos[1], s_pos[2])
            delta = df.PointSource(V, point, imem[s_idx, t_idx])
            delta.apply(b)

        df.solve(A, phi.vector(), b, 'cg', "ilu")

        # df.File(join(out_folder, "phi_t_vec_{}.xml".format(t_idx))) << phi
        # np.save(join(out_folder, "phi_t_vec_{}.npy".format(t_idx)), phi.vector())

        plot_and_save_simulation_results(phi, t_idx)
# availability see https://hippylib.github.io.
#
# hIPPYlib is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License (as published by the Free
# Software Foundation) version 3.0 dated June 2007.

import dolfin as dl
import sys
sys.path.append( "../../" )
from hippylib import *
import numpy as np

nx = 32
ny = 32
mesh = dl.UnitSquareMesh(nx, ny)
Vh = dl.FunctionSpace(mesh, 'Lagrange', 1)
Prior = LaplacianPrior(Vh, 1.,100.)

nsamples = 1000

s = dl.Function(Vh, name = "sample")
noise = dl.Vector()
Prior.init_vector(noise,"noise")
size = len(noise.array())

fid = dl.File("results_cg/samples.pvd")

for i in range(0, nsamples ):
    noise.set_local( np.random.randn( size ) )
    Prior.sample(noise, s.vector())
    fid << s
Example #20
def test_assembly_solve_taylor_hood(mesh):
    """Assemble Stokes problem with Taylor-Hood elements and solve."""
    P2 = dolfin.VectorFunctionSpace(mesh, ("Lagrange", 2))
    P1 = dolfin.FunctionSpace(mesh, ("Lagrange", 1))

    def boundary0(x):
        """Define boundary x = 0"""
        return x[:, 0] < 10 * numpy.finfo(float).eps

    def boundary1(x):
        """Define boundary x = 1"""
        return x[:, 0] > (1.0 - 10 * numpy.finfo(float).eps)

    u0 = dolfin.Function(P2)
    u0.vector().set(1.0)
    u0.vector().ghostUpdate(
        addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD)
    bc0 = dolfin.DirichletBC(P2, u0, boundary0)
    bc1 = dolfin.DirichletBC(P2, u0, boundary1)

    u, p = dolfin.TrialFunction(P2), dolfin.TrialFunction(P1)
    v, q = dolfin.TestFunction(P2), dolfin.TestFunction(P1)

    a00 = inner(ufl.grad(u), ufl.grad(v)) * dx
    a01 = ufl.inner(p, ufl.div(v)) * dx
    a10 = ufl.inner(ufl.div(u), q) * dx
    a11 = None

    p00 = a00
    p01, p10 = None, None
    p11 = inner(p, q) * dx

    # FIXME
    # We need zero function for the 'zero' part of L
    p_zero = dolfin.Function(P1)
    f = dolfin.Function(P2)
    L0 = ufl.inner(f, v) * dx
    L1 = ufl.inner(p_zero, q) * dx

    # -- Blocked and nested

    A0 = dolfin.fem.assemble_matrix_nest([[a00, a01], [a10, a11]], [bc0, bc1])
    A0norm = nest_matrix_norm(A0)
    P0 = dolfin.fem.assemble_matrix_nest([[p00, p01], [p10, p11]], [bc0, bc1])
    P0norm = nest_matrix_norm(P0)
    b0 = dolfin.fem.assemble_vector_nest([L0, L1], [[a00, a01], [a10, a11]],
                                         [bc0, bc1])
    b0norm = b0.norm()

    ksp = PETSc.KSP()
    ksp.create(mesh.mpi_comm())
    ksp.setOperators(A0, P0)
    nested_IS = P0.getNestISs()
    ksp.setType("minres")
    pc = ksp.getPC()
    pc.setType("fieldsplit")
    pc.setFieldSplitIS(["u", nested_IS[0][0]], ["p", nested_IS[1][1]])
    ksp_u, ksp_p = pc.getFieldSplitSubKSP()
    ksp_u.setType("preonly")
    ksp_u.getPC().setType('lu')
    ksp_u.getPC().setFactorSolverType('mumps')
    ksp_p.setType("preonly")

    def monitor(ksp, its, rnorm):
        # print("Num it, rnorm:", its, rnorm)
        pass

    ksp.setTolerances(rtol=1.0e-8, max_it=50)
    ksp.setMonitor(monitor)
    ksp.setFromOptions()
    x0 = b0.copy()
    ksp.solve(b0, x0)
    assert ksp.getConvergedReason() > 0

    # -- Blocked and monolithic

    A1 = dolfin.fem.assemble_matrix_block([[a00, a01], [a10, a11]], [bc0, bc1])
    assert A1.norm() == pytest.approx(A0norm, 1.0e-12)
    P1 = dolfin.fem.assemble_matrix_block([[p00, p01], [p10, p11]], [bc0, bc1])
    assert P1.norm() == pytest.approx(P0norm, 1.0e-12)
    b1 = dolfin.fem.assemble_vector_block([L0, L1], [[a00, a01], [a10, a11]],
                                          [bc0, bc1])
    assert b1.norm() == pytest.approx(b0norm, 1.0e-12)

    ksp = PETSc.KSP()
    ksp.create(mesh.mpi_comm())
    ksp.setOperators(A1, P1)
    ksp.setType("minres")
    pc = ksp.getPC()
    pc.setType('lu')
    pc.setFactorSolverType('mumps')
    ksp.setTolerances(rtol=1.0e-8, max_it=50)
    ksp.setFromOptions()
    x1 = A1.createVecRight()
    ksp.solve(b1, x1)
    assert ksp.getConvergedReason() > 0
    assert x1.norm() == pytest.approx(x0.norm(), 1e-8)

    # -- Monolithic

    P2 = ufl.VectorElement("Lagrange", mesh.ufl_cell(), 2)
    P1 = ufl.FiniteElement("Lagrange", mesh.ufl_cell(), 1)
    TH = P2 * P1
    W = dolfin.FunctionSpace(mesh, TH)
    (u, p) = dolfin.TrialFunctions(W)
    (v, q) = dolfin.TestFunctions(W)
    a00 = ufl.inner(ufl.grad(u), ufl.grad(v)) * dx
    a01 = ufl.inner(p, ufl.div(v)) * dx
    a10 = ufl.inner(ufl.div(u), q) * dx
    a = a00 + a01 + a10

    p00 = ufl.inner(ufl.grad(u), ufl.grad(v)) * dx
    p11 = ufl.inner(p, q) * dx
    p_form = p00 + p11

    f = dolfin.Function(W.sub(0).collapse())
    p_zero = dolfin.Function(W.sub(1).collapse())
    L0 = inner(f, v) * dx
    L1 = inner(p_zero, q) * dx
    L = L0 + L1

    bc0 = dolfin.DirichletBC(W.sub(0), u0, boundary0)
    bc1 = dolfin.DirichletBC(W.sub(0), u0, boundary1)

    A2 = dolfin.fem.assemble_matrix(a, [bc0, bc1])
    A2.assemble()
    assert A2.norm() == pytest.approx(A0norm, 1.0e-12)
    P2 = dolfin.fem.assemble_matrix(p_form, [bc0, bc1])
    P2.assemble()
    assert P2.norm() == pytest.approx(P0norm, 1.0e-12)

    b2 = dolfin.fem.assemble_vector(L)
    dolfin.fem.apply_lifting(b2, [a], [[bc0, bc1]])
    b2.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)
    dolfin.fem.set_bc(b2, [bc0, bc1])
    b2norm = b2.norm()
    assert b2norm == pytest.approx(b0norm, 1.0e-12)

    ksp = PETSc.KSP()
    ksp.create(mesh.mpi_comm())
    ksp.setOperators(A2, P2)
    ksp.setType("minres")
    pc = ksp.getPC()
    pc.setType('lu')
    pc.setFactorSolverType('mumps')

    def monitor(ksp, its, rnorm):
        # print("Num it, rnorm:", its, rnorm)
        pass

    ksp.setTolerances(rtol=1.0e-8, max_it=50)
    ksp.setMonitor(monitor)
    ksp.setFromOptions()
    x2 = A2.createVecRight()
    ksp.solve(b2, x2)
    assert ksp.getConvergedReason() > 0
    assert x0.norm() == pytest.approx(x2.norm(), 1e-8)
Example #21
# This is to test setting Function values in parallel
# https://fenicsproject.org/qa/10005/setting-function-values-in-parallel-repeated-question/

import dolfin as df
import sys
import numpy as np

n_cells = 10
#int(sys.argv[1])

mesh = df.UnitSquareMesh(n_cells, n_cells)
V = df.FunctionSpace(mesh, 'CG', 2)
u = df.Function(V)

vec = u.vector()
values = vec.get_local()

dofmap = V.dofmap()
my_first, my_last = dofmap.ownership_range()  # global

# 'Handle' API change of tabulate coordinates
if df.__version__ >= '1.6.0':
    x = V.tabulate_dof_coordinates().reshape((-1, 2))
else:
    x = V.dofmap().tabulate_all_coordinates(mesh)

unowned = dofmap.local_to_global_unowned()
dofs = filter(lambda dof: dofmap.local_to_global_index(dof) not in unowned,
              range(my_last - my_first))

dof_local = np.array(list(dofs))
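
# A possible continuation (a sketch; the fill rule below is an illustrative assumption):
# assign the owned dofs from their coordinates and push the values back into the Function's vector.
values[dof_local] = x[dof_local, 0] + 2.0 * x[dof_local, 1]  # illustrative assignment rule
vec.set_local(values)
vec.apply('insert')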
Example #22
def _inputsort(obj):
    import dolfin

    u = None
    mesh = None
    if not utils.isSequence(obj):
        obj = [obj]

    for ob in obj:
        inputtype = str(type(ob))

        #printc('inputtype is', inputtype, c=2)

        if "vtk" in inputtype:  # skip vtk objects, will be added later
            continue

        if "dolfin" in inputtype or "ufl" in inputtype:
            if "MeshFunction" in inputtype:
                mesh = ob.mesh()

                if ob.dim() > 0:
                    printc('MeshFunction of dim>0 not supported.', c=1)
                    printc('Try e.g.:  MeshFunction("size_t", mesh, 0)',
                           c=1,
                           italic=1)
                    printc('instead of MeshFunction("size_t", mesh, 1)',
                           c=1,
                           strike=1)
                else:
                    #printc(ob.dim(), mesh.num_cells(), len(mesh.coordinates()), len(ob.array()))
                    V = dolfin.FunctionSpace(mesh, "CG", 1)
                    u = dolfin.Function(V)
                    v2d = dolfin.vertex_to_dof_map(V)
                    u.vector()[v2d] = ob.array()
            elif "Function" in inputtype or "Expression" in inputtype:
                u = ob
            elif "ufl.mathfunctions" in inputtype:  # not working
                u = ob
            elif "Mesh" in inputtype:
                mesh = ob
            elif "algebra" in inputtype:
                mesh = ob.ufl_domain()
                #print('algebra', ob.ufl_domain())

        if "str" in inputtype:
            mesh = dolfin.Mesh(ob)

    if u and not mesh and hasattr(u, "function_space"):
        V = u.function_space()
        if V:
            mesh = V.mesh()
    if u and not mesh and hasattr(u, "mesh"):
        mesh = u.mesh()

    #printc('------------------------------------')
    #printc('mesh.topology dim=', mesh.topology().dim())
    #printc('mesh.geometry dim=', mesh.geometry().dim())
    #if u: printc('u.value_rank()', u.value_rank())
    #if u and u.value_rank(): printc('u.value_dimension()', u.value_dimension(0)) # axis=0
    ##if u: printc('u.value_shape()', u.value_shape())
    return (mesh, u)
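
# A hypothetical call (a sketch; not part of the original snippet): passing a dolfin Function
# returns its mesh together with the function itself.
import dolfin
f = dolfin.Function(dolfin.FunctionSpace(dolfin.UnitSquareMesh(8, 8), "CG", 1))
mesh, u = _inputsort(f)    # also accepts a list mixing dolfin/ufl and vtk objects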
Example #23
def run(active_model, material_model, matparams_space):

    params = setup_adjoint_contraction_parameters(material_model)
    if active_model == "active_strain":
        params["T_ref"] = 0.2
    else:
        params["T_ref"] = 100.0
        
    params["phase"] == "all"
    # params["material_model"] = material_model
    params["active_model"] = active_model
    params["matparams_space"] = matparams_space

    solver_parameters, pressure, paramvec= make_solver_params(params, patient)


    Material = get_material_model(material_model)
    
    msg = "Should be {}, got {}".format(Material,
                                        type(solver_parameters["material"]))
    assert isinstance(solver_parameters["material"], Material), msg


    pressure 
    V_real = df.FunctionSpace(solver_parameters["mesh"],  "R", 0)
    gamma = df.Function(V_real, name = "gamma")
    

    matparams = setup_material_parameters(material_model)

    args = (patient.fiber,
            gamma,
            matparams,
            active_model,
            patient.sheet,
            patient.sheet_normal,
            params["T_ref"])

    
        

    material = Material(*args)

    solver_parameters["material"] = material
    solver = LVSolver(solver_parameters)
    solver.parameters["solve"]["newton_solver"]["report"] = True

    from pulse_adjoint.iterate import iterate

    pressures, volumes = [],[]
    # Increase pressure
    for plv in [0.0, 0.5, 1.0, 1.6]:
        iterate("pressure", solver, plv, pressure)
        u,p = solver.get_state().split(deepcopy=True)

        pressures.append(plv)
        
        vol = get_volume(patient, u = u)
        volumes.append(vol)

    
    # Increase gamma
    iterate("gamma", solver, 1.0, gamma)
Example #24
u_b = df.Constant(1e-6 * spy)
alpha = df.Constant(5. / 4.)
beta = df.Constant(3. / 2.)

k = df.Constant(.005 * spy)

e_v = df.Constant(1e-3)

dt_float = 1e-1
dt = df.Constant(dt_float)

mesh = df.Mesh('data/isunnguata_sermia.xml')
nhat = df.FacetNormal(mesh)

E_cg = df.FiniteElement("CG", mesh.ufl_cell(), 1)
Q_cg = df.FunctionSpace(mesh, E_cg)

E_dg = df.FiniteElement("DG", mesh.ufl_cell(), 0)
Q_dg = df.FunctionSpace(mesh, E_dg)

E = df.MixedElement([E_cg, E_cg, E_dg])
V = df.FunctionSpace(mesh, E)

B_dg = df.Function(Q_dg, 'data/Bed/B_d.xml')
B = df.Function(Q_cg, 'data/Bed/B_c.xml')

H0 = df.Function(Q_dg, 'data/Thk/H_d.xml')
H0_c = df.Function(Q_cg, 'data/Thk/H_c.xml')

edgefunction = df.MeshFunction('size_t', mesh, 1)
Example #25
    def _set_function_space(self):
        self.V = d.FunctionSpace(self.mesh.mesh, self.element)
        self.logger.info("Number of DOFs : {}".format(self.V.dim()))
Example #26
    mesh = df.RectangleMesh(0, z_dom_min, r_dom, z_dom_max, nr, nz)

    # Define interior of quantum dot r^2+z^2<1 and z>0
    class QuantumDot(df.SubDomain):
        def inside(self, x, on_boundary):
            return df.between(x[0]**2 + x[1]**2,
                              (0, 1)) and df.between(x[1], (0, 1))

    quantumDot = QuantumDot()

    domains = df.CellFunction("size_t", mesh)
    domains.set_all(0)
    quantumDot.mark(domains, 1)

    V = df.FunctionSpace(mesh, "CG", 1)

    u = df.TrialFunction(V)
    v = df.TestFunction(V)

    drdz = df.Measure("dx")[domains]
    r = df.Expression("x[0]")

    # Confining potential
    potential = df.Constant(100)

    # Partial derivatives of trial and test functions
    u_r = u.dx(0)
    v_r = v.dx(0)
    u_z = u.dx(1)
    v_z = v.dx(1)
Example #27
plt.ion()
n_dif = 100
dif = np.zeros((n_dif, 2))
loaded = np.load(file=os.path.join(
    folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) + '_training_XimgY' +
    '.npz'))
prng = np.random.RandomState(2020)
sel4eval = prng.choice(num_samp, size=n_dif, replace=False)
X = loaded['X'][sel4eval]
Y = loaded['Y'][sel4eval]
sel4print = prng.choice(n_dif, size=10, replace=False)
prog = np.ceil(n_dif * (.1 + np.arange(0, 1, .1)))
u_f = df.Function(adif.prior.V)
eldeg = adif.prior.V.ufl_element().degree()
if eldeg > 1:
    V_P1 = df.FunctionSpace(adif.mesh, 'Lagrange', 1)
    d2v = df.dof_to_vertex_map(V_P1)
    u_f1 = df.Function(V_P1)
else:
    u_f1 = u_f
for n in range(n_dif):
    u = X[n]
    # calculate gradient
    t_start = timeit.default_timer()
    ll_xact, dll_xact = adif.get_geom(
        adif.img2vec(u, adif.prior.V if eldeg > 1 else None), [0, 1])[:2]
    t_used[0] += timeit.default_timer() - t_start
    # emulate gradient
    t_start = timeit.default_timer()
    ll_emul = logLik(u[None, :, :, None]).numpy()[0]
    dll_emul = adif.img2vec(cnn.gradient(u[None, :, :, None], logLik))
Example #28
def apex_to_base(mesh, base_marker, ffun=None):
    """
    Find the apex coordinate and solve a Laplace
    equation to find the apex-to-base solution

    Arguments
    ---------
    mesh : dolfin.Mesh
        The mesh
    base_marker : int
        The marker value for the basal facets
    ffun : dolfin.MeshFunctionSizet (optional)
        A facet function containing markers for the boundaries.
        If not provided, the markers stored within the mesh will
        be used.
    """
    # Find apex by solving a laplacian with base solution = 0
    # Create Base variational problem
    V = df.FunctionSpace(mesh, "CG", 1)

    u = df.TrialFunction(V)
    v = df.TestFunction(V)

    a = df.dot(df.grad(u), df.grad(v)) * df.dx
    L = v * df.Constant(1) * df.dx

    apex = df.Function(V)

    base_bc = df.DirichletBC(V, 1, ffun, base_marker, "topological")

    def solve(solver_parameters):
        df.solve(a == L, apex, base_bc, solver_parameters=solver_parameters)

    solver_parameters = {"linear_solver": "cg", "preconditioner": "amg"}
    pcs = (pc for pc in ['petsc_amg', 'default'])
    while 1:
        try:
            solve(solver_parameters)
        except RuntimeError:
            solver_parameters["preconditioner"] = next(pcs)
        else:
            break

    if utils.DOLFIN_VERSION_MAJOR < 2018:
        dof_x = utils.gather_broadcast(V.tabulate_dof_coordinates()).reshape(
            (-1, 3))
        apex_values = utils.gather_broadcast(apex.vector().get_local())
        ind = apex_values.argmax()
        apex_coord = dof_x[ind]
    else:
        dof_x = V.tabulate_dof_coordinates()
        apex_values = apex.vector().get_local()
        local_max_val = apex_values.max()

        local_apex_coord = dof_x[apex_values.argmax()]
        comm = utils.mpi_comm_world()

        from mpi4py import MPI
        global_max, apex_coord = comm.allreduce(sendobj=(local_max_val,
                                                         local_apex_coord),
                                                op=MPI.MAXLOC)

    df.info("  Apex coord: ({0:.2f}, {1:.2f}, {2:.2f})".format(*apex_coord))

    # Update rhs
    L = v * df.Constant(0) * df.dx
    apex_domain = df.CompiledSubDomain(
        "near(x[0], {0}) && near(x[1], {1}) && near(x[2], {2})".format(
            *apex_coord))
    apex_bc = df.DirichletBC(V, 0, apex_domain, "pointwise")

    # Solve the poisson equation
    df.solve(a == L,
             apex, [base_bc, apex_bc],
             solver_parameters={"linear_solver": "gmres"})

    return apex
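
# A hypothetical usage sketch (the mesh file and the base marker value are assumptions):
# compute the apex-to-base field on a ventricular mesh whose basal facets are marked.
mesh = df.Mesh("lv_mesh.xml")                                                       # assumed mesh file
ffun = df.MeshFunction("size_t", mesh, mesh.topology().dim() - 1, mesh.domains())
apex_to_base_field = apex_to_base(mesh, base_marker=10, ffun=ffun)                  # marker value is an assumption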
def gen_bccont_fems_3D(scheme='TH', bccontrol=True, verbose=False,
                       strtomeshfile='', strtophysicalregions='',
                       inflowvel=1., inflowprofile='parabola',
                       movingwallcntrl=False,
                       strtobcsobs=''):
    """
    Assemble a dictionary of the FEM items for a general 3D flow setup

    with
     * inflow/outflow
     * boundary control

    Parameters
    ----------
    scheme : {'CR', 'TH'}
        the finite element scheme to be applied, 'CR' for Crouzeix-Raviart,\
        'TH' for Taylor-Hood, overrides `pdgree`, `vdgree`, defaults to `'TH'`
    bccontrol : boolean, optional
        whether to consider boundary control via penalized Robin \
        boundary conditions, defaults to `True`
    movingwallcntrl : boolean, optional
        whether control is via moving boundaries

    Returns
    -------
    femp : a dictionary with the keys:
         * `V`: FEM space of the velocity
         * `Q`: FEM space of the pressure
         * `dbcinds`: list of dof indices of the (Dirichlet) boundary \
                 conditions
         * `dbcvals`: list of values of the (Dirichlet) boundary conditions
         * `fv`: right hand side of the momentum equation
         * `fp`: right hand side of the continuity equation
         * `charlen`: characteristic length of the setup
         * `odcoo`: dictionary with the coordinates of the \
                 domain of observation

    """

    # Load mesh
    mesh = dolfin.Mesh(strtomeshfile)

    if scheme == 'CR':
        V = dolfin.VectorFunctionSpace(mesh, "CR", 1)
        Q = dolfin.FunctionSpace(mesh, "DG", 0)
    elif scheme == 'TH':
        V = dolfin.VectorFunctionSpace(mesh, "CG", 2)
        Q = dolfin.FunctionSpace(mesh, "CG", 1)
    else:
        raise NotImplementedError('unknown scheme: {0}'.format(scheme))

    boundaries = dolfin.MeshFunction('size_t', mesh, strtophysicalregions)

    with open(strtobcsobs) as f:
        cntbcsdata = json.load(f)
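    # illustrative (not normative) sketch of the JSON structure expected in
    # `strtobcsobs`, inferred from the keys accessed below:
    # {
    #   "characteristic length": 1.0,
    #   "inflow": {"physical entity": 1, "inward normal": [1, 0, 0],
    #              "xone": [...], "xtwo": [...], "xfour": [...]},
    #   "walls": {"physical entity": [2, 3]},
    #   "outflow": {"physical entity": 4},
    #   # optional: "controlbcs", "slipwalls", "moving walls",
    #   #           "lift drag surface", "observation-domain-coordinates"
    # }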

    inflowgeodata = cntbcsdata['inflow']
    inflwpe = inflowgeodata['physical entity']
    inflwin = np.array(inflowgeodata['inward normal'])
    inflwxi = np.array(inflowgeodata['xone'])
    inflwxii = np.array(inflowgeodata['xtwo'])
    # inflwxiii = np.array(inflowgeodata['xthree'])
    inflwxiv = np.array(inflowgeodata['xfour'])

    if inflowprofile == 'block':
        raise NotImplementedError()
        inflwprfl = dolfin.\
            Expression(('cv*no', 'cv*nt'), cv=inflowvel,
                       no=inflwin[0], nt=inflwin[1],
                       element=V.ufl_element())
    elif inflowprofile == 'parabola':
        inflwprfl = InflowParabola3D(degree=2, xone=inflwxi, xtwo=inflwxii,
                                     xfour=inflwxiv,
                                     normalvec=inflwin, inflowvel=inflowvel)
    bcin = dolfin.DirichletBC(V, inflwprfl, boundaries, inflwpe)
    diribcu = [bcin]

    # ## THE WALLS
    wallspel = cntbcsdata['walls']['physical entity']
    gzero = dolfin.Constant((0, 0, 0))
    for wpe in wallspel:
        diribcu.append(dolfin.DirichletBC(V, gzero, boundaries, wpe))
        # bcdict = diribcu[-1].get_boundary_values()

    if not bccontrol:  # treat the control boundaries as walls
        try:
            for cntbc in cntbcsdata['controlbcs']:
                diribcu.append(dolfin.DirichletBC(V, gzero, boundaries,
                                                  cntbc['physical entity']))
        except KeyError:
            pass  # no control boundaries

    # slip walls: only the wall-normal velocity component is set to zero
    try:
        slipwallspel = cntbcsdata['slipwalls']['physical entity']
        slipwallsnvs = cntbcsdata['slipwalls']['inward normals']
        gscalzero = dolfin.Constant(0)
        for kk, swpe in enumerate(slipwallspel):
            cinwnrml = np.array(slipwallsnvs[kk])
            if np.abs(np.inner(cinwnrml, np.array([0, 0, 1.]))) == 1:
                cbcsw = dolfin.DirichletBC(V.sub(2), gscalzero,
                                           boundaries, swpe)
                diribcu.append(cbcsw)
            else:
                raise NotImplementedError()
    except KeyError:
        pass  # no slip walls defined

    mvwdbcs = []
    mvwtvs = []
    try:
        for cntbc in cntbcsdata['moving walls']:
            raise NotImplementedError()
            center = np.array(cntbc['geometry']['center'])
            radius = cntbc['geometry']['radius']
            if cntbc['type'] == 'circle':
                omega = 1. if movingwallcntrl else 0.
                rotcyl = RotatingCircle(degree=2, radius=radius,
                                        xcenter=center, omega=omega)
            else:
                raise NotImplementedError()
            mvwdbcs.append(dolfin.DirichletBC(V, rotcyl, boundaries,
                                              cntbc['physical entity']))
    except KeyError:
        pass  # no moving walls defined
    if not movingwallcntrl:
        diribcu.extend(mvwdbcs)  # add the moving walls to the diri bcs
        mvwdbcs = []

    # Create outflow boundary condition for pressure
    # TODO XXX why zero pressure?? is this do-nothing???
    # outflwpe = cntbcsdata['outflow']['physical entity']
    # g2 = dolfin.Constant(0)
    # bc2 = dolfin.DirichletBC(Q, g2, boundaries, outflwpe)

    # Collect boundary conditions
    # bcp = [bc2]

    # Create right-hand side function
    fv = dolfin.Constant((0, 0, 0))
    fp = dolfin.Constant(0)

    def initial_conditions(V, Q):
        u0 = dolfin.Constant((0, 0, 0))
        p0 = dolfin.Constant(0)
        return u0, p0

    # collect the Dirichlet data as (dof index, value) pairs;
    # bc.get_boundary_values() maps global dof indices to prescribed values
    dbcinds, dbcvals = [], []
    for bc in diribcu:
        bcdict = bc.get_boundary_values()
        dbcvals.extend(list(bcdict.values()))
        dbcinds.extend(list(bcdict.keys()))

    mvwbcinds, mvwbcvals = [], []
    for bc in mvwdbcs:
        bcdict = bc.get_boundary_values()
        mvwbcvals.extend(list(bcdict.values()))
        mvwbcinds.extend(list(bcdict.keys()))

    # ## Control boundaries
    bcpes, bcshapefuns, bcds = [], [], []
    if bccontrol:
        raise NotImplementedError()
        for cbc in cntbcsdata['controlbcs']:
            cpe = cbc['physical entity']
            cxi, cxii = np.array(cbc['xone']), np.array(cbc['xtwo'])
            csf = _get_cont_shape_fun2D(xi=cxi, xii=cxii,
                                        element=V.ufl_element())
            bcshapefuns.append(csf)
            bcpes.append(cpe)
            bcds.append(dolfin.Measure("ds", subdomain_data=boundaries)(cpe))

    # ## Lift Drag Computation
    try:
        ldsurfpe = cntbcsdata['lift drag surface']['physical entity']
        raise NotImplementedError()
        liftdragds = dolfin.Measure("ds", subdomain_data=boundaries)(ldsurfpe)
        bclds = dolfin.DirichletBC(V, gzero, boundaries, ldsurfpe)
        bcldsdict = bclds.get_boundary_values()
        ldsbcinds = list(bcldsdict.keys())
    except KeyError:
        liftdragds = None  # no domain specified for lift/drag
        ldsbcinds = None
    try:
        outflwpe = cntbcsdata['outflow']['physical entity']
        outflowds = dolfin.Measure("ds", subdomain_data=boundaries)(outflwpe)
    except KeyError:
        outflowds = None  # no domain specified for outflow

    try:
        odcoo = cntbcsdata['observation-domain-coordinates']
        raise NotImplementedError()
    except KeyError:
        odcoo = None

    gbcfems = dict(V=V,
                   Q=Q,
                   dbcinds=dbcinds,
                   dbcvals=dbcvals,
                   mvwbcinds=mvwbcinds,
                   mvwbcvals=mvwbcvals,
                   mvwtvs=mvwtvs,
                   # dirip=bcp,
                   outflowds=outflowds,
                   # contrbcssubdomains=bcsubdoms,
                   liftdragds=liftdragds,
                   ldsbcinds=ldsbcinds,
                   contrbcmeshfunc=boundaries,
                   contrbcspes=bcpes,
                   contrbcsshapefuns=bcshapefuns,
                   cntrbcsds=bcds,
                   odcoo=odcoo,
                   fv=fv,
                   fp=fp,
                   charlen=cntbcsdata['characteristic length'],
                   mesh=mesh)

    return gbcfems
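
A minimal usage sketch (all file names are placeholders for an actual mesh
file, its physical-regions file, and a JSON file with the boundary description
of inflow, walls, outflow, etc.; `bccontrol=False` avoids the not-yet-implemented
control branch):

femp = gen_bccont_fems_3D(scheme='TH', bccontrol=False,
                          strtomeshfile='mesh/channel3d.xml',
                          strtophysicalregions='mesh/channel3d_facets.xml',
                          strtobcsobs='mesh/channel3d_bcsobs.json',
                          inflowvel=1., inflowprofile='parabola')
V, Q = femp['V'], femp['Q']
print('velocity dofs: {0}, pressure dofs: {1}'.format(V.dim(), Q.dim()))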
Example #30
0
        return sum(wq*f(*xq) for xq, wq in zip(pts, weights))/l

    # Get the MEAN
    mesh = df.BoxMesh(df.Point(-1, -1, -1), df.Point(1, 1, 1), 16, 16, 16)
    # Make 1d
    f = df.MeshFunction('size_t', mesh, 1, 0)
    # df.CompiledSubDomain('near(x[0], x[1]) && near(x[1], x[2])').mark(f, 1)
    df.CompiledSubDomain('near(x[0], 0.) && near(x[1], 0.)').mark(f, 1)
    
    line_mesh = EmbeddedMesh(f, 1)

    # Circle ---------------------------------------------------------
    size = 0.125
    ci = Circle(radius=lambda x0: size, degree=12)

    u = df.Function(df.FunctionSpace(mesh, 'CG', 1))
    op = Average(u, line_mesh, ci)
        
    surface = render_avg_surface(op)
    
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    x0 = np.array([0, 0, 0.5])
    n = np.array([0, 0, 1])
    ci_integrate = lambda f, shape=ci, n=n, x0=x0: shape_integrate(f, shape, x0, n)
    
    # Sanity
    f = lambda x, y, z: 1
    value = ci_integrate(f)
    assert is_close(value, 1)
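
    # A couple of further checks along the same lines (assuming, as the code
    # above suggests, that ci_integrate returns the mean of f over the circle
    # of radius 0.125 centred at x0 = (0, 0, 0.5) with normal (0, 0, 1)):
    # the circle lies in the plane z = 0.5, so the mean of z is 0.5, and the
    # mean of x + 1 is 1 since the mean of x vanishes by symmetry
    f = lambda x, y, z: z
    assert is_close(ci_integrate(f), 0.5)

    f = lambda x, y, z: x + 1
    assert is_close(ci_integrate(f), 1)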