    def __init__(self, args, tc, metadata):
        self.has_analytic_solution = False
        self.problem_code = 'FACB'
        super(Problem, self).__init__(args, tc, metadata)

        self.name = 'test on real mesh'
        self.status_functional_str = 'not selected'

        # input parameters
        self.factor = args.factor
        self.scale_factor.append(self.factor)

        self.nu = 0.001 * args.nufactor  # kinematic viscosity

        # Import gmsh mesh
        self.compatible_meshes = ['bench3D_1', 'bench3D_2', 'bench3D_3']
        if args.mesh not in self.compatible_meshes:
            exit('Incompatible mesh; expected one of %s' %
                 str(self.compatible_meshes))

        self.mesh = Mesh("meshes/" + args.mesh + ".xml")
        self.cell_function = MeshFunction(
            "size_t", self.mesh,
            "meshes/" + args.mesh + "_physical_region.xml")
        self.facet_function = MeshFunction(
            "size_t", self.mesh, "meshes/" + args.mesh + "_facet_region.xml")
        self.dsIn = Measure("ds",
                            subdomain_id=2,
                            subdomain_data=self.facet_function)
        self.dsOut = Measure("ds",
                             subdomain_id=3,
                             subdomain_data=self.facet_function)
        self.dsWall = Measure("ds",
                              subdomain_id=1,
                              subdomain_data=self.facet_function)
        self.dsCyl = Measure("ds",
                             subdomain_id=5,
                             subdomain_data=self.facet_function)
        self.normal = FacetNormal(self.mesh)
        print("Mesh name: ", args.mesh, "    ", self.mesh)
        print("Mesh norm max: ", self.mesh.hmax())
        print("Mesh norm min: ", self.mesh.hmin())

        self.actual_time = None
        self.v_in = None
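
# Hedged usage sketch, not part of the class above: with restricted measures like these,
# boundary functionals assemble directly (legacy DOLFIN). `problem`, `u` and `p` are
# hypothetical stand-ins for an instance of the class and a computed velocity/pressure pair.
from dolfin import assemble, dot

inflow = assemble(dot(u, problem.normal) * problem.dsIn)    # volumetric flux through the inlet (facet id 2)
drag_p = assemble(p * problem.normal[0] * problem.dsCyl)    # pressure drag on the cylinder (facet id 5)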
Example #2
File: driver.py  Project: wei-pan/tsfc
    def facetarea():
        from ufl import Measure
        assert integral_type != 'cell'
        integrand, degree = ufl_utils.one_times(Measure(integral_type, domain=domain))
        integrand = ufl_utils.replace_coordinates(integrand, coordinate_coefficient)

        config = kernel_config.copy()
        config.update(quadrature_degree=degree)
        expr, = fem.compile_ufl(integrand, point_sum=True, **config)
        return expr
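
    # For reference, a plain-UFL sketch (outside tsfc's kernel machinery) of the form whose
    # integrand and degree one_times(...) extracts, for the exterior-facet case; `domain` is
    # assumed to be a UFL mesh object.
    from ufl import Measure
    ds_ext = Measure("exterior_facet", domain=domain)
    area_form = 1 * ds_ext  # assembling this form yields the total exterior-facet area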
Example #3
def test_ufl_id():
    """Test that UFL can process MeshTags (tests ufl_id attribute)"""
    comm = MPI.COMM_WORLD
    mesh = create_unit_cube(comm, 6, 6, 6)
    tdim = mesh.topology.dim
    marked_facets = locate_entities(mesh, tdim - 1,
                                    lambda x: np.isclose(x[1], 1))
    f_v = mesh.topology.connectivity(tdim - 1, 0).array.reshape(-1, 3)

    entities = create_adjacencylist(f_v[marked_facets])
    values = np.full(marked_facets.shape[0], 2, dtype=np.int32)
    ft = meshtags_from_entities(mesh, tdim - 1, entities, values)
    ds = Measure("ds", domain=mesh, subdomain_data=ft, subdomain_id=(2, 3))
    a = 1 * ds
    assert isinstance(a.subdomain_data(), dict)
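
    # Hedged follow-up, not part of the original test: assembling the form gives the total area
    # of the facets tagged 2 (no facet carries tag 3, so that id contributes nothing). Assumes a
    # recent DOLFINx API.
    from dolfinx.fem import assemble_scalar, form
    area = comm.allreduce(assemble_scalar(form(a)), op=MPI.SUM)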
Example #4
def solve_navier_stokes_equation(interior_circle=True, num_mesh_refinements=0):
    """
    Solve the Navier-Stokes equation on a hard-coded mesh with hard-coded initial and boundary conditions
    """
    mesh, om, im, nm, ymax, sub_domains = setup_geometry(
        interior_circle, num_mesh_refinements)
    dsi = Measure("ds", domain=mesh, subdomain_data=sub_domains)

    # Setup FEM function spaces
    # Function space for the velocity
    V = VectorFunctionSpace(mesh, "CG", 1)
    # Function space for the pressure
    Q = FunctionSpace(mesh, "CG", 1)
    # Mixed function space for velocity and pressure
    W = V * Q

    # Setup FEM functions
    v, q = TestFunctions(W)
    w = Function(W)
    (u, p) = (as_vector((w[0], w[1])), w[2])
    u0 = Function(V)

    # Inlet velocity
    uin = Expression(("4*(x[1]*(YMAX-x[1]))/(YMAX*YMAX)", "0."),
                     YMAX=ymax,
                     degree=1)

    # Viscosity and stabilization parameters
    nu = 1e-6
    h = CellSize(mesh)
    d = 0.2 * h**(3.0 / 2.0)

    # Time parameters
    time_step = 0.1
    t_start, t_end = 0.0, 10.0

    # Penalty parameter
    gamma = 10 / h

    # Time stepping
    t = t_start
    step = 0
    while t < t_end:
        # Time discretization (Crank–Nicolson method)
        um = 0.5 * u + 0.5 * u0

        # Navier-Stokes equations in weak residual form (stabilized FEM)
        # Basic residual
        r = (inner((u - u0) / time_step + grad(p) + grad(um) * um, v) +
             nu * inner(grad(um), grad(v)) + div(um) * q) * dx
        # Weak boundary conditions
        r += gamma * (om * p * q + im * inner(u - uin, v) +
                      nm * inner(u, v)) * ds
        # Stabilization
        r += d * (inner(grad(p) + grad(um) * um,
                        grad(q) + grad(um) * v) + inner(div(um), div(v))) * dx

        # Solve the Navier-Stokes equation (one time step)
        solve(r == 0, w)

        if step % 5 == 0:
            # Plot norm of velocity at current time step
            nov = project(sqrt(inner(u, u)), Q)
            fig = plt.figure()
            plot(nov, fig=fig)
            plt.show()

            # Compute drag force on circle
            n = FacetNormal(mesh)
            drag_force_measure = p * n[0] * dsi(1)  # Drag (only pressure)
            drag_force = assemble(drag_force_measure)
            print("Drag force = " + str(drag_force))

        # Shift to next time step
        t += time_step
        step += 1
        u0 = project(u, V)
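
# Illustrative driver for the solver above; the geometry helper setup_geometry and the plotting
# imports come from the original script, and the refinement count here is arbitrary.
solve_navier_stokes_equation(interior_circle=True, num_mesh_refinements=1)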
Example #5

gmsh.model.mesh.setOrder(2)  # Command required for quadratic elements

# Use a gmsh helper function to create the mesh. This requires Jorgen's
# file http://jsdokken.com/converted_files/gmsh_helpers.py
mesh, cell_tags = gmsh_model_to_mesh(gmsh.model, cell_data=True, gdim=2)
n = FacetNormal(mesh)


def boundary(x):
    return (x[0]**2 + x[1]**2) < (radius + 1.0e-2)**2


circle_facets = locate_entities_boundary(mesh, 1, boundary)
mt = dolfinx.mesh.MeshTags(mesh, 1, circle_facets, 1)

ds = Measure("ds", subdomain_data=mt)
'''        Incident field, wavenumber and adiabatic absorber functions      '''


def incident(x):
    # Plane wave travelling in positive x-direction
    return np.exp(1.0j * k0 * x[0])


def adiabatic_layer(x):
    '''          Contribution to wavenumber k in absorbing layers          '''
    # In absorbing layer, have k = k0 + 1j * sigma
    # => k^2 = (k0 + 1j*sigma)^2 = k0^2 + 2j*sigma - sigma^2
    # Therefore, the 2j*sigma - sigma^2 piece must be included in the layer.

    # Find borders of width d_absorb in x- and y-directions
Example #6
def forward(mu_expression,
            lmbda_expression,
            rho,
            Lx=10,
            Ly=10,
            t_end=1,
            omega_p=5,
            amplitude=5000,
            center=0,
            target=False):
    Lpml = Lx / 10
    #c_p = cp(mu.vector(), lmbda.vector(), rho)
    max_velocity = 200  #c_p.max()

    stable_hx = stable_dx(max_velocity, omega_p)
    nx = int(Lx / stable_hx) + 1
    #nx = max(nx, 60)
    ny = int(Ly * nx / Lx) + 1
    mesh = mesh_generator(Lx, Ly, Lpml, nx, ny)
    used_hx = Lx / nx
    dt = stable_dt(used_hx, max_velocity)
    cfl_ct = cfl_constant(max_velocity, dt, used_hx)
    print(used_hx, stable_hx)
    print(cfl_ct)
    #time.sleep(10)
    PE = FunctionSpace(mesh, "DG", 0)
    mu = interpolate(mu_expression, PE)
    lmbda = interpolate(lmbda_expression, PE)

    m = 2
    R = 10e-8
    t = 0.0
    gamma = 0.50
    beta = 0.25

    ff = MeshFunction("size_t", mesh, mesh.geometry().dim() - 1)
    Dirichlet(Lx, Ly, Lpml).mark(ff, 1)

    # Create function spaces
    VE = VectorElement("CG", mesh.ufl_cell(), 1, dim=2)
    TE = TensorElement("DG", mesh.ufl_cell(), 0, shape=(2, 2), symmetry=True)

    W = FunctionSpace(mesh, MixedElement([VE, TE]))
    F = FunctionSpace(mesh, "CG", 2)
    V = W.sub(0).collapse()
    M = W.sub(1).collapse()

    alpha_0 = Alpha_0(m, stable_hx, R, Lpml)
    alpha_1 = Alpha_1(alpha_0, Lx, Lpml, degree=2)
    alpha_2 = Alpha_2(alpha_0, Ly, Lpml, degree=2)

    beta_0 = Beta_0(m, max_velocity, R, Lpml)
    beta_1 = Beta_1(beta_0, Lx, Lpml, degree=2)
    beta_2 = Beta_2(beta_0, Ly, Lpml, degree=2)

    alpha_1 = interpolate(alpha_1, F)
    alpha_2 = interpolate(alpha_2, F)
    beta_1 = interpolate(beta_1, F)
    beta_2 = interpolate(beta_2, F)

    a_ = alpha_1 * alpha_2
    b_ = alpha_1 * beta_2 + alpha_2 * beta_1
    c_ = beta_1 * beta_2

    Lambda_e = as_tensor([[alpha_2, 0], [0, alpha_1]])
    Lambda_p = as_tensor([[beta_2, 0], [0, beta_1]])

    # Set up boundary condition
    bc = DirichletBC(W.sub(0), Constant(("0.0", "0.0")), ff, 1)

    # Create measure for the source term
    dx = Measure("dx", domain=mesh)
    ds = Measure("ds", domain=mesh, subdomain_data=ff)

    # Set up initial values
    u0 = Function(V)
    u0.set_allow_extrapolation(True)
    v0 = Function(V)
    a0 = Function(V)
    U0 = Function(M)
    V0 = Function(M)
    A0 = Function(M)

    # Test and trial functions
    (u, S) = TrialFunctions(W)
    (w, T) = TestFunctions(W)

    g = ModifiedRickerPulse(0, omega_p, amplitude, center)

    F = rho * inner(a_ * N_ddot(u, u0, a0, v0, dt, beta) \
        + b_ * N_dot(u, u0, v0, a0, dt, beta, gamma) + c_ * u, w) * dx \
        + inner(N_dot(S, U0, V0, A0, dt, beta, gamma).T * Lambda_e + S.T * Lambda_p, grad(w)) * dx \
        - inner(g, w) * ds \
        + inner(compliance(a_ * N_ddot(S, U0, A0, V0, dt, beta) + b_ * N_dot(S, U0, V0, A0, dt, beta, gamma) + c_ * S, u, mu, lmbda), T) * dx \
        - 0.5 * inner(grad(u) * Lambda_p + Lambda_p * grad(u).T + grad(N_dot(u, u0, v0, a0, dt, beta, gamma)) * Lambda_e \
        + Lambda_e * grad(N_dot(u, u0, v0, a0, dt, beta, gamma)).T, T) * dx

    a, L = lhs(F), rhs(F)

    # Assemble the system matrix (lhs) once
    A = assemble(a)

    # Create GMRES Krylov solver
    solver = KrylovSolver(A, "gmres")

    # Create solution function
    S = Function(W)

    if target:
        xdmffile_u = XDMFFile("inversion_temporal_file/target/u.xdmf")
        pvd = File("inversion_temporal_file/target/u.pvd")
        xdmffile_u.write(u0, t)
        timeseries_u = TimeSeries(
            "inversion_temporal_file/target/u_timeseries")
    else:
        xdmffile_u = XDMFFile("inversion_temporal_file/obs/u.xdmf")
        pvd = File("inversion_temporal_file/obs/u.pvd")  # ParaView output used below; mirrors the target branch
        xdmffile_u.write(u0, t)
        timeseries_u = TimeSeries("inversion_temporal_file/obs/u_timeseries")

    rec_counter = 0

    while t < t_end - 0.5 * dt:
        t += float(dt)

        if rec_counter % 10 == 0:
            print(
                '\n\rtime: {:.3f} (Progress: {:.2f}%)'.format(
                    t, 100 * t / t_end), )

        g.t = t

        # Assemble rhs and apply boundary condition
        b = assemble(L)
        bc.apply(A, b)

        # Compute solution
        solver.solve(S.vector(), b)
        (u, U) = S.split(True)

        # Update previous time step
        update(u, u0, v0, a0, beta, gamma, dt)
        update(U, U0, V0, A0, beta, gamma, dt)

        xdmffile_u.write(u, t)
        pvd << (u, t)
        timeseries_u.store(u.vector(), t)

        energy = inner(u, u) * dx
        E = assemble(energy)
        print("E = ", E)
        print(u.vector().max())
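
# Hedged invocation sketch for forward(); the material parameters below are illustrative only
# and assume the legacy DOLFIN Expression class used elsewhere in this script.
mu_expr = Expression("1.0e6", degree=0)
lmbda_expr = Expression("2.0e6", degree=0)
forward(mu_expr, lmbda_expr, rho=1000.0, t_end=0.5, target=True)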
Example #7
mu_pr = kap_by_mu * (mu1 + mu2)  # make this value very high
alph1 = fem.Constant(mesh, 1.0)
alph2 = fem.Constant(mesh, -2.474)
m1 = fem.Constant(mesh, 5.42 * 10**3)
m2 = fem.Constant(mesh, 20.78 * 10**3)
a1 = fem.Constant(mesh, -10.0)
a2 = fem.Constant(mesh, 1.948)
K1 = fem.Constant(mesh, 3507.0 * 10**3)
K2 = fem.Constant(mesh, 10**(-6))
bta1 = fem.Constant(mesh, 1.852)
bta2 = fem.Constant(mesh, 0.26)
eta0 = fem.Constant(mesh, 7014.0 * 10**3)
etaInf = fem.Constant(mesh, 0.1 * 10**3)  # 0.1

# integration measures
dx = Measure("dx", metadata=metadata)
dS = Measure("dS", metadata=metadata)

# stabilization constant
qvals = 5.0 * (mu1 + mu2 + m1 + m2)

# loading rate
ldot = 0.05

# array of time-like values for stepping through the loading
timeVals = np.linspace(0, 4.0 / ldot, 203)

# delta t values (will change when doing adaptive computation)
dt = timeVals[1] - timeVals[0]

# array of displacement values to apply at right boundary
Example #8

def dS_from_measure(mesh):
    boundaries = MeshFunction("size_t", mesh, mesh.topology.dim - 1, 1)
    dS = Measure("dS")(subdomain_data=boundaries, domain=mesh)
    return dS
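
# The call syntax above is equivalent to passing the keywords straight to the Measure
# constructor; a sketch of the same helper written that way.
def dS_from_measure_kwargs(mesh):
    boundaries = MeshFunction("size_t", mesh, mesh.topology.dim - 1, 1)
    return Measure("dS", domain=mesh, subdomain_data=boundaries)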
Example #9
        def separate(self):
            class _SeparatedParametrizedForm_Replacer(Transformer):
                def __init__(self, mapping):
                    Transformer.__init__(self)
                    self.mapping = mapping

                def operator(self, e, *ops):
                    if e in self.mapping:
                        return self.mapping[e]
                    else:
                        return e._ufl_expr_reconstruct_(*ops)

                def terminal(self, e):
                    return self.mapping.get(e, e)

            logger.log(DEBUG,
                       "***        SEPARATE FORM COEFFICIENTS        ***")

            logger.log(DEBUG, "1. Extract coefficients")
            integral_to_coefficients = dict()
            for integral in self._form.integrals():
                logger.log(
                    DEBUG,
                    "\t Currently on integrand " + str(integral.integrand()))
                self._coefficients.append(list())  # of ParametrizedExpression
                for e in iter_expressions(integral):
                    logger.log(DEBUG, "\t\t Expression " + str(e))
                    pre_traversal_e = [n for n in pre_traversal(e)]
                    tree_nodes_skip = [False for _ in pre_traversal_e]
                    for (n_i, n) in enumerate(pre_traversal_e):
                        if not tree_nodes_skip[n_i]:
                            # Skip expressions which are trivially non parametrized
                            if isinstance(n, Argument):
                                logger.log(
                                    DEBUG, "\t\t Node " + str(n) +
                                    " is skipped because it is an Argument")
                                continue
                            elif isinstance(n, Constant):
                                logger.log(
                                    DEBUG, "\t\t Node " + str(n) +
                                    " is skipped because it is a Constant")
                                continue
                            elif isinstance(n, MultiIndex):
                                logger.log(
                                    DEBUG, "\t\t Node " + str(n) +
                                    " is skipped because it is a MultiIndex")
                                continue
                            # Skip all expressions with at least one leaf which is an Argument
                            for t in traverse_terminals(n):
                                if isinstance(t, Argument):
                                    logger.log(
                                        DEBUG, "\t\t Node " + str(n) +
                                        " is skipped because it contains an Argument"
                                    )
                                    break
                            else:  # not broken
                                logger.log(
                                    DEBUG, "\t\t Node " + str(n) +
                                    " and its descendants are being analyzed for non-parametrized check"
                                )
                                # Make sure to skip all descendants of this node in the outer loop
                                # Note that a map with key set to the expression is not enough to
                                # mark the node as visited, since the same expression may appear
                                # on different sides of the tree
                                pre_traversal_n = [d for d in pre_traversal(n)]
                                for (d_i, d) in enumerate(pre_traversal_n):
                                    assert d == pre_traversal_e[
                                        n_i +
                                        d_i]  # make sure that we are marking the right node
                                    tree_nodes_skip[n_i + d_i] = True
                                # We might be able to strip any (non-parametrized) expression out
                                all_candidates = list()
                                internal_tree_nodes_skip = [
                                    False for _ in pre_traversal_n
                                ]
                                for (d_i, d) in enumerate(pre_traversal_n):
                                    if not internal_tree_nodes_skip[d_i]:
                                        # Skip all expressions where at least one leaf is not parametrized
                                        for t in traverse_terminals(d):
                                            if isinstance(t, BaseExpression):
                                                if wrapping.is_pull_back_expression(
                                                        t
                                                ) and not wrapping.is_pull_back_expression_parametrized(
                                                        t):
                                                    logger.log(
                                                        DEBUG,
                                                        "\t\t\t Descendant node "
                                                        + str(d) +
                                                        " causes the non-parametrized check to break because it contains a non-parametrized pulled back expression"
                                                    )
                                                    break
                                                else:
                                                    parameters = t._parameters
                                                    if "mu_0" not in parameters:
                                                        logger.log(
                                                            DEBUG,
                                                            "\t\t\t Descendant node "
                                                            + str(d) +
                                                            " causes the non-parametrized check to break because it contains a non-parametrized expression"
                                                        )
                                                        break
                                            elif isinstance(t, Constant):
                                                logger.log(
                                                    DEBUG,
                                                    "\t\t\t Descendant node " +
                                                    str(d) +
                                                    " causes the non-parametrized check to break because it contains a constant"
                                                )
                                                break
                                            elif isinstance(
                                                    t, GeometricQuantity
                                            ) and not isinstance(
                                                    t, FacetNormal
                                            ) and self._strict:
                                                logger.log(
                                                    DEBUG,
                                                    "\t\t\t Descendant node " +
                                                    str(d) +
                                                    " causes the non-parametrized check to break because it contains a geometric quantity and strict mode is on"
                                                )
                                                break
                                            elif wrapping.is_problem_solution_type(
                                                    t):
                                                if not wrapping.is_problem_solution(
                                                        t
                                                ) and not wrapping.is_problem_solution_dot(
                                                        t):
                                                    logger.log(
                                                        DEBUG,
                                                        "\t\t\t Descendant node "
                                                        + str(d) +
                                                        " causes the non-parametrized check to break because it contains a non-parametrized function"
                                                    )
                                                    break
                                                elif self._strict:  # solutions are not allowed, break
                                                    if wrapping.is_problem_solution(
                                                            t):
                                                        (
                                                            _, component,
                                                            solution
                                                        ) = wrapping.solution_identify_component(
                                                            t)
                                                        problem = get_problem_from_solution(
                                                            solution)
                                                        logger.log(
                                                            DEBUG,
                                                            "\t\t\t Descendant node "
                                                            + str(d) +
                                                            " causes the non-parametrized check to break because it contains the solution of "
                                                            + problem.name() +
                                                            " (exact problem decorator: "
                                                            + str(
                                                                hasattr(
                                                                    problem,
                                                                    "__is_exact__"
                                                                )) +
                                                            ", component: " +
                                                            str(component) +
                                                            ") and strict mode is on"
                                                        )
                                                        break
                                                    elif wrapping.is_problem_solution_dot(
                                                            t):
                                                        (
                                                            _, component,
                                                            solution_dot
                                                        ) = wrapping.solution_dot_identify_component(
                                                            t)
                                                        problem = get_problem_from_solution_dot(
                                                            solution_dot)
                                                        logger.log(
                                                            DEBUG,
                                                            "\t\t\t Descendant node "
                                                            + str(d) +
                                                            " causes the non-parametrized check to break because it contains the solution_dot of "
                                                            + problem.name() +
                                                            " (exact problem decorator: "
                                                            + str(
                                                                hasattr(
                                                                    problem,
                                                                    "__is_exact__"
                                                                )) +
                                                            ", component: " +
                                                            str(component) +
                                                            ") and strict mode is on"
                                                        )
                                                    else:
                                                        raise RuntimeError(
                                                            "Unidentified solution found"
                                                        )
                                        else:
                                            at_least_one_expression_or_solution = False
                                            for t in traverse_terminals(d):
                                                if isinstance(
                                                        t, BaseExpression
                                                ):  # which is parametrized, because previous for loop was not broken
                                                    at_least_one_expression_or_solution = True
                                                    logger.log(
                                                        DEBUG,
                                                        "\t\t\t Descendant node "
                                                        + str(d) +
                                                        " is a candidate after non-parametrized check because it contains the parametrized expression "
                                                        + str(t))
                                                    break
                                                elif wrapping.is_problem_solution_type(
                                                        t):
                                                    if wrapping.is_problem_solution(
                                                            t):
                                                        at_least_one_expression_or_solution = True
                                                        (
                                                            _, component,
                                                            solution
                                                        ) = wrapping.solution_identify_component(
                                                            t)
                                                        problem = get_problem_from_solution(
                                                            solution)
                                                        logger.log(
                                                            DEBUG,
                                                            "\t\t\t Descendant node "
                                                            + str(d) +
                                                            " is a candidate after non-parametrized check because it contains the solution of "
                                                            + problem.name() +
                                                            " (exact problem decorator: "
                                                            + str(
                                                                hasattr(
                                                                    problem,
                                                                    "__is_exact__"
                                                                )) +
                                                            ", component: " +
                                                            str(component) +
                                                            ")")
                                                        break
                                                    elif wrapping.is_problem_solution_dot(
                                                            t):
                                                        at_least_one_expression_or_solution = True
                                                        (
                                                            _, component,
                                                            solution_dot
                                                        ) = wrapping.solution_dot_identify_component(
                                                            t)
                                                        problem = get_problem_from_solution_dot(
                                                            solution_dot)
                                                        logger.log(
                                                            DEBUG,
                                                            "\t\t\t Descendant node "
                                                            + str(d) +
                                                            " is a candidate after non-parametrized check because it contains the solution_dot of "
                                                            + problem.name() +
                                                            " (exact problem decorator: "
                                                            + str(
                                                                hasattr(
                                                                    problem,
                                                                    "__is_exact__"
                                                                )) +
                                                            ", component: " +
                                                            str(component) +
                                                            ")")
                                                        break
                                            if at_least_one_expression_or_solution:
                                                all_candidates.append(d)
                                                pre_traversal_d = [
                                                    q for q in pre_traversal(d)
                                                ]
                                                for (q_i, q) in enumerate(
                                                        pre_traversal_d):
                                                    assert q == pre_traversal_n[
                                                        d_i +
                                                        q_i]  # make sure that we are marking the right node
                                                    internal_tree_nodes_skip[
                                                        d_i + q_i] = True
                                            else:
                                                logger.log(
                                                    DEBUG,
                                                    "\t\t\t Descendant node " +
                                                    str(d) +
                                                    " has not passed the non-parametrized because it is not a parametrized expression or a solution"
                                                )
                                # Evaluate candidates
                                if len(
                                        all_candidates
                                ) == 0:  # the whole expression was actually non-parametrized
                                    logger.log(
                                        DEBUG, "\t\t Node " + str(n) +
                                        " is skipped because it is a non-parametrized coefficient"
                                    )
                                    continue
                                elif len(
                                        all_candidates
                                ) == 1:  # the whole expression was actually parametrized
                                    logger.log(
                                        DEBUG, "\t\t Node " + str(n) +
                                        " will be accepted because it is a non-parametrized coefficient"
                                    )
                                    pass
                                else:  # part of the expression was not parametrized, and separating the non parametrized part may result in more than one coefficient
                                    if self._strict:  # non parametrized coefficients are not allowed, so split the expression
                                        logger.log(
                                            DEBUG, "\t\t\t Node " + str(n) +
                                            " will be accepted because it is a non-parametrized coefficient with more than one candidate. It will be split because strict mode is on. Its split coefficients are "
                                            + ", ".join([
                                                str(c) for c in all_candidates
                                            ]))
                                    else:  # non parametrized coefficients are allowed, so go on with the whole expression
                                        logger.log(
                                            DEBUG, "\t\t\t Node " + str(n) +
                                            " will be accepted because it is a non-parametrized coefficient with more than one candidate. It will not be split because strict mode is off. Splitting it would have resulted in more than one coefficient, namely "
                                            + ", ".join([
                                                str(c) for c in all_candidates
                                            ]))
                                        all_candidates = [n]
                                # Add the coefficient(s)
                                for candidate in all_candidates:

                                    def preprocess_candidate(candidate):
                                        if isinstance(candidate, Indexed):
                                            assert len(
                                                candidate.ufl_operands) == 2
                                            assert isinstance(
                                                candidate.ufl_operands[1],
                                                MultiIndex)
                                            if all([
                                                    isinstance(
                                                        index, FixedIndex)
                                                    for index in candidate.
                                                    ufl_operands[1].indices()
                                            ]):
                                                logger.log(
                                                    DEBUG,
                                                    "\t\t\t Preprocessed descendant node "
                                                    + str(candidate) +
                                                    " as an Indexed expression with fixed indices, resulting in a candidate "
                                                    + str(candidate) +
                                                    " of type " +
                                                    str(type(candidate)))
                                                return candidate  # no further preprocessing needed
                                            else:
                                                logger.log(
                                                    DEBUG,
                                                    "\t\t\t Preprocessed descendant node "
                                                    + str(candidate) +
                                                    " as an Indexed expression with at least one mute index, resulting in a candidate "
                                                    + str(candidate.
                                                          ufl_operands[0]) +
                                                    " of type " + str(
                                                        type(candidate.
                                                             ufl_operands[0])))
                                                return preprocess_candidate(
                                                    candidate.ufl_operands[0])
                                        elif isinstance(candidate, IndexSum):
                                            assert len(
                                                candidate.ufl_operands) == 2
                                            assert isinstance(
                                                candidate.ufl_operands[1],
                                                MultiIndex)
                                            assert all([
                                                isinstance(index, MuteIndex)
                                                for index in candidate.
                                                ufl_operands[1].indices()
                                            ])
                                            logger.log(
                                                DEBUG,
                                                "\t\t\t Preprocessed descendant node "
                                                + str(candidate) +
                                                " as an IndexSum expression, resulting in a candidate "
                                                +
                                                str(candidate.ufl_operands[0])
                                                + " of type " + str(
                                                    type(candidate.
                                                         ufl_operands[0])))
                                            return preprocess_candidate(
                                                candidate.ufl_operands[0])
                                        elif isinstance(candidate, ListTensor):
                                            candidates = set([
                                                preprocess_candidate(component)
                                                for component in
                                                candidate.ufl_operands
                                            ])
                                            if len(candidates) == 1:
                                                preprocessed_candidate = candidates.pop(
                                                )
                                                logger.log(
                                                    DEBUG,
                                                    "\t\t\t Preprocessed descendant node "
                                                    + str(candidate) +
                                                    " as an ListTensor expression with a unique preprocessed component, resulting in a candidate "
                                                    +
                                                    str(preprocessed_candidate)
                                                    + " of type " + str(
                                                        type(
                                                            preprocessed_candidate
                                                        )))
                                                return preprocess_candidate(
                                                    preprocessed_candidate)
                                            else:
                                                at_least_one_mute_index = False
                                                candidates_from_components = list(
                                                )
                                                for component in candidates:
                                                    assert isinstance(
                                                        component,
                                                        (ComponentTensor,
                                                         Indexed))
                                                    assert len(
                                                        component.ufl_operands
                                                    ) == 2
                                                    assert isinstance(
                                                        component.
                                                        ufl_operands[1],
                                                        MultiIndex)
                                                    if not all([
                                                            isinstance(
                                                                index,
                                                                FixedIndex) for
                                                            index in component.
                                                            ufl_operands[1].
                                                            indices()
                                                    ]):
                                                        at_least_one_mute_index = True
                                                    candidates_from_components.append(
                                                        preprocess_candidate(
                                                            component.
                                                            ufl_operands[0]))
                                                if at_least_one_mute_index:
                                                    candidates_from_components = set(
                                                        candidates_from_components
                                                    )
                                                    assert len(
                                                        candidates_from_components
                                                    ) == 1
                                                    preprocessed_candidate = candidates_from_components.pop(
                                                    )
                                                    logger.log(
                                                        DEBUG,
                                                        "\t\t\t Preprocessed descendant node "
                                                        + str(candidate) +
                                                        " as an ListTensor expression with multiple preprocessed components with at least one mute index, resulting in a candidate "
                                                        +
                                                        str(preprocessed_candidate
                                                            ) + " of type " +
                                                        str(
                                                            type(
                                                                preprocessed_candidate
                                                            )))
                                                    return preprocess_candidate(
                                                        preprocessed_candidate)
                                                else:
                                                    logger.log(
                                                        DEBUG,
                                                        "\t\t\t Preprocessed descendant node "
                                                        + str(candidate) +
                                                        " as an ListTensor expression with multiple preprocessed components with fixed indices, resulting in a candidate "
                                                        + str(candidate) +
                                                        " of type " +
                                                        str(type(candidate)))
                                                    return candidate  # no further preprocessing needed
                                        else:
                                            logger.log(
                                                DEBUG,
                                                "\t\t\t No preprocessing required for descendant node "
                                                + str(candidate) +
                                                " as a coefficient of type " +
                                                str(type(candidate)))
                                            return candidate

                                    preprocessed_candidate = preprocess_candidate(
                                        candidate)
                                    if preprocessed_candidate not in self._coefficients[
                                            -1]:
                                        self._coefficients[-1].append(
                                            preprocessed_candidate)
                                    logger.log(
                                        DEBUG,
                                        "\t\t\t Accepting descendant node " +
                                        str(preprocessed_candidate) +
                                        " as a coefficient of type " +
                                        str(type(preprocessed_candidate)))
                        else:
                            logger.log(
                                DEBUG, "\t\t Node " + str(n) +
                                " to be skipped because it is a descendant of a coefficient which has already been detected"
                            )
                if len(self._coefficients[-1]
                       ) == 0:  # then there were no coefficients to extract
                    logger.log(DEBUG,
                               "\t There were no coefficients to extract")
                    self._coefficients.pop(
                    )  # remove the (empty) element that was added to possibly store coefficients
                else:
                    logger.log(DEBUG, "\t Extracted coefficients are:")
                    for c in self._coefficients[-1]:
                        logger.log(DEBUG, "\t\t" + str(c))
                    integral_to_coefficients[integral] = self._coefficients[-1]

            logger.log(DEBUG,
                       "2. Prepare placeholders and forms with placeholders")
            for integral in self._form.integrals():
                # Prepare measure for the new form (from firedrake/mg/ufl_utils.py)
                measure = Measure(integral.integral_type(),
                                  domain=integral.ufl_domain(),
                                  subdomain_id=integral.subdomain_id(),
                                  subdomain_data=integral.subdomain_data(),
                                  metadata=integral.metadata())
                if integral not in integral_to_coefficients:
                    logger.log(
                        DEBUG, "\t Adding form for integrand " +
                        str(integral.integrand()) + " to unchanged forms")
                    self._form_unchanged.append(integral.integrand() * measure)
                else:
                    logger.log(
                        DEBUG,
                        "\t Preparing form with placeholders for integrand " +
                        str(integral.integrand()))
                    self._placeholders.append(list())  # of Constants
                    placeholders_dict = dict()
                    for c in integral_to_coefficients[integral]:
                        self._placeholders[-1].append(
                            Constant(self._NaN * ones(c.ufl_shape)))
                        placeholders_dict[c] = self._placeholders[-1][-1]
                        logger.log(
                            DEBUG, "\t\t " + str(placeholders_dict[c]) +
                            " is the placeholder for " + str(c))
                    replacer = _SeparatedParametrizedForm_Replacer(
                        placeholders_dict)
                    new_integrand = apply_transformer(integral.integrand(),
                                                      replacer)
                    self._form_with_placeholders.append(new_integrand *
                                                        measure)

            logger.log(
                DEBUG,
                "3. Assert that there are no parametrized expressions left")
            for form in self._form_with_placeholders:
                for integral in form.integrals():
                    for e in pre_traversal(integral.integrand()):
                        if isinstance(e, BaseExpression):
                            assert not (
                                wrapping.is_pull_back_expression(e)
                                and wrapping.
                                is_pull_back_expression_parametrized(e)
                            ), "Form " + str(
                                integral
                            ) + " still contains a parametrized pull back expression"
                            parameters = e._parameters
                            assert "mu_0" not in parameters, "Form " + str(
                                integral
                            ) + " still contains a parametrized expression"

            logger.log(DEBUG, "4. Prepare coefficients hash codes")
            for addend in self._coefficients:
                self._placeholder_names.append(list())  # of string
                for factor in addend:
                    self._placeholder_names[-1].append(
                        wrapping.expression_name(factor))

            logger.log(DEBUG, "5. Assert list length consistency")
            assert len(self._coefficients) == len(self._placeholders)
            assert len(self._coefficients) == len(self._placeholder_names)
            for (c, p, pn) in zip(self._coefficients, self._placeholders,
                                  self._placeholder_names):
                assert len(c) == len(p)
                assert len(c) == len(pn)
            assert len(self._coefficients) == len(self._form_with_placeholders)

            logger.log(DEBUG,
                       "*** DONE - SEPARATE FORM COEFFICIENTS - DONE ***")
            logger.log(DEBUG, "")
Example #10
def demo_stacked_cubes(outfile: XDMFFile,
                       theta: float,
                       gmsh: bool = True,
                       quad: bool = False,
                       compare: bool = False,
                       res: float = 0.1):
    log_info(
        f"Run theta:{theta:.2f}, Quad: {quad}, Gmsh {gmsh}, Res {res:.2e}")

    celltype = "quadrilateral" if quad else "triangle"
    if gmsh:
        mesh, mt = gmsh_2D_stacked(celltype, theta)
        mesh.name = f"mesh_{celltype}_{theta:.2f}_gmsh"

    else:
        mesh_name = "mesh"
        filename = f"meshes/mesh_{celltype}_{theta:.2f}.xdmf"

        mesh_2D_dolfin(celltype, theta)
        with XDMFFile(MPI.COMM_WORLD, filename, "r") as xdmf:
            mesh = xdmf.read_mesh(name=mesh_name)
            mesh.name = f"mesh_{celltype}_{theta:.2f}"
            tdim = mesh.topology.dim
            fdim = tdim - 1
            mesh.topology.create_connectivity(tdim, tdim)
            mesh.topology.create_connectivity(fdim, tdim)
            mt = xdmf.read_meshtags(mesh, name="facet_tags")

    # Helper until meshtags can be read in from xdmf
    V = VectorFunctionSpace(mesh, ("Lagrange", 1))

    r_matrix = rotation_matrix([0, 0, 1], theta)
    g_vec = np.dot(r_matrix, [0, -1.25e2, 0])
    g = Constant(mesh, PETSc.ScalarType(g_vec[:2]))

    def bottom_corner(x):
        return np.isclose(x, [[0], [0], [0]]).all(axis=0)

    # Fix bottom corner
    bc_value = np.array((0, ) * mesh.geometry.dim, dtype=PETSc.ScalarType)
    bottom_dofs = locate_dofs_geometrical(V, bottom_corner)
    bc_bottom = dirichletbc(bc_value, bottom_dofs, V)
    bcs = [bc_bottom]

    # Elasticity parameters
    E = PETSc.ScalarType(1.0e3)
    nu = 0
    mu = Constant(mesh, E / (2.0 * (1.0 + nu)))
    lmbda = Constant(mesh, E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu)))

    # Stress computation
    def sigma(v):
        return (2.0 * mu * sym(grad(v)) +
                lmbda * tr(sym(grad(v))) * Identity(len(v)))

    # Define variational problem
    u = TrialFunction(V)
    v = TestFunction(V)
    a = inner(sigma(u), grad(v)) * dx
    ds = Measure("ds", domain=mesh, subdomain_data=mt, subdomain_id=3)
    rhs = inner(Constant(mesh, PETSc.ScalarType(
        (0, 0))), v) * dx + inner(g, v) * ds

    def left_corner(x):
        return np.isclose(x.T, np.dot(r_matrix, [0, 2, 0])).all(axis=1)

    # Create multi point constraint
    mpc = MultiPointConstraint(V)

    with Timer("~Contact: Create contact constraint"):
        nh = create_normal_approximation(V, mt, 4)
        mpc.create_contact_slip_condition(mt, 4, 9, nh)

    with Timer("~Contact: Add non-slip condition at bottom interface"):
        bottom_normal = facet_normal_approximation(V, mt, 5)
        mpc.create_slip_constraint(V, (mt, 5), bottom_normal, bcs=bcs)

    with Timer("~Contact: Add tangential constraint at one point"):
        vertex = locate_entities_boundary(mesh, 0, left_corner)

        tangent = facet_normal_approximation(V, mt, 3, tangent=True)
        mtv = meshtags(mesh, 0, vertex, np.full(len(vertex), 6,
                                                dtype=np.int32))
        mpc.create_slip_constraint(V, (mtv, 6), tangent, bcs=bcs)

    mpc.finalize()
    rtol = 1e-9
    petsc_options = {
        "ksp_rtol": 1e-9,
        "pc_type": "gamg",
        "pc_gamg_type": "agg",
        "pc_gamg_square_graph": 2,
        "pc_gamg_threshold": 0.02,
        "pc_gamg_coarse_eq_limit": 1000,
        "pc_gamg_sym_graph": True,
        "mg_levels_ksp_type": "chebyshev",
        "mg_levels_pc_type": "jacobi",
        "mg_levels_esteig_ksp_type": "cg"
        #  , "help": None, "ksp_view": None
    }

    # Solve Linear problem
    problem = LinearProblem(a, rhs, mpc, bcs=bcs, petsc_options=petsc_options)

    # Build near nullspace
    null_space = rigid_motions_nullspace(mpc.function_space)
    problem.A.setNearNullSpace(null_space)
    u_h = problem.solve()

    it = problem.solver.getIterationNumber()
    if MPI.COMM_WORLD.rank == 0:
        print("Number of iterations: {0:d}".format(it))

    unorm = u_h.vector.norm()
    if MPI.COMM_WORLD.rank == 0:
        print(f"Norm of u: {unorm}")

    # Write solution to file
    ext = "_gmsh" if gmsh else ""
    u_h.name = "u_mpc_{0:s}_{1:.2f}{2:s}".format(celltype, theta, ext)

    outfile.write_mesh(mesh)
    outfile.write_function(u_h, 0.0,
                           f"Xdmf/Domain/Grid[@Name='{mesh.name}'][1]")

    # Solve the MPC problem using a global transformation matrix
    # and numpy solvers to get reference values
    if not compare:
        return
    log_info("Solving reference problem with global matrix (using numpy)")
    with Timer("~MPC: Reference problem"):
        # Generate reference matrices and unconstrained solution
        A_org = assemble_matrix(form(a), bcs)
        A_org.assemble()
        L_org = assemble_vector(form(rhs))
        apply_lifting(L_org, [form(a)], [bcs])
        L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES,
                          mode=PETSc.ScatterMode.REVERSE)
        set_bc(L_org, bcs)

    root = 0
    with Timer("~MPC: Verification"):
        compare_mpc_lhs(A_org, problem.A, mpc, root=root)
        compare_mpc_rhs(L_org, problem.b, mpc, root=root)
        # Gather LHS, RHS and solution on one process
        A_csr = gather_PETScMatrix(A_org, root=root)
        K = gather_transformation_matrix(mpc, root=root)
        L_np = gather_PETScVector(L_org, root=root)
        u_mpc = gather_PETScVector(u_h.vector, root=root)

        if MPI.COMM_WORLD.rank == root:
            KTAK = K.T * A_csr * K
            reduced_L = K.T @ L_np
            # Solve linear system
            d = scipy.sparse.linalg.spsolve(KTAK, reduced_L)
            # Back substitution to full solution vector
            uh_numpy = K @ d
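            # The gathered transformation matrix K maps the reduced (master)
            # dofs to the full dof vector, so the constrained system is solved
            # as (K^T A K) d = K^T b and expanded back with u = K d; this gives
            # a scipy reference for the MPC-assembled solution above.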

            assert np.allclose(uh_numpy, u_mpc, rtol=rtol)
Example #11
0
        W_element = BlockElement(V_element, Q_element)
    elif discretization == "EG":
        Q_element = FiniteElement("CG", mesh.ufl_cell(), 1)
        D_element = FiniteElement("DG", mesh.ufl_cell(), 0)
        EG_element = Q_element + D_element
        W_element = BlockElement(V_element, EG_element)
    else:
        raise RuntimeError("Invalid discretization")
    W = BlockFunctionSpace(mesh, W_element)

    PM = FunctionSpace(mesh, "DG", 0)
    TM = TensorFunctionSpace(mesh, "DG", 0)

    I = Identity(mesh.topology().dim())

    dx = Measure("dx", domain=mesh, subdomain_data=subdomains)
    ds = Measure("ds", domain=mesh, subdomain_data=boundaries)
    dS = Measure("dS", domain=mesh, subdomain_data=boundaries)

    # Test and trial functions
    vq = BlockTestFunction(W)
    (v, q) = block_split(vq)
    up = BlockTrialFunction(W)
    (u, p) = block_split(up)

    w = BlockFunction(W)
    w0 = BlockFunction(W)
    (u0, p0) = block_split(w0)

    n = FacetNormal(mesh)
    vc = CellVolume(mesh)
    local.array[left_dofs] = E_left * nu_left / ((1 + nu_left) *
                                                 (1 - 2 * nu_left))
    local.array[right_dofs] = E_right * nu_right / ((1 + nu_right) *
                                                    (1 - 2 * nu_right))


# Stress computation
def sigma(v):
    return (2.0 * mu * sym(grad(v)) +
            lmbda * tr(sym(grad(v))) * Identity(len(v)))


# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
dx = Measure("dx", domain=mesh, subdomain_data=ct)
a = inner(sigma(u), grad(v)) * dx
x = SpatialCoordinate(mesh)
rhs = inner(Constant(mesh, PETSc.ScalarType((0, 0))), v) * dx

# Set boundary conditions
u_push = np.array([0.1, 0], dtype=PETSc.ScalarType)
dofs = locate_dofs_geometrical(V, lambda x: np.isclose(x[0], 0))
bc_push = dirichletbc(u_push, dofs, V)
u_fix = np.array([0, 0], dtype=PETSc.ScalarType)
bc_fix = dirichletbc(
    u_fix, locate_dofs_geometrical(V, lambda x: np.isclose(x[0], 2.1)), V)
bcs = [bc_push, bc_fix]


def gather_dof_coordinates(V: FunctionSpace, dofs: np.ndarray):
Example #13
0
    def _basic_form_on_reduced_function_space(form_wrapper, at):
        form = form_wrapper._form
        form_name = form_wrapper.name()
        mu = get_problem_from_parametrized_operator(form_wrapper).mu
        reduced_V = at.get_reduced_function_spaces()
        reduced_subdomain_data = at.get_reduced_subdomain_data()

        if (form_name,
                reduced_V) not in form_on_reduced_function_space__form_cache:
            visited = set()
            replacements = dict()
            truth_problems = list()
            truth_problem_to_components = dict()
            truth_problem_to_exact_truth_problem = dict()
            truth_problem_to_reduced_mesh_solution = dict()
            truth_problem_to_reduced_mesh_interpolator = dict()
            reduced_problem_to_components = dict()
            reduced_problem_to_reduced_mesh_solution = dict()
            reduced_problem_to_reduced_basis_functions = dict()

            # Look for terminals on truth mesh
            for node in wrapping.form_iterator(form, "nodes"):
                if node in visited:
                    continue
                # ... test and trial functions
                elif isinstance(node, Argument):
                    replacements[node] = wrapping.form_argument_replace(
                        node, reduced_V)
                    visited.add(node)
                # ... problem solutions related to nonlinear terms
                elif wrapping.is_problem_solution_or_problem_solution_component_type(
                        node):
                    if wrapping.is_problem_solution_or_problem_solution_component(
                            node):
                        (preprocessed_node, component, truth_solution
                         ) = wrapping.solution_identify_component(node)
                        truth_problem = get_problem_from_solution(
                            truth_solution)
                        truth_problems.append(truth_problem)
                        # Store the component
                        if truth_problem not in truth_problem_to_components:
                            truth_problem_to_components[truth_problem] = list()
                        truth_problem_to_components[truth_problem].append(
                            component)
                        # Get the function space corresponding to preprocessed_node on the reduced mesh
                        auxiliary_reduced_V = at.get_auxiliary_reduced_function_space(
                            truth_problem, component)
                        # Define and store the replacement
                        if truth_problem not in truth_problem_to_reduced_mesh_solution:
                            truth_problem_to_reduced_mesh_solution[
                                truth_problem] = list()
                        replacements[preprocessed_node] = backend.Function(
                            auxiliary_reduced_V)
                        truth_problem_to_reduced_mesh_solution[
                            truth_problem].append(
                                replacements[preprocessed_node])
                        # Get interpolator on reduced mesh
                        if truth_problem not in truth_problem_to_reduced_mesh_interpolator:
                            truth_problem_to_reduced_mesh_interpolator[
                                truth_problem] = list()
                        truth_problem_to_reduced_mesh_interpolator[
                            truth_problem].append(
                                at.get_auxiliary_function_interpolator(
                                    truth_problem, component))
                    else:
                        (
                            auxiliary_problem, component
                        ) = wrapping.get_auxiliary_problem_for_non_parametrized_function(
                            node)
                        preprocessed_node = node
                        # Get the function space corresponding to preprocessed_node on the reduced mesh
                        auxiliary_reduced_V = at.get_auxiliary_reduced_function_space(
                            auxiliary_problem, component)
                        # Get interpolator on reduced mesh
                        auxiliary_truth_problem_to_reduced_mesh_interpolator = at.get_auxiliary_function_interpolator(
                            auxiliary_problem, component)
                        # Define and store the replacement
                        replacements[
                            preprocessed_node] = auxiliary_truth_problem_to_reduced_mesh_interpolator(
                                preprocessed_node)
                    # Make sure to skip any parent solution related to this one
                    visited.add(node)
                    visited.add(preprocessed_node)
                    for parent_node in wrapping.solution_iterator(
                            preprocessed_node):
                        visited.add(parent_node)
                # ... geometric quantities
                elif isinstance(node, GeometricQuantity):
                    if len(reduced_V) == 2:
                        assert reduced_V[0].mesh().ufl_domain(
                        ) == reduced_V[1].mesh().ufl_domain()
                    replacements[node] = type(node)(reduced_V[0].mesh())
                    visited.add(node)
            # ... and replace them
            replaced_form = wrapping.form_replace(form, replacements, "nodes")

            # Look for measures ...
            if len(reduced_V) == 2:
                assert reduced_V[0].mesh().ufl_domain() == reduced_V[1].mesh(
                ).ufl_domain()
            measure_reduced_domain = reduced_V[0].mesh().ufl_domain()
            replacements_measures = dict()
            for integral in wrapping.form_iterator(replaced_form, "integrals"):
                # Prepare measure for the new form (from firedrake/mg/ufl_utils.py)
                integral_subdomain_data = integral.subdomain_data()
                if integral_subdomain_data is not None:
                    integral_reduced_subdomain_data = reduced_subdomain_data[
                        integral_subdomain_data]
                else:
                    integral_reduced_subdomain_data = None
                measure = Measure(
                    integral.integral_type(),
                    domain=measure_reduced_domain,
                    subdomain_id=integral.subdomain_id(),
                    subdomain_data=integral_reduced_subdomain_data,
                    metadata=integral.metadata())
                replacements_measures[integral.integrand(),
                                      integral.integral_type(),
                                      integral.subdomain_id()] = measure
            # ... and replace them
            replaced_form_with_replaced_measures = wrapping.form_replace(
                replaced_form, replacements_measures, "measures")

            # Cache the resulting dicts
            form_on_reduced_function_space__form_cache[(
                form_name, reduced_V)] = replaced_form_with_replaced_measures
            form_on_reduced_function_space__truth_problems_cache[(
                form_name, reduced_V)] = truth_problems
            form_on_reduced_function_space__truth_problem_to_components_cache[(
                form_name, reduced_V)] = truth_problem_to_components
            form_on_reduced_function_space__truth_problem_to_exact_truth_problem_cache[
                (form_name, reduced_V)] = truth_problem_to_exact_truth_problem
            form_on_reduced_function_space__truth_problem_to_reduced_mesh_solution_cache[
                (form_name,
                 reduced_V)] = truth_problem_to_reduced_mesh_solution
            form_on_reduced_function_space__truth_problem_to_reduced_mesh_interpolator_cache[
                (form_name,
                 reduced_V)] = truth_problem_to_reduced_mesh_interpolator
            form_on_reduced_function_space__reduced_problem_to_components_cache[
                (form_name, reduced_V)] = reduced_problem_to_components
            form_on_reduced_function_space__reduced_problem_to_reduced_mesh_solution_cache[
                (form_name,
                 reduced_V)] = reduced_problem_to_reduced_mesh_solution
            form_on_reduced_function_space__reduced_problem_to_reduced_basis_functions_cache[
                (form_name,
                 reduced_V)] = reduced_problem_to_reduced_basis_functions

        # Extract from cache
        replaced_form_with_replaced_measures = form_on_reduced_function_space__form_cache[
            (form_name, reduced_V)]
        truth_problems = form_on_reduced_function_space__truth_problems_cache[(
            form_name, reduced_V)]
        truth_problem_to_components = form_on_reduced_function_space__truth_problem_to_components_cache[
            (form_name, reduced_V)]
        truth_problem_to_exact_truth_problem = form_on_reduced_function_space__truth_problem_to_exact_truth_problem_cache[
            (form_name, reduced_V)]
        truth_problem_to_reduced_mesh_solution = form_on_reduced_function_space__truth_problem_to_reduced_mesh_solution_cache[
            (form_name, reduced_V)]
        truth_problem_to_reduced_mesh_interpolator = form_on_reduced_function_space__truth_problem_to_reduced_mesh_interpolator_cache[
            (form_name, reduced_V)]
        reduced_problem_to_components = form_on_reduced_function_space__reduced_problem_to_components_cache[
            (form_name, reduced_V)]
        reduced_problem_to_reduced_mesh_solution = form_on_reduced_function_space__reduced_problem_to_reduced_mesh_solution_cache[
            (form_name, reduced_V)]
        reduced_problem_to_reduced_basis_functions = form_on_reduced_function_space__reduced_problem_to_reduced_basis_functions_cache[
            (form_name, reduced_V)]

        # Get list of truth and reduced problems that need to be solved, possibly updating cache
        required_truth_problems = list()
        required_reduced_problems = list()
        for truth_problem in truth_problems:
            truth_problem_is_solving = hasattr(truth_problem, "_is_solving")
            if is_training_started(truth_problem):
                reduced_problem = get_reduced_problem_from_problem(
                    truth_problem)
                reduced_problem_is_solving = hasattr(reduced_problem,
                                                     "_is_solving")
            else:
                reduced_problem = None
                reduced_problem_is_solving = False
            if not truth_problem_is_solving:
                if is_training_finished(truth_problem):
                    # Store the component
                    if reduced_problem not in reduced_problem_to_components:
                        reduced_problem_to_components[
                            reduced_problem] = truth_problem_to_components[
                                truth_problem]
                    # Store the replacement
                    if reduced_problem not in reduced_problem_to_reduced_mesh_solution:
                        reduced_problem_to_reduced_mesh_solution[
                            reduced_problem] = truth_problem_to_reduced_mesh_solution[
                                truth_problem]
                    # Get reduced problem basis functions on reduced mesh
                    if reduced_problem not in reduced_problem_to_reduced_basis_functions:
                        reduced_problem_to_reduced_basis_functions[
                            reduced_problem] = list()
                        for component in reduced_problem_to_components[
                                reduced_problem]:
                            reduced_problem_to_reduced_basis_functions[
                                reduced_problem].append(
                                    at.get_auxiliary_basis_functions_matrix(
                                        truth_problem, reduced_problem,
                                        component))
                    # Append to list of required reduced problems
                    required_reduced_problems.append(
                        (reduced_problem, reduced_problem_is_solving))
                else:
                    if (hasattr(truth_problem,
                                "_apply_exact_evaluation_at_stages") and
                            not hasattr(truth_problem, "_apply_EIM_at_stages")
                            and not hasattr(truth_problem,
                                            "_apply_DEIM_at_stages")):
                        # Init truth problem (if required), as it may not have been initialized
                        truth_problem.init()
                        # Append to list of required truth problems which are not currently solving
                        required_truth_problems.append(
                            (truth_problem, False, reduced_problem_is_solving))
                    else:
                        # Store the corresponding exact truth problem
                        if truth_problem not in truth_problem_to_exact_truth_problem:
                            exact_truth_problem = exact_problem(truth_problem)
                            truth_problem_to_exact_truth_problem[
                                truth_problem] = exact_truth_problem
                            # Init exact truth problem (if required), as it may not have been initialized
                            exact_truth_problem.init()
                        else:
                            exact_truth_problem = truth_problem_to_exact_truth_problem[
                                truth_problem]
                        # Store the component
                        if exact_truth_problem not in truth_problem_to_components:
                            truth_problem_to_components[
                                exact_truth_problem] = truth_problem_to_components[
                                    truth_problem]
                        # Store the replacement
                        if exact_truth_problem not in truth_problem_to_reduced_mesh_solution:
                            truth_problem_to_reduced_mesh_solution[
                                exact_truth_problem] = truth_problem_to_reduced_mesh_solution[
                                    truth_problem]
                        # Get interpolator on reduced mesh
                        if exact_truth_problem not in truth_problem_to_reduced_mesh_interpolator:
                            truth_problem_to_reduced_mesh_interpolator[
                                exact_truth_problem] = list()
                            for component in truth_problem_to_components[
                                    exact_truth_problem]:
                                truth_problem_to_reduced_mesh_interpolator[
                                    exact_truth_problem].append(
                                        at.get_auxiliary_function_interpolator(
                                            exact_truth_problem, component))
                        # Append to list of required truth problems which are not currently solving
                        required_truth_problems.append(
                            (exact_truth_problem, False,
                             reduced_problem_is_solving))
            else:
                assert not reduced_problem_is_solving
                # Append to list of required truth problems which are currently solving
                required_truth_problems.append((truth_problem, True, False))

        # Solve truth problems (which have not been reduced yet) associated to nonlinear terms
        for (truth_problem, truth_problem_is_solving,
             reduced_problem_is_solving) in required_truth_problems:
            if not reduced_problem_is_solving:
                # Solve (if necessary) ...
                truth_problem.set_mu(mu)
                if not truth_problem_is_solving:
                    log(
                        PROGRESS,
                        "In form_on_reduced_function_space, requiring truth problem solve for problem "
                        + truth_problem.name())
                    truth_problem.solve()
                else:
                    log(
                        PROGRESS,
                        "In form_on_reduced_function_space, loading current truth problem solution for problem "
                        + truth_problem.name())
            else:
                reduced_problem = get_reduced_problem_from_problem(
                    truth_problem)
                log(
                    PROGRESS,
                    "In form_on_reduced_function_space, replacing current truth problem solution with reduced solution for problem "
                    + reduced_problem.truth_problem.name())
            # ... and assign to reduced_mesh_solution
            for (reduced_mesh_solution, reduced_mesh_interpolator) in zip(
                    truth_problem_to_reduced_mesh_solution[truth_problem],
                    truth_problem_to_reduced_mesh_interpolator[truth_problem]):
                solution_to = reduced_mesh_solution
                if not reduced_problem_is_solving:
                    solution_from = reduced_mesh_interpolator(
                        truth_problem._solution)
                else:
                    solution_from = reduced_mesh_interpolator(
                        reduced_problem.basis_functions[:reduced_problem.
                                                        _solution.N] *
                        reduced_problem._solution)
                backend.assign(solution_to, solution_from)

        # Solve reduced problems associated to nonlinear terms
        for (reduced_problem, is_solving) in required_reduced_problems:
            # Solve (if necessary) ...
            reduced_problem.set_mu(mu)
            if not is_solving:
                log(
                    PROGRESS,
                    "In form_on_reduced_function_space, requiring reduced problem solve for problem "
                    + reduced_problem.truth_problem.name())
                reduced_problem.solve()
            else:
                log(
                    PROGRESS,
                    "In form_on_reduced_function_space, loading current reduced problem solution for problem "
                    + reduced_problem.truth_problem.name())
            # ... and assign to reduced_mesh_solution
            for (reduced_mesh_solution, reduced_basis_functions) in zip(
                    reduced_problem_to_reduced_mesh_solution[reduced_problem],
                    reduced_problem_to_reduced_basis_functions[reduced_problem]
            ):
                solution_to = reduced_mesh_solution
                solution_from_N = OnlineSizeDict()
                for c, v in reduced_problem._solution.N.items():
                    if c in reduced_basis_functions._components_name:
                        solution_from_N[c] = v
                solution_from = online_backend.OnlineFunction(solution_from_N)
                online_backend.online_assign(solution_from,
                                             reduced_problem._solution)
                solution_from = reduced_basis_functions[:solution_from_N] * solution_from
                backend.assign(solution_to, solution_from)

        # Assemble and return
        assembled_replaced_form = wrapping.assemble(
            replaced_form_with_replaced_measures)
        form_rank = assembled_replaced_form.rank()
        return (assembled_replaced_form, form_rank)
Example #14
0
# --------------------------Variational problem---------------------------
# Traditional terms
mu = 1
f = fem.Constant(mesh, PETSc.ScalarType((0, 0)))
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
a = (2 * mu * inner(sym_grad(u), sym_grad(v)) - inner(p, div(v)) -
     inner(div(u), q)) * dx
L = inner(f, v) * dx

# No prescribed shear stress
n = FacetNormal(mesh)
g_tau = tangential_proj(
    fem.Constant(mesh, PETSc.ScalarType(((0, 0), (0, 0)))) * n, n)
ds = Measure("ds", domain=mesh, subdomain_data=mt, subdomain_id=1)

# Terms due to slip condition
# Explained in for instance: https://arxiv.org/pdf/2001.10639.pdf
a -= inner(outer(n, n) * dot(T(u, p, mu), n), v) * ds
L += inner(g_tau, v) * ds
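# The boundary term subtracted from `a` is the normal-normal projection of the
# traction T(u, p, mu)·n on the slip boundary (outer(n, n) keeps only the
# normal component), while the prescribed tangential traction g_tau (zero here)
# enters the right-hand side; together they impose the slip condition weakly,
# as discussed in the reference above.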

# Solve linear problem
petsc_options = {
    "ksp_type": "preonly",
    "pc_type": "lu",
    "pc_factor_solver_type": "mumps"
}
problem = LinearProblem(a, L, mpc, bcs=bcs, petsc_options=petsc_options)
U = problem.solve()
Example #15
0
    def __init__(self, args, tc, metadata):
        self.has_analytic_solution = True
        self.problem_code = 'WCYL'
        super(Problem, self).__init__(args, tc, metadata)

        self.tc.init_watch('assembleSol', 'Assembled analytic solution', True)
        self.tc.init_watch('analyticP', 'Analytic pressure', True)
        self.tc.init_watch('analyticVnorms',
                           'Computed analytic velocity norms', True)
        self.tc.init_watch('errorP', 'Computed pressure error', True)
        self.tc.init_watch('errorForce', 'Computed force error', True)
        self.tc.init_watch('computePG', 'Computed pressure gradient', True)

        self.name = 'womersley_cylinder'
        self.status_functional_str = 'last H1 velocity error'

        # input parameters
        self.ic = args.ic
        self.factor = args.factor
        self.metadata['factor'] = self.factor
        self.scale_factor.append(self.factor)

        # fixed parameters (used in analytic solution and in BC)
        self.nu = 3.71 * self.args.nufactor  # kinematic viscosity
        self.R = 5.0  # cylinder radius

        self.mesh_volume = pi * 25. * 20.

        # Import gmsh mesh
        self.tc.start('mesh')
        self.mesh, self.facet_function = super(Problem,
                                               self).loadMesh(args.mesh)
        self.dsIn = Measure("ds",
                            subdomain_id=2,
                            subdomain_data=self.facet_function)
        self.dsOut = Measure("ds",
                             subdomain_id=3,
                             subdomain_data=self.facet_function)
        self.dsWall = Measure("ds",
                              subdomain_id=1,
                              subdomain_data=self.facet_function)
        self.normal = FacetNormal(self.mesh)
        print("Mesh name: ", args.mesh, "    ", self.mesh)
        print("Mesh norm max: ", self.mesh.hmax())
        print("Mesh norm min: ", self.mesh.hmin())
        self.tc.end('mesh')

        self.sol_p = None
        self.last_analytic_pressure_norm = None
        self.v_in = None
        self.area = None

        choose_note = {1.0: '', 0.1: 'nuL10', 0.01: 'nuL100', 10.0: 'nuH10'}
        self.precomputed_filename = args.mesh + choose_note[self.args.nufactor]
        print('chosen filename for precomputed solution',
              self.precomputed_filename)

        # partial Bessel functions and coefficients
        self.bessel_parabolic = None
        self.bessel_real = []
        self.bessel_complex = []
        self.coefs_exp = [-8, -6, -4, -2, 2, 4, 6, 8]
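        # The analytic Womersley solution is typically assembled as a steady
        # parabolic (Poiseuille) profile plus oscillatory modes built from
        # Bessel functions, one per harmonic; the attributes above hold the
        # precomputed parabolic part, the real/complex Bessel components and
        # the harmonic exponents used elsewhere in this class.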

        self.listDict.update({
            'u_H1w': {
                'list': [],
                'name': 'corrected velocity H1 error on wall',
                'abrev': 'CE_H1w',
                'scale': self.scale_factor,
                'relative': 'av_norm_H1w',
                'slist': []
            },
            'u2H1w': {
                'list': [],
                'name': 'tentative velocity H1 error on wall',
                'abrev': 'TE_H1w',
                'scale': self.scale_factor,
                'relative': 'av_norm_H1w',
                'slist': []
            },
            'av_norm_H1w': {
                'list': [],
                'name': 'analytic velocity H1 norm on wall',
                'abrev': 'AVN_H1w'
            },
            'a_force_wall': {
                'list': [],
                'name': 'analytic force on wall',
                'abrev': 'AF'
            },
            'a_force_wall_normal': {
                'list': [],
                'name': 'analytic normal force on wall',
                'abrev': 'AFN'
            },
            'a_force_wall_shear': {
                'list': [],
                'name': 'analytic shear force on wall',
                'abrev': 'AFS'
            },
            'force_wall': {
                'list': [],
                'name': 'force error on wall',
                'abrev': 'FE',
                'relative': 'a_force_wall',
                'slist': []
            },
            'force_wall_normal': {
                'list': [],
                'name': 'normal force error on wall',
                'abrev': 'FNE',
                'relative': 'a_force_wall',
                'slist': []
            },
            'force_wall_shear': {
                'list': [],
                'name': 'shear force error on wall',
                'abrev': 'FSE',
                'relative': 'a_force_wall',
                'slist': []
            },
        })
Example #16
0
def fwi_si(gt_data, i_guess, n_receivers, noise_lv, path):
    """
    This is the main function of the project.
    Parameters
        gt_data: string, path to the ground-truth image data
        i_guess: integer selecting the initial-guess configuration
        n_receivers: integer, number of receivers for the FWI
        noise_lv: float used to set the noise level
        path: string, path to the local results directory
    (The number of shots is not an argument; it is taken from the size of the
    MPI communicator.)
    """

    # Parallel processing is implemented at the shot level
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    n_shots = comm.Get_size()

    seism_vel = [4.12, 1.95]
    image_phi = mpimg.imread(gt_data)
    chi0 = np.int64(image_phi == 0)
    chi1 = 1.0 - chi0
    synth_model = seism_vel[0] * chi1 + seism_vel[1] * chi0
    # scale in meters
    xMin = 0.0
    xMax = 1.0
    zMin = 0.0
    zMax = 0.650
    # scale in seconds
    tMin = 0.0
    tMax = 1.0

    # Damping layer width and damping limits
    damp_layer = 0.1 * xMax
    dmp_xMin = xMin + damp_layer
    dmp_xMax = xMax - damp_layer
    dmp_zMax = zMax - damp_layer

    # The number of grid points is determined by the loaded image size:
    # Nz, Nx are the numbers of grid points in z and x
    Nz, Nx = synth_model.shape
    delta_x = xMax / Nx
    delta_z = zMax / Nz
    CFL = 0.4
    delta_t = (CFL * min(delta_x, delta_z)) / max(seism_vel)
    gc_t = np.arange(tMin, tMax, delta_t)
    Nt = len(gc_t)
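    # CFL stability constraint: delta_t is chosen so that the fastest wave
    # (max(seism_vel) = 4.12) travels at most CFL = 0.4 of the smallest grid
    # spacing per time step, i.e. delta_t = 0.4 * min(delta_x, delta_z) / 4.12.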

    # Level set parameters
    MainItMax = 5000
    gamma = 0.8
    gamma2 = 0.8
    stop_coeff = 1.0e-8
    add_weight = True
    ls_max = 3
    ls = 0
    beta0_init = 1.5  # 1.2 #0.8 #0.5 #0.3
    beta0 = beta0_init
    beta = beta0
    stop_decision_limit = 150
    stop_decision = 0
    alpha1 = 0.01
    alpha2 = 0.97

    # wave Parameters
    PlotFields = True
    add_noise = False if noise_lv == 0 else True
    src_Zpos = 5.0
    source_peak_frequency = 5.0  # (kilo hertz)

    # Grid coordinates
    gc_x = np.arange(xMin, xMax, delta_x)
    gc_z = np.arange(zMin, zMax, delta_z)

    # Compute receivers
    id_dmp_xMin = np.where(gc_x == dmp_xMin)[0][0]
    id_dmp_xMax = np.where(gc_x == dmp_xMax)[0][0]
    id_dmp_zMax = np.where(gc_z == dmp_zMax)[0][0]
    rec_index = np.linspace(id_dmp_xMin,
                            id_dmp_xMax,
                            n_receivers + 1,
                            dtype='int')
    try:
        assert len(rec_index) < id_dmp_xMax - id_dmp_xMin
    except AssertionError:
        print("receivers in different positions")

    # Build the HUGE parameter dictionary
    parameters = {
        "gamma": gamma,
        "gamma2": gamma2,
        "ls_max": ls_max,
        "stop_coeff": stop_coeff,
        "add_noise": add_noise,
        "add_weight": add_weight,
        "beta0_init": beta0_init,
        "stop_decision_limit": stop_decision_limit,
        "alpha1": alpha1,
        "alpha2": alpha2,
        "CFL": CFL,
        "source_peak_frequency": source_peak_frequency,
        "src_Zpos": src_Zpos,
        "i_guess": i_guess,
        "n_shots": n_shots,
        "n_receivers": n_receivers,
        "add_weight": add_weight,
        "nz": Nz,
        "nx": Nx,
        "nt": Nt,
        "gc_t": gc_t,
        "gc_x": gc_x,
        "gc_z": gc_z,
        "xMin": xMin,
        "xMax": xMax,
        "zMin": zMin,
        "zMax": zMax,
        "tMin": tMin,
        "tMax": tMax,
        "hz": delta_z,
        "hx": delta_x,
        "ht": delta_t,
        "dmp_xMin": dmp_xMin,
        "dmp_xMax": dmp_xMax,
        "dmp_zMax": dmp_zMax,
        "dmp_layer": damp_layer,
        "id_dmp_xMin": id_dmp_xMin,
        "id_dmp_xMax": id_dmp_xMax,
        "id_dmp_zMax": id_dmp_zMax,
        "rec": gc_x[rec_index],
        "rec_index": rec_index,
        'noise_lv': noise_lv,
        "path": path,
        "path_misfit": path + 'misfit/',
        "path_phi": path + 'phi/'
    }

    # Compute initial guess matrix
    if rank == 0:
        outputs_and_paths(parameters)
        gnu_data(image_phi, 'ground_truth.dat', parameters)
        mkDirectory(parameters["path_phi"])

    comm.Barrier()
    phi_mat = initial_guess(parameters)
    ind = inside_shape(phi_mat)
    ind_c = np.ones_like(phi_mat) - ind
    vel_field = seism_vel[0] * ind + seism_vel[1] * ind_c

    # Initialization of Fenics-Dolfin functions
    # ----------------------------------------
    # Define mesh for the entire domain Omega
    # ----------------------------------------
    mesh = fc.RectangleMesh(comm, fc.Point(xMin, zMin), fc.Point(xMax, zMax),
                            Nx - 1, Nz - 1)
    # ----------------------------------------
    # Function spaces
    # ----------------------------------------
    V = fc.FunctionSpace(mesh, "Lagrange", 1)
    VF = fc.VectorFunctionSpace(mesh, "Lagrange", 1)
    theta = fc.TrialFunction(VF)
    csi = fc.TestFunction(VF)

    # ----------------------------------------
    # Define boundaries of the domain
    # ----------------------------------------
    tol = fc.DOLFIN_EPS  # tolerance for coordinate comparisons

    class Left(fc.SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and abs(x[0] - xMin) < tol

    class Right(fc.SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and abs(x[0] - xMax) < tol

    class Bottom(fc.SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and abs(x[1] - zMin) < tol

    class Top(fc.SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and abs(x[1] - zMax) < tol

    # --------------------------------------
    # Initialize sub-domain instances
    # --------------------------------------
    left = Left()
    top = Top()
    right = Right()
    bottom = Bottom()
    # ----------------------------------------------
    # Initialize mesh function for boundary domains
    # ----------------------------------------------
    boundaries = fc.MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
    domains = fc.MeshFunction("size_t", mesh, mesh.topology().dim())
    left.mark(boundaries, 3)
    top.mark(boundaries, 4)
    right.mark(boundaries, 5)
    bottom.mark(boundaries, 6)
    # ---------------------------------------
    # Define operator for speed vector theta
    # ---------------------------------------
    dtotal = Measure("dx")
    dircond = 1
    # ---------------------------------------
    # setting shape derivative weights
    # re-balancing the sensitivity to be greater at the bottom
    # ---------------------------------------
    wei_equation = '1.0e8*(pow(x[0] - 0.5, 16) + pow(x[1] - 0.325, 10))+100'
    wei = fc.Expression(str(wei_equation), degree=1)

    # Building the left-hand side of the bilinear system
    # to obtain the descent direction from the shape derivative
    if dircond < 4:
        bcF = [
            fc.DirichletBC(VF, (0, 0), boundaries, 3),
            fc.DirichletBC(VF, (0, 0), boundaries, 4),
            fc.DirichletBC(VF, (0, 0), boundaries, 5),
            fc.DirichletBC(VF, (0, 0), boundaries, 6)
        ]
    if dircond == 1:
        lhs = wei * alpha1 * inner(grad(theta), grad(csi)) * dtotal \
          + wei * alpha2 * inner(theta, csi) * dtotal
    #
    elif dircond == 2:
        lhs = alpha1 * inner(grad(theta), grad(csi)) * \
            dtotal + alpha2 * inner(theta, csi) * dtotal
    elif dircond == 3:
        lhs = inner(grad(theta), grad(csi)) * dtotal
    elif dircond == 5:
        lhs = inner(grad(theta), grad(csi)) * \
            dtotal + inner(theta, csi) * dtotal

    aV = fc.assemble(lhs)
    #
    if dircond < 4:
        for bc in bcF:
            bc.apply(aV)
    #
    # solver_V = fc.LUSolver(aV, "mumps")
    solver_V = fc.LUSolver(aV)
    # ------------------------------
    # Initialize Level set function
    # ------------------------------
    phi = fc.Function(V)
    phivec = phi.vector()
    phivalues = phivec.get_local()  # empty values
    my_first, my_last = V.dofmap().ownership_range()

    tabcoord = V.tabulate_dof_coordinates().reshape((-1, 2))

    unowned = V.dofmap().local_to_global_unowned()
    dofs = list(
        filter(
            lambda dof: V.dofmap().local_to_global_index(dof) not in unowned,
            [i for i in range(my_last - my_first)]))

    tabcoord = tabcoord[dofs]
    phivalues[:] = phi_mat.reshape(Nz * Nx)[dofs]  # assign values
    phivec.set_local(phivalues)
    phivec.apply('insert')

    cont = 0
    boundaries = fc.MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
    domains = fc.MeshFunction("size_t", mesh, mesh.topology().dim())
    # -----------------------------
    # Define measures
    # -----------------------------
    dx = Measure('dx')(subdomain_data=domains)

    # -------------------------------
    # Define function Omega1
    # -------------------------------

    class Omega1(fc.SubDomain):
        def __init__(self) -> None:
            super(Omega1, self).__init__()

        def inside(self, x, on_boundary):
            return (phi(x) <= 0 and xMin <= x[0] <= xMax
                    and zMin <= x[1] <= zMax)

    # instantiate variables
    eta = dmp(parameters)

    source = Source(parameters)
    FT = source.inject()

    phi_mat_old = np.zeros_like(phi_mat)
    vel_field_new = np.zeros_like(vel_field)
    theta1_mat = np.zeros((Nz * Nx))
    theta2_mat = np.zeros_like(theta1_mat)
    MainItEff = 0
    MainIt = 0
    stop_decision = 0
    st_mem_usage = 0.0
    adj_mem_usage = 0.0
    Jevaltotal = np.zeros((MainItMax))
    norm_theta = np.zeros((MainItMax))

    # path to recording phi function
    # path to recording misfit function
    if rank == 0:
        plot_mat(parameters, 'Damping', 'Damping function', eta)
        mkDirectory(parameters["path_phi"])
        mkDirectory(parameters["path_misfit"])

    comm.Barrier()
    # -------------------------------
    # Seismograms
    # -------------------------------
    wavesolver = WaveSolver(parameters, eta)
    start = time.time()
    d_send = np.empty((Nz, Nx, Nt), np.dtype('float'))
    d = wavesolver.measurements(d_send[0:Nz, 0:Nx, 0:Nt], synth_model,
                                FT[rank, 0:Nz, 0:Nx, 0:Nt], add_noise)
    seismograms = d[0, rec_index, 0:Nt].copy(order='C')
    end = time.time()

    # Plot Seismograms
    if PlotFields:
        print("{:.1f}s to build synthetic seismograms".format(end - start))
        plotMeasurements(parameters, seismograms, rank)
        if rank == 0:
            plot_displacement_field(parameters, d)

    sys.stdout.flush()
    del (d, d_send)
    ###################################################
    # Main Loop
    ###################################################
    gradshape = ShapeDerivative(parameters, csi, V, dtotal, seism_vel)
    while MainIt < MainItMax:
        # ----------------------------------------------
        # Initialize mesh function for boundary domains
        # ----------------------------------------------
        if MainIt > 0:
            vel_field = vel_field_new

        domains.set_all(0)
        omega1 = Omega1()
        omega1.mark(domains, 1)
        dx = Measure('dx')(subdomain_data=domains)
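        # Omega1 marks the cells where the level set phi is non-positive, so
        # dx(1) integrates over the current shape estimate (and dx(0) over its
        # complement) when the shape derivative is evaluated below.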

        u = np.empty((Nz, Nx, Nt), np.dtype('float'))
        P = np.empty((Nz, Nx, Nt), np.dtype('float'))

        if MainIt > 0:
            vel_field = vel_field_new
        # ------------------------------------
        # Compute STATE. u stands for displacement field
        # ------------------------------------
        start = time.time()
        u[0:Nz, 0:Nx, 0:Nt] = wavesolver.state(u[0:Nz, 0:Nx, 0:Nt], vel_field,
                                               FT[rank, 0:Nz, 0:Nx, 0:Nt])
        end = time.time()
        # ------------------------------------
        # Compute ADJOINT. P stands for the adjoint variable
        # ------------------------------------
        start1 = time.time()
        tr_u = u[0, rec_index, 0:Nt].copy(order='C')
        misfit = tr_u - seismograms
        P[0:Nz, 0:Nx, 0:Nt] = wavesolver.adjoint(P[0:Nz, 0:Nx, 0:Nt],
                                                 vel_field, misfit)
        end1 = time.time()
        comm.Barrier()
        print(
            '{:.1f}s to compute state and {:.1f}s to compute adjoint with {:d} shots. '
            .format(end - start, end1 - start1, n_shots))

        del (start, end, start1, end1)

        # Plot state/adjoint in 1st-iteration only
        if MainIt == 0 and PlotFields:
            if rank == 0:
                mkDirectory(path + 'initial_state_%03d/' % (n_shots))
                plotadjoint(parameters, P[0:Nz, 0:Nx, 0:Nt])
            folder_name = 'initial_state_%03d/' % (n_shots)
            plotstate(parameters, u[0:Nz, 0:Nx, 0:Nt], folder_name, rank)
            # plot_displacement_field(parameters, u[1, 0:Nz, 0:Nx, 0:Nt])
            st_mem_usage = (u.size * u.itemsize) / 1_073_741_824  # 1GB
            adj_mem_usage = (P.size * P.itemsize) / 1_073_741_824  # 1GB

        # Plotting reconstructions
        if rank == 0 and (MainItEff % 10 == 0
                          or stop_decision == stop_decision_limit - 1):
            plottype1(parameters, synth_model, phi_mat, cont)
            plottype2(parameters, synth_model, phi_mat, cont)
            plottype3(parameters, synth_model, phi_mat, MainIt, cont)
            plotcostfunction(parameters, Jevaltotal, MainItEff)
            plotnormtheta(parameters, norm_theta, MainItEff)
            np.save(path + 'last_phi_mat.npy', phi_mat)
            gnu_data(phi_mat, 'reconstruction.dat', parameters)

        if MainItEff % 50 == 0 and PlotFields:
            plot_misfit(parameters, 'misfit', 'Misfit', misfit, rank)

        # -------------------------
        # Compute Cost Function
        # -------------------------
        J_omega = np.zeros((1))
        l2_residual = np.sum(np.power(misfit, 2), axis=0)

        if MainIt == 0:
            weights = 1.0e-5 if add_weight else 1.0

        comm.Reduce(simpson_rule(l2_residual[0:Nt], gc_t), J_omega, op=MPI.SUM)
        Jevaltotal[MainItEff] = 0.5 * (J_omega / weights)
        del (J_omega)
        # -------------------------
        # Evaluate shape derivative
        # -------------------------
        start = time.time()
        shapeder = (1.0 / weights) * gradshape.compute(u[0:Nz, 0:Nx, 0:Nt],
                                                       P[0:Nz, 0:Nx, 0:Nt], dx)
        # Build the rhs of bi-linear system
        shapeder = fc.assemble(shapeder)
        end = time.time()
        print('{}s to compute shape derivative.'.format(end - start))
        del (start, end)
        del (u, P)

        with open(path + "cost_function.txt", "a") as file_costfunction:
            file_costfunction.write('{:d} - {:.4e} \n'.format(
                MainItEff, Jevaltotal[MainItEff]))
        # ====================================
        # ---------- Line search -------------
        # ====================================
        if MainIt > 0 and Jevaltotal[MainItEff] > Jevaltotal[
                MainItEff - 1] and ls < ls_max:
            ls = ls + 1
            beta = beta * gamma
            phi_mat = phi_mat_old
            # ------------------------------------------------------------
            # Update level set function using the descent direction theta
            # ------------------------------------------------------------
            hj_input = [
                theta1_mat, theta2_mat, phi_mat, parameters, beta, MainItEff
            ]
            phi_mat = hamiltonjacobi(*hj_input)
            del (hj_input)
            ind = inside_shape(phi_mat)
            ind_c = np.ones_like(phi_mat) - ind
            vel_field_new = seism_vel[0] * ind + seism_vel[1] * ind_c

            phivec = phi.vector()
            phivalues = phivec.get_local()  # empty values
            my_first, my_last = V.dofmap().ownership_range()

            tabcoord = V.tabulate_dof_coordinates().reshape((-1, 2))
            # set_trace()  # debugging breakpoint left in the original; disabled here
            unowned = V.dofmap().local_to_global_unowned()
            dofs = list(
                filter(
                    lambda dof: V.dofmap().local_to_global_index(dof) not in
                    unowned, [i for i in range(my_last - my_first)]))

            tabcoord = tabcoord[dofs]
            phivalues[:] = phi_mat.reshape(Nz * Nx)[dofs]  # assign values
            phivec.set_local(phivalues)
            phivec.apply('insert')

        else:
            print("----------------------------------------------")
            print("Record in: {}".format(path))
            print("----------------------------------------------")
            print("ITERATION NUMBER (MainItEff)  : {:d}".format(MainItEff))
            print("ITERATION NUMBER (MainIt)  : {:d}".format(MainIt))
            print("----------------------------------------------")
            print("Grid Size                : {:d} x {:d}".format(Nx, Nz))
            print("State memory usage       : {:.4f} GB".format(st_mem_usage))
            print("Adjoint memory usage     : {:.4f} GB".format(adj_mem_usage))
            print("----------------------------------------------")
            print("Line search  iterations  : {:d}".format(ls))
            print("Step length beta         : {:.4e}".format(beta))
            if ls == ls_max:
                beta0 = max(beta0 * gamma2, 0.1 * beta0_init)
            if ls == 0:
                beta0 = min(beta0 / gamma2, 1.0)
            ls = 0
            MainItEff = MainItEff + 1
            beta = beta0  # /(0.999**MainIt)

            theta = fc.Function(VF)
            solver_V.solve(theta.vector(), -1.0 * shapeder)

            # ------------------------------------
            # Compute norm theta and grad(phi)
            # ------------------------------------
            mpi_comm = theta.function_space().mesh().mpi_comm()
            arraytheta = theta.vector().get_local()
            theta_gathered = mpi_comm.gather(arraytheta, root=0)

            # stopped here !!!!!
            comm.Barrier()
            if rank == 0:
                # set_trace()  # debugging breakpoint left in the original; disabled here
                theta_vec = theta.vector()[fc.vertex_to_dof_map(VF)]
                theta1_mat = theta_vec[0:len(theta_vec):2].reshape(Nz, Nx)
                theta2_mat = theta_vec[1:len(theta_vec):2].reshape(Nz, Nx)
            norm_theta[MainItEff - 1] = np.sqrt(
                theta1_mat.reshape(Nz * Nx).dot(theta1_mat.reshape(Nx * Nz)) +
                theta2_mat.reshape(Nz * Nx).dot(theta2_mat.reshape(Nx * Nz)))
            max_gnp = np.sqrt(fc.assemble(dot(grad(phi), grad(phi)) * dtotal))
            print("Norm(grad(phi))          : {:.4e}".format(max_gnp))
            print("L2-norm of theta         : {:.4e}".format(
                norm_theta[MainItEff - 1]))
            print("Cost functional          : {:.4e}".format(
                Jevaltotal[MainItEff - 1]))

            # ------------------------------------------------------------
            # Update level set function using the descent direction theta
            # ------------------------------------------------------------
            phi_mat_old = phi_mat

            hj_input = [
                theta1_mat, theta2_mat, phi_mat, parameters, beta,
                MainItEff - 1
            ]

            phi_mat = hamiltonjacobi(*hj_input)

            del (hj_input)
            phi.vector()[:] = phi_mat.reshape(
                (Nz) * (Nx))[fc.dof_to_vertex_map(V)]
            ind = inside_shape(phi_mat)
            ind_c = np.ones_like(phi_mat) - ind
            vel_field_new = seism_vel[0] * ind + seism_vel[1] * ind_c

            # ----------------
            # Computing error
            # ----------------
            error_area = np.abs(chi1 - ind)
            relative_error = np.sum(error_area) / np.sum(chi0)
            print('relative error           : {:.3f}%'.format(100 *
                                                              relative_error))

            with open(path + "error.txt", "a") as text_file:
                text_file.write(f'{MainIt} {np.round(relative_error,3):>3}\n')

            # Plot actual phi function
            if MainIt % 50 == 0:
                plot_mat3D(parameters, 'phi_3D', phi_mat, MainIt)
                plot_countour(parameters, 'phi_contour', phi_mat, MainIt)
                phi_ind = '%03d_' % (MainIt)
                np.save(parameters["path_phi"] + phi_ind + 'phi.npy', phi_mat)

            # --------------------------------
            # Reinitialize level set function
            # --------------------------------
            if np.mod(MainItEff, 10) == 0:
                phi_mat = reinit(Nz, Nx, phi_mat)

            # ====================================
            # -------- Stopping criterion --------
            # ====================================
            if MainItEff > 5:
                stop0 = stop_coeff * (Jevaltotal[1] - Jevaltotal[2])
                stop1 = Jevaltotal[MainItEff - 2] - Jevaltotal[MainItEff - 1]
                if stop1 < stop0:
                    stop_decision = stop_decision + 1
                if stop_decision == stop_decision_limit:
                    MainIt = MainItMax + 1
                print("stop0                    : {:.4e}".format(stop0))
                print("stop1                    : {:.4e}".format(stop1))
            print("Stopping step            : {:d} of {:d}".format(
                stop_decision, stop_decision_limit))
            print("----------------------------------------------\n")
            cont += 1

        MainIt += 1

    return None
    def _basic_form_on_reduced_function_space(form_wrapper, at):
        form = form_wrapper._form
        form_name = form_wrapper.name()
        form_problem = get_problem_from_parametrized_operator(form_wrapper)
        reduced_V = at.get_reduced_function_spaces()
        reduced_subdomain_data = at.get_reduced_subdomain_data()
        mu = form_problem.mu
        if hasattr(form_problem, "set_time"):
            t = form_problem.t
        else:
            t = None

        if (form_name, reduced_V) not in form_cache:
            visited = set()
            replacements = dict()
            truth_problems = list()
            truth_problem_to_components = {  # outer dict index over time derivative
                0: dict(),
                1: dict()
            }
            truth_problem_to_exact_truth_problem = dict()
            truth_problem_to_reduced_mesh_solution = dict()
            truth_problem_to_reduced_mesh_solution_dot = dict()
            truth_problem_to_reduced_mesh_interpolator = {  # outer dict index over time derivative
                0: dict(),
                1: dict()
            }
            reduced_problem_to_components = {  # outer dict index over time derivative
                0: dict(),
                1: dict()
            }
            reduced_problem_to_reduced_mesh_solution = dict()
            reduced_problem_to_reduced_mesh_solution_dot = dict()
            reduced_problem_to_reduced_basis_functions = {  # outer dict index over time derivative
                0: dict(),
                1: dict()
            }

            # Look for terminals on truth mesh
            logger.log(DEBUG, "Traversing terminals of form " + form_name)
            for node in wrapping.form_iterator(form, "nodes"):
                if node in visited:
                    continue
                # ... test and trial functions
                elif isinstance(node, Argument):
                    logger.log(
                        DEBUG, "\tFound argument, number: " +
                        str(node.number()) + ", part: " + str(node.part()))
                    replacements[node] = wrapping.form_argument_replace(
                        node, reduced_V)
                    visited.add(node)
                # ... problem solutions related to nonlinear terms
                elif wrapping.is_problem_solution_type(node):
                    node_is_problem_solution = wrapping.is_problem_solution(
                        node)
                    node_is_problem_solution_dot = wrapping.is_problem_solution_dot(
                        node)
                    if node_is_problem_solution or node_is_problem_solution_dot:
                        if node_is_problem_solution:
                            (preprocessed_node, component, truth_solution
                             ) = wrapping.solution_identify_component(node)
                            truth_problem = get_problem_from_solution(
                                truth_solution)
                            logger.log(
                                DEBUG,
                                "\tFound problem solution of truth problem " +
                                truth_problem.name() +
                                " (exact problem decorator: " +
                                str(hasattr(truth_problem, "__is_exact__")) +
                                ", component: " + str(component) + ")")
                            # Time derivative key for components and interpolator dicts
                            time_derivative = 0
                        elif node_is_problem_solution_dot:
                            (preprocessed_node, component, truth_solution_dot
                             ) = wrapping.solution_dot_identify_component(node)
                            truth_problem = get_problem_from_solution_dot(
                                truth_solution_dot)
                            logger.log(
                                DEBUG,
                                "\tFound problem solution dot of truth problem "
                                + truth_problem.name() +
                                " (exact problem decorator: " +
                                str(hasattr(truth_problem, "__is_exact__")) +
                                ", component: " + str(component) + ")")
                            # Time derivative key for components and interpolator dicts
                            time_derivative = 1
                        # Store truth problem
                        if truth_problem not in truth_problems:
                            truth_problems.append(truth_problem)
                        # Store the component
                        if truth_problem not in truth_problem_to_components[
                                time_derivative]:
                            truth_problem_to_components[time_derivative][
                                truth_problem] = list()
                        if component not in truth_problem_to_components[
                                time_derivative][truth_problem]:
                            truth_problem_to_components[time_derivative][
                                truth_problem].append(component)
                            # Get the function space corresponding to preprocessed_node on the reduced mesh
                            auxiliary_reduced_V = at.get_auxiliary_reduced_function_space(
                                truth_problem, component)
                            # Define and store the replacement
                            assert preprocessed_node not in replacements  # as it is related to a new truth solution component
                            replacements[preprocessed_node] = backend.Function(
                                auxiliary_reduced_V)
                            if time_derivative == 0:
                                if truth_problem not in truth_problem_to_reduced_mesh_solution:
                                    truth_problem_to_reduced_mesh_solution[
                                        truth_problem] = list()
                                truth_problem_to_reduced_mesh_solution[
                                    truth_problem].append(
                                        replacements[preprocessed_node])
                            elif time_derivative == 1:
                                if truth_problem not in truth_problem_to_reduced_mesh_solution_dot:
                                    truth_problem_to_reduced_mesh_solution_dot[
                                        truth_problem] = list()
                                truth_problem_to_reduced_mesh_solution_dot[
                                    truth_problem].append(
                                        replacements[preprocessed_node])
                            # Get interpolator on reduced mesh
                            if truth_problem not in truth_problem_to_reduced_mesh_interpolator[
                                    time_derivative]:
                                truth_problem_to_reduced_mesh_interpolator[
                                    time_derivative][truth_problem] = list()
                            truth_problem_to_reduced_mesh_interpolator[
                                time_derivative][truth_problem].append(
                                    at.get_auxiliary_function_interpolator(
                                        truth_problem, component))
                    else:
                        (
                            preprocessed_node, component, auxiliary_problem
                        ) = wrapping.get_auxiliary_problem_for_non_parametrized_function(
                            node)
                        logger.log(
                            DEBUG, "\tFound non parametrized function " +
                            str(preprocessed_node) +
                            " associated to auxiliary problem " +
                            str(auxiliary_problem.name()) + ", component: " +
                            str(component))
                        if preprocessed_node not in replacements:
                            # Get interpolator on reduced mesh
                            auxiliary_truth_problem_to_reduced_mesh_interpolator = at.get_auxiliary_function_interpolator(
                                auxiliary_problem, component)
                            # Define and store the replacement
                            replacements[
                                preprocessed_node] = auxiliary_truth_problem_to_reduced_mesh_interpolator(
                                    preprocessed_node)
                    # Make sure to skip any parent solution related to this one
                    visited.add(node)
                    visited.add(preprocessed_node)
                    for parent_node in wrapping.solution_iterator(
                            preprocessed_node):
                        visited.add(parent_node)
                # ... geometric quantities
                elif isinstance(node, GeometricQuantity):
                    logger.log(DEBUG,
                               "\tFound geometric quantity " + str(node))
                    if len(reduced_V) == 2:
                        assert reduced_V[0].mesh().ufl_domain(
                        ) == reduced_V[1].mesh().ufl_domain()
                    replacements[node] = type(node)(reduced_V[0].mesh())
                    visited.add(node)
                else:
                    visited.add(node)
            # ... and replace them
            replaced_form = wrapping.form_replace(form, replacements, "nodes")

            # Look for measures ...
            if len(reduced_V) == 2:
                assert reduced_V[0].mesh().ufl_domain() == reduced_V[1].mesh(
                ).ufl_domain()
            measure_reduced_domain = reduced_V[0].mesh().ufl_domain()
            replacements_measures = dict()
            for integral in wrapping.form_iterator(replaced_form, "integrals"):
                # Prepare measure for the new form (from firedrake/mg/ufl_utils.py)
                integral_subdomain_data = integral.subdomain_data()
                if integral_subdomain_data is not None:
                    integral_reduced_subdomain_data = reduced_subdomain_data[
                        integral_subdomain_data]
                else:
                    integral_reduced_subdomain_data = None
                measure = Measure(
                    integral.integral_type(),
                    domain=measure_reduced_domain,
                    subdomain_id=integral.subdomain_id(),
                    subdomain_data=integral_reduced_subdomain_data,
                    metadata=integral.metadata())
                replacements_measures[integral.integrand(),
                                      integral.integral_type(),
                                      integral.subdomain_id()] = measure
            # ... and replace them
            replaced_form_with_replaced_measures = wrapping.form_replace(
                replaced_form, replacements_measures, "measures")

            # Cache the resulting dicts
            form_cache[(form_name,
                        reduced_V)] = replaced_form_with_replaced_measures
            truth_problems_cache[(form_name, reduced_V)] = truth_problems
            truth_problem_to_components_cache[(
                form_name, reduced_V)] = truth_problem_to_components
            truth_problem_to_exact_truth_problem_cache[(
                form_name, reduced_V)] = truth_problem_to_exact_truth_problem
            truth_problem_to_reduced_mesh_solution_cache[(
                form_name, reduced_V)] = truth_problem_to_reduced_mesh_solution
            truth_problem_to_reduced_mesh_solution_dot_cache[(
                form_name,
                reduced_V)] = truth_problem_to_reduced_mesh_solution_dot
            truth_problem_to_reduced_mesh_interpolator_cache[(
                form_name,
                reduced_V)] = truth_problem_to_reduced_mesh_interpolator
            reduced_problem_to_components_cache[(
                form_name, reduced_V)] = reduced_problem_to_components
            reduced_problem_to_reduced_mesh_solution_cache[(
                form_name,
                reduced_V)] = reduced_problem_to_reduced_mesh_solution
            reduced_problem_to_reduced_mesh_solution_dot_cache[(
                form_name,
                reduced_V)] = reduced_problem_to_reduced_mesh_solution_dot
            reduced_problem_to_reduced_basis_functions_cache[(
                form_name,
                reduced_V)] = reduced_problem_to_reduced_basis_functions

        # Extract from cache
        replaced_form_with_replaced_measures = form_cache[(form_name,
                                                           reduced_V)]
        truth_problems = truth_problems_cache[(form_name, reduced_V)]
        truth_problem_to_components = truth_problem_to_components_cache[(
            form_name, reduced_V)]
        truth_problem_to_exact_truth_problem = truth_problem_to_exact_truth_problem_cache[
            (form_name, reduced_V)]
        truth_problem_to_reduced_mesh_solution = truth_problem_to_reduced_mesh_solution_cache[
            (form_name, reduced_V)]
        truth_problem_to_reduced_mesh_solution_dot = truth_problem_to_reduced_mesh_solution_dot_cache[
            (form_name, reduced_V)]
        truth_problem_to_reduced_mesh_interpolator = truth_problem_to_reduced_mesh_interpolator_cache[
            (form_name, reduced_V)]
        reduced_problem_to_components = reduced_problem_to_components_cache[(
            form_name, reduced_V)]
        reduced_problem_to_reduced_mesh_solution = reduced_problem_to_reduced_mesh_solution_cache[
            (form_name, reduced_V)]
        reduced_problem_to_reduced_mesh_solution_dot = reduced_problem_to_reduced_mesh_solution_dot_cache[
            (form_name, reduced_V)]
        reduced_problem_to_reduced_basis_functions = reduced_problem_to_reduced_basis_functions_cache[
            (form_name, reduced_V)]

        # Get list of truth and reduced problems that need to be solved, possibly updating cache
        required_truth_problems = list()
        required_reduced_problems = list()
        for truth_problem in truth_problems:
            truth_problem_is_solving = hasattr(truth_problem, "_is_solving")
            if is_training_started(truth_problem):
                reduced_problem = get_reduced_problem_from_problem(
                    truth_problem)
                reduced_problem_is_solving = hasattr(reduced_problem,
                                                     "_is_solving")
            else:
                reduced_problem = None
                reduced_problem_is_solving = False
            if not truth_problem_is_solving:
                if is_training_finished(truth_problem):
                    logger.log(
                        DEBUG, "Truth problem " + truth_problem.name() +
                        " (exact problem decorator: " +
                        str(hasattr(truth_problem, "__is_exact__")) +
                        ") is not currently solving, and its offline stage has finished: truth problem will be replaced by reduced problem"
                    )
                    # Store the replacement for solution
                    if (reduced_problem
                            not in reduced_problem_to_reduced_mesh_solution
                            and truth_problem
                            in truth_problem_to_reduced_mesh_solution):
                        reduced_problem_to_reduced_mesh_solution[
                            reduced_problem] = truth_problem_to_reduced_mesh_solution[
                                truth_problem]
                        # Store the component
                        assert reduced_problem not in reduced_problem_to_components[
                            0]
                        assert truth_problem in truth_problem_to_components[0]
                        reduced_problem_to_components[0][
                            reduced_problem] = truth_problem_to_components[0][
                                truth_problem]
                        # Get reduced problem basis functions on reduced mesh
                        assert reduced_problem not in reduced_problem_to_reduced_basis_functions[
                            0]
                        reduced_problem_to_reduced_basis_functions[0][
                            reduced_problem] = [
                                at.get_auxiliary_basis_functions_matrix(
                                    truth_problem, component) for component in
                                reduced_problem_to_components[0]
                                [reduced_problem]
                            ]
                    # Store the replacement for solution_dot
                    if (reduced_problem
                            not in reduced_problem_to_reduced_mesh_solution_dot
                            and truth_problem
                            in truth_problem_to_reduced_mesh_solution_dot):
                        reduced_problem_to_reduced_mesh_solution_dot[
                            reduced_problem] = truth_problem_to_reduced_mesh_solution_dot[
                                truth_problem]
                        # Store the component
                        assert reduced_problem not in reduced_problem_to_components[
                            1]
                        assert truth_problem in truth_problem_to_components[1]
                        reduced_problem_to_components[1][
                            reduced_problem] = truth_problem_to_components[1][
                                truth_problem]
                        # Get reduced problem basis functions on reduced mesh
                        assert reduced_problem not in reduced_problem_to_reduced_basis_functions[
                            1]
                        reduced_problem_to_reduced_basis_functions[1][
                            reduced_problem] = [
                                at.get_auxiliary_basis_functions_matrix(
                                    truth_problem, component) for component in
                                reduced_problem_to_components[1]
                                [reduced_problem]
                            ]
                    # Append to list of required reduced problems
                    required_reduced_problems.append(
                        (reduced_problem, reduced_problem_is_solving))
                else:
                    if (hasattr(truth_problem,
                                "_apply_exact_evaluation_at_stages") and
                            not hasattr(truth_problem, "_apply_EIM_at_stages")
                            and not hasattr(truth_problem,
                                            "_apply_DEIM_at_stages")):
                        logger.log(
                            DEBUG, "Truth problem " + truth_problem.name() +
                            " (exact problem decorator: " +
                            str(hasattr(truth_problem, "__is_exact__")) +
                            ") is not currently solving, its offline stage has not finished, and only @ExactParametrizedFunctions has been used: truth solve of this truth problem instance will be called"
                        )
                        # Init truth problem (if required), as it may not have been initialized
                        truth_problem.init()
                        # Append to list of required truth problems which are not currently solving
                        required_truth_problems.append(
                            (truth_problem, False, reduced_problem_is_solving))
                    else:
                        logger.log(
                            DEBUG, "Truth problem " + truth_problem.name() +
                            " (exact problem decorator: " +
                            str(hasattr(truth_problem, "__is_exact__")) +
                            ") is not currently solving, its offline stage has not finished, and either @ExactParametrizedFunctions has not been used or it has been used in combination with @DEIM or @EIM: truth solve on an auxiliary instance (with exact problem decorator) will be called, to prevent early initialization of DEIM/EIM data structures"
                        )
                        # Store the corresponding exact truth problem
                        if truth_problem not in truth_problem_to_exact_truth_problem:
                            exact_truth_problem = exact_problem(truth_problem)
                            truth_problem_to_exact_truth_problem[
                                truth_problem] = exact_truth_problem
                            # Init exact truth problem (if required), as it may not have been initialized
                            exact_truth_problem.init()
                        else:
                            exact_truth_problem = truth_problem_to_exact_truth_problem[
                                truth_problem]
                        # Store the replacement for solution
                        if (exact_truth_problem
                                not in truth_problem_to_reduced_mesh_solution
                                and truth_problem
                                in truth_problem_to_reduced_mesh_solution):
                            truth_problem_to_reduced_mesh_solution[
                                exact_truth_problem] = truth_problem_to_reduced_mesh_solution[
                                    truth_problem]
                            # Store the component
                            assert exact_truth_problem not in truth_problem_to_components[
                                0]
                            assert truth_problem in truth_problem_to_components[
                                0]
                            truth_problem_to_components[0][
                                exact_truth_problem] = truth_problem_to_components[
                                    0][truth_problem]
                            # Get interpolator on reduced mesh
                            assert exact_truth_problem not in truth_problem_to_reduced_mesh_interpolator[
                                0]
                            assert truth_problem in truth_problem_to_reduced_mesh_interpolator[
                                0]
                            truth_problem_to_reduced_mesh_interpolator[0][
                                exact_truth_problem] = truth_problem_to_reduced_mesh_interpolator[
                                    0][truth_problem]
                        # Store the replacement for solution_dot
                        if (exact_truth_problem not in
                                truth_problem_to_reduced_mesh_solution_dot
                                and truth_problem
                                in truth_problem_to_reduced_mesh_solution_dot):
                            truth_problem_to_reduced_mesh_solution_dot[
                                exact_truth_problem] = truth_problem_to_reduced_mesh_solution_dot[
                                    truth_problem]
                            # Store the component
                            assert exact_truth_problem not in truth_problem_to_components[
                                1]
                            assert truth_problem in truth_problem_to_components[
                                1]
                            truth_problem_to_components[1][
                                exact_truth_problem] = truth_problem_to_components[
                                    1][truth_problem]
                            # Get interpolator on reduced mesh
                            assert exact_truth_problem not in truth_problem_to_reduced_mesh_interpolator[
                                1]
                            assert truth_problem in truth_problem_to_reduced_mesh_interpolator[
                                1]
                            truth_problem_to_reduced_mesh_interpolator[1][
                                exact_truth_problem] = truth_problem_to_reduced_mesh_interpolator[
                                    1][truth_problem]
                        # Append to list of required truth problems which are not currently solving
                        required_truth_problems.append(
                            (exact_truth_problem, False,
                             reduced_problem_is_solving))
            else:
                logger.log(
                    DEBUG, "Truth problem " + truth_problem.name() +
                    " (exact problem decorator: " +
                    str(hasattr(truth_problem, "__is_exact__")) +
                    ") is currently solving: current truth solution will be loaded"
                )
                assert not reduced_problem_is_solving
                # Append to list of required truth problems which are currently solving
                required_truth_problems.append((truth_problem, True, False))

        # Solve truth problems (which have not been reduced yet) associated to nonlinear terms
        for (truth_problem, truth_problem_is_solving,
             reduced_problem_is_solving) in required_truth_problems:
            if not reduced_problem_is_solving:
                # Solve (if necessary)
                truth_problem.set_mu(mu)
                if not truth_problem_is_solving:
                    logger.log(
                        DEBUG, "Requiring truth problem solve for problem " +
                        truth_problem.name() + " (exact problem decorator: " +
                        str(hasattr(truth_problem, "__is_exact__")) + ")")
                    truth_problem.solve()
                else:
                    logger.log(
                        DEBUG,
                        "Loading current truth problem solution for problem " +
                        truth_problem.name() + " (exact problem decorator: " +
                        str(hasattr(truth_problem, "__is_exact__")) + ")")
            else:
                reduced_problem = get_reduced_problem_from_problem(
                    truth_problem)
                logger.log(
                    DEBUG,
                    "Replacing current truth problem solution with reduced solution for problem "
                    + reduced_problem.truth_problem.name())
            # Assign to reduced_mesh_solution
            if truth_problem in truth_problem_to_reduced_mesh_solution:
                for (reduced_mesh_solution, reduced_mesh_interpolator) in zip(
                        truth_problem_to_reduced_mesh_solution[truth_problem],
                        truth_problem_to_reduced_mesh_interpolator[0][truth_problem]):
                    solution_to = reduced_mesh_solution
                    if t is None:
                        if not reduced_problem_is_solving:
                            solution_from = reduced_mesh_interpolator(
                                truth_problem._solution)
                        else:
                            solution_from = reduced_mesh_interpolator(
                                reduced_problem.
                                basis_functions[:reduced_problem._solution.N] *
                                reduced_problem._solution)
                    else:
                        if not reduced_problem_is_solving:
                            if not truth_problem_is_solving:
                                solution_from = reduced_mesh_interpolator(
                                    truth_problem._solution_over_time.at(t))
                            else:
                                solution_from = reduced_mesh_interpolator(
                                    truth_problem._solution)
                        else:
                            solution_from = reduced_mesh_interpolator(
                                reduced_problem.
                                basis_functions[:reduced_problem._solution.N] *
                                reduced_problem._solution)
                    backend.assign(solution_to, solution_from)
            # Assign to reduced_mesh_solution_dot
            if truth_problem in truth_problem_to_reduced_mesh_solution_dot:
                for (reduced_mesh_solution_dot, reduced_mesh_interpolator) in zip(
                        truth_problem_to_reduced_mesh_solution_dot[truth_problem],
                        truth_problem_to_reduced_mesh_interpolator[1][truth_problem]):
                    solution_dot_to = reduced_mesh_solution_dot
                    assert t is not None
                    if not reduced_problem_is_solving:
                        if not truth_problem_is_solving:
                            solution_dot_from = reduced_mesh_interpolator(
                                truth_problem._solution_dot_over_time.at(t))
                        else:
                            solution_dot_from = reduced_mesh_interpolator(
                                truth_problem._solution_dot)
                    else:
                        solution_dot_from = reduced_mesh_interpolator(
                            reduced_problem.basis_functions[:reduced_problem.
                                                            _solution_dot.N] *
                            reduced_problem._solution_dot)
                    backend.assign(solution_dot_to, solution_dot_from)

        # Solve reduced problems associated to nonlinear terms
        for (reduced_problem, is_solving) in required_reduced_problems:
            # Solve (if necessary)
            reduced_problem.set_mu(mu)
            if not is_solving:
                logger.log(
                    DEBUG, "Requiring reduced problem solve for problem " +
                    reduced_problem.truth_problem.name())
                reduced_problem.solve()
            else:
                logger.log(
                    DEBUG,
                    "Loading current reduced problem solution for problem " +
                    reduced_problem.truth_problem.name())
            # Assign to reduced_mesh_solution
            if reduced_problem in reduced_problem_to_reduced_mesh_solution:
                for (reduced_mesh_solution, reduced_basis_functions) in zip(
                        reduced_problem_to_reduced_mesh_solution[reduced_problem],
                        reduced_problem_to_reduced_basis_functions[0][reduced_problem]):
                    solution_to = reduced_mesh_solution
                    solution_from_N = OnlineSizeDict()
                    for c, v in reduced_problem._solution.N.items():
                        if c in reduced_basis_functions._components_name:
                            solution_from_N[c] = v
                    solution_from = online_backend.OnlineFunction(
                        solution_from_N)
                    if t is None or is_solving:
                        online_backend.online_assign(solution_from,
                                                     reduced_problem._solution)
                    else:
                        online_backend.online_assign(
                            solution_from,
                            reduced_problem._solution_over_time.at(t))
                    solution_from = reduced_basis_functions[:solution_from_N] * solution_from
                    backend.assign(solution_to, solution_from)
            # Assign to reduced_mesh_solution_dot
            if reduced_problem in reduced_problem_to_reduced_mesh_solution_dot:
                for (reduced_mesh_solution_dot, reduced_basis_functions) in zip(
                        reduced_problem_to_reduced_mesh_solution_dot[reduced_problem],
                        reduced_problem_to_reduced_basis_functions[1][reduced_problem]):
                    solution_dot_to = reduced_mesh_solution_dot
                    solution_dot_from_N = OnlineSizeDict()
                    for c, v in reduced_problem._solution_dot.N.items():
                        if c in reduced_basis_functions._components_name:
                            solution_dot_from_N[c] = v
                    solution_dot_from = online_backend.OnlineFunction(
                        solution_dot_from_N)
                    assert t is not None
                    if is_solving:
                        online_backend.online_assign(
                            solution_dot_from, reduced_problem._solution_dot)
                    else:
                        online_backend.online_assign(
                            solution_dot_from,
                            reduced_problem._solution_dot_over_time.at(t))
                    solution_dot_from = reduced_basis_functions[:solution_dot_from_N] * solution_dot_from
                    backend.assign(solution_dot_to, solution_dot_from)

        # Assemble and return
        assembled_replaced_form = wrapping.assemble(
            replaced_form_with_replaced_measures)
        if not isinstance(assembled_replaced_form, Number):
            form_rank = assembled_replaced_form.rank()
        else:
            form_rank = 0
        return (assembled_replaced_form, form_rank)
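
The measure replacement performed above can be reproduced in isolation. A minimal sketch in legacy DOLFIN follows; the two meshes, the facet markers and the way the original measure is built are hypothetical stand-ins for the truth mesh, the reduced mesh and reduced_subdomain_data handled in the loop over integrals above.

from dolfin import UnitSquareMesh, MeshFunction, Measure, Constant, assemble

# Hypothetical stand-ins for the truth and reduced meshes.
truth_mesh = UnitSquareMesh(16, 16)
reduced_mesh = UnitSquareMesh(4, 4)

# Original exterior-facet measure, defined on the truth mesh.
ds_truth = Measure("ds", domain=truth_mesh)

# Facet markers on the target mesh play the role of reduced_subdomain_data.
reduced_markers = MeshFunction("size_t", reduced_mesh,
                               reduced_mesh.topology().dim() - 1, 0)

# Rebuild the measure on the reduced mesh, keeping the integral type,
# subdomain id and metadata of the original one (cf. the Measure(...) call above).
ds_reduced = Measure(ds_truth.integral_type(),
                     domain=reduced_mesh,
                     subdomain_id=ds_truth.subdomain_id(),
                     subdomain_data=reduced_markers,
                     metadata=ds_truth.metadata())

# The rebuilt measure assembles on the new domain: boundary length of the unit square.
print(assemble(Constant(1.0) * ds_reduced))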
Example #18
    def __init__(self, args, tc, metadata):
        self.has_analytic_solution = False
        self.problem_code = 'REAL'
        super(Problem, self).__init__(args, tc, metadata)

        self.name = 'test on real mesh'
        self.status_functional_str = 'outflow/inflow'
        self.last_inflow = 0

        # time settings
        self.itp_lengths = {
            1: 1.0,
            2: 0.9375,
        }
        self.cycle_length = self.itp_lengths[self.args.itp]

        # input parameters
        self.nu = self.args.nu  # kinematic viscosity
        self.factor = args.factor
        self.metadata['factor'] = self.factor
        self.scale_factor.append(self.factor)

        self.tc.start('mesh')
        # Import mesh
        try:
            self.mesh, self.facet_function = super(Problem,
                                                   self).loadMesh(args.mesh)
            info("Mesh name: " + args.mesh + "    " + str(self.mesh))
            f_ini = open('meshes/' + args.mesh + '.ini', 'r')
            reader = csv.reader(f_ini, delimiter=' ', escapechar='\\')
        except (EnvironmentError, RuntimeError):
            print(
                'Unable to open mesh.hdf5 or mesh.ini file. Check if the mesh was prepared to be used '
                'with \"real\" problem.')
            exit(1)

        # load inflows and outflows (interfaces) from mesh.ini file
        obj = None
        self.interfaces = []
        for row in reader:
            if not row:
                pass
            elif row[0] == 'volume':
                self.mesh_volume = float(row[1])
            elif row[0] == 'in':
                if obj is not None:
                    self.interfaces.append(obj)
                obj = {'inflow': True, 'number': row[1]}
            elif row[0] == 'out':
                if obj is not None:
                    self.interfaces.append(obj)
                obj = {'inflow': False, 'number': row[1]}
            else:
                if len(row) == 2:  # scalar values
                    obj[row[0]] = row[1]
                else:  # vector values
                    obj[row[0]] = [float(f) for f in row[1:]]
        self.interfaces.append(obj)
        f_ini.close()
        self.tc.end('mesh')

        # collect inflows and outflows into separate lists
        self.outflow_area = 0
        self.inflows = []
        self.outflows = []
        for obj in self.interfaces:
            if not obj['inflow']:
                self.outflow_area += float(obj['S'])
                self.outflows.append(obj)
            else:
                self.inflows.append(obj)
        info('Outflow area: %f' % self.outflow_area)

        # self.dsWall = Measure("ds", subdomain_id=1, subdomain_data=self.facet_function)
        self.normal = FacetNormal(self.mesh)

        # generate measures, collect measure lists
        self.inflow_measures = []
        self.outflow_measures = []
        for obj in self.interfaces:
            obj['measure'] = Measure("ds",
                                     subdomain_id=int(obj['number']),
                                     subdomain_data=self.facet_function)
            if obj['inflow']:
                self.inflow_measures.append(obj['measure'])
            else:
                self.outflow_measures.append(obj['measure'])

        self.listDict.update({
            'outflow': {
                'list': [],
                'name': 'outflow rate',
                'abrev': 'OUT',
                'slist': []
            },
            'inflow': {
                'list': [],
                'name': 'inflow rate',
                'abrev': 'IN',
                'slist': []
            },
            'oiratio': {
                'list': [],
                'name': 'outflow/inflow ratio (mass conservation)',
                'abrev': 'O/I',
                'slist': []
            },
        })
        for obj in self.outflows:
            n = obj['number']
            self.listDict.update({
                'outflow' + n: {
                    'list': [],
                    'name': 'outflow rate ' + n,
                    'abrev': 'OUT' + n,
                    'slist': []
                }
            })
        self.can_force_outflow = True
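
A small hedged sketch of how the per-interface measures stored above might be used downstream; the helper and its velocity argument u are illustrative, not part of this class.

from dolfin import assemble, inner

def compute_flow_rates(problem, u):
    # problem: an instance of the class above, u: a velocity Function on
    # problem.mesh (both hypothetical). Returns the flux of u through each
    # interface, assembled with the measure stored in obj['measure'].
    rates = {}
    for obj in problem.interfaces:
        rates[obj['number']] = assemble(inner(u, problem.normal) * obj['measure'])
    return rates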
Example #19
def dx_from_measure(mesh):
    subdomains = MeshFunction("size_t", mesh, mesh.topology.dim, 1)
    dx = Measure("dx")(subdomain_data=subdomains, domain=mesh)
    dx = dx(1)
    return dx
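
For context, the same pattern in legacy DOLFIN (the helper above targets a different DOLFIN generation); this is only an illustrative sketch: tag every cell with id 1, restrict dx to that id and assemble the area of the marked region.

from dolfin import UnitSquareMesh, MeshFunction, Measure, Constant, assemble

mesh = UnitSquareMesh(8, 8)
subdomains = MeshFunction("size_t", mesh, mesh.topology().dim(), 1)
dx = Measure("dx", domain=mesh, subdomain_data=subdomains)(1)
print(assemble(Constant(1.0) * dx))  # area of the region marked with id 1
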
plot(facet_function)

f_mesh = HDF5File(mpi_comm_world(), 'meshes/' + meshName + '.hdf5', 'w')
f_mesh.write(mesh, 'mesh')
f_mesh.write(facet_function, 'facet_function')
f_mesh.close()

# compute volume of mesh
V = FunctionSpace(mesh, 'Lagrange', 1)
one = interpolate(Expression('1.', degree=1), V)
volume = assemble(one * dx)

# compute real areas of boundary parts
for obj in itertools.chain(inflows, outflows):
    dS = Measure("ds",
                 subdomain_id=obj['number'],
                 subdomain_data=facet_function)
    obj['S'] = assemble(one * dS)
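
# Illustrative check, not part of the original script: inflow entries carry a
# nominal 'radius' (used below for the reference coefficients), so the
# assembled area can be compared against the corresponding circular area
# (pi is assumed to come from the dolfin/ufl star import used by this script).
for obj in itertools.chain(inflows, outflows):
    if 'radius' in obj:
        nominal = pi * float(obj['radius']) ** 2
        print('interface %s: assembled area %g vs nominal circular area %g' %
              (obj['number'], obj['S'], nominal))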

# compute reference coefs
for inf in inflows:
    inf['reference_coef'] = inf['reference_radius'] * inf[
        'reference_radius'] / (inf['radius'] * inf['radius'])

# create .ini file ====================
f_ini = open('meshes/' + meshName + '.ini', 'w')
w = csv.writer(f_ini, delimiter=' ', escapechar='\\', quoting=csv.QUOTE_NONE)
w.writerow(['volume', volume])
for inf in inflows:
    w.writerow(['in', inf['number']])
    w.writerow(['normal'] + inf['normal'])
Example #21
    def __init__(self, args, tc, metadata):
        self.has_analytic_solution = True
        self.problem_code = 'SCYL'
        super(Problem, self).__init__(args, tc, metadata)

        self.tc.init_watch('errorP', 'Computed pressure error', True)
        self.tc.init_watch('errorV', 'Computed velocity error', True)
        self.tc.init_watch('errorForce', 'Computed force error', True)
        self.tc.init_watch('errorVtest', 'Computed velocity error test', True)
        self.tc.init_watch('computePG', 'Computed pressure gradient', True)

        self.name = 'steady_cylinder'
        self.status_functional_str = 'last H1 velocity error'

        # input parameters
        self.ic = args.ic
        self.factor = args.factor
        self.metadata['factor'] = self.factor
        self.scale_factor.append(self.factor)

        # fixed parameters (used in analytic solution and in BC)
        self.nu = 3.71  # kinematic viscosity
        self.R = 5.0  # cylinder radius

        self.mesh_volume = pi * 25. * 20.

        # Import gmsh mesh
        self.tc.start('mesh')
        self.mesh, self.facet_function = super(Problem,
                                               self).loadMesh(args.mesh)
        self.dsIn = Measure("ds",
                            subdomain_id=2,
                            subdomain_data=self.facet_function)
        self.dsOut = Measure("ds",
                             subdomain_id=3,
                             subdomain_data=self.facet_function)
        self.dsWall = Measure("ds",
                              subdomain_id=1,
                              subdomain_data=self.facet_function)
        self.normal = FacetNormal(self.mesh)
        print("Mesh name: ", args.mesh, "    ", self.mesh)
        print("Mesh norm max: ", self.mesh.hmax())
        print("Mesh norm min: ", self.mesh.hmin())
        self.tc.end('mesh')

        self.sol_p = None
        self.analytic_gradient = None
        self.analytic_pressure_norm = None
        self.v_in = None
        self.area = None

        self.analytic_v_norm_L2 = None
        self.analytic_v_norm_H1 = None
        self.analytic_v_norm_H1w = None

        self.listDict.update({
            'u_H1w': {
                'list': [],
                'name': 'corrected velocity H1 error on wall',
                'abrev': 'CE_H1w',
                'scale': self.scale_factor,
                'relative': 'av_norm_H1w',
                'slist': []
            },
            'u2H1w': {
                'list': [],
                'name': 'tentative velocity H1 error on wall',
                'abrev': 'TE_H1w',
                'scale': self.scale_factor,
                'relative': 'av_norm_H1w',
                'slist': []
            },
            'av_norm_H1w': {
                'list': [],
                'name': 'analytic velocity H1 norm on wall',
                'abrev': 'AVN_H1w'
            },
            'a_force_wall': {
                'list': [],
                'name': 'analytic force on wall',
                'abrev': 'AF'
            },
            'a_force_wall_normal': {
                'list': [],
                'name': 'analytic normal force on wall',
                'abrev': 'AFN'
            },
            'a_force_wall_shear': {
                'list': [],
                'name': 'analytic shear force on wall',
                'abrev': 'AFS'
            },
            'force_wall': {
                'list': [],
                'name': 'force error on wall',
                'abrev': 'FE',
                'relative': 'a_force_wall',
                'slist': []
            },
            'force_wall_normal': {
                'list': [],
                'name': 'normal force error on wall',
                'abrev': 'FNE',
                'relative': 'a_force_wall',
                'slist': []
            },
            'force_wall_shear': {
                'list': [],
                'name': 'shear force error on wall',
                'abrev': 'FSE',
                'relative': 'a_force_wall',
                'slist': []
            },
        })
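        # Illustrative sketch, not part of the original class: the boundary
        # measures defined above can be assembled directly, for example to
        # fill in self.area or to evaluate the inflow rate of a velocity v:
        #     self.area = assemble(Constant(1.0) * self.dsIn)
        #     inflow_rate = assemble(inner(v, self.normal) * self.dsIn)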
    alpha_2 = interpolate(alpha_2(degree=2), F)
    beta_1  = interpolate(beta_1(degree=2), F)
    beta_2  = interpolate(beta_2(degree=2), F)

    a_ = alpha_1*alpha_2
    b_ = alpha_1*beta_2 + alpha_2*beta_1
    c_ = beta_1*beta_2

    Lambda_e = as_tensor([[alpha_2, 0],[0, alpha_1]])
    Lambda_p = as_tensor([[beta_2, 0],[0, beta_1]])

    # Set up boundary condition
    bc = DirichletBC(W.sub(0), Constant((0.0, 0.0)), ff, 1)

    # Create measure for the source term
    dx = Measure("dx", domain=mesh, subdomain_data=mf)
    ds = Measure("ds", subdomain_data=ff, domain=mesh)

    # Source term
    #f = ricker_pulse(t=0.0, omega=omega_p, degree=1)
    # Set up initial values
    u0 = Function(V)
    u0.set_allow_extrapolation(True)
    v0 = Function(V)
    a0 = Function(V)
    U0 = Function(M)
    V0 = Function(M)
    A0 = Function(M)

    # Test and trial functions
    (u, S) = TrialFunctions(W)
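
The dx and ds measures above carry the cell and facet markers, so individual subdomain and boundary ids can be selected directly inside the variational forms. A minimal self-contained sketch of that pattern (the mesh, markers, space and ids below are illustrative, not the ones used in this example):

from dolfin import (UnitSquareMesh, MeshFunction, FunctionSpace, TrialFunction,
                    TestFunction, Constant, Measure, inner, grad, assemble)

mesh = UnitSquareMesh(8, 8)
cell_markers = MeshFunction("size_t", mesh, mesh.topology().dim(), 1)
facet_markers = MeshFunction("size_t", mesh, mesh.topology().dim() - 1, 1)

dx = Measure("dx", domain=mesh, subdomain_data=cell_markers)
ds = Measure("ds", domain=mesh, subdomain_data=facet_markers)

V = FunctionSpace(mesh, "Lagrange", 1)
u, v = TrialFunction(V), TestFunction(V)

# dx(1) integrates over cells tagged 1, ds(1) over exterior facets tagged 1.
a = inner(grad(u), grad(v)) * dx(1)
L = Constant(1.0) * v * dx(1) + Constant(0.5) * v * ds(1)
A, b = assemble(a), assemble(L)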
Example #23
        subdomains = MeshFunction("size_t", mesh, 2, 0)
        subdomain_0.mark(subdomains, 0)
        subdomain_1.mark(subdomains, 1)

        lmbda = MaterialProperty(lambdas[0],
                                 lambdas[1],
                                 subdomains=subdomains,
                                 degree=0)
        rho = MaterialProperty(rhos[0],
                               rhos[1],
                               subdomains=subdomains,
                               degree=0)
        mu = MaterialProperty(mus[0], mus[1], subdomains=subdomains, degree=0)

        # MEASURE
        dx = Measure("dx", domain=mesh, subdomain_data=subdomains)

    elif type_of_medium == "oblique":
        layer_start_0 = 0
        layer_start_1 = layer_end_0 = Ly / 2
        layer_end_1 = Ly + Lpml

        layer_start_slope_0 = 0
        layer_start_slope_1 = layer_end_slope_0 = 1 / 3
        layer_end_slope_1 = 0

        subdomain_0 = ObliqueLayer(Lx, Ly, Lpml, layer_start_0, layer_end_0,
                                   layer_start_slope_0, layer_end_slope_0)
        subdomain_1 = ObliqueLayer(Lx, Ly, Lpml, layer_start_1, layer_end_1,
                                   layer_start_slope_1, layer_end_slope_1)
        subdomains = MeshFunction("size_t", mesh, 2, 0)
Example #24
           range_max=2 * v_in,
           window_width=width,
           window_height=height)
plt.write_png('%s/correct' % dir)
# v_in_expr = Expression('(t<1.0)?t*v:v', v=Constant(v_in), t=0.0)
# v_in_expr = Expression('(t<1.0)?(1-cos(pi*t))*v*0.5:v', v=Constant(v_in), t=0.0)
bcp = DirichletBC(Q, Constant(0.0), boundary_parts, 2)
bcu = DirichletBC(V, v_in_expr, boundary_parts, 1)
foo = Function(Q)
null_vec = Vector(foo.vector())
Q.dofmap().set(null_vec, 1.0)
null_vec *= 1.0 / null_vec.norm('l2')
# print(null_vec.array())
null_space = VectorSpaceBasis([null_vec])

ds = Measure("ds", subdomain_data=boundary_parts)

# Define forms (dont redefine functions used here)
# step 1
u0 = Function(V)
u1 = Function(V)
p0 = Function(Q)
u_tent = TrialFunction(V)
v = TestFunction(V)
# U_ = 1.5*u0 - 0.5*u1
# nonlinearity = inner(dot(0.5 * (u_tent.dx(0) + u0.dx(0)), U_), v) * dx
# F_tent = (1./dt)*inner(u_tent - u0, v) * dx + nonlinearity\
#     + nu*inner(0.5 * (u_tent.dx(0) + u0.dx(0)), v.dx(0)) * dx + inner(p0.dx(0), v) * dx\
#     - inner(f, v)*dx     # solve to u_
# using explicite scheme: so LHS has interpretation as heat equation, RHS are sources
F_tent = (1./dt)*inner(u_tent - u0, v)*dx + inner(dot(u0.dx(0), u0), v)*dx + nu*inner((u_tent.dx(0) + u0.dx(0)), v.dx(0)) * dx + inner(p0.dx(0), v) * dx\