Example #1
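# With these PETSc options the "nonlinear" solver performs a single linear solve
# (snes_type "ksponly"), no Krylov iterations are used (ksp_type "preonly") and the
# system is solved directly by LU factorisation, with MUMPS as the factorisation backend.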
params = {
    "mat_type": "aij",
    "snes_type": "ksponly",
    "ksp_type": "preonly",
    "pc_type": "lu",
    "pc_factor_mat_solver_type": "mumps",
}
problem = LinearVariationalProblem(a, L, q)
solver = LinearVariationalSolver(problem, solver_parameters=params)


# --- Get gauge data

radius = 20.0e+03*pow(0.5, level)  # The finer the mesh, the more precise the indicator region
P0 = FunctionSpace(mesh, "DG", 0)  # Piecewise constant space for the gauge indicators
for gauge in gauges:
    loc = op.gauges[gauge]['coords']
    op.gauges[gauge]['indicator'] = interpolate(ellipse([loc + (radius,)], mesh), P0)
    area = assemble(op.gauges[gauge]['indicator']*dx)
    op.gauges[gauge]['indicator'].assign(op.gauges[gauge]['indicator']/area)
    op.sample_timeseries(gauge, sample=op.gauges[gauge]['sample'], detide=True)
    op.gauges[gauge]['weight'] = 1.0
    if normalise:
        t = 0.0
        maxvalue = 0.0
        while t < op.end_time - 1.0e-05:
            if t < op.gauges[gauge]['arrival_time'] or t > op.gauges[gauge]['departure_time']:
                t += op.dt
                continue
            maxvalue = max(maxvalue, op.gauges[gauge]['interpolator'](t)**2)
            t += op.dt
        assert not np.isclose(maxvalue, 0.0)
        op.gauges[gauge]['weight'] /= maxvalue
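
# A minimal sketch of what an indicator helper like `ellipse` (imported from
# adapt_utils.misc) might look like. This is a hypothetical reimplementation for
# illustration, not the library's actual code: it is assumed to return a UFL
# expression equal to 1 inside the union of the given ellipses and 0 outside,
# which is then interpolated into the P0 space above and area-normalised.
from firedrake import SpatialCoordinate, conditional, le

def ellipse_sketch(centres, mesh):
    """Indicator for a union of ellipses given as (x0, y0, r) or (x0, y0, rx, ry) tuples."""
    x, y = SpatialCoordinate(mesh)
    expr = 0
    for c in centres:
        x0, y0, rx = c[0], c[1], c[2]
        ry = c[3] if len(c) > 3 else rx
        inside = le(((x - x0)/rx)**2 + ((y - y0)/ry)**2, 1)
        expr = conditional(inside, 1, expr)
    return expr
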
Example #2
m = Function(R).assign(optimum)
basis_function = Function(V)
psi, phi = basis_function.split()
loc = (0.7e+06, 4.2e+06)
radii = (48e+03, 96e+03)
angle = pi / 12
phi.interpolate(gaussian([loc + radii], mesh, rotation=angle))
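
# A minimal sketch of what the `gaussian` helper used above might look like. This is
# a hypothetical stand-in for illustration only: it is assumed to return a UFL
# expression for an anisotropic Gaussian bump centred at (x0, y0) with length scales
# (rx, ry), rotated by `rotation` radians, which is interpolated into `phi`.
from math import cos, sin
from firedrake import SpatialCoordinate, exp

def gaussian_sketch(centres, mesh, rotation=0.0):
    """Sum of Gaussian bumps given as (x0, y0, rx, ry) tuples."""
    x, y = SpatialCoordinate(mesh)
    expr = 0
    for x0, y0, rx, ry in centres:
        # Rotate coordinates about the centre before scaling by the radii
        xr = (x - x0)*cos(rotation) + (y - y0)*sin(rotation)
        yr = (y - y0)*cos(rotation) - (x - x0)*sin(rotation)
        expr = expr + exp(-((xr/rx)**2 + (yr/ry)**2))
    return expr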

# Define gauge indicators
radius = 20.0e+03*pow(0.5, level)  # The finer the mesh, the more precise the indicator region
P0 = FunctionSpace(mesh, "DG", 0)
for gauge in gauges:
    loc = op.gauges[gauge]["coords"]
    op.gauges[gauge]['indicator'] = interpolate(
        ellipse([loc + (radius, )], mesh), P0)
    area = assemble(op.gauges[gauge]['indicator'] * dx)
    op.gauges[gauge]['indicator'].assign(op.gauges[gauge]['indicator'] / area)


def solve_forward(control, store=False):
    """
    Solve forward problem.
    """
    q_.project(control * basis_function)
    for gauge in gauges:
        op.gauges[gauge]['init'] = eta_.at(op.gauges[gauge]['coords'])
        if store:
            op.gauges[gauge]['data'] = [op.gauges[gauge]['init']]

    t = 0.0
Example #3
    def _get_update_forcings_forward(self, prob, i):  # TODO: Use QoICallback
        from adapt_utils.misc import ellipse

        self.J = 0 if np.isclose(self.regularisation, 0.0) else self.get_regularisation_term(prob)
        quadrature_weight = Constant(1.0)
        scaling = Constant(0.5 * self.qoi_scaling)

        # Account for timeseries shift
        # ============================
        #   This can be troublesome business. With synthetic data, we can actually get away with not
        #   shifting, provided we are solving the linearised equations. However, in the nonlinear
        #   case and when using real data, we should make sure that the timeseries are comparable
        #   by always shifting them by the initial elevation at each gauge. This isn't really a
        #   problem for the continuous adjoint method. However, for discrete adjoint we need to
        #   annotate the initial gauge evaluation. Until point evaluation is annotated in Firedrake,
        #   the best thing is to just use the initial surface *field*. This does modify the QoI, but
        #   it shouldn't be too much of a problem if the mesh is sufficiently fine (and hence the
        #   indicator regions are sufficiently small).
        u, eta = prob.fwd_solutions[i].split()
        if self.synthetic:
            self.eta_init = Constant(0.0)
        else:
            # TODO: Use point evaluation once it is annotated
            self.eta_init = Function(eta)

        mesh = prob.meshes[i]
        radius = 20.0e+03*pow(0.5, self.level)  # The finer the mesh, the smaller the region
        for gauge in self.gauges:
            gauge_dat = self.gauges[gauge]
            gauge_dat["obs"] = Constant(
                0.0)  # Constant associated with free surface observations

            # Setup interpolator
            if not self.synthetic:
                sample = 1 if self.noisy_data else gauge_dat["sample"]
                self.sample_timeseries(gauge, sample=sample, detide=True)

            # Assemble an area-normalised indicator function
            x, y = gauge_dat["coords"]
            disc = ellipse([(x, y, radius)], mesh)
            area = assemble(disc * dx, annotate=False)
            gauge_dat["indicator"] = interpolate(disc / area, prob.P0[i])
            I = gauge_dat["indicator"]

            # Get initial pointwise and area averaged values
            gauge_dat["init"] = eta.at(gauge_dat["coords"])
            gauge_dat["init_smooth"] = assemble(I * eta * dx, annotate=False)

            # Initialise arrays for storing timeseries
            if self.save_timeseries:
                gauge_dat["timeseries"] = []
                gauge_dat["timeseries_smooth"] = []
                gauge_dat["diff"] = []
                gauge_dat["diff_smooth"] = []
                if not self.synthetic or "data" not in self.gauges[gauge]:
                    gauge_dat["data"] = []
                if hasattr(self, 'nx') and hasattr(self, 'ny'):
                    if self.nx == self.ny == 1:
                        self.adjoint_free_gradient = 0.0

        def update_forcings(t):
            """
            Evaluate free surface elevation at gauges, compute the contribution to the quantity of
            interest from the current timestep and store data in :attr:`self.gauges`.

            NOTE: `update_forcings` is called one timestep ahead, so we shift time back by one step.
            """
            dt = self.dt
            t = t - dt
            # Trapezium rule in time: half weight on the first and last timesteps
            quadrature_weight.assign(0.5*dt if t < 0.5*dt or t >= self.end_time - 0.5*dt else dt)
            # FIXME: Quadrature weights should differ across gauges
            u, eta = prob.fwd_solutions[i].split()
            for gauge in self.gauges:
                gauge_dat = self.gauges[gauge]
                I = gauge_dat["indicator"]

                # Weightings
                if t < gauge_dat["arrival_time"]:  # We don't want to fit before the tsunami arrives
                    continue
                if t > gauge_dat["departure_time"]:
                    continue

                # Point evaluation and average value at gauges
                if self.save_timeseries:
                    eta_discrete = eta.at(gauge_dat["coords"]) - gauge_dat["init"]
                    gauge_dat["timeseries"].append(eta_discrete)
                    eta_smoothed = assemble(I*eta*dx, annotate=False) - gauge_dat["init_smooth"]
                    gauge_dat["timeseries_smooth"].append(eta_smoothed)
                if self.synthetic and gauge_dat["data"] == []:
                    continue

                # Read data
                idx = prob.iteration - gauge_dat["offset"]
                obs = gauge_dat["data"][idx] if self.synthetic else float(
                    gauge_dat["interpolator"](t))
                gauge_dat["obs"].assign(obs)
                if self.save_timeseries:
                    if not self.synthetic:
                        gauge_dat["data"].append(obs)

                    # Discrete form of error
                    gauge_dat["diff"].append(
                        0.5 * (eta_discrete - gauge_dat["obs"].dat.data[0])**2)

                # Continuous form of error
                #   NOTES:
                #     * The initial free surface *field* is subtracted in some cases.
                #     * Factor of half is included in `scaling`
                #     * Quadrature weights and timestep included in `weight`
                diff = eta - self.eta_init - gauge_dat["obs"]
                wq = scaling * quadrature_weight * gauge_dat["weight"]
                self.J += assemble(wq * I * diff * diff * dx)
                if self.save_timeseries:
                    gauge_dat["diff_smooth"].append(
                        assemble(I * diff * diff * dx, annotate=False))
                    if hasattr(self, "adjoint_free_gradient"):
                        # NOTE: Factor of 2 counteracts the half in scaling
                        self.adjoint_free_gradient += assemble(2 * wq * I *
                                                               diff * eta * dx,
                                                               annotate=False)

        return update_forcings
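
# The `quadrature_weight` logic above implements the trapezium rule in time: the
# first and last timesteps contribute half a step, every other timestep a full step.
# A standalone sketch of that rule (hypothetical helper, not part of the library):
def trapezium_weight_sketch(t, dt, end_time):
    """Return the time-quadrature weight for time level `t` under the trapezium rule."""
    return 0.5*dt if t < 0.5*dt or t >= end_time - 0.5*dt else dt

# For example, with dt = 1.0 and end_time = 4.0 the time levels t = 0, 1, 2, 3, 4
# receive weights 0.5, 1, 1, 1, 0.5, which sum to the length of the time interval.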