Example #1
    def test_adjoint_cell_model_parameter(self, cell_model, Scheme):
        """ Test that the gradient computed with the adjoint model is correct. """

        if isinstance(cell_model, fails_with_RK4) and Scheme == "RK4":
            pytest.xfail(
                "RK4 is unstable for some models with this timestep (0.01)")

        if isinstance(cell_model,
                      fails_with_forward_euler) and Scheme == "ForwardEuler":
            pytest.xfail(
                "ForwardEuler is unstable for some models with this timestep (0.01)"
            )

        J, Jhat, m, Jics = self.tlm_adj_setup_cellmodel_parameters(
            cell_model, Scheme)

        # Seed for the Taylor test
        seed = seed_collection_adm.get(cell_model.__class__)

        # Compute gradient with respect to the cell model parameters
        info_green("Computing gradient")
        dJdics = compute_gradient(J, m, forget=False)
        assert (dJdics is not None), "Gradient is None (#fail)."
        conv_rate = taylor_test(Jhat, m, Jics, dJdics, seed=seed)

        # Check that the convergence rate is close to the expected second order
        assert_greater(conv_rate, 1.9)
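For context on what taylor_test asserts here: with a correct gradient, the first-order Taylor remainder |J(m + h*dm) - J(m) - h*dJ(m).dm| must shrink at second order as the step h is halved. Below is a minimal self-contained sketch of that check in plain NumPy; the quadratic functional and the helper name are illustrative and not part of the dolfin-adjoint API.

import numpy as np

def taylor_remainder_rates(J, dJ, m, dm, seed=1.0, steps=4):
    # Halve the perturbation repeatedly and record the observed
    # convergence rate of the first-order Taylor remainder.
    hs = [seed * 0.5 ** i for i in range(steps)]
    rs = [abs(J(m + h * dm) - J(m) - h * float(np.dot(dJ(m), dm)))
          for h in hs]
    # With a correct gradient the remainder is O(h^2), so each
    # halving of h should divide it by ~4, i.e. a rate of ~2.
    return [np.log2(rs[i] / rs[i + 1]) for i in range(steps - 1)]

# Illustrative functional J(m) = ||m||^2 with exact gradient 2m.
J = lambda m: float(np.dot(m, m))
dJ = lambda m: 2.0 * m

rates = taylor_remainder_rates(J, dJ, np.array([1.0, 2.0]),
                               np.array([0.3, -0.7]))
assert all(r > 1.9 for r in rates)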
Example #2
    def test_tlm_cell_model_parameter(self, cell_model, Scheme):
        """Test that the gradient computed with the tangent linear model is correct."""
        if Scheme == "ForwardEuler":
            pytest.xfail(
                "Forward Euler is unstable for some models with this timestep (0.01)"
            )

        if isinstance(cell_model, fails_with_RK4) and Scheme == "RK4":
            pytest.xfail(
                "RK4 is unstable for some models with this timestep (0.01)")

        J, Jhat, m, Jics = self.tlm_adj_setup_cellmodel_parameters(
            cell_model, Scheme)

        # Seed for the Taylor test
        seed = seed_collection_tlm.get(cell_model.__class__)

        # Stop annotating
        parameters["adjoint"]["stop_annotating"] = True

        # Check TLM correctness
        info_green("Computing gradient")
        dJdics = compute_gradient_tlm(J, m, forget=False)
        assert (dJdics is not None), "Gradient is None (#fail)."
        conv_rate_tlm = taylor_test(Jhat, m, Jics, dJdics, seed=seed)

        assert_greater(conv_rate_tlm, 1.8)
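A note on the difference from Example #1: compute_gradient_tlm uses the tangent linear model, which pushes a single perturbation direction forward through the linearised model, whereas the adjoint used above pulls the functional's sensitivity backward and produces the whole gradient in one sweep. Both must yield the same directional derivatives. A hedged NumPy sketch of that equivalence on a toy linear model (all names illustrative):

import numpy as np

# Toy model y = A m with functional J(m) = 0.5 * ||A m||^2.
A = np.array([[2.0, 0.0],
              [1.0, 3.0]])
m = np.array([0.5, -1.0])

def tlm_directional(m, dm):
    # Forward (tangent linear) mode: push the direction dm through
    # the linearised model, then differentiate J along it.
    dy = A @ dm
    return float((A @ m) @ dy)

def adjoint_gradient(m):
    # Reverse (adjoint) mode: pull dJ/dy = A m back through A^T,
    # giving the full gradient A^T A m in a single sweep.
    return A.T @ (A @ m)

# The two modes agree: dJ[dm] == grad(J) . dm for every direction.
for dm in (np.array([1.0, 0.0]), np.array([0.0, 1.0])):
    assert np.isclose(tlm_directional(m, dm), adjoint_gradient(m) @ dm)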
Example #3
    def test_tlm_initial(self, cell_model, Scheme):
        "Test that we can compute the gradient for some given functional"

        if Scheme == "ForwardEuler":
            pytest.xfail(
                "Forward Euler is unstable for some models with this timestep (0.01)"
            )

        if isinstance(cell_model, fails_with_RK4) and Scheme == "RK4":
            pytest.xfail(
                "RK4 is unstable for some models with this timestep (0.01)")

        J, Jhat, m, Jics = self.tlm_adj_setup_initial_conditions(
            cell_model, Scheme)

        # Seed for the Taylor test
        seed = seed_collection_tlm.get(cell_model.__class__)

        # Check TLM correctness
        info_green("Computing gradient")
        dJdics = compute_gradient_tlm(J, m, forget=False)
        assert (dJdics is not None), "Gradient is None (#fail)."
        conv_rate_tlm = taylor_test(Jhat, m, Jics, dJdics, seed=seed)

        assert_greater(conv_rate_tlm, 1.8)
Example #4
    def test_adjoint(self, Solver, solver_type):
        """Test that adjoint model of basic bidomain solver converges at 2nd order."""
        info_green("Running adjoint basic (%s)" % solver_type)

        J, Jhat, m, Jics = self.tlm_adj_setup(Solver, solver_type)

        # Check adjoint correctness
        dJdics = compute_gradient(J, m, forget=False)
        assert (dJdics is not None), "Gradient is None (#fail)."
        conv_rate = taylor_test(Jhat, m, Jics, dJdics, seed=1e-3)

        # Check that the convergence rate is close to the expected second order
        assert_greater(conv_rate, 1.9)
Example #5
    def test_taylor_remainder(self):
        "Run Taylor remainder tests for selection of models and solvers."
        set_dolfin_parameters()
        model = Model()

        params = BasicSingleCellSolver.default_parameters()
        params["theta"] = theta
        time = Constant(0.0)
        solver = BasicSingleCellSolver(model, time, params=params)

        # Get initial conditions (projections of expressions do not
        # get annotated, which is fine, because there is no need)
        ics = project(model.initial_conditions(), solver.VS)

        # Run forward model
        info_green("Running forward %s with theta %g" % (model, theta))
        self._run(solver, model, ics)

        # Define functional
        (vs_, vs) = solver.solution_fields()
        form = lambda w: inner(w, w)*dx
        J = Functional(form(vs)*dt[FINISH_TIME])

        # Compute value of functional with current ics
        Jics = assemble(form(vs))

        # Compute gradient with respect to vs_ (the initial conditions)
        dJdics = compute_gradient(J, Control(vs_),
                                  forget=False)

        # Stop annotating
        parameters["adjoint"]["stop_annotating"] = True

        # Set up a runner that replays the forward model
        def Jhat(ics):
            self._run(solver, model, ics)
            (vs_, vs) = solver.solution_fields()
            return assemble(form(vs))

        # Run Taylor test (the stiff Tentusscher model needs a smaller seed)
        if isinstance(model, Tentusscher_2004_mcell):
            seed = 1.e-5
        else:
            seed = None

        conv_rate = taylor_test(Jhat, Control(vs_),
                                Jics, dJdics, seed=seed)

        # Check that the convergence rate is close to the expected second order
        assert_greater(conv_rate, 1.8)
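The seed passed to taylor_test sets the largest perturbation size tried; stiff models such as Tentusscher_2004_mcell need a small one to stay in the asymptotic regime. The 1.8 and 1.9 thresholds reflect the orders involved: without a gradient the remainder |J(m + h*dm) - J(m)| only shrinks at first order, and a correct gradient raises this to second order. A quick standalone illustration with a toy functional (names illustrative):

import math

J = lambda m: math.sin(m)     # toy functional
dJ = math.cos                 # its exact derivative

m, dm, seed = 0.4, 1.0, 1e-1
hs = [seed * 0.5 ** i for i in range(5)]

# Remainders without and with the gradient term.
r0 = [abs(J(m + h * dm) - J(m)) for h in hs]
r1 = [abs(J(m + h * dm) - J(m) - h * dJ(m) * dm) for h in hs]

rate = lambda r: [math.log2(r[i] / r[i + 1]) for i in range(len(r) - 1)]
print(rate(r0))   # ~1.0: first-order convergence, gradient not used
print(rate(r1))   # ~2.0: second-order convergence, gradient correct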
Example #6
    def test_AdjointModelOfSplittingSolver_PassesTaylorTest(self, Solver, solver_type):
        """Test that the adjoint model of the splitting solver passes the
        Taylor test."""

        J, Jhat, m, Jics = self.tlm_adj_setup(Solver, solver_type)

        # Check adjoint model correctness
        info_green("Compute gradient with adjoint linear model")
        dJdics = compute_gradient(J, m, forget=False)

        assert (dJdics is not None), "Gradient is None (#fail)."
        conv_rate = taylor_test(Jhat, m, Jics, dJdics, seed=1.)

        # Check that the convergence rate is close to the expected second order
        assert_greater(conv_rate, 1.9)
Example #7
    def test_TangentLinearModelOfSplittingSolver_PassesTaylorTest(
            self, Solver, solver_type):
        """Test that basic and optimised splitting solvers yield
        very comparative results when configured identically."""
        if isinstance(Solver, BasicSplittingSolver):
            pytest.mark.xfail("PETSc error 73: Object is in wring state")

        J, Jhat, m, Jics = self.tlm_adj_setup(Solver, solver_type)

        # Check TLM correctness
        info_green("Compute gradient with tangent linear model")
        dJdics = compute_gradient_tlm(J, m, forget=False)

        assert (dJdics is not None), "Gradient is None (#fail)."
        conv_rate_tlm = taylor_test(Jhat, m, Jics, dJdics, seed=1)

        # Check that the convergence rate is close to the expected second order
        assert_greater(conv_rate_tlm, 1.9)
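For completeness: the Solver, solver_type, cell_model and Scheme arguments in these tests are supplied by pytest parametrization, which is also what the xfail guards above select on. A minimal sketch of that wiring; the parameter values below are illustrative stand-ins for the real solver and model lists:

import pytest

SOLVERS = ["BasicSplittingSolver", "SplittingSolver"]
SOLVER_TYPES = ["direct", "iterative"]

@pytest.mark.parametrize("Solver", SOLVERS)
@pytest.mark.parametrize("solver_type", SOLVER_TYPES)
def test_combination(Solver, solver_type):
    # Each (Solver, solver_type) pair runs as its own test case;
    # pytest.xfail() inside the body then skips just that pair.
    if Solver == "BasicSplittingSolver" and solver_type == "direct":
        pytest.xfail("illustrative expected failure")
    assert Solver in SOLVERS and solver_type in SOLVER_TYPES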