def test_compute_adjoint(self):
    "Test that we can compute the adjoint for a given functional."
    set_dolfin_parameters()
    model = Model()

    params = BasicSingleCellSolver.default_parameters()
    params["theta"] = theta
    time = Constant(0.0)
    solver = BasicSingleCellSolver(model, time, params=params)

    # Get initial conditions (projections of expressions don't get
    # annotated, which is fine, because there is no need)
    ics = project(model.initial_conditions(), solver.VS)

    # Run forward model
    info_green("Running forward %s with theta %g" % (model, theta))
    self._run(solver, model, ics)

    (vs_, vs) = solver.solution_fields()

    # Define functional
    J = Functional(inner(vs_, vs_)*dx*dt[FINISH_TIME])

    # Compute adjoint
    info_green("Computing adjoint")
    z = compute_adjoint(J)

    # Check that the adjoint solution for vs_ is not None
    # (None is interpreted as identically 0.0)
    for (value, var) in z:
        if var.name == "vs_":
            msg = "Adjoint solution for vs_ is None (#fail)."
            assert (value is not None), msg
def test_compute_gradient(self):
    "Test that we can compute the gradient for a given functional."
    set_dolfin_parameters()
    model = Model()

    params = BasicSingleCellSolver.default_parameters()
    params["theta"] = theta
    time = Constant(0.0)
    solver = BasicSingleCellSolver(model, time, params=params)

    # Get initial conditions (projections of expressions don't get
    # annotated, which is fine, because there is no need)
    ics = project(model.initial_conditions(), solver.VS)

    # Run forward model
    info_green("Running forward %s with theta %g" % (model, theta))
    self._run(solver, model, ics)

    # Define functional
    (vs_, vs) = solver.solution_fields()
    J = Functional(inner(vs, vs)*dx*dt[FINISH_TIME])

    # Compute gradient with respect to vs_. (It is unclear why computing
    # it with respect to ics or vs fails.)
    info_green("Computing gradient")
    dJdics = compute_gradient(J, Control(vs_))
    assert (dJdics is not None), "Gradient is None (#fail)."
    print(dJdics.vector().array())
def _run_solve(self, model, time, theta):
    "Run two time steps for the given model with the given theta solver."
    dt = 0.01
    T = 2*dt
    interval = (0.0, T)

    # Initialize solver
    params = BasicSingleCellSolver.default_parameters()
    params["theta"] = theta
    params["enable_adjoint"] = False
    solver = BasicSingleCellSolver(model, time, params=params)

    # Assign initial conditions
    (vs_, vs) = solver.solution_fields()
    vs_.assign(model.initial_conditions())

    # Solve for a couple of steps; step through the generator,
    # only the final state is needed
    solutions = solver.solve(interval, dt)
    for ((t0, t1), vs) in solutions:
        pass

    # Check that we are at the end time
    assert_almost_equal(t1, T, 1e-10)

    return vs.vector()
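# A minimal usage sketch (not part of the original suite) of how the helper
# above could be exercised: compare the states produced by two theta schemes
# after the two time steps. The cell model and the use of the infinity norm
# here are illustrative assumptions only.
def _example_compare_theta_schemes(self):
    model = FitzHughNagumoManual()  # assumed cell model; any cell model would do
    vs_be = self._run_solve(model, Constant(0.0), theta=1.0)  # backward Euler
    vs_cn = self._run_solve(model, Constant(0.0), theta=0.5)  # Crank-Nicolson
    # After only 2*dt the two schemes should agree closely; report the difference
    print((vs_be - vs_cn).norm("linf"))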
def test_taylor_remainder(self):
    "Run Taylor remainder tests for a selection of models and solvers."
    set_dolfin_parameters()
    model = Model()

    params = BasicSingleCellSolver.default_parameters()
    params["theta"] = theta
    time = Constant(0.0)
    solver = BasicSingleCellSolver(model, time, params=params)

    # Get initial conditions (projections of expressions don't get
    # annotated, which is fine, because there is no need)
    ics = project(model.initial_conditions(), solver.VS)

    # Run forward model
    info_green("Running forward %s with theta %g" % (model, theta))
    self._run(solver, model, ics)

    # Define functional
    (vs_, vs) = solver.solution_fields()
    form = lambda w: inner(w, w)*dx
    J = Functional(form(vs)*dt[FINISH_TIME])

    # Compute value of functional with current ics
    Jics = assemble(form(vs))

    # Compute gradient with respect to vs_ (ics?)
    dJdics = compute_gradient(J, Control(vs_), forget=False)

    # Stop annotating
    parameters["adjoint"]["stop_annotating"] = True

    # Set up runner
    def Jhat(ics):
        self._run(solver, model, ics)
        (vs_, vs) = solver.solution_fields()
        return assemble(form(vs))

    # Run Taylor test
    if isinstance(model, Tentusscher_2004_mcell):
        seed = 1.e-5
    else:
        seed = None

    conv_rate = taylor_test(Jhat, Control(vs_), Jics, dJdics, seed=seed)

    # Check that the minimal convergence rate is above the given threshold
    assert_greater(conv_rate, 1.8)
def test_replay(self):
    "Test that replay reports success for basic single cell solver"
    set_dolfin_parameters()
    model = Model()

    # Initialize solver
    params = BasicSingleCellSolver.default_parameters()
    params["theta"] = theta
    time = Constant(0.0)
    solver = BasicSingleCellSolver(model, time, params=params)

    info_green("Running %s with theta %g" % (model, theta))

    ics = project(model.initial_conditions(), solver.VS).copy(deepcopy=True,
                                                              name="ics")
    self._run(solver, model, ics)

    info_green("Replaying")
    success = replay_dolfin(tol=0.0, stop=True)
    assert_true(success)
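# Note: the free names `Model` and `theta` used by the tests above are not
# defined in this excerpt; the original suite presumably parametrizes over
# cell models and theta values. As a minimal stand-in for running these tests
# in isolation (an illustrative assumption, not the original configuration),
# they could simply be bound at module level:
#
#     Model = FitzHughNagumoManual   # assumed cell model class under test
#     theta = 0.5                    # assumed theta (Crank-Nicolson scheme)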
def _run(cell):
    if dolfin_adjoint:
        from dolfin_adjoint import adj_reset
        adj_reset()

    solver = BasicSingleCellSolver(cell, Constant(0.0))

    # Setup initial condition
    (vs_, vs) = solver.solution_fields()
    vs_.vector()[0] = 30.  # Non-resting state
    vs_.vector()[1] = 0.

    T = 2
    solutions = solver.solve((0, T), 0.25)
    times = []
    v_values = []
    s_values = []
    for ((t0, t1), vs) in solutions:
        times += [0.5*(t0 + t1)]
        v_values.append(vs.vector()[0])
        s_values.append(vs.vector()[1])

    return (v_values, s_values, times)
def test_fitzhugh_nagumo_manual(self):
    """Test that the manually written FitzHugh-Nagumo model gives
    comparable results to a given reference from Sundnes et al, 2006."""

    class Stimulus(Expression):
        def __init__(self, **kwargs):
            self.t = kwargs["t"]

        def eval(self, value, x):
            if float(self.t) >= 50 and float(self.t) < 60:
                v_amp = 125
                value[0] = 0.05*v_amp
            else:
                value[0] = 0.0

    if dolfin_adjoint:
        from dolfin_adjoint import adj_reset
        adj_reset()

    cell = FitzHughNagumoManual()
    time = Constant(0.0)
    cell.stimulus = Stimulus(t=time, degree=0)
    solver = BasicSingleCellSolver(cell, time)

    # Setup initial condition
    (vs_, vs) = solver.solution_fields()
    ic = cell.initial_conditions()
    vs_.assign(ic)

    # Initial set-up
    interval = (0, 400)
    dt = 1.0
    times = []
    v_values = []
    s_values = []

    # Solve
    solutions = solver.solve(interval, dt=dt)
    for (timestep, vs) in solutions:
        (t0, t1) = timestep
        times += [(t0 + t1)/2]
        v_values += [vs.vector()[0]]
        s_values += [vs.vector()[1]]

    # Regression test
    v_max_reference = 2.6883308148064152e+01
    s_max_reference = 6.8660144687023219e+01
    tolerance = 1.e-14
    print("max(v_values) %.16e" % max(v_values))
    print("max(s_values) %.16e" % max(s_values))
    msg = "Maximal %s value does not match reference: diff is %.16e"
    v_diff = abs(max(v_values) - v_max_reference)
    s_diff = abs(max(s_values) - s_max_reference)
    assert (v_diff < tolerance), msg % ("v", v_diff)
    assert (s_diff < tolerance), msg % ("s", s_diff)

    # Visual check (skipped if DOLFIN_NOPLOT is set)
    import os
    if int(os.environ.get("DOLFIN_NOPLOT", 0)) != 1:
        import pylab
        pylab.plot(times, v_values, 'b*')
        pylab.plot(times, s_values, 'r-')
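# Usage note (assumption: the suite is run with pytest; the test selection
# below is illustrative): the pylab plot at the end of the test above can be
# suppressed by setting the DOLFIN_NOPLOT environment variable it checks, e.g.
#
#     DOLFIN_NOPLOT=1 python -m pytest -k fitzhugh_nagumo_manual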