def solve(self, var, b):
    x = dolfin.Function(self.test_function().function_space())

    # b is a libadjoint object (form or function)
    (a, L) = (self.data, b.data)

    # First: check if L is None (meaning zero)
    if L is None:
        x_vec = adjlinalg.Vector(x)
        return x_vec

    if isinstance(L, dolfin.Function):
        b_vec = L.vector()
        L = None
    else:
        b_vec = dolfin.assemble(L)

    # Next: if necessary, create a new solver and add to dictionary
    idx = a.arguments()
    if idx not in caching.localsolvers:
        if dolfin.parameters["adjoint"]["debug_cache"]:
            dolfin.info_red("Creating new LocalSolver")
        newsolver = dolfin.LocalSolver(a, None, solver_type=self.solver_parameters["solver_type"])
        if self.solver_parameters["factorize"]:
            newsolver.factorize()
        caching.localsolvers[idx] = newsolver
    else:
        if dolfin.parameters["adjoint"]["debug_cache"]:
            dolfin.info_green("Reusing LocalSolver")

    # Get the right solver from the solver dictionary
    solver = caching.localsolvers[idx]
    solver.solve_local(x.vector(), b_vec, b.fn_space.dofmap())
    x_vec = adjlinalg.Vector(x)
    return x_vec

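# The method above caches one LocalSolver per bilinear form, keyed by the
# form's arguments, so repeated solves reuse the (possibly factorized)
# element-wise solver. A minimal, library-free sketch of that caching pattern
# follows; `make_solver` is a hypothetical stand-in for the expensive
# constructor (dolfin.LocalSolver in the snippet above).

localsolvers = {}

def get_or_create_solver(key, make_solver, verbose=False):
    """Return a cached solver for `key`, creating it on first use."""
    if key not in localsolvers:
        if verbose:
            print("Creating new solver for %s" % (key,))
        localsolvers[key] = make_solver()
    elif verbose:
        print("Reusing solver for %s" % (key,))
    return localsolvers[key]

# Usage sketch:
# solver = get_or_create_solver(a.arguments(), lambda: dolfin.LocalSolver(a))
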
def test_default_basic_single_cell_solver(self, cell_model, theta):
    "Test basic single cell solver."
    time = Constant(0.0)
    model = cell_model
    Model = cell_model.__class__

    if Model == supported_cell_models[3] and theta > 0:
        pytest.xfail("failing configuration (but should work)")

    model.stimulus = Expression("1000*t", t=time, degree=1)

    info_green("\nTesting %s" % model)
    vec_solve = self._run_solve(model, time, theta)

    if Model == supported_cell_models[3] and theta == 0:
        pytest.xfail("failing configuration (but should work)")

    if Model in self.references and theta in self.references[Model]:
        ind, ref_value = self.references[Model][theta]
        print("vec_solve", vec_solve.array())
        print("ind", ind, "ref", ref_value)
        assert_almost_equal(vec_solve[ind], ref_value, 1e-10)
    else:
        info_red("Missing references for %r, %r" % (Model, theta))

def print_benchmark_report(solver_timings, failed_solvers):
    # Let's analyse the result of the benchmark test:
    solver_timings = sorted(solver_timings.iteritems(), key=operator.itemgetter(1))
    failed_solvers = sorted(failed_solvers.iteritems(), key=operator.itemgetter(1))

    dolfin.info_blue("***********************************************")
    dolfin.info_blue("********** Solver benchmark results: **********")
    dolfin.info_blue("***********************************************")
    for solver, timing in solver_timings:
        dolfin.info_blue("%s: %.6f s" % (solver, timing))
    for solver, reason in failed_solvers:
        dolfin.info_red("%s: %s" % (solver, reason))

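# Usage sketch for the report above (the dictionaries are illustrative): the
# timings map "solver, preconditioner" strings to seconds, the failures map
# them to a reason string. Note that dict.iteritems() is Python 2; on Python 3
# the same sort would use dict.items().
import operator

example_timings = {"cg, ilu": 0.42, "gmres, amg": 0.87, "cg, none": 1.30}
example_failures = {"bicgstab, ilu": "diverged"}

for solver, timing in sorted(example_timings.items(), key=operator.itemgetter(1)):
    print("%s: %.6f s" % (solver, timing))
for solver, reason in example_failures.items():
    print("%s: %s" % (solver, reason))
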
def check_if_kill(folder):
    """Check if user has put a file named killoasis in folder."""
    found = 0
    if 'killoasis' in listdir(folder):
        found = 1
    collective = MPI.sum(mpi_comm_world(), found)
    if collective > 0:
        if MPI.rank(mpi_comm_world()) == 0:
            remove(path.join(folder, 'killoasis'))
            info_red('killoasis Found! Stopping simulations cleanly...')
        return True
    else:
        return False

def check_if_reset_statistics(folder):
    """Check if user has put a file named resetoasis in folder."""
    found = 0
    if 'resetoasis' in listdir(folder):
        found = 1
    collective = MPI.sum(mpi_comm_world(), found)
    if collective > 0:
        if MPI.rank(mpi_comm_world()) == 0:
            remove(path.join(folder, 'resetoasis'))
            info_red('resetoasis Found!')
        return True
    else:
        return False

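# The two helpers above implement a common "collective flag" pattern: each MPI
# rank checks for a sentinel file locally, the flags are summed across ranks,
# and every rank then takes the same decision while rank 0 cleans up the file.
# A minimal sketch of the same idea with mpi4py (an assumption; the snippets
# above use DOLFIN's MPI wrappers instead):
from mpi4py import MPI
import os

def sentinel_present(folder, name, comm=MPI.COMM_WORLD):
    """Return True on all ranks if any rank sees folder/name; rank 0 removes it."""
    found = 1 if name in os.listdir(folder) else 0
    collective = comm.allreduce(found, op=MPI.SUM)
    if collective > 0:
        if comm.rank == 0:
            os.remove(os.path.join(folder, name))
        return True
    return False
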
def compare_against_reference(self, sol, Model, Scheme):
    ''' Compare the model solution with the reference solution. '''
    try:
        ind, ref_value = self.references[Model][Scheme]
    except KeyError:
        info_red("Missing references for %s, %s: value is %g" % (Model, Scheme, sol[0]))
        return

    info("Value for %s, %s is %g" % (Model, Scheme, sol[ind]))
    if ref_value != "nan":
        assert_almost_equal(float(sol[ind]), float(ref_value), tolerance=1e-6)

def create_initial_folders(folder, restart_folder, sys_comp, tstep, info_red,
                           scalar_components, output_timeseries_as_vector, **NS_namespace):
    """Create necessary folders."""
    info_red("Creating initial folders")

    # To avoid writing over old data create a new folder for each run
    if MPI.rank(mpi_comm_world()) == 0:
        try:
            makedirs(folder)
        except OSError:
            pass

    MPI.barrier(mpi_comm_world())
    newfolder = path.join(folder, 'data')
    if restart_folder:
        newfolder = path.join(newfolder, restart_folder.split('/')[-2])
    else:
        if not path.exists(newfolder):
            newfolder = path.join(newfolder, '1')
        else:
            previous = listdir(newfolder)
            previous = max(map(eval, previous)) if previous else 0
            newfolder = path.join(newfolder, str(previous + 1))

    MPI.barrier(mpi_comm_world())
    if MPI.rank(mpi_comm_world()) == 0:
        if not restart_folder:
            #makedirs(path.join(newfolder, "Voluviz"))
            #makedirs(path.join(newfolder, "Stats"))
            #makedirs(path.join(newfolder, "VTK"))
            makedirs(path.join(newfolder, "Timeseries"))
            makedirs(path.join(newfolder, "Checkpoint"))

    tstepfolder = path.join(newfolder, "Timeseries")
    tstepfiles = {}
    comps = sys_comp
    if output_timeseries_as_vector:
        comps = ['p', 'u'] + scalar_components

    for ui in comps:
        tstepfiles[ui] = XDMFFile(mpi_comm_world(),
                                  path.join(tstepfolder, ui + '_from_tstep_{}.xdmf'.format(tstep)))
        tstepfiles[ui].parameters["rewrite_function_mesh"] = False
        tstepfiles[ui].parameters["flush_output"] = True

    return newfolder, tstepfiles

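# The folder logic above numbers runs 1, 2, 3, ... under <folder>/data and
# picks max(existing) + 1 for a fresh run. A small, library-free sketch of that
# numbering step (using int() on the directory names rather than eval()):
import os

def next_run_folder(data_folder):
    """Return the path of the next numbered run folder under data_folder."""
    if not os.path.exists(data_folder):
        return os.path.join(data_folder, '1')
    previous = [int(d) for d in os.listdir(data_folder) if d.isdigit()]
    latest = max(previous) if previous else 0
    return os.path.join(data_folder, str(latest + 1))
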
def solve(self, var, b):
    if reuse_factorization is False:
        return adjlinalg.Matrix.solve(self, var, b)

    if var.type in ['ADJ_TLM', 'ADJ_ADJOINT']:
        bcs = [utils.homogenize(bc) for bc in self.bcs if isinstance(bc, dolfin.DirichletBC)] + \
              [bc for bc in self.bcs if not isinstance(bc, dolfin.DirichletBC)]
    else:
        bcs = self.bcs

    if var.type in ['ADJ_FORWARD', 'ADJ_TLM']:
        solver = lu_solvers[idx]
        if solver is None:
            A = assembly.assemble(self.data)
            [bc.apply(A) for bc in bcs]
            lu_solvers[idx] = LUSolver(A)
            lu_solvers[idx].parameters["reuse_factorization"] = True
        solver = lu_solvers[idx]
    else:
        if adj_lu_solvers[idx] is None:
            A = assembly.assemble(self.data)
            [bc.apply(A) for bc in bcs]
            adj_lu_solvers[idx] = LUSolver(A)
            adj_lu_solvers[idx].parameters["reuse_factorization"] = True
        solver = adj_lu_solvers[idx]

    x = adjlinalg.Vector(dolfin.Function(self.test_function().function_space()))

    if b.data is None:
        # This means we didn't get any contribution on the RHS of the adjoint
        # system. This could be that the simulation ran further ahead than when
        # the functional was evaluated, or it could be that the functional is
        # set up incorrectly.
        dolfin.info_red("Warning: got zero RHS for the solve associated with variable %s" % var)
    else:
        if isinstance(b.data, dolfin.Function):
            b_vec = b.data.vector().copy()
        else:
            b_vec = dolfin.assemble(b.data)

        [bc.apply(b_vec) for bc in bcs]
        solver.solve(x.data.vector(), b_vec, annotate=False)

    return x

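# The method above keeps one LUSolver per equation (lu_solvers / adj_lu_solvers,
# indexed by idx) so that the factorization is computed once and then reused for
# every subsequent right-hand side. A minimal sketch of that reuse, assuming the
# legacy DOLFIN API where LUSolver exposes the "reuse_factorization" parameter:
import dolfin

def make_cached_lu_solver(a_form, bcs):
    """Assemble a_form once, apply bcs, and return an LUSolver that keeps its factorization."""
    A = dolfin.assemble(a_form)
    for bc in bcs:
        bc.apply(A)
    solver = dolfin.LUSolver(A)
    solver.parameters["reuse_factorization"] = True  # factorize once, reuse for every RHS
    return solver

# Usage sketch:
# solver = make_cached_lu_solver(a, bcs)
# solver.solve(x.vector(), b_vec)
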
def read_vtu(filename, space): """ Read a vtu file with the supplied filename base, with fields on the supplied FunctionSpace. Return a dict with the Function names as keys and the Function s as values. Vector fields are not currently supported. Currently only works with a single MPI process. """ if not isinstance(filename, str): raise InvalidArgumentException("filename must be a string") if not isinstance(space, dolfin.FunctionSpaceBase): raise InvalidArgumentException("space must be a FunctionSpace") if dolfin.MPI.num_processes() > 1: raise NotImplementedException("read_vtu cannot be used with more than one MPI process") mesh = space.mesh() dim = mesh.geometry().dim() if isinstance(space, dolfin.VectorFunctionSpace): raise NotImplementedException("VectorFunctionSpace s not supported by read_vtu") elif not space.num_sub_spaces() == 0: raise NotImplementedException("Subspaces not supported by read_vtu") e = space.ufl_element() degree = e.degree() assert(e.cell().geometric_dimension() == dim) assert(e.cell().topological_dimension() == dim) if (not e.family() in ["Lagrange", "Discontinuous Lagrange"] or not dim in [1, 2, 3] or (dim == 1 and not degree in [1, 2, 3]) or (dim in [2, 3] and not degree in [1, 2])) and \ (not e.family() == "Discontinuous Lagrange" or not dim in [1, 2, 3] or not degree == 0): raise NotImplementedException('Element family "%s" with degree %i in %i dimension(s) not supported by read_vtu' % (e.family(), degree, dim)) n = space.dim() n_cells = mesh.num_cells() dof = space.dofmap() if dim == 1: cell_map = None elif dim == 2: if degree in [0, 1]: cell_map = None else: cell_map = {0:0, 1:1, 2:2, 3:5, 4:3, 5:4} else: if degree in [0, 1]: cell_map = None else: cell_map = {0:0, 1:1, 2:2, 3:3, 4:9, 5:6, 6:8, 7:7, 8:5, 9:4} filename = "%s.vtu" % filename reader = vtk.vtkXMLUnstructuredGridReader() reader.SetFileName(filename) reader.Update() vtu = reader.GetOutput() if degree == 0: assert(vtu.GetNumberOfCells() == n) else: assert(vtu.GetNumberOfPoints() == n) assert(vtu.GetNumberOfCells() == n_cells) fields = {} x = dolfin.interpolate(dolfin.Expression("x[0]"), space).vector().array() X = numpy.empty((x.shape[0], dim), dtype = x.dtype) X[:, 0] = x if dim > 1: X[:, 1] = dolfin.interpolate(dolfin.Expression("x[1]"), space).vector().array() if dim > 2: X[:, 2] = dolfin.interpolate(dolfin.Expression("x[2]"), space).vector().array() if degree == 0: for i in xrange(n_cells): cell = dof.cell_dofs(i) x = X[cell[0], :] vtu_cell = vtu.GetCell(i).GetPointIds() vtu_x = numpy.array([vtu.GetPoint(vtu_cell.GetId(j))[:dim] for j in xrange(vtu_cell.GetNumberOfIds())]) mag = abs(vtu_x).max(0) tol = 2.0e-15 * mag if any(abs(vtu_x.mean(0) - x) > tol): dolfin.info_red("Relative coordinate error: %.16e" % (abs(vtu_x.mean(0) - x) / mag).max()) raise IOException("Invalid coordinates") for i in xrange(vtu.GetCellData().GetNumberOfArrays()): cell_data = vtu.GetCellData().GetArray(i) if not cell_data.GetNumberOfComponents() == 1: raise NotImplementException("%i components not supported by read_vtu" % cell_data.GetNumberOfComponents()) assert(cell_data.GetNumberOfTuples() == n) name = cell_data.GetName() assert(not name in fields) data = numpy.empty(n) for j in xrange(n_cells): cell = dof.cell_dofs(j) data[cell[0]] = cell_data.GetTuple1(j) field = dolfin.Function(space, name = name) field.vector().set_local(data) field.vector().apply("insert") fields[name] = field else: for i in xrange(n_cells): cell = dof.cell_dofs(i) vtu_cell = vtu.GetCell(i).GetPointIds() assert(len(cell) == vtu_cell.GetNumberOfIds()) 
if cell_map is None: for j in xrange(vtu_cell.GetNumberOfIds()): if not (X[cell[j]] == vtu.GetPoint(vtu_cell.GetId(j))[:dim]).all(): dolfin.info_red("Coordinate error: %.16e" % (abs(X[cell[j]] - vtu.GetPoint(vtu_cell.GetId(j))[:dim]).max())) raise IOException("Invalid coordinates") else: for j in xrange(vtu_cell.GetNumberOfIds()): if not (X[cell[cell_map[j]]] == vtu.GetPoint(vtu_cell.GetId(j))[:dim]).all(): dolfin.info_red("Coordinate error: %.16e" % (abs(X[cell[cell_map[j]]] - vtu.GetPoint(vtu_cell.GetId(j))[:dim]).max())) raise IOException("Invalid coordinates") for i in xrange(vtu.GetPointData().GetNumberOfArrays()): point_data = vtu.GetPointData().GetArray(i) if not point_data.GetNumberOfComponents() == 1: raise NotImplementException("%i components not supported by read_vtu" % point_data.GetNumberOfComponents()) assert(point_data.GetNumberOfTuples() == n) name = point_data.GetName() assert(not name in fields) data = numpy.empty(n) for j in xrange(n_cells): cell = dof.cell_dofs(j) vtu_cell = vtu.GetCell(j).GetPointIds() assert(len(cell) == vtu_cell.GetNumberOfIds()) if cell_map is None: for k in xrange(vtu_cell.GetNumberOfIds()): data[cell[k]] = point_data.GetTuple1(vtu_cell.GetId(k)) else: for k in xrange(vtu_cell.GetNumberOfIds()): data[cell[cell_map[k]]] = point_data.GetTuple1(vtu_cell.GetId(k)) field = dolfin.Function(space, name = name) field.vector().set_local(data) field.vector().apply("insert") fields[name] = field return fields
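# Usage sketch for read_vtu above: the filename argument is the base name
# (".vtu" is appended inside the function), execution must be serial, and the
# returned dict maps field names to dolfin Functions on the supplied space.
# The file name "fields_000" below is hypothetical.
import os
import dolfin

mesh = dolfin.UnitSquareMesh(16, 16)
space = dolfin.FunctionSpace(mesh, "CG", 1)

if os.path.exists("fields_000.vtu"):
    fields = read_vtu("fields_000", space)
    for name, func in fields.items():
        dolfin.info("read field %s with %d dofs" % (name, func.vector().size()))
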
def solve(*args, **kwargs):
    ''' This function overwrites the dolfin.solve function but provides additional
        functionality to benchmark different solver/preconditioner settings. The
        arguments are equivalent to dolfin.solve except for some (optional)
        additional parameters:
        - benchmark = [True, False]: If True, the problem will be solved with all
          different solver/preconditioner combinations and the results reported.
          If False, the problem is solved using the default solver settings.
        - solve: An optional function parameter that is called instead of
          dolfin.solve. This parameter is useful if dolfin.solve is overwritten
          by a custom solver routine.
        - solver_exclude: A list of solvers that are to be excluded from the benchmark.
        - preconditioner_exclude: A list of preconditioners that are to be excluded
          from the benchmark.
    '''
    # Retrieve the extended benchmark arguments.
    if 'benchmark' in kwargs:
        benchmark = kwargs.pop('benchmark')
    else:
        benchmark = False
    if 'solve' in kwargs:
        solve = kwargs.pop('solve')
    else:
        solve = dolfin.fem.solving.solve
    if 'solver_exclude' in kwargs:
        solver_exclude = kwargs.pop('solver_exclude')
    else:
        solver_exclude = []
    if 'preconditioner_exclude' in kwargs:
        preconditioner_exclude = kwargs.pop('preconditioner_exclude')
    else:
        preconditioner_exclude = []

    if benchmark:
        dolfin.info_blue("Running solver benchmark...")
        solver_parameters_set = solver_parameters(solver_exclude, preconditioner_exclude)
        solver_timings = {}
        failed_solvers = {}
        ret = None

        # Perform the benchmark
        for parameters in solver_parameters_set:
            solver_failed = False
            # Replace the existing solver settings with the benchmark ones.
            new_args, new_kwargs = replace_solver_settings(args, kwargs, parameters)

            # Solve the problem
            timer = dolfin.Timer("Solver benchmark")
            timer.start()
            try:
                ret = solve(*new_args, **new_kwargs)
            except RuntimeError as e:
                solver_failed = True
                if 'diverged' in e.message.lower():
                    failure_reason = 'diverged'
                else:
                    failure_reason = 'unknown'
                from IPython.Shell import IPShellEmbed
                ipshell = IPShellEmbed()
                ipshell()
                pass
            timer.stop()

            # Save the result
            parameters_str = parameters["linear_solver"] + ", " + parameters["preconditioner"]
            if solver_failed:
                dolfin.info_red(parameters_str + ": solver failed.")
                failed_solvers[parameters_str] = failure_reason
            else:
                dolfin.info(parameters_str + ": " + str(timer.value()) + "s.")
                solver_timings[parameters_str] = timer.value()

        # Print the report
        print_benchmark_report(solver_timings, failed_solvers)
    else:
        ret = solve(*args, **kwargs)

    return ret

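# Usage sketch for the benchmarking wrapper above: it is a drop-in replacement
# for dolfin.solve, and the extra keyword arguments are consumed before
# delegating to the real solver. The variational problem below is illustrative.
#
# from dolfin import *
# mesh = UnitSquareMesh(32, 32)
# V = FunctionSpace(mesh, "CG", 1)
# u, v = TrialFunction(V), TestFunction(V)
# a, L = inner(grad(u), grad(v))*dx, Constant(1.0)*v*dx
# bc = DirichletBC(V, 0.0, "on_boundary")
# w = Function(V)
# solve(a == L, w, bc,
#       benchmark=True,
#       solver_exclude=["mumps"],
#       preconditioner_exclude=["icc"])
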
def solve(selfmat, var, b): if selfmat.adjoint: operators = transpose_operators(selfmat.operators) else: operators = selfmat.operators # Fetch/construct the solver if var.type in ['ADJ_FORWARD', 'ADJ_TLM']: solver = krylov_solvers[idx] need_to_set_operator = self._need_to_reset_operator else: if adj_krylov_solvers[idx] is None: need_to_set_operator = True adj_krylov_solvers[idx] = KrylovSolver( *solver_parameters) else: need_to_set_operator = self._need_to_reset_operator solver = adj_krylov_solvers[idx] solver.parameters.update(parameters) self._need_to_reset_operator = False if selfmat.adjoint: (nsp_, tnsp_) = (tnsp, nsp) else: (nsp_, tnsp_) = (nsp, tnsp) x = dolfin.Function(fn_space) if selfmat.initial_guess is not None and var.type == 'ADJ_FORWARD': x.vector()[:] = selfmat.initial_guess.vector() if b.data is None: dolfin.info_red( "Warning: got zero RHS for the solve associated with variable %s" % var) return adjlinalg.Vector(x) if var.type in ['ADJ_TLM', 'ADJ_ADJOINT']: selfmat.bcs = [ utils.homogenize(bc) for bc in selfmat.bcs if isinstance(bc, dolfin.cpp.DirichletBC) ] + [ bc for bc in selfmat.bcs if not isinstance(bc, dolfin.cpp.DirichletBC) ] # This is really hideous. Sorry. if isinstance(b.data, dolfin.Function): rhs = b.data.vector().copy() [bc.apply(rhs) for bc in selfmat.bcs] if need_to_set_operator: if assemble_system: # if we called assemble_system, rather than assemble v = dolfin.TestFunction(fn_space) (A, rhstmp) = dolfin.assemble_system( operators[0], dolfin.inner(b.data, v) * dolfin.dx, selfmat.bcs) if has_preconditioner: (P, rhstmp) = dolfin.assemble_system( operators[1], dolfin.inner(b.data, v) * dolfin.dx, selfmat.bcs) solver.set_operators(A, P) else: solver.set_operator(A) else: # we called assemble A = dolfin.assemble(operators[0]) [bc.apply(A) for bc in selfmat.bcs] if has_preconditioner: P = dolfin.assemble(operators[1]) [bc.apply(P) for bc in selfmat.bcs] solver.set_operators(A, P) else: solver.set_operator(A) else: if assemble_system: # if we called assemble_system, rather than assemble (A, rhs) = dolfin.assemble_system( operators[0], b.data, selfmat.bcs) if need_to_set_operator: if has_preconditioner: (P, rhstmp) = dolfin.assemble_system( operators[1], b.data, selfmat.bcs) solver.set_operators(A, P) else: solver.set_operator(A) else: # we called assemble A = dolfin.assemble(operators[0]) rhs = dolfin.assemble(b.data) [bc.apply(A) for bc in selfmat.bcs] [bc.apply(rhs) for bc in selfmat.bcs] if need_to_set_operator: if has_preconditioner: P = dolfin.assemble(operators[1]) [bc.apply(P) for bc in selfmat.bcs] solver.set_operators(A, P) else: solver.set_operator(A) # Set the nullspace for the linear operator if nsp_ is not None and need_to_set_operator: dolfin.as_backend_type(A).set_nullspace(nsp_) # (Possibly override the user in) orthogonalize # the right-hand-side if tnsp_ is not None: tnsp_.orthogonalize(rhs) solver.solve(x.vector(), rhs) return adjlinalg.Vector(x)
try:
    from dolfin import BackwardEuler
except ImportError:
    from dolfin import info_red
    info_red("Need dolfin > 1.2.0 for ode_solver test.")
    import sys; sys.exit(0)

from dolfin import *
from dolfin_adjoint import *
import ufl.algorithms

if not hasattr(MultiStageScheme, "to_tlm"):
    info_red("Need dolfin > 1.2.0 for ode_solver test.")
    import sys; sys.exit(0)

# Import cell model (rhs, init_values, default_parameters)
import tentusscher_2004_mcell as model

import random
random.seed(42)

parameters["form_compiler"]["cpp_optimize_flags"] = "-O3 -ffast-math -march=native"

params = model.default_parameters()
state_init = model.init_values()

mesh = UnitIntervalMesh(1)
num_states = state_init.value_size()
V = VectorFunctionSpace(mesh, "CG", 1, dim=num_states)

def main(u, form, time, Scheme, dt):

def solve(self, var, b):
    if self.adjoint:
        operators = transpose_operators(self.operators)
    else:
        operators = self.operators

    solver = dolfin.LinearSolver(*solver_parameters)
    solver.parameters.update(parameters)

    x = dolfin.Function(fn_space)
    if self.initial_guess is not None and var.type == 'ADJ_FORWARD':
        x.vector()[:] = self.initial_guess.vector()

    if b.data is None:
        dolfin.info_red("Warning: got zero RHS for the solve associated with variable %s" % var)
        return adjlinalg.Vector(x)

    if var.type in ['ADJ_TLM', 'ADJ_ADJOINT']:
        self.bcs = [utils.homogenize(bc) for bc in self.bcs if isinstance(bc, dolfin.cpp.DirichletBC)] + \
                   [bc for bc in self.bcs if not isinstance(bc, dolfin.cpp.DirichletBC)]

    # This is really hideous. Sorry.
    if isinstance(b.data, dolfin.Function):
        rhs = b.data.vector().copy()
        [bc.apply(rhs) for bc in self.bcs]

        if assemble_system:  # if we called assemble_system, rather than assemble
            v = dolfin.TestFunction(fn_space)
            (A, rhstmp) = dolfin.assemble_system(operators[0], dolfin.inner(b.data, v)*dolfin.dx, self.bcs)
            if has_preconditioner:
                (P, rhstmp) = dolfin.assemble_system(operators[1], dolfin.inner(b.data, v)*dolfin.dx, self.bcs)
                solver.set_operators(A, P)
            else:
                solver.set_operator(A)
        else:  # we called assemble
            A = dolfin.assemble(operators[0])
            [bc.apply(A) for bc in self.bcs]

            # Set nullspace
            if nsp:
                dolfin.as_backend_type(A).set_nullspace(nsp)
                nsp.orthogonalize(b)

            if has_preconditioner:
                P = dolfin.assemble(operators[1])
                [bc.apply(P) for bc in self.bcs]
                solver.set_operators(A, P)
            else:
                solver.set_operator(A)
    else:
        if assemble_system:  # if we called assemble_system, rather than assemble
            (A, rhs) = dolfin.assemble_system(operators[0], b.data, self.bcs)
            if has_preconditioner:
                (P, rhstmp) = dolfin.assemble_system(operators[1], b.data, self.bcs)
                solver.set_operators(A, P)
            else:
                solver.set_operator(A)
        else:  # we called assemble
            A = dolfin.assemble(operators[0])
            rhs = dolfin.assemble(b.data)
            [bc.apply(A) for bc in self.bcs]
            [bc.apply(rhs) for bc in self.bcs]

            # Set nullspace
            if nsp:
                dolfin.as_backend_type(A).set_nullspace(nsp)
                nsp.orthogonalize(rhs)

            if has_preconditioner:
                P = dolfin.assemble(operators[1])
                [bc.apply(P) for bc in self.bcs]
                solver.set_operators(A, P)
            else:
                solver.set_operator(A)

    solver.solve(x.vector(), rhs)
    return adjlinalg.Vector(x)

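# The branches above differ in how boundary conditions are applied:
# dolfin.assemble_system assembles matrix and right-hand side together and
# eliminates the Dirichlet conditions symmetrically, whereas separate
# dolfin.assemble calls need bc.apply on each object afterwards. A minimal,
# self-contained sketch of the two routes (legacy DOLFIN API):
from dolfin import (UnitSquareMesh, FunctionSpace, TrialFunction, TestFunction,
                    Constant, DirichletBC, inner, grad, dx, assemble,
                    assemble_system)

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)
u, v = TrialFunction(V), TestFunction(V)
a = inner(grad(u), grad(v))*dx
L = Constant(1.0)*v*dx
bc = DirichletBC(V, 0.0, "on_boundary")

# Route 1: symmetric elimination of the boundary conditions
A_sym, b_sym = assemble_system(a, L, bc)

# Route 2: assemble separately, then apply the boundary conditions to each object
A = assemble(a)
b = assemble(L)
bc.apply(A)
bc.apply(b)
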
def info_red(*args, **kwargs):
    if get_rank() == 0:
        dolfin.info_red(*args, **kwargs)

def info_red(self, message, values, log_keys, time=None):
    info_red(message % values)
    self._log_data(values, log_keys, time)

try:
    from dolfin import BackwardEuler
except ImportError:
    from dolfin import info_red
    info_red("Need dolfin > 1.2.0 for ode_solver test.")
    import sys
    sys.exit(0)

from dolfin import *
from dolfin_adjoint import *
import ufl.algorithms

if not hasattr(MultiStageScheme, "to_tlm"):
    info_red("Need dolfin > 1.2.0 for ode_solver test.")
    import sys
    sys.exit(0)

mesh = UnitIntervalMesh(1)
#R = FunctionSpace(mesh, "R", 0) # in my opinion, should work, but doesn't
R = VectorFunctionSpace(mesh, "CG", 1, dim=2)

def main(u, form, time, Solver, dt):
    scheme = Solver(form, u, time)
    scheme.t().assign(float(time))

    xs = [float(time)]
    ys = [u.vector().array()[0]]

    solver = PointIntegralSolver(scheme)

try:
    from dolfin import BackwardEuler
except ImportError:
    from dolfin import info_red
    info_red("Need dolfin > 1.2.0 for ode_solver test.")
    import sys
    sys.exit(0)

from dolfin import *
from dolfin_adjoint import *
import ufl.algorithms

if not hasattr(MultiStageScheme, "to_tlm"):
    info_red("Need dolfin > 1.2.0 for ode_solver test.")
    import sys
    sys.exit(0)

# Import cell model (rhs, init_values, default_parameters)
import tentusscher_2004_mcell as model

import random
random.seed(42)

parameters["form_compiler"]["cpp_optimize_flags"] = "-O3 -ffast-math -march=native"

params = model.default_parameters()
state_init = model.init_values()

mesh = UnitIntervalMesh(1)
num_states = state_init.value_size()

def solve(*args, **kwargs): ''' This function overwrites the dolfin.solve function but provides additional functionality to benchmark different solver/preconditioner settings. The arguments of equivalent to dolfin.solve except some (optional) additional parameters: - benchmark = [True, False]: If True, the problem will be solved with all different solver/precondition combinations and the results reported. If False, the problem is solved using the default solver settings. - solve: An optional function parameter that is called instead of dolfin.solve. This parameter is useful if dolfin.solve is overwritten by a custom solver routine. - solver_exclude: A list of solvers that are to be excluded from the benchmark. - preconditioner_exclude: A list of preconditioners that are to be excluded from the benchmark. - return_best: Option to return the fastest solver result only. ''' # Retrieve the extended benchmark arguments. if kwargs.has_key('benchmark'): benchmark = kwargs.pop('benchmark') else: benchmark = False if kwargs.has_key('solve'): solve = kwargs.pop('solve') else: solve = dolfin.fem.solving.solve if kwargs.has_key('solver_exclude'): solver_exclude = kwargs.pop('solver_exclude') else: solver_exclude = [] if kwargs.has_key('preconditioner_exclude'): preconditioner_exclude = kwargs.pop('preconditioner_exclude') else: preconditioner_exclude = [] if benchmark: dolfin.info_blue("Running solver benchmark...") solver_parameters_set = solver_parameters(solver_exclude, preconditioner_exclude) solver_timings = {} failed_solvers = {} ret = None # Perform the benchmark for parameters in solver_parameters_set: solver_failed = False # Replace the existing solver setting with the benchmark one's. new_args, new_kwargs = replace_solver_settings( args, kwargs, parameters) ## print "args,", new_args ## print "kwargs;", new_kwargs # Solve the problem timer = dolfin.Timer("Solver benchmark") timer.start() try: ret = solve(*new_args) except RuntimeError as e: if 'diverged' in e.message.lower(): failure_reason = 'diverged' else: failure_reason = 'unknown' pass timer.stop() #Check to see if the solver returned a zero solution if np.all(args[1].array() == 0.0): solver_failed = True failure_reason = 'Zero Solution' # Save the result parameters_str = parameters["linear_solver"] + ", " + parameters[ "preconditioner"] if solver_failed: if not kwargs.has_key("return_best"): dolfin.info_red(parameters_str + ": solver failed.") failed_solvers[parameters_str] = failure_reason else: # print parameters_str if not kwargs.has_key("return_best"): dolfin.info(parameters_str + ": " + str(timer.value()) + "s.") solver_timings[parameters_str] = timer.value() # Print the report if kwargs.has_key("return_best"): sortedtimings = sorted(solver_timings.iteritems(), key=operator.itemgetter(1)) ret = { k[0]: solver_timings[k[0]] for k in sortedtimings[:int(kwargs["return_best"])] } print_benchmark_report(ret, {}) else: print_benchmark_report(solver_timings, failed_solvers) else: ret = solve(*args) return ret
def derivative_action(self, dependencies, values, variable, contraction_vector, hermitian):
    expressions.update_expressions(self.frozen_expressions)
    constant.update_constants(self.frozen_constants)

    if not hermitian:
        if self.solver not in caching.pis_fwd_to_tlm:
            dolfin.info_blue("No TLM solver, creating ... ")
            creation_timer = dolfin.Timer("to_adm")
            if contraction_vector.data is not None:
                tlm_scheme = self.scheme.to_tlm(contraction_vector.data)
            else:
                tlm_scheme = self.scheme.to_tlm(dolfin.Function(self.fn_space))
            creation_time = creation_timer.stop()
            dolfin.info_red("TLM creation time: %s" % creation_time)

            tlm_solver = dolfin.PointIntegralSolver(tlm_scheme)
            tlm_solver.parameters.update(self.solver.parameters)
            caching.pis_fwd_to_tlm[self.solver] = tlm_solver
        else:
            tlm_solver = caching.pis_fwd_to_tlm[self.solver]
            tlm_scheme = tlm_solver.scheme()
            if contraction_vector.data is not None:
                tlm_scheme.contraction.assign(contraction_vector.data)
            else:
                tlm_scheme.contraction.vector().zero()

        coeffs = [x for x in ufl.algorithms.extract_coefficients(tlm_scheme.rhs_form())
                  if hasattr(x, "function_space")]
        for (coeff, value) in zip(coeffs, values):
            coeff.assign(value.data)

        tlm_scheme.t().assign(self.time)
        tlm_solver.step(self.dt)

        return adjlinalg.Vector(tlm_scheme.solution())
    else:
        if self.solver not in caching.pis_fwd_to_adj:
            dolfin.info_blue("No ADM solver, creating ... ")
            creation_timer = dolfin.Timer("to_adm")
            if contraction_vector.data is not None:
                adm_scheme = self.scheme.to_adm(contraction_vector.data)
            else:
                adm_scheme = self.scheme.to_adm(dolfin.Function(self.fn_space))
            creation_time = creation_timer.stop()
            dolfin.info_red("ADM creation time: %s" % creation_time)

            adm_solver = dolfin.PointIntegralSolver(adm_scheme)
            adm_solver.parameters.update(self.solver.parameters)
            caching.pis_fwd_to_adj[self.solver] = adm_solver
        else:
            adm_solver = caching.pis_fwd_to_adj[self.solver]
            adm_scheme = adm_solver.scheme()
            if contraction_vector.data is not None:
                adm_scheme.contraction.assign(contraction_vector.data)
            else:
                adm_scheme.contraction.vector().zero()

        coeffs = [x for x in ufl.algorithms.extract_coefficients(adm_scheme.rhs_form())
                  if hasattr(x, "function_space")]
        for (coeff, value) in zip(coeffs, values):
            coeff.assign(value.data)

        adm_scheme.t().assign(self.time)
        adm_solver.step(self.dt)

        return adjlinalg.Vector(adm_scheme.solution())

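# Before stepping the derived (TLM or adjoint) scheme, the method above
# synchronises every Function coefficient appearing in the scheme's
# right-hand-side form with the recorded dependency values. A small sketch of
# that coefficient-update step with plain UFL/DOLFIN objects; the form and
# function names below are illustrative, not taken from the snippet:
import ufl.algorithms
from dolfin import UnitIntervalMesh, FunctionSpace, Function, TestFunction, dx

mesh = UnitIntervalMesh(4)
V = FunctionSpace(mesh, "CG", 1)
w = Function(V)          # coefficient that will be refreshed
v = TestFunction(V)
rhs_form = w*v*dx

new_value = Function(V)  # stands in for value.data in the snippet
coeffs = [c for c in ufl.algorithms.extract_coefficients(rhs_form)
          if hasattr(c, "function_space")]
for coeff in coeffs:
    coeff.assign(new_value)
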
def solve(self, var, b):
    if self.adjoint:
        operators = transpose_operators(self.operators)
    else:
        operators = self.operators

    # Fetch/construct the solver
    if var.type in ['ADJ_FORWARD', 'ADJ_TLM']:
        solver = krylov_solvers[idx]
        need_to_set_operator = False
    else:
        if adj_krylov_solvers[idx] is None:
            need_to_set_operator = True
            adj_krylov_solvers[idx] = KrylovSolver(*solver_parameters)
        else:
            need_to_set_operator = False
        solver = adj_krylov_solvers[idx]
    solver.parameters.update(parameters)

    if self.adjoint:
        (nsp_, tnsp_) = (tnsp, nsp)
    else:
        (nsp_, tnsp_) = (nsp, tnsp)

    x = dolfin.Function(fn_space)
    if self.initial_guess is not None and var.type == 'ADJ_FORWARD':
        x.vector()[:] = self.initial_guess.vector()

    if b.data is None:
        dolfin.info_red("Warning: got zero RHS for the solve associated with variable %s" % var)
        return adjlinalg.Vector(x)

    if var.type in ['ADJ_TLM', 'ADJ_ADJOINT']:
        self.bcs = [utils.homogenize(bc) for bc in self.bcs if isinstance(bc, dolfin.cpp.DirichletBC)] + \
                   [bc for bc in self.bcs if not isinstance(bc, dolfin.cpp.DirichletBC)]

    # This is really hideous. Sorry.
    if isinstance(b.data, dolfin.Function):
        rhs = b.data.vector().copy()
        [bc.apply(rhs) for bc in self.bcs]

        if need_to_set_operator:
            if assemble_system:  # if we called assemble_system, rather than assemble
                v = dolfin.TestFunction(fn_space)
                (A, rhstmp) = dolfin.assemble_system(operators[0], dolfin.inner(b.data, v)*dolfin.dx, self.bcs)
                if has_preconditioner:
                    (P, rhstmp) = dolfin.assemble_system(operators[1], dolfin.inner(b.data, v)*dolfin.dx, self.bcs)
                    solver.set_operators(A, P)
                else:
                    solver.set_operator(A)
            else:  # we called assemble
                A = dolfin.assemble(operators[0])
                [bc.apply(A) for bc in self.bcs]
                if has_preconditioner:
                    P = dolfin.assemble(operators[1])
                    [bc.apply(P) for bc in self.bcs]
                    solver.set_operators(A, P)
                else:
                    solver.set_operator(A)
    else:
        if assemble_system:  # if we called assemble_system, rather than assemble
            (A, rhs) = dolfin.assemble_system(operators[0], b.data, self.bcs)
            if need_to_set_operator:
                if has_preconditioner:
                    (P, rhstmp) = dolfin.assemble_system(operators[1], b.data, self.bcs)
                    solver.set_operators(A, P)
                else:
                    solver.set_operator(A)
        else:  # we called assemble
            A = dolfin.assemble(operators[0])
            rhs = dolfin.assemble(b.data)
            [bc.apply(A) for bc in self.bcs]
            [bc.apply(rhs) for bc in self.bcs]
            if need_to_set_operator:
                if has_preconditioner:
                    P = dolfin.assemble(operators[1])
                    [bc.apply(P) for bc in self.bcs]
                    solver.set_operators(A, P)
                else:
                    solver.set_operator(A)

    # Set the nullspace for the linear operator
    if nsp_ is not None and need_to_set_operator:
        dolfin.as_backend_type(A).set_nullspace(nsp_)

    # (Possibly override the user in) orthogonalize
    # the right-hand-side
    if tnsp_ is not None:
        tnsp_.orthogonalize(rhs)

    solver.solve(x.vector(), rhs)
    return adjlinalg.Vector(x)

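# Singular operators (e.g. a pure-Neumann Poisson problem) need the constant
# nullspace attached to the matrix and removed from the right-hand side, which
# is what the nsp_/tnsp_ handling above does. A minimal sketch with the legacy
# DOLFIN API (assumes the PETSc linear algebra backend):
from dolfin import (UnitSquareMesh, FunctionSpace, TrialFunction, TestFunction,
                    Function, Constant, inner, grad, dx, assemble,
                    as_backend_type, VectorSpaceBasis, KrylovSolver)

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, "CG", 1)
u, v = TrialFunction(V), TestFunction(V)
A = assemble(inner(grad(u), grad(v))*dx)   # singular: constants lie in the nullspace
b = assemble(Constant(0.0)*v*dx)

# Build the normalised constant vector and attach it as the nullspace
ones = Function(V)
ones.vector()[:] = 1.0
null_vec = ones.vector().copy()
null_vec *= 1.0/null_vec.norm("l2")
nullspace = VectorSpaceBasis([null_vec])

as_backend_type(A).set_nullspace(nullspace)
nullspace.orthogonalize(b)                 # remove the nullspace component from the RHS

solver = KrylovSolver("cg", "jacobi")
solver.set_operator(A)
x = Function(V)
solver.solve(x.vector(), b)
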
from dolfin import *

try:
    from beatadjoint import BasicCardiacODESolver
except ImportError:
    info_red("Need beatadjoint to run")
    import sys
    sys.exit(0)

try:
    from dolfin import BackwardEuler
except ImportError:
    from dolfin import info_red
    info_red("Need dolfin > 1.2.0 for ode_solver test.")
    import sys
    sys.exit(0)

from dolfin_adjoint import *
import ufl.algorithms

if not hasattr(MultiStageScheme, "to_tlm"):
    info_red("Need dolfin > 1.2.0 for ode_solver test.")
    import sys
    sys.exit(0)

# Import cell model (rhs, init_values, default_parameters)
from beatadjoint.cellmodels.fitzhughnagumo import Fitzhughnagumo as model

set_log_level(ERROR)

domain = UnitIntervalMesh(1)

def solve(selfmat, var, b): if selfmat.adjoint: operators = transpose_operators(selfmat.operators) else: operators = selfmat.operators # Fetch/construct the solver if var.type in ['ADJ_FORWARD', 'ADJ_TLM']: solver = petsc_krylov_solvers[idx] need_to_set_operator = self._need_to_reset_operator else: if adj_petsc_krylov_solvers[idx] is None: need_to_set_operator = True adj_petsc_krylov_solvers[idx] = PETScKrylovSolver(*solver_parameters) adj_ksp = adj_petsc_krylov_solvers[idx].ksp() fwd_ksp = petsc_krylov_solvers[idx].ksp() adj_ksp.setOptionsPrefix(fwd_ksp.getOptionsPrefix()) adj_ksp.setType(fwd_ksp.getType()) adj_ksp.pc.setType(fwd_ksp.pc.getType()) adj_ksp.setFromOptions() else: need_to_set_operator = self._need_to_reset_operator solver = adj_petsc_krylov_solvers[idx] # FIXME: work around DOLFIN bug #583 try: solver.parameters.convergence_norm_type except: solver.parameters.convergence_norm_type = "preconditioned" # end FIXME solver.parameters.update(parameters) self._need_to_reset_operator = False if selfmat.adjoint: (nsp_, tnsp_) = (tnsp, nsp) else: (nsp_, tnsp_) = (nsp, tnsp) x = dolfin.Function(fn_space) if selfmat.initial_guess is not None and var.type == 'ADJ_FORWARD': x.vector()[:] = selfmat.initial_guess.vector() if b.data is None: dolfin.info_red("Warning: got zero RHS for the solve associated with variable %s" % var) return adjlinalg.Vector(x) if var.type in ['ADJ_TLM', 'ADJ_ADJOINT']: selfmat.bcs = [utils.homogenize(bc) for bc in selfmat.bcs if isinstance(bc, dolfin.cpp.DirichletBC)] + [bc for bc in selfmat.bcs if not isinstance(bc, dolfin.cpp.DirichletBC)] # This is really hideous. Sorry. if isinstance(b.data, dolfin.Function): rhs = b.data.vector().copy() [bc.apply(rhs) for bc in selfmat.bcs] if need_to_set_operator: if assemble_system: # if we called assemble_system, rather than assemble v = dolfin.TestFunction(fn_space) (A, rhstmp) = dolfin.assemble_system(operators[0], dolfin.inner(b.data, v)*dolfin.dx, selfmat.bcs) if has_preconditioner: (P, rhstmp) = dolfin.assemble_system(operators[1], dolfin.inner(b.data, v)*dolfin.dx, selfmat.bcs) solver.set_operators(A, P) else: solver.set_operator(A) else: # we called assemble A = dolfin.assemble(operators[0]) [bc.apply(A) for bc in selfmat.bcs] if has_preconditioner: P = dolfin.assemble(operators[1]) [bc.apply(P) for bc in selfmat.bcs] solver.set_operators(A, P) else: solver.set_operator(A) else: if assemble_system: # if we called assemble_system, rather than assemble (A, rhs) = dolfin.assemble_system(operators[0], b.data, selfmat.bcs) if need_to_set_operator: if has_preconditioner: (P, rhstmp) = dolfin.assemble_system(operators[1], b.data, selfmat.bcs) solver.set_operators(A, P) else: solver.set_operator(A) else: # we called assemble A = dolfin.assemble(operators[0]) rhs = dolfin.assemble(b.data) [bc.apply(A) for bc in selfmat.bcs] [bc.apply(rhs) for bc in selfmat.bcs] if need_to_set_operator: if has_preconditioner: P = dolfin.assemble(operators[1]) [bc.apply(P) for bc in selfmat.bcs] solver.set_operators(A, P) else: solver.set_operator(A) if need_to_set_operator: print "|A|: %.6e" % A.norm("frobenius") # Set the nullspace for the linear operator if nsp_ is not None and need_to_set_operator: dolfin.as_backend_type(A).set_nullspace(nsp_) # (Possibly override the user in) orthogonalize # the right-hand-side if tnsp_ is not None: tnsp_.orthogonalize(rhs) print "%s: |b|: %.6e" % (var, rhs.norm("l2")) solver.solve(x.vector(), rhs) return adjlinalg.Vector(x)
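# When the adjoint Krylov solver is created lazily, the method above copies the
# forward solver's PETSc configuration (options prefix, KSP type, PC type) onto
# the new KSP handle. A hedged petsc4py-only sketch of that mirroring step
# (assumes petsc4py is available; the two KSP objects below stand in for the
# forward and adjoint DOLFIN solvers' .ksp() handles):
from petsc4py import PETSc

fwd_ksp = PETSc.KSP().create()
fwd_ksp.setType("gmres")
fwd_ksp.getPC().setType("ilu")
fwd_ksp.setOptionsPrefix("fwd_")

adj_ksp = PETSc.KSP().create()
adj_ksp.setOptionsPrefix(fwd_ksp.getOptionsPrefix())
adj_ksp.setType(fwd_ksp.getType())
adj_ksp.getPC().setType(fwd_ksp.getPC().getType())
adj_ksp.setFromOptions()
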
try:
    from dolfin import BackwardEuler
except ImportError:
    from dolfin import info_red
    info_red("Need dolfin > 1.2.0 for ode_solver test.")
    import sys; sys.exit(0)

from dolfin import *
from dolfin_adjoint import *
import ufl.algorithms

if not hasattr(MultiStageScheme, "to_tlm"):
    info_red("Need dolfin > 1.2.0 for ode_solver test.")
    import sys; sys.exit(0)

# Import cell model (rhs, init_values, default_parameters)
import fitzhughnagumo as model

params = model.default_parameters()
state_init = model.init_values()

mesh = UnitIntervalMesh(1000)
#R = FunctionSpace(mesh, "R", 0) # in my opinion, should work, but doesn't
num_states = state_init.value_size()
V = VectorFunctionSpace(mesh, "CG", 1, dim=num_states)

def main(u, form, time, Scheme, dt):
    scheme = Scheme(form, u, time)
    scheme.t().assign(float(time))

def info_red(*args, **kwargs):
    if MPI.process_number() == 0:
        dolfin.info_red(*args, **kwargs)

from dolfin import *

try:
    from beatadjoint import BasicCardiacODESolver
except ImportError:
    info_red("Need beatadjoint to run")
    import sys; sys.exit(0)

try:
    from dolfin import BackwardEuler
except ImportError:
    from dolfin import info_red
    info_red("Need dolfin > 1.2.0 for ode_solver test.")
    import sys; sys.exit(0)

from dolfin_adjoint import *
import ufl.algorithms

if not hasattr(MultiStageScheme, "to_tlm"):
    info_red("Need dolfin > 1.2.0 for ode_solver test.")
    import sys; sys.exit(0)

# Import cell model (rhs, init_values, default_parameters)
from beatadjoint.cellmodels.fitzhughnagumo import Fitzhughnagumo as model

set_log_level(ERROR)

domain = UnitIntervalMesh(1)
num_states = len(model().initial_conditions()((0.0,)))
V = None

def read_vtu(filename, space): """ Read a vtu file with the supplied filename base, with fields on the supplied FunctionSpace. Return a dict with the Function names as keys and the Function s as values. Vector fields are not currently supported. Currently only works with a single MPI process. """ if not isinstance(filename, str): raise InvalidArgumentException("filename must be a string") if not isinstance(space, dolfin.FunctionSpaceBase): raise InvalidArgumentException("space must be a FunctionSpace") if dolfin.MPI.size(dolfin.mpi_comm_world()) > 1: raise NotImplementedException( "read_vtu cannot be used with more than one MPI process") mesh = space.mesh() dim = mesh.geometry().dim() if isinstance(space, dolfin.VectorFunctionSpace): raise NotImplementedException( "VectorFunctionSpace s not supported by read_vtu") elif not space.num_sub_spaces() == 0: raise NotImplementedException("Subspaces not supported by read_vtu") e = space.ufl_element() degree = e.degree() assert (e.cell().geometric_dimension() == dim) assert (e.cell().topological_dimension() == dim) if (not e.family() in ["Lagrange", "Discontinuous Lagrange"] or not dim in [1, 2, 3] or (dim == 1 and not degree in [1, 2, 3]) or (dim in [2, 3] and not degree in [1, 2])) and \ (not e.family() == "Discontinuous Lagrange" or not dim in [1, 2, 3] or not degree == 0): raise NotImplementedException( 'Element family "%s" with degree %i in %i dimension(s) not supported by read_vtu' % (e.family(), degree, dim)) n = space.dim() n_cells = mesh.num_cells() dof = space.dofmap() if dim == 1: cell_map = None elif dim == 2: if degree in [0, 1]: cell_map = None else: cell_map = {0: 0, 1: 1, 2: 2, 3: 5, 4: 3, 5: 4} else: if degree in [0, 1]: cell_map = None else: cell_map = { 0: 0, 1: 1, 2: 2, 3: 3, 4: 9, 5: 6, 6: 8, 7: 7, 8: 5, 9: 4 } filename = "%s.vtu" % filename reader = vtk.vtkXMLUnstructuredGridReader() reader.SetFileName(filename) reader.Update() vtu = reader.GetOutput() if degree == 0: assert (vtu.GetNumberOfCells() == n) else: assert (vtu.GetNumberOfPoints() == n) assert (vtu.GetNumberOfCells() == n_cells) fields = {} x = dolfin.interpolate(dolfin.Expression("x[0]"), space).vector().array() X = numpy.empty((x.shape[0], dim), dtype=x.dtype) X[:, 0] = x if dim > 1: X[:, 1] = dolfin.interpolate(dolfin.Expression("x[1]"), space).vector().array() if dim > 2: X[:, 2] = dolfin.interpolate(dolfin.Expression("x[2]"), space).vector().array() if degree == 0: for i in range(n_cells): cell = dof.cell_dofs(i) x = X[cell[0], :] vtu_cell = vtu.GetCell(i).GetPointIds() vtu_x = numpy.array([ vtu.GetPoint(vtu_cell.GetId(j))[:dim] for j in range(vtu_cell.GetNumberOfIds()) ]) mag = abs(vtu_x).max(0) tol = 2.0e-15 * mag if any(abs(vtu_x.mean(0) - x) > tol): dolfin.info_red("Relative coordinate error: %.16e" % (abs(vtu_x.mean(0) - x) / mag).max()) raise IOException("Invalid coordinates") for i in range(vtu.GetCellData().GetNumberOfArrays()): cell_data = vtu.GetCellData().GetArray(i) if not cell_data.GetNumberOfComponents() == 1: raise NotImplementedException( "%i components not supported by read_vtu" % cell_data.GetNumberOfComponents()) assert (cell_data.GetNumberOfTuples() == n) name = cell_data.GetName() assert (not name in fields) data = numpy.empty(n) for j in range(n_cells): cell = dof.cell_dofs(j) data[cell[0]] = cell_data.GetTuple1(j) field = dolfin.Function(space, name=name) field.vector().set_local(data) field.vector().apply("insert") fields[name] = field else: for i in range(n_cells): cell = dof.cell_dofs(i) vtu_cell = vtu.GetCell(i).GetPointIds() assert 
(len(cell) == vtu_cell.GetNumberOfIds()) if cell_map is None: for j in range(vtu_cell.GetNumberOfIds()): if not (X[cell[j]] == vtu.GetPoint( vtu_cell.GetId(j))[:dim]).all(): dolfin.info_red( "Coordinate error: %.16e" % (abs(X[cell[j]] - vtu.GetPoint(vtu_cell.GetId(j))[:dim]).max())) raise IOException("Invalid coordinates") else: for j in range(vtu_cell.GetNumberOfIds()): if not (X[cell[cell_map[j]]] == vtu.GetPoint( vtu_cell.GetId(j))[:dim]).all(): dolfin.info_red( "Coordinate error: %.16e" % (abs(X[cell[cell_map[j]]] - vtu.GetPoint(vtu_cell.GetId(j))[:dim]).max())) raise IOException("Invalid coordinates") for i in range(vtu.GetPointData().GetNumberOfArrays()): point_data = vtu.GetPointData().GetArray(i) if not point_data.GetNumberOfComponents() == 1: raise NotImplementedException( "%i components not supported by read_vtu" % point_data.GetNumberOfComponents()) assert (point_data.GetNumberOfTuples() == n) name = point_data.GetName() assert (not name in fields) data = numpy.empty(n) for j in range(n_cells): cell = dof.cell_dofs(j) vtu_cell = vtu.GetCell(j).GetPointIds() assert (len(cell) == vtu_cell.GetNumberOfIds()) if cell_map is None: for k in range(vtu_cell.GetNumberOfIds()): data[cell[k]] = point_data.GetTuple1(vtu_cell.GetId(k)) else: for k in range(vtu_cell.GetNumberOfIds()): data[cell[cell_map[k]]] = point_data.GetTuple1( vtu_cell.GetId(k)) field = dolfin.Function(space, name=name) field.vector().set_local(data) field.vector().apply("insert") fields[name] = field return fields