def __init__(self, model, config):
    """
    Initialize the solver: instantiate each physics class specified as
    'on' in the config object.
    """
    self.model  = model
    self.config = config
    self.config['mode'] = 'transient'

    # initialize velocity solver :
    if self.config['velocity']['on']:
        if self.config['velocity']['approximation'] == 'fo':
            self.velocity_instance = VelocityBP(model, config)
        elif self.config['velocity']['approximation'] == 'stokes':
            self.velocity_instance = VelocityStokes(model, config)
        else:
            print("Please choose 'fo' or 'stokes'.")

    # initialize enthalpy solver :
    if self.config['enthalpy']['on']:
        self.enthalpy_instance = Enthalpy(model, config)

    # initialize age solver :
    if self.config['age']['on']:
        self.age_instance = Age(model, config)

    # initialize surface climate solver :
    if self.config['surface_climate']['on']:
        self.surface_climate_instance = SurfaceClimate(model, config)

    # initialize free surface solver :
    if config['free_surface']['on']:
        self.surface_instance = FreeSurface(model, config)

    self.M_prev = 1.0

    # set up files for logging time-dependent solutions to paraview files :
    if config['log']:
        self.file_U = File(self.config['output_path'] + 'U.pvd')
        self.file_T = File(self.config['output_path'] + 'T.pvd')
        self.file_S = File(self.config['output_path'] + 'S.pvd')
        self.file_a = File(self.config['output_path'] + 'age.pvd')
        self.dheight   = []
        self.mass      = []
        self.t_log     = []
        self.step_time = []
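# A hypothetical config dict covering only the keys the constructor above
# actually reads; the full option set lives in the surrounding model code,
# and the values here are illustrative, not defaults.
config = {
    'mode'            : 'transient',
    'output_path'     : './results/',
    'log'             : True,
    'velocity'        : {'on' : True,  'approximation' : 'fo'},
    'enthalpy'        : {'on' : True},
    'age'             : {'on' : False},
    'surface_climate' : {'on' : False},
    'free_surface'    : {'on' : True},
}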
def __init__(self, space_name, variable_name, function_space):
    # Define initial interface values
    self.interface_new = Function(function_space)
    self.interface_old = Function(function_space)

    # Define initial values for time loop
    self.new = Function(function_space)
    self.old = Function(function_space)

    # Create arrays of empty functions and iterations
    self.array = []
    self.iterations = []

    # Create pvd files
    self.pvd = File(f"solutions/{space_name}/pvd/{variable_name}.pvd")

    # Remember space
    self.function_space = function_space

    # Remember space and variable names
    self.space_name = space_name
    self.variable_name = variable_name

    # Create HDF5 counter
    self.HDF5_counter = 0
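# Hypothetical usage; the enclosing class name is not shown in this snippet,
# so 'SolutionStore' is a stand-in:
#
#   V = FunctionSpace(mesh, 'P', 1)
#   d = SolutionStore('fluid', 'displacement', V)
#   d.new.assign(computed_solution)
#   d.pvd << d.new   # appends to solutions/fluid/pvd/displacement.pvd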
from fenics import RectangleMesh, Point, File
from mpi4py import MPI


def gen_rect_mesh(nx, ny, xmin, xmax, ymin, ymax, outfile, direction='right'):
    # build the mesh on this process only, then write it to disk
    mesh = RectangleMesh(MPI.COMM_SELF, Point(xmin, ymin), Point(xmax, ymax),
                         nx, ny, direction)
    File(MPI.COMM_SELF, outfile) << mesh
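# Hypothetical usage: write a 40 x 20 mesh of [0, 2] x [0, 1] to a DOLFIN XML
# file (the file name is made up).
if __name__ == '__main__':
    gen_rect_mesh(40, 20, 0.0, 2.0, 0.0, 1.0, 'rect_mesh.xml')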
def write_one_file(self, name, data, extension='.pvd'):
    """
    Save a single file of FEniCS Function <data> named <name> to the
    DataOutput instance's directory.  Extension may be '.xml' or '.pvd'.
    """
    s = "::: writing file %s :::" % (name + extension)
    print_text(s, self.color)
    file_handle = File(self.directory + name + extension)
    file_handle << data
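# Hypothetical usage; the DataOutput constructor arguments are assumptions
# based on the attributes used above (self.directory, self.color):
#
#   out = DataOutput('./results/')
#   out.write_one_file('T', model.T)            # -> ./results/T.pvd
#   out.write_one_file('T', model.T, '.xml')    # -> ./results/T.xml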
def distribute_mesh(mesh):
    """Distribute a local copy of a mesh.

    Required argument:
    mesh: a sequential copy of the mesh to be distributed. Only the copy
        on the rank 0 process is used. This mesh will be distributed
        among all processes connected to MPI.COMM_WORLD so that it can
        be used for FEniCS computations.

    Return value: the distributed mesh
    """
    scomm = MPI.COMM_SELF
    wcomm = MPI.COMM_WORLD
    gmesh = gather_mesh(mesh)
    if wcomm.rank == 0:
        assert (gmesh.mpi_comm().size == 1)  # check it's sequential
        with tempfile.TemporaryDirectory() as td:
            meshfile = os.path.join(td, 'mesh.xml')
            logGATHER('writing ' + meshfile)
            File(MPI.COMM_SELF, meshfile) << gmesh
            logGATHER('broadcasting meshfile')
            meshfile = wcomm.bcast(meshfile, root=0)
            try:
                with open(meshfile, 'r') as mf:  # rank 0
                    dmesh = fe.Mesh(wcomm, meshfile)
                    logGATHER('dmesh', dmesh)
            except FileNotFoundError:
                logGATHER('meshfile', meshfile, 'not found')
            wcomm.Barrier()  # wait for everyone to read it
            logGATHER('destroying temp directory')
        # context manager destroyed, temp dir gets deleted
    else:
        meshfile = None
        logGATHER('receiving broadcast meshfile')
        meshfile = wcomm.bcast(meshfile, root=0)
        logGATHER('meshfile', meshfile)
        try:
            with open(meshfile, 'r') as mf:  # rank != 0
                dmesh = fe.Mesh(wcomm, meshfile)
                logGATHER('dmesh', dmesh)
        except FileNotFoundError:
            logGATHER('meshfile', meshfile, 'not found')
        wcomm.Barrier()
    return dmesh
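# Sketch of the intended call pattern (collective over MPI.COMM_WORLD);
# 'make_sequential_mesh' is a placeholder for however the rank-0 mesh is
# built:
#
#   smesh = make_sequential_mesh()       # meaningful on rank 0 only
#   dmesh = distribute_mesh(smesh)       # every rank gets a distributed copy
#   V = fe.FunctionSpace(dmesh, 'P', 1)  # usable in parallel assembly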
# apply Dirichlet boundary condition on coupling interface
bcs.append(precice.create_coupling_dirichlet_boundary_condition(V))

a, L = lhs(F), rhs(F)

# Time-stepping
u_np1 = Function(V)
F_known_u = u_np1 * v / dt * dx + alpha * dot(grad(u_np1), grad(v)) * dx \
    - u_n * v / dt * dx
u_np1.rename("T", "")
t = 0
u_D.t = t + precice._precice_tau
assert (dt == precice._precice_tau)

file_out = File("Solid/VTK/%s.pvd" % solver_name)

while precice.is_coupling_ongoing():
    # Compute solution
    solve(a == L, u_np1, bcs)

    # the Dirichlet problem obtains the flux from its solution and sends the
    # boundary flux to the Neumann problem
    fluxes = fluxes_from_temperature_full_domain(F_known_u, V, k)
    is_converged = precice.advance(fluxes, dt)

    if is_converged:
        # Update previous solution
        # only produce output if t is (approximately) a multiple of dt_out
        if abs(t % dt_out) < 10e-5:
t = 0

# reference solution at t=0
u_ref = interpolate(u_D, V)
u_ref.rename("reference", " ")

# mark mesh w.r.t ranks
mesh_rank = MeshFunction("size_t", mesh, mesh.topology().dim())
if problem is ProblemType.NEUMANN:
    mesh_rank.set_all(MPI.rank(MPI.comm_world) + 4)
else:
    mesh_rank.set_all(MPI.rank(MPI.comm_world) + 0)
mesh_rank.rename("myRank", "")

# Generating output files
temperature_out = File("out/%s.pvd" % precice.get_participant_name())
ref_out = File("out/ref%s.pvd" % precice.get_participant_name())
error_out = File("out/error%s.pvd" % precice.get_participant_name())
ranks = File("out/ranks%s.pvd" % precice.get_participant_name())

# output solution and reference solution at t=0, n=0
n = 0
print('output u^%d and u_ref^%d' % (n, n))
temperature_out << u_n
ref_out << u_ref
ranks << mesh_rank

error_total, error_pointwise = compute_errors(u_n, u_ref, V)
error_out << error_pointwise

# set t_1 = t_0 + dt, this gives u_D^1
u_np1.rename("Source-Data", "") elif args.drain: u_n.rename("Drain-Data", "") u_np1.rename("Drain-Data", "") t = 0 mesh_rank = MeshFunction("size_t", mesh, mesh.topology().dim()) if args.source: mesh_rank.set_all(MPI.rank(MPI.comm_world) + 4) else: mesh_rank.set_all(MPI.rank(MPI.comm_world) + 0) mesh_rank.rename("myRank", "") # Generating output files solution_out = File("output/%s.pvd" % precice.get_participant_name()) ranks = File("output/ranks%s.pvd" % precice.get_participant_name()) # output solution and reference solution at t=0, n=0 n = 0 print('output u^%d and u_ref^%d' % (n, n)) solution_out << u_n ranks << mesh_rank while precice.is_coupling_ongoing(): # write checkpoint if precice.is_action_required(precice.action_write_iteration_checkpoint()): precice.store_checkpoint(u_n, t, n) read_data = precice.read_data()
# apply constant Dirichlet boundary condition at bottom edge
# apply Dirichlet boundary condition on coupling interface
bcs = [
    DirichletBC(V, coupling_expression, coupling_boundary),
    DirichletBC(V, u_D, bottom_boundary)
]

a, L = lhs(F), rhs(F)

# Time-stepping
u_np1 = Function(V)
t = 0
u_D.t = t + dt

# mark mesh w.r.t ranks
ranks = File("Solid/VTK/ranks%s.pvd" % precice.get_participant_name())
mesh_rank = MeshFunction("size_t", mesh, mesh.topology().dim())
mesh_rank.set_all(MPI.rank(MPI.comm_world))
mesh_rank.rename("myRank", "")
ranks << mesh_rank

# Create output file
file_out = File("Solid/VTK/%s.pvd" % precice.get_participant_name())
file_out << u_n
print("output vtk for time = {}".format(float(t)))
n = 0

fluxes = Function(V_g)
fluxes.rename("Fluxes", "")
nx = 50
ny = 50
nz = 10

model = Model()
model.generate_uniform_mesh(nx, ny, nz, xmin=0, xmax=L, ymin=0, ymax=L,
                            generate_pbcs=True)

Surface = Expression('- x[0] * tan(alpha)', alpha=alpha,
                     element=model.Q.ufl_element())
Bed = Expression('- x[0] * tan(alpha) - 1000.0 + 500.0 * '
                 'sin(2*pi*x[0]/L) * sin(2*pi*x[1]/L)',
                 alpha=alpha, L=L, element=model.Q.ufl_element())

File('./results_A_BP/B.pvd') << interpolate(Bed, model.Q)

model.set_geometry(Surface, Bed, deform=True)
model.set_parameters(IceParameters())
model.calculate_boundaries()
model.initialize_variables()

F = SteadySolver(model, config)
F.solve()
# the finite-element mesh used :
mesh = UnitSquareMesh(n_x, n_x)

# initialize the model :
grid_model = GridModel(mesh, out_dir, verbose=False)

# create the main model to perform MPM calculations :
model = Model(out_dir, grid_model, dt, verbose=False)

# add the materials to the model :
model.add_material(disk1)
model.add_material(disk2)

# files for saving grid variables :
m_file = File(out_dir + '/m.pvd')
u_file = File(out_dir + '/u.pvd')
a_file = File(out_dir + '/a.pvd')
f_file = File(out_dir + '/f.pvd')

# callback function saves result :
def cb_ftn():
    if model.iter % save_int == 0:
        model.retrieve_cpp_grid_m()
        model.retrieve_cpp_grid_U3()
        model.retrieve_cpp_grid_f_int()
        model.retrieve_cpp_grid_a3()
        grid_model.save_pvd(grid_model.m,  'm',  f=m_file, t=model.t)
        grid_model.save_pvd(grid_model.U3, 'U3', f=u_file, t=model.t)
        grid_model.save_pvd(grid_model.a3, 'a3', f=a_file, t=model.t)
def makeSaveMonitor(self, prefix=None):
    """Make a saveMonitor for use as a TS monitor

    Note that makeSaveMonitor is not itself a monitor function. It
    returns a callable that can be used as a save monitor. Typical
    usage:

        (saveMonitor, closer) = ts.makeSaveMonitor(prefix='solution')
        ts.setMonitor(saveMonitor)
        ts.solve()
        ...
        closer()

    Optional keyword argument:
    prefix=None: The filename prefix to use for the saved files (may
        begin with a file path). If not provided, the prefix is
        'solution' prepended to a string produced by the uuid function
        uuid4.
    """
    if not prefix:
        prefix = 'solution' + str(uuid4()) + '_'
    #
    # save basic info about FunctionSpace
    #
    fsname = prefix + 'rank' + str(self.comm.rank) + '_fsinfo.h5'
    if not hasattr(self, 'remap'):
        self.lmesh = gather_mesh(self.ksdg.mesh)
        logTS('makeSaveMonitor: self.lmesh', self.lmesh)
        self.remap = dofremap(self.ksdg, gathered_mesh=self.lmesh)
        logTS('makeSaveMonitor: self.remap', self.remap)
    if hasattr(self.ksdg, 'omesh'):
        self.omesh = gather_mesh(self.ksdg.omesh)
    if self.comm.rank == 0:
        logTS('self.lmesh.mpi_comm().size', self.lmesh.mpi_comm().size)
        lmeshf = File(MPI.COMM_SELF, prefix + '_mesh.xml.gz')
        lmeshf << self.lmesh
        logTS('self.lmesh saved')
        if hasattr(self, 'omesh'):
            logTS('self.omesh.mpi_comm().size',
                  self.omesh.mpi_comm().size)
            omeshf = File(MPI.COMM_SELF, prefix + '_omesh.xml.gz')
            omeshf << self.omesh
            logTS('self.omesh saved')
    try:
        scomm = fe.mpi_comm_self()
    except AttributeError:
        scomm = MPI.COMM_SELF
    fsf = HDF5File(scomm, fsname, 'w')
    fs = self.ksdg.sol.function_space()
    fsf.write(self.ksdg.sol, 'sol')
    fsf.write(self.ksdg.mesh, 'mesh')
    if self.comm.rank == 0:
        fsf.write(self.lmesh, 'lmesh')
    fsf.close()
    fsf = h5py.File(fsname, 'r+')
    fsf['/mpi/rank'] = self.comm.rank
    fsf['/mpi/size'] = self.comm.size
    if self.comm.rank == 0:
        fsf['remap'] = self.remap
    if hasattr(self.ksdg, 'srhos'):
        rhofss = [rho.function_space() for rho in self.ksdg.srhos]
    else:
        rhofss = [self.ksdg.srho.function_space()]
    if hasattr(self.ksdg, 'sUs'):
        Ufss = [U.function_space() for U in self.ksdg.sUs]
    else:
        Ufss = [self.ksdg.sU.function_space()]
    dofmap = fs.dofmap()
    fsinfo = {}
    fsf['degree'] = self.ksdg.degree
    if hasattr(self.ksdg, 'nelements'):
        fsf['nelements'] = self.ksdg.nelements
    if hasattr(self.ksdg, 'periodic'):
        fsf['periodic'] = self.ksdg.periodic
    else:
        fsf['periodic'] = False
    if hasattr(self.ksdg, 'ligands'):
        fsf['ligands'] = pickle.dumps(self.ksdg.ligands, protocol=0)
    else:
        fsf['ligands'] = False
    if hasattr(self.ksdg, 'param_names'):
        fsf['param_names'] = pickle.dumps(self.ksdg.param_names,
                                          protocol=0)
    else:
        fsf['param_names'] = pickle.dumps([], protocol=0)
    if hasattr(self.ksdg, 'param_funcs'):
        try:
            dillfunc = dill.dumps(self.ksdg.param_funcs, protocol=0)
            fsf['param_funcs'] = np.void(dillfunc)
            #
            # retrieve using fsf['param_funcs'].tobytes()
            #
        except ValueError as e:
            traceback.print_exc()
            fsf['param_funcs'] = dill.dumps([], protocol=0)
    else:
        fsf['param_funcs'] = dill.dumps([], protocol=0)
    if hasattr(self.ksdg, 'params0'):
        fsf['params0'] = pickle.dumps(self.ksdg.params0, protocol=0)
    else:
        fsf['params0'] = pickle.dumps({}, protocol=0)
    dim = self.ksdg.dim
    fsf['dim'] = dim
    try:
        fsf['ghost_mode'] = self.ksdg.mesh.ghost_mode()
    except AttributeError:
        pass
    fsf['off_process_owner'] = dofmap.off_process_owner()
    owneddofs = dofmap.ownership_range()
    fsf['ownership_range'] = owneddofs
    logFSINFO('owneddofs', owneddofs)
    ltgu = np.zeros(len(dofmap.local_to_global_unowned()), dtype=int)
    ltgu[:] = dofmap.local_to_global_unowned()
    fsf['local_to_global_unowned'] = ltgu
    ltgi = np.zeros(owneddofs[1] - owneddofs[0], dtype=int)
    logFSINFO('np.array(dofmap.dofs())', np.array(dofmap.dofs()))
    logFSINFO('dofmap local_to_global_indexes', *owneddofs,
              np.array([dofmap.local_to_global_index(d)
                        for d in range(*owneddofs)]))
    logFSINFO('dofmap local_to_global_indexes', 0,
              owneddofs[1] - owneddofs[0],
              np.array([dofmap.local_to_global_index(d)
                        for d in range(owneddofs[1] - owneddofs[0])]))
    ltgi = np.array([dofmap.local_to_global_index(d)
                     for d in range(*owneddofs)])
    fsf['local_to_global_index'] = ltgi
    fsf['tabulate_local_to_global_dofs'] = \
        dofmap.tabulate_local_to_global_dofs()
    logFSINFO('tabulate_local_to_global_dofs',
              dofmap.tabulate_local_to_global_dofs())
    # fsf['shared_nodes'] = dofmap.shared_nodes()
    try:
        fsf['neighbours'] = repr(dofmap.neighbours())
    except (AttributeError, TypeError):
        pass
    try:
        fsf['dofmap_str'] = dofmap.str(True)
    except (AttributeError, TypeError):
        fsf['dofmap_str'] = repr(dofmap)
    try:
        fsf['dofmap_id'] = dofmap.id()
    except (AttributeError, TypeError):
        pass
    try:
        fsf['dofmap_label'] = dofmap.label()
    except (AttributeError, TypeError):
        pass
    try:
        fsf['dofmap_name'] = dofmap.name()
    except (AttributeError, TypeError):
        pass
    try:
        fsf['max_cell_dimension'] = dofmap.max_cell_dimension()
    except (AttributeError, TypeError):
        pass
    try:
        fsf['max_element_dofs'] = dofmap.max_element_dofs()
    except (AttributeError, TypeError):
        pass
    try:
        fsf['dofmap_parameters'] = list(dofmap.parameters.iteritems())
    except (AttributeError, TypeError):
        try:
            fsf['dofmap_parameters'] = list(dofmap.parameters.items())
        except (AttributeError, TypeError):
            pass
    try:
        fsf['dofmap_thisown'] = dofmap.thisown
    except (AttributeError, TypeError):
        pass
    try:
        fsf['is_view'] = dofmap.is_view()
    except (AttributeError, TypeError):
        pass
    for d in range(dim + 1):
        fsf['entity_closure_dofs' + str(d)] = \
            dofmap.entity_closure_dofs(self.ksdg.mesh, d)
        fsf['entity_dofs' + str(d)] = \
            dofmap.entity_dofs(self.ksdg.mesh, d)
    fsf['dofcoords'] = np.reshape(fs.tabulate_dof_coordinates(),
                                  (-1, dim))
    try:
        fsf['block_size'] = dofmap.block_size()
    except (AttributeError, TypeError):
        pass
    try:
        fsf['dofs'] = dofmap.dofs()
    except (AttributeError, TypeError):
        pass
    for i in range(len(rhofss)):
        fsf['rho_dofs/' + str(i)] = rhofss[i].dofmap().dofs()
    for i in range(len(Ufss)):
        fsf['U_dofs/' + str(i)] = Ufss[i].dofmap().dofs()
    dofspercell = dofmap.cell_dofs(0).size
    ncells = self.ksdg.mesh.cells().shape[0]
    celldofs = np.zeros((ncells, dofspercell), dtype=int)
    for cell in range(ncells):
        celldofs[cell] = dofmap.cell_dofs(cell)
    fsf['cell_dofs'] = celldofs
    fsf.close()
    logTS('creating KSDGTimeSeries')
    tsname = prefix + '_ts.h5'
    if self.comm.rank == 0:
        tsf = KSDGTimeSeries(tsname, 'w')
        tsf.close()

    def closeSaveMonitor():
        if self.comm.rank == 0:
            tsf.close()

    def saveMonitor(ts, k, t, u):
        #
        # make a local copy of the dof vector
        #
        # logTS('saveMonitor entered')
        psol = fe.as_backend_type(self.ksdg.sol.vector()).vec()
        u.copy(psol)
        self.ksdg.sol.vector().apply('insert')
        lu = self.ksdg.sol.vector().gather_on_zero()
        if ts.comm.rank == 0:
            lu = lu[self.remap]
            #
            # reopen and close every time, so file valid after abort
            #
            tsf = KSDGTimeSeries(tsname, 'r+')
            tsf.store(lu, t, k=k)
            tsf.close()

    return (saveMonitor, closeSaveMonitor)
from fenics import FunctionSpace, interpolate, File
from utils import reference_data, exact_kepler

# get initial data and mesh for a Kepler problem with
# energy H=-1.5 (elliptic orbit) and initial angular momentum L=0.5
H = -1.5
L = 0.5
(_, _, gexp, mesh) = reference_data(H=H, L=L, mesh_type='unstructured',
                                    mesh_size=24, padding=0.07)
(q0, p0, T, S, (c, a, b), sol, t2s) = exact_kepler(H, L)

g = interpolate(gexp, FunctionSpace(mesh, "Regge", 1))
File("plots/kepler_metric.pvd") << g
F += precice.create_coupling_neumann_boundary_condition(v)

a, L = lhs(F), rhs(F)

# Time-stepping
u_np1 = Function(V)
F_known_u = u_np1 * v * dx + dt * dot(grad(u_np1), grad(v)) * dx \
    - (u_n + dt * f) * v * dx
u_np1.rename("Temperature", "")
t = 0

# reference solution at t=0
u_e = interpolate(u_D, V)
u_e.rename("reference", " ")

file_out = File("out/%s.pvd" % solver_name)
ref_out = File("out/ref%s.pvd" % solver_name)

# output solution and reference solution at t=0, n=0
n = 0
print('output u^%d and u_ref^%d' % (n, n))
file_out << u_n
ref_out << u_e

# set t_1 = t_0 + dt, this gives u_D^1
u_D.t = t + precice._precice_tau
assert (dt == precice._precice_tau)

while precice.is_coupling_ongoing():
    # Compute solution u^n+1, use bcs u_D^n+1, u^n and coupling bcs
Forces_x, Forces_y = precice.create_force_boundary_condition(V)

a_form = lhs(res)
L_form = rhs(res)

# Prepare for time-stepping
t = 0.0
n = 0
time = []
u_tip = []
time.append(0.0)
u_tip.append(0.0)
E_ext = 0

displacement_out = File("Solid/FSI-S/u_fsi.pvd")
u_n.rename("Displacement", "")
u_np1.rename("Displacement", "")
displacement_out << u_n

# time loop for coupling
while precice.is_coupling_ongoing():
    A, b = assemble_system(a_form, L_form, bc)
    b_forces = b.copy()  # b is the same for every iteration, only forces change
    for ps in Forces_x:
        ps.apply(b_forces)
    for ps in Forces_y:
u = TrialFunction(V)
v = TestFunction(V)
F = u * v / dt * dx + alpha * dot(grad(u), grad(v)) * dx - u_n * v / dt * dx

# apply Dirichlet boundary condition on coupling interface
bcs.append(precice.create_coupling_dirichlet_boundary_condition(V))

a, L = lhs(F), rhs(F)

# Time-stepping
u_np1 = Function(V)
F_known_u = u_np1 * v / dt * dx + alpha * dot(grad(u_np1), grad(v)) * dx \
    - u_n * v / dt * dx
t = 0
u_D.t = t + dt

file_out = File("Solid/VTK/%s.pvd" % precice._solver_name)
n = 0

while precice.is_coupling_ongoing():
    # Compute solution
    solve(a == L, u_np1, bcs)

    # the Dirichlet problem obtains the flux from its solution and sends the
    # boundary flux to the Neumann problem
    fluxes = fluxes_from_temperature_full_domain(F_known_u, V, k)
    t, n, precice_timestep_is_complete, precice_dt = \
        precice.advance(fluxes, u_np1, u_n, t, dt, n)
    # todo: we could also decide on the time step size inside the adapter
    dt = np.min([fenics_dt, precice_dt])

    if precice_timestep_is_complete:
        # only produce output if t is (approximately) a multiple of dt_out
        if abs(t % dt_out) < 10e-5:
# parameters for Time-Stepping
t = 0.0
n = 0
time = []
u_tip = []
time.append(0.0)
u_tip.append(0.0)
E_ext = 0

# mark mesh w.r.t ranks
mesh_rank = MeshFunction("size_t", mesh, mesh.topology().dim())
mesh_rank.set_all(MPI.rank(MPI.comm_world) + 0)
mesh_rank.rename("myRank", "")

displacement_out = File("Solid/FSI-S/u_fsi.pvd")
ranks = File("Solid/FSI-S/ranks%s.pvd" % precice.get_participant_name())

u_n.rename("Displacement", "")
u_np1.rename("Displacement", "")
displacement_out << u_n
ranks << mesh_rank

while precice.is_coupling_ongoing():
    if precice.is_action_required(precice.action_write_iteration_checkpoint()):
        # write checkpoint
        precice.store_checkpoint(u_n, t, n)

    # read data from preCICE and get a new coupling expression
    read_data = precice.read_data()
# Prepare for time-stepping

# parameters for time-stepping
# T = 1.0
t = 0.0
n = 0
time = []
u_tip = []
time.append(0.0)
u_tip.append(0.0)
E_ext = 0

if Case is StructureCase.DUMMY2D:
    displacement_out = File("Solid/Dummy2DOut/u_2dd.pvd")
elif Case is StructureCase.DUMMY3D:
    displacement_out = File("Solid/Dummy3DOut/u_3dd.pvd")
elif Case is StructureCase.OPENFOAM:
    displacement_out = File("Solid/FSI-S/u_fsi.pvd")
elif Case is StructureCase.RFERENCE:  # 'RFERENCE' is the enum member's actual spelling
    displacement_out = File("Reference/u_ref.pvd")

displacement_out << u_n

# time loop for coupling
if Case is not StructureCase.RFERENCE:
import sympy
from fenics import Point, Expression, FunctionSpace, interpolate, File
from mshr import Circle, generate_mesh
from sympy2fenics import sympy2exp
from utils import plot_mesh, exact_kepler

# exact Kepler orbit
H = -1.5
L = 0.5
(q0, p0, _, S, (c, a, b), _, _) = exact_kepler(H, L)

# symbolic computation of the Jacobi metric for Schwarzschild geodesics
x, y = sympy.var('x[0], x[1]')
E, m, M = sympy.var('E, m, M')
r = sympy.sqrt(x * x + y * y)
g = E**2 - m**2 + 2 * M * m**2 / r
f = 1 / (1 - 2 * M / r)
ds = f * g / r**2 * sympy.Matrix(((f * x**2 + y**2, (f - 1) * x * y),
                                  ((f - 1) * x * y, x**2 + f * y**2)))

# choose parameters so that the Schwarzschild solution is close to the Kepler
# one; the most important parameter is M.
M = 0.0025
m = 1.0 / M**0.5
E = (2.0 * H + m**2)**0.5
gexp = Expression(sympy2exp(ds), E=E, m=m, M=M, degree=8)

# build a large enough domain
domain = Circle(Point(c), b + 0.45)
mesh = generate_mesh(domain, 32)

g = interpolate(gexp, FunctionSpace(mesh, "Regge", 1))
File("plots/schwarzschild_jacobi_metric.pvd") << g
# residual
a_np1 = update_a(du, u_n, v_n, a_n, ufl=True)
v_np1 = update_v(a_np1, u_n, v_n, a_n, ufl=True)
res = m(avg(a_n, a_np1, alpha_m), v) + k(avg(u_n, du, alpha_f), v)

a_form = lhs(res)
L_form = rhs(res)

# parameters for Time-Stepping
t = 0.0
n = 0
E_ext = 0

displacement_out = File("output/u_fsi.pvd")
u_n.rename("Displacement", "")
u_np1.rename("Displacement", "")
displacement_out << u_n

while precice.is_coupling_ongoing():
    if precice.is_action_required(precice.action_write_iteration_checkpoint()):
        # write checkpoint
        precice.store_checkpoint(u_n, t, n)

    # read data from preCICE and get a new coupling expression
    read_data = precice.read_data()

    # Update the point sources on the coupling boundary with the new read data
def solve(self):
    r"""
    Perform the optimization.

    First, we define functions that return the objective function and
    Jacobian.  These are passed to scipy's fmin_l_bfgs_b, which is a
    python wrapper for the Fortran code of Nocedal et al.  The functions
    are needed so that the calculation of the search direction and the
    update of the search point take place globally, across all
    processors, rather than on a per-processor basis.

    We also specify bounds:

    :Condition:
    .. math::
       \beta_{2} > 0
    """
    model = self.model
    config = self.config

    def get_global(m):
        """
        Takes a distributed object and returns a numpy array that
        contains all global values.
        """
        if type(m) == float:
            return array(m)

        # return a numpy array of values or single value of Constant :
        if type(m) == Constant:
            a = p = zeros(m.value_size())
            m.eval(a, p)
            return a

        # return a numpy array of values of a FEniCS function :
        elif type(m) in (function.Function, functions.function.Function):
            m_v = m.vector()
            m_a = DoubleArray(m.vector().size())
            try:
                m.vector().gather(m_a, arange(m_v.size(), dtype='intc'))
                return array(m_a.array())
            except TypeError:
                return m.vector().gather(arange(m_v.size(), dtype='intc'))

        # The following type had to be added to the original function so
        # that it could accommodate the return from the adjoint system
        # solve :
        elif type(m) == cpp.la.Vector:
            m_a = DoubleArray(m.size())
            try:
                m.gather(m_a, arange(m.size(), dtype='intc'))
                return array(m_a.array())
            except TypeError:
                return m.gather(arange(m.size(), dtype='intc'))

        else:
            raise TypeError('Unknown parameter type %s.' % str(type(m)))

    def set_local_from_global(m, m_global_array):
        """
        Sets the local values of the distributed object m to the values
        contained in the global array m_global_array.
        """
        # This had to be changed because the dolfin-adjoint
        # constant.Constant is different from the constant of dolfin.
        if type(m) == Constant:
            if m.rank() == 0:
                m.assign(m_global_array[0])
            else:
                m.assign(Constant(tuple(m_global_array)))
        elif type(m) in (function.Function, functions.function.Function):
            begin, end = m.vector().local_range()
            m_a_local = m_global_array[begin:end]
            m.vector().set_local(m_a_local)
            m.vector().apply('insert')
        else:
            raise TypeError('Unknown parameter type')

    def I(c_array, *args):
        """
        Solve the forward model with the given control and calculate the
        objective function.
        """
        n = len(c_array) // len(config['adjoint']['control_variable'])
        for ii, c in enumerate(config['adjoint']['control_variable']):
            set_local_from_global(c, c_array[ii * n:(ii + 1) * n])
        self.forward_model.solve()
        I = assemble(self.adjoint_instance.I)  # FIXME: ISMIP_HOM inverse C fails
        return I

    def J(c_array, *args):
        """
        Solve the adjoint model and calculate the gradient.
        """
        # dolfin-adjoint method :
        n = len(c_array) // len(config['adjoint']['control_variable'])
        for ii, c in enumerate(config['adjoint']['control_variable']):
            set_local_from_global(c, c_array[ii * n:(ii + 1) * n])
        self.adjoint_instance.solve()

        # This is not the best place for this, but we leave it here for
        # now so that we can see the impact of every line-search update
        # on the variables of interest.
        Js = []
        for JJ in self.adjoint_instance.J:
            Js.extend(get_global(assemble(JJ)))
        Js = array(Js)

        # FIXME: project and extrude ruin the output for paraview; we
        # just save when finished for now.
        U = project(as_vector([model.u, model.v, model.w]))
        dSdt = project(- (model.u * model.S.dx(0) + model.v * model.S.dx(1))
                       + model.w + model.adot)
        file_b_pvd << model.extrude(model.beta2, 3, 2)
        file_u_pvd << U
        file_dSdt_pvd << dSdt
        return Js

    #=========================================================================
    # Set up file I/O
    path = config['output_path']
    file_b_pvd = File(path + 'beta2.pvd')
    file_u_pvd = File(path + 'U.pvd')
    file_dSdt_pvd = File(path + 'dSdt.pvd')

    # Switching over to the parallel version of the optimization that is
    # found in the dolfin-adjoint optimize.py file :
    maxfun = config['adjoint']['max_fun']
    bounds_list = config['adjoint']['bounds']
    beta_0 = []
    for mm in config['adjoint']['control_variable']:
        beta_0.extend(get_global(mm))
    beta_0 = array(beta_0)

    # Shut up all processors but the first one.
    if MPI.rank(mpi_comm_world()) != 0:
        iprint = -1
    else:
        iprint = 1
    b = []

    # convert bounds to an array of tuples and serialize it in a parallel
    # environment :
    for bounds in bounds_list:
        bounds_arr = []
        for i in range(2):
            if type(bounds[i]) == int or type(bounds[i]) == float:
                bounds_arr.append(bounds[i] *
                                  ones(model.beta2.vector().size()))
            else:
                bounds_arr.append(get_global(bounds[i]))
        b.append(array(bounds_arr).T)
    bounds = vstack(b)
    print(bounds)

    # minimize I with initial guess beta_0 and gradient J :
    mopt, f, d = fmin_l_bfgs_b(I, beta_0, fprime=J, bounds=bounds,
                               maxfun=maxfun, iprint=iprint)

    n = len(mopt) // len(config['adjoint']['control_variable'])
    for ii, c in enumerate(config['adjoint']['control_variable']):
        set_local_from_global(c, mopt[ii * n:(ii + 1) * n])
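# A minimal, self-contained sketch of the fmin_l_bfgs_b call pattern used
# above, on a toy quadratic; all names here are illustrative and not part of
# the solver.
from numpy import array
from scipy.optimize import fmin_l_bfgs_b

def I_toy(x, *args):
    # objective : f(x) = |x - 3|^2
    return float(((x - 3.0)**2).sum())

def J_toy(x, *args):
    # gradient : grad f(x) = 2 (x - 3)
    return 2.0 * (x - 3.0)

x0 = array([0.0, 0.0])
bnds = [(0.0, None), (0.0, None)]  # one (lower, upper) pair per unknown
xopt, fmin, info = fmin_l_bfgs_b(I_toy, x0, fprime=J_toy, bounds=bnds)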
# apply constant Dirichlet boundary condition at bottom edge
# apply Dirichlet boundary condition on coupling interface
bcs = [
    DirichletBC(V, coupling_expression, coupling_boundary),
    DirichletBC(V, u_D, bottom_boundary)
]

a, L = lhs(F), rhs(F)

# Time-stepping
u_np1 = Function(V)
t = 0
u_D.t = t + dt

# mark mesh w.r.t ranks
ranks = File("output/ranks%s.pvd" % precice.get_participant_name())
mesh_rank = MeshFunction("size_t", mesh, mesh.topology().dim())
mesh_rank.set_all(MPI.rank(MPI.comm_world))
mesh_rank.rename("myRank", "")
ranks << mesh_rank

# Create output file
file_out = File("output/%s.pvd" % precice.get_participant_name())
file_out << u_n
print("output vtk for time = {}".format(float(t)))
n = 0

fluxes = Function(V_g)
fluxes.rename("Fluxes", "")
def solve(self):
    """
    Solve the problem using a Picard iteration, evaluating the velocity,
    enthalpy, surface mass balance, temperature boundary condition, and
    the age equation.  Turn off any solver by setting the appropriate
    config dict entry to False.  If config['coupled']['on'] is False,
    solve only once.
    """
    model = self.model
    config = self.config
    T0 = config['velocity']['T0']
    outpath = config['output_path']

    # Set the initial Picard iteration (PI) parameters
    # L_\infty norm in velocity between iterations
    inner_error = inf

    # number of iterations
    counter = 0

    # previous velocity for norm calculation
    u_prev = project(model.u, model.Q).vector().array()

    # set an inner tolerance for PI
    inner_tol = config['coupled']['inner_tol']
    max_iter = config['coupled']['max_iter']

    # Initialize a temperature field for visc. calc.
    if config['velocity']['use_T0']:
        model.T.vector().set_local(T0 * ones(len(model.T.vector().array())))

    if not config['coupled']['on']:
        max_iter = 1

    # Perform a Picard iteration until the L_\infty norm of the velocity
    # difference is less than tolerance
    while inner_error > inner_tol and counter < max_iter:

        # Solve surface mass balance and temperature boundary condition
        if config['surface_climate']['on']:
            self.surface_climate_instance.solve()

        # Solve velocity
        if config['velocity']['on']:
            self.velocity_instance.solve()
            U = project(as_vector([model.u, model.v, model.w]))
            if config['log']:
                File(outpath + 'U.pvd') << U
                # if the velocity solve is full-stokes, save pressure too :
                if config['velocity']['approximation'] == 'stokes':
                    File(outpath + 'P.pvd') << model.P
            print_min_max(U, 'U')

        # Solve enthalpy (temperature, water content)
        if config['enthalpy']['on']:
            self.enthalpy_instance.solve()
            if config['log']:
                File(outpath + 'T.pvd') << model.T    # save temperature
                File(outpath + 'Mb.pvd') << model.Mb  # save melt rate
                File(outpath + 'W.pvd') << model.W    # save water content
            print_min_max(model.T, 'T')

        # Calculate L_infinity norm
        if config['coupled']['on']:
            u_new = project(model.u, model.Q).vector().array()
            diff = (u_prev - u_new)
            inner_error = abs(diff).max()  # L_\infty norm of the update
            u_prev = u_new

        counter += 1

        print('Picard iteration %i (max %i) done: r = %.3e (tol %.3e)'
              % (counter, max_iter, inner_error, inner_tol))

    # Solve age equation
    if config['age']['on']:
        self.age_instance.solve()
        if config['log']:
            File(outpath + 'age.pvd') << model.age  # save age
def solve_flem(model_space, physical_space, flow, u_n, mesh, V, bc, dt,
               num_steps, out_time, plot, statistics, name):
    """
    Solve for landscape evolution

    This function does the hard work. First the model domain is created.
    Then we loop through time and solve the diffusion equation to solve
    for landscape evolution. Output can be saved as vtk files at every
    "out_time" specified. Plots using the fenics inbuilt library can be
    visualised at every "plot_time".

    This function returns a 1d numpy array of time, sediment flux and,
    if statistics is turned on, a 2d numpy array of the final wavelength
    of the landscape.

    :param model_space: list of domain variables, [lx, ly, res]
    :param physical_space: list of physical parameters,
        [kappa, c, nexp, alpha, U]
    :param flow: 0 = MFD node-to-node; 1 = MFD cell-to-cell;
        2 = SD node-to-node; 3 = SD cell-to-cell
    :param u_n: elevation function
    :param mesh: dolfin mesh
    :param V: fenics function space
    :param bc: boundary conditions
    :param dt: time step size in years
    :param num_steps: number of time steps
    :param out_time: time steps to output vtk files (0 = none)
    :param plot: plot sediment flux (0 = off, 1 = on)
    :param statistics: output statistics of landscape (0 = off, 1 = on)
    :param name: directory name for output vtk files
    :return: sed_flux, time, wavelength
    """
    # Domain dimensions
    lx = model_space[0]
    ly = model_space[1]

    # Physical parameters
    kappa = physical_space[0]  # diffusion coefficient
    c = physical_space[1]      # discharge transport coefficient
    nexp = physical_space[2]   # discharge exponent
    alpha = physical_space[3]  # precipitation rate

    De = c * pow(alpha * ly, nexp) / kappa
    uamp = physical_space[4] * ly / kappa  # uplift
    dt = dt * kappa / (ly * ly)            # time step size

    sed_flux = np.zeros(num_steps)  # array to store sediment flux
    time = np.zeros(num_steps)

    # Define variational problem
    u = TrialFunction(V)
    v = TestFunction(V)
    f = Constant(uamp)

    # 0 = MFD node-to-node; 1 = MFD cell-to-cell;
    # 2 = SD node-to-node; 3 = SD cell-to-cell
    if flow == 0:
        q_n = mfd_nodenode(mesh, V, u_n, De, nexp)
    if flow == 1:
        q_n = mfd_cellcell(mesh, V, u_n, De, nexp)
    if flow == 2:
        q_n = sd_nodenode(mesh, V, u_n, De, nexp)
    if flow == 3:
        q_n = sd_cellcell(mesh, V, u_n, De, nexp)

    F = u * v * dx + dt * q_n * dot(grad(u), grad(v)) * dx \
        - (u_n + dt * f) * v * dx
    a, L = lhs(F), rhs(F)

    # Solution and sediment flux
    u = Function(V)
    q_s = Expression('u0 + displ - u1', u0=u_n, displ=Constant(uamp * dt),
                     u1=u, degree=2)

    # Iterate
    t = 0
    i = 0
    for n in range(num_steps):

        # This needs to become an option!
        # Double rainfall
        # if n == 501:
        #     alpha = 2
        #     De = c * pow(alpha * ly, nexp) / kappa

        # Update current time
        t += dt

        # Compute solution
        solve(a == L, u, bc)

        # Calculate sediment flux
        sed_flux[i] = assemble(q_s * dx(mesh))
        time[i] = t
        i += 1

        # Update previous solution
        u_n.assign(u)

        # Update flux: 0 = MFD node-to-node; 1 = MFD cell-to-cell;
        # 2 = SD node-to-node; 3 = SD cell-to-cell
        if flow == 0:
            q = mfd_nodenode(mesh, V, u_n, De, nexp)
        if flow == 1:
            q = mfd_cellcell(mesh, V, u_n, De, nexp)
        if flow == 2:
            q = sd_nodenode(mesh, V, u_n, De, nexp)
        if flow == 3:
            q = sd_cellcell(mesh, V, u_n, De, nexp)
        q_n.assign(q)

        # Output solutions
        if out_time != 0:
            if np.mod(n, out_time) == 0:
                filename = '%s/u_solution_%d.pvd' % (name, n)
                vtkfile = File(filename)
                vtkfile << u
                filename = '%s/q_solution_%d.pvd' % (name, n)
                vtkfile = File(filename)
                vtkfile << q

    # Post processing
    if plot != 0:
        plt.plot(time * 1e-6 * ly * ly / kappa, sed_flux / dt * kappa,
                 'k', linewidth=2)
        plt.xlabel('Time (Myr)')
        plt.ylabel('Sediment Flux (m^2/yr)')
        sedname = '%s/sed_flux_%d.svg' % (name, model_space[2])
        plt.savefig(sedname, format='svg')
        plt.clf()

    if out_time != 0:
        # Output last elevation
        filename = '%s/u_solution_%d_%d.pvd' % (name, model_space[2], n)
        vtkfile = File(filename)
        u.rename("elv", "elevation")
        vtkfile << u

        # Output last water flux
        filename = '%s/q_solution_%d_%d.pvd' % (name, model_space[2], n)
        vtkfile = File(filename)
        q.rename("flx", "flux")
        vtkfile << q

    # Calculate valley spacing from peak to peak in water flux
    tol = 0.001  # avoid hitting points outside the domain
    y = np.linspace(0 + tol, 1 - tol, 100)
    x = np.linspace(0.01, lx / ly - 0.01, 20)
    wavelength = np.zeros(len(x))
    if statistics != 0:
        i = 0
        for ix in x:
            points = [(ix, y_) for y_ in y]  # 2D points
            q_line = np.array([q(point) for point in points])
            indexes = peakutils.indexes(q_line, thres=0.05, min_dist=5)
            if len(indexes) > 1:
                wavelength[i] = sum(np.diff(y[indexes])) / (len(indexes) - 1)
            else:
                wavelength[i] = 0
            i += 1
        if plot != 0:
            plt.plot(y * 1e-3 * ly, q_line * kappa / ly, 'k', linewidth=2)
            plt.plot(y[indexes] * 1e-3 * ly, q_line[indexes] * kappa / ly,
                     '+r')
            plt.xlabel('Distance (km)')
            plt.ylabel('Water Flux (m/yr)')
            watername = '%s/water_flux_spacing_%d.svg' % (name,
                                                          model_space[2])
            plt.savefig(watername, format='svg')
            plt.clf()

    return sed_flux, time, wavelength
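# Hypothetical driver for solve_flem; every number below is a made-up
# example, and u_n, mesh, V, bc are assumed to exist already:
#
#   model_space    = [1.0e5, 1.0e5, 100]              # lx, ly, res
#   physical_space = [1.0, 1.0e-3, 2.0, 1.0, 1.0e-4]  # kappa, c, nexp, alpha, U
#   sed_flux, time, wavelength = solve_flem(
#       model_space, physical_space, 0, u_n, mesh, V, bc,
#       dt=1000.0, num_steps=1000, out_time=100,
#       plot=1, statistics=1, name='results')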
# There are two ways to do this: either via the project() method or the
# interpolate() method.  project() is very popular, but since we have a
# closed-form solution here we want to use interpolate() in order to
# recover the exact solution within machine-error precision.

# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(0)
F = u * v * dx + dt * dot(grad(u), grad(v)) * dx - (u_n + dt * f) * v * dx

# In general, we manually define the bilinear form a(:, :) containing the
# unknown solution u and the right-hand-side term L(:) with known terms.
# However, this might be difficult for complicated expressions, so we can
# rely on the following FEniCS syntax, which computes a and L automatically.
a, L = lhs(F), rhs(F)

# Create VTK file for saving solution
# !!!DEVELOPER WARNING!!! change the save location if you need to, but do
# not pollute the rest of the repo: the outputs directory is set to be
# ignored by git, so you won't be able to pull or push any file within!
# This file keeps a ledger of all files generated during execution, which
# can later be viewed with ParaView.
vtkfile = File('../../outputs/heat_gaussian/solution.pvd')

# Time-stepping
u = Function(V)
t = 0
for n in range(num_steps):
    t += dt               # Update current time
    solve(a == L, u, bc)  # Compute solution
    vtkfile << (u, t)     # Save to file

    # Keep track of the previous solution for the backward Euler scheme.
    # Note that we should not assign u_n = u, as that would make u_n and u
    # aliases of the same Function rather than independent copies.
    u_n.assign(u)
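# A minimal sketch contrasting the two options mentioned above: for an
# expression with a closed form, interpolate() evaluates it at the degrees
# of freedom, while project() solves an L2-projection and carries the usual
# discretization/solver error.  All names here (_mesh, _V, _e) are
# illustrative, separate from the script above.
from fenics import (UnitSquareMesh, FunctionSpace, Expression,
                    interpolate, project)

_mesh = UnitSquareMesh(8, 8)
_V = FunctionSpace(_mesh, 'P', 2)
_e = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]', degree=2)
u_i = interpolate(_e, _V)  # nodal-exact for this quadratic on P2 elements
u_p = project(_e, _V)      # L2 projection; exact only up to solver tolerance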
if problem is ProblemType.NEUMANN:
    # apply Neumann boundary condition on coupling interface,
    # modify weak form correspondingly
    F += precice.create_coupling_neumann_boundary_condition(v)

a, L = lhs(F), rhs(F)

# Time-stepping
u_np1 = Function(V)
u_np1.rename("Temperature", "")
t = 0

# reference solution at t=0
u_ref = interpolate(u_D, V)
u_ref.rename("reference", " ")

temperature_out = File("out/%s.pvd" % precice.get_solver_name())
ref_out = File("out/ref%s.pvd" % precice.get_solver_name())
error_out = File("out/error%s.pvd" % precice.get_solver_name())

# output solution and reference solution at t=0, n=0
n = 0
print('output u^%d and u_ref^%d' % (n, n))
temperature_out << u_n
ref_out << u_ref

error_total, error_pointwise = compute_errors(u_n, u_ref, V)
error_out << error_pointwise

# set t_1 = t_0 + dt, this gives u_D^1
# call dt(0) to evaluate the FEniCS Constant.  Todo: is there a better way?
u_D.t = t + dt(0)
def write_mesh_to_file(self, filename):
    # Output mesh
    File(filename) << self.new_mesh