def solve(tstep, w_, w_1, solvers, enable_EC, enable_NS, **namespace):
    """ Solve equations. """
    timer_outer = df.Timer("Solve system")
    if enable_EC:
        timer_inner = df.Timer("Solve subproblem EC")
        df.mpi_comm_world().barrier()
        solvers["EC"].solve()
        timer_inner.stop()

    if enable_NS:
        # Step 1: Predict u
        timer = df.Timer("NS: Predict velocity.")
        solvers["NSu"]["predict"].solve()
        timer.stop()

        # Step 2: Pressure correction
        timer = df.Timer("NS: Pressure correction")
        solvers["NSp"].solve()
        timer.stop()

        # Step 3: Velocity correction
        timer = df.Timer("NS: Velocity correction")
        solvers["NSu"]["correct"].solve()
        timer.stop()

    timer_outer.stop()

def pytest_generate_tests(metafunc):
    if 'dim' in metafunc.fixturenames:
        metafunc.parametrize("dim", [2, 3])

    # Set random seed
    new_seed = MPI.sum(mpi_comm_world(),
                       randint(0, 1e6)) / MPI.size(mpi_comm_world())
    seed(new_seed)

    # TODO: Make options to select all or subset of schemes for this factory,
    # copy from or look at regression conftest
    if 'scheme_factory' in metafunc.fixturenames:
        metafunc.parametrize("scheme_factory", create_scheme_factories())

    if 'D' in metafunc.fixturenames:
        metafunc.parametrize("D", [2, 3])

    if 'start_time' in metafunc.fixturenames:
        start_times = [0.0]
        if metafunc.config.option.all:
            start_times += list(0.8 * random(3))
        metafunc.parametrize("start_time", start_times)

    if 'end_time' in metafunc.fixturenames:
        end_times = [2.0]
        if metafunc.config.option.all:
            end_times += list(1.2 + 0.8 * random(3))
        metafunc.parametrize("end_time", end_times)

    if 'dt' in metafunc.fixturenames:
        dts = [0.1]
        if metafunc.config.option.all:
            dts += [0.05 + 0.05 * random(), 0.2 + 0.2 * random()]
        metafunc.parametrize("dt", dts)

def _clean_casedir(self):
    "Cleans out all files produced by cbcpost in the current casedir."
    MPI.barrier(mpi_comm_world())
    if on_master_process():
        if os.path.isdir(self.get_casedir()):
            try:
                playlog = self._fetch_playlog()
            except:
                playlog = None

            if playlog is not None:
                all_fields = []
                for v in playlog.values():
                    all_fields += v.get("fields", {}).keys()
                all_fields = list(set(all_fields))
                playlog.close()

                for field in all_fields:
                    rmtree(os.path.join(self.get_casedir(), field))

                for f in ["mesh.hdf5", "play.db", "params.txt",
                          "params.pickle"]:
                    if os.path.isfile(os.path.join(self.get_casedir(), f)):
                        os.remove(os.path.join(self.get_casedir(), f))
    MPI.barrier(mpi_comm_world())

def test_Probes_functionspace_3D(V3, dirpath):
    u0 = interpolate(Expression('x[0]', degree=1), V3)
    x = array([[0.5, 0.5, 0.5], [0.4, 0.4, 0.4], [0.3, 0.3, 0.3]])

    p = Probes(x.flatten(), V3)
    # Probe twice
    p(u0)
    p(u0)

    # Check both snapshots
    p0 = p.array(N=0)
    if MPI.rank(mpi_comm_world()) == 0:
        assert round(p0[0] - 0.5, 7) == 0
        assert round(p0[1] - 0.4, 7) == 0
        assert round(p0[2] - 0.3, 7) == 0
    p0 = p.array(N=1)
    if MPI.rank(mpi_comm_world()) == 0:
        assert round(p0[0] - 0.5, 7) == 0
        assert round(p0[1] - 0.4, 7) == 0
        assert round(p0[2] - 0.3, 7) == 0

    p0 = p.array(filename=dirpath + 'dump')
    if MPI.rank(mpi_comm_world()) == 0:
        assert round(p0[0, 0] - 0.5, 7) == 0
        assert round(p0[1, 1] - 0.4, 7) == 0
        assert round(p0[2, 1] - 0.3, 7) == 0

        p1 = load(dirpath + 'dump_all.npy')
        assert round(p1[0, 0, 0] - 0.5, 7) == 0
        assert round(p1[1, 0, 1] - 0.4, 7) == 0
        assert round(p1[2, 0, 1] - 0.3, 7) == 0

def test_Probes_vectorfunctionspace_2D(VF2, dirpath):
    u0 = interpolate(Expression(('x[0]', 'x[1]'), degree=1), VF2)
    x = array([[0.5, 0.5], [0.4, 0.4], [0.3, 0.3]])

    p = Probes(x.flatten(), VF2)
    # Probe twice
    p(u0)
    p(u0)

    # Check both snapshots
    p0 = p.array(N=0)
    if MPI.rank(mpi_comm_world()) == 0:
        assert round(p0[0, 0] - 0.5, 7) == 0
        assert round(p0[1, 1] - 0.4, 7) == 0
        assert round(p0[2, 1] - 0.3, 7) == 0
    p0 = p.array(N=1)
    if MPI.rank(mpi_comm_world()) == 0:
        assert round(p0[0, 0] - 0.5, 7) == 0
        assert round(p0[1, 0] - 0.4, 7) == 0
        assert round(p0[2, 1] - 0.3, 7) == 0

    p0 = p.array(filename=dirpath + 'dumpvector2D')
    if MPI.rank(mpi_comm_world()) == 0:
        assert round(p0[0, 0, 0] - 0.5, 7) == 0
        assert round(p0[1, 1, 1] - 0.4, 7) == 0
        assert round(p0[2, 0, 1] - 0.3, 7) == 0

        f = open(dirpath + 'dumpvector2D_all.probes', 'r')
        p1 = load(f)
        assert round(p1[0, 0, 0] - 0.5, 7) == 0
        assert round(p1[1, 1, 0] - 0.4, 7) == 0
        assert round(p1[2, 1, 1] - 0.3, 7) == 0

def __init__(self, V, options=None):
    # See if we have dofmap
    if not is_function_space(V):
        raise ValueError("V is not a function space.")

    # Only allow 2d and 3d meshes
    if V.mesh().geometry().dim() == 1:
        raise ValueError("Only 2d and 3d meshes are supported.")

    # Get MPI info
    try:
        from dolfin import mpi_comm_world
        self.mpi_size = MPI.size(mpi_comm_world())
        self.mpi_rank = MPI.rank(mpi_comm_world())
    except ImportError:
        self.mpi_size = MPI.num_processes()
        self.mpi_rank = MPI.process_number()

    # Analyze the space V
    self.V = V
    self.dofmaps = extract_dofmaps(self.V)
    self.bounds = bounds(self.V)

    # Rewrite default plotting options if they are provided by user
    self.options = {"colors": {"mesh_entities": "hsv", "mesh": "Blues"},
                    "xkcd": False,
                    "markersize": 40}
    if options is not None:
        self.options.update(options)

    # Keep track of the plots
    self.plots = []

def _correct_postprocessing(self, restart_timestep):
    "Removes data from casedir found at timestep > restart_timestep."
    playlog = self._pp.get_playlog('r')
    playlog_to_remove = {}
    for k, v in playlog.items():
        if int(k) >= restart_timestep:
            #playlog_to_remove[k] = playlog.pop(k)
            playlog_to_remove[k] = playlog[k]
    playlog.close()

    MPI.barrier(mpi_comm_world())
    if on_master_process():
        playlog = self._pp.get_playlog()
        [playlog.pop(k) for k in playlog_to_remove.keys()]
        playlog.close()
    MPI.barrier(mpi_comm_world())

    all_fields_to_clean = []
    for k, v in playlog_to_remove.items():
        if "fields" not in v:
            continue
        else:
            all_fields_to_clean += v["fields"].keys()
    all_fields_to_clean = list(set(all_fields_to_clean))
    for fieldname in all_fields_to_clean:
        self._clean_field(fieldname, restart_timestep)

def _clean_xdmf(self, fieldname, del_metadata):
    basename = os.path.join(self._pp.get_savedir(fieldname), fieldname)
    if os.path.isfile(basename + ".xdmf"):
        MPI.barrier(mpi_comm_world())

        i = 0
        while True:
            h5_filename = basename + "_RS" + str(i) + ".h5"
            if not os.path.isfile(h5_filename):
                break
            i = i + 1

        xdmf_filename = basename + "_RS" + str(i) + ".xdmf"
        MPI.barrier(mpi_comm_world())

        if on_master_process():
            os.rename(basename + ".h5", h5_filename)
            os.rename(basename + ".xdmf", xdmf_filename)

            f = open(xdmf_filename, 'r').read()
            new_f = open(xdmf_filename, 'w')
            new_f.write(f.replace(os.path.split(basename)[1] + ".h5",
                                  os.path.split(h5_filename)[1]))
            new_f.close()
    MPI.barrier(mpi_comm_world())

def create_function_from_metadata(pp, fieldname, metadata, saveformat):
    "Create a function from metadata"
    assert metadata['type'] == 'Function'

    # Load mesh
    if saveformat == 'hdf5':
        mesh = Mesh()
        hdf5file = HDF5File(mpi_comm_world(),
                            os.path.join(pp.get_savedir(fieldname),
                                         fieldname + '.hdf5'), 'r')
        hdf5file.read(mesh, "Mesh", False)
        del hdf5file
    elif saveformat == 'xml' or saveformat == 'xml.gz':
        mesh = Mesh()
        hdf5file = HDF5File(mpi_comm_world(),
                            os.path.join(pp.get_savedir(fieldname),
                                         "mesh.hdf5"), 'r')
        hdf5file.read(mesh, "Mesh", False)
        del hdf5file

    # Replace the loaded mesh if the same mesh has been loaded previously
    mesh = MeshPool(mesh, tolerance=0.0)

    shape = eval(metadata["element_value_shape"])
    degree = eval(metadata["element_degree"])
    family = eval(metadata["element_family"])

    # Get space from existing function spaces if mesh is the same
    spaces = SpacePool(mesh)
    space = spaces.get_custom_space(family, degree, shape)

    return Function(space, name=fieldname)

def solve(w_, t, dt, q_rhs, solvers, enable_EC, enable_NS,
          use_iterative_solvers, bcs, **namespace):
    """ Solve equations. """
    # Update the time-dependent source terms
    for qi in q_rhs.values():
        qi.t = t + dt

    # Update the time-dependent boundary conditions
    for boundary_name, bcs_fields in bcs.iteritems():
        for field, bc in bcs_fields.iteritems():
            if isinstance(bc.value, df.Expression):
                bc.value.t = t + dt

    timer_outer = df.Timer("Solve system")
    for subproblem, enable in zip(["EC", "NS"], [enable_EC, enable_NS]):
        if enable:
            timer_inner = df.Timer("Solve subproblem " + subproblem)
            df.mpi_comm_world().barrier()
            if subproblem == "NS" and use_iterative_solvers:
                solver, a, L, bcs = solvers[subproblem]
                A = df.assemble(a)
                b = df.assemble(L)
                for bc in bcs:
                    bc.apply(A)
                    bc.apply(b)
                solver.set_operator(A)
                solver.solve(w_["NS"].vector(), b)
            else:
                solvers[subproblem].solve()
            timer_inner.stop()

    timer_outer.stop()

def compute(self, get):
    u = get(self.valuename)

    if u is None:
        return None

    if not hasattr(self, "u"):
        self.before_first_compute(get)

    if LooseVersion(dolfin_version()) > LooseVersion("1.6.0"):
        rank = len(u.ufl_shape)
    else:
        rank = u.rank()

    if rank > 0:
        u = u.split()
        U = []
        for i, _u in enumerate(u):
            U.append(_interpolate(self.us[i], _u))
            #U.append(self._ft.interpolate_nonmatching_mesh(_u, self.us.function_space()))
        MPI.barrier(mpi_comm_world())
        self.assigner.assign(self.u, U)
    else:
        _interpolate(self.u, u)
        MPI.barrier(mpi_comm_world())
        # FIXME: This gives a PETSc-error (VecCopy). Unnecessary interpolation used instead.
        #self.u.assign(U)
        #self.u.assign(interpolate(U, self.u.function_space()))

    return self.u

def casedir(request):
    # Some code here copied from dolfin_utils dev:

    # Get directory name of test_foo.py file
    testfile = request.module.__file__
    testfiledir = os.path.dirname(os.path.abspath(testfile))

    # Construct name test_foo_casedir from name test_foo.py
    testfilename = os.path.basename(testfile)
    outputname = testfilename.replace(".py", "_casedir")

    # Get function name test_something from test_foo.py
    function = request.function.__name__

    # Join all of these to make a unique path for this test function
    basepath = os.path.join(testfiledir, outputname)
    casedir = os.path.join(basepath, function)

    # Unlike the dolfin_utils tempdir fixture, here we make sure the
    # directory is _deleted_:
    gc.collect()  # Workaround for possible dolfin deadlock
    MPI.barrier(mpi_comm_world())
    try:
        # Only on root node in parallel
        if MPI.size(mpi_comm_world()) == 1 or MPI.rank(mpi_comm_world()) == 0:
            shutil.rmtree(casedir)
    except:
        pass
    MPI.barrier(mpi_comm_world())

    return casedir

def test_Probes_vectorfunctionspace_2D():
    mesh = UnitSquareMesh(4, 4)
    V = VectorFunctionSpace(mesh, 'CG', 1)
    u0 = interpolate(Expression(('x[0]', 'x[1]'), degree=1), V)
    x = array([[0.5, 0.5], [0.4, 0.4], [0.3, 0.3]])

    p = Probes(x.flatten(), V)
    # Probe twice
    p(u0)
    p(u0)

    # Check both snapshots
    p0 = p.array(N=0)
    if MPI.rank(mpi_comm_world()) == 0:
        nose.tools.assert_almost_equal(p0[0, 0], 0.5)
        nose.tools.assert_almost_equal(p0[1, 1], 0.4)
        nose.tools.assert_almost_equal(p0[2, 1], 0.3)
    p0 = p.array(N=1)
    if MPI.rank(mpi_comm_world()) == 0:
        nose.tools.assert_almost_equal(p0[0, 0], 0.5)
        nose.tools.assert_almost_equal(p0[1, 0], 0.4)
        nose.tools.assert_almost_equal(p0[2, 1], 0.3)

    p0 = p.array(filename='dumpvector2D')
    if MPI.rank(mpi_comm_world()) == 0:
        nose.tools.assert_almost_equal(p0[0, 0, 0], 0.5)
        nose.tools.assert_almost_equal(p0[1, 1, 1], 0.4)
        nose.tools.assert_almost_equal(p0[2, 0, 1], 0.3)

        f = open('dumpvector2D_all.probes', 'r')
        p1 = load(f)
        nose.tools.assert_almost_equal(p1[0, 0, 0], 0.5)
        nose.tools.assert_almost_equal(p1[1, 1, 0], 0.4)
        nose.tools.assert_almost_equal(p1[2, 1, 1], 0.3)

def gc_barrier():
    """Internal utility to easily switch on and off calls to gc.collect()
    and MPI.barrier(world) in all fixtures here.

    Helps make the tests deterministic when debugging.
    """
    gc.collect()
    if MPI.size(mpi_comm_world()) > 1:
        MPI.barrier(mpi_comm_world())

def _clean_hdf5(self, fieldname, del_metadata):
    delete_from_hdf5_file = '''
    namespace dolfin {
        #include <hdf5.h>

        void delete_from_hdf5_file(const MPI_Comm comm,
                                   const std::string hdf5_filename,
                                   const std::string dataset,
                                   const bool use_mpiio)
        {
            //const hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
            // Open existing file for append
            //hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, plist_id);
            hid_t hdf5_file_id = HDF5Interface::open_file(comm, hdf5_filename,
                                                          "a", use_mpiio);

            H5Ldelete(hdf5_file_id, dataset.c_str(), H5P_DEFAULT);
            HDF5Interface::close_file(hdf5_file_id);
        }
    }
    '''
    cpp_module = compile_extension_module(
        delete_from_hdf5_file,
        additional_system_headers=["dolfin/io/HDF5Interface.h"])

    hdf5filename = os.path.join(self._pp.get_savedir(fieldname),
                                fieldname + '.hdf5')
    if not os.path.isfile(hdf5filename):
        return

    for k, v in del_metadata.items():
        if 'hdf5' not in v:
            continue
        else:
            cpp_module.delete_from_hdf5_file(
                mpi_comm_world(), hdf5filename, v['hdf5']['dataset'],
                MPI.size(mpi_comm_world()) > 1)

    hdf5tmpfilename = os.path.join(self._pp.get_savedir(fieldname),
                                   fieldname + '_tmp.hdf5')
    MPI.barrier(mpi_comm_world())
    if on_master_process():
        # status, result = getstatusoutput("h5repack -V")
        status, result = -1, -1
        if status != 0:
            cbc_warning("Unable to run h5repack. Will not repack hdf5-files "
                        "before replay, which may cause bloated hdf5-files.")
        else:
            subprocess.call("h5repack %s %s" % (hdf5filename,
                                                hdf5tmpfilename),
                            shell=True)
            os.remove(hdf5filename)
            os.rename(hdf5tmpfilename, hdf5filename)
    MPI.barrier(mpi_comm_world())

def solve(w_, solvers, enable_PF, enable_EC, enable_NS, **namespace):
    """ Solve equations. """
    timer_outer = df.Timer("Solve system")
    for subproblem, enable in zip(["PF", "EC", "NS"],
                                  [enable_PF, enable_EC, enable_NS]):
        if enable:
            timer_inner = df.Timer("Solve subproblem " + subproblem)
            df.mpi_comm_world().barrier()
            solvers[subproblem].solve()
            timer_inner.stop()
    timer_outer.stop()

def check_if_reset_statistics(folder):
    """Check if user has put a file named resetoasis in folder."""
    found = 0
    if 'resetoasis' in listdir(folder):
        found = 1

    collective = MPI.sum(mpi_comm_world(), found)
    if collective > 0:
        if MPI.rank(mpi_comm_world()) == 0:
            remove(path.join(folder, 'resetoasis'))
            info_red('resetoasis Found!')
        return True
    else:
        return False

def check_if_kill(folder):
    """Check if user has put a file named killoasis in folder."""
    found = 0
    if 'killoasis' in listdir(folder):
        found = 1

    collective = MPI.sum(mpi_comm_world(), found)
    if collective > 0:
        if MPI.rank(mpi_comm_world()) == 0:
            remove(path.join(folder, 'killoasis'))
            info_red('killoasis Found! Stopping simulations cleanly...')
        return True
    else:
        return False

def backup_playlog(self):
    "Create a backup of the playlog"
    if MPI.rank(mpi_comm_world()) == 0:
        casedir = self.postproc.get_casedir()
        playlog_file = os.path.join(casedir, "play.db")

        i = 0
        backup_file = playlog_file + ".bak" + str(i)
        while os.path.isfile(backup_file):
            i += 1
            backup_file = playlog_file + ".bak" + str(i)
        os.system("cp %s %s" % (playlog_file, backup_file))
    MPI.barrier(mpi_comm_world())

def broadcast(array, from_process):
    "Broadcast array to all processes"
    if not hasattr(broadcast, "cpp_module"):
        cpp_code = """
        namespace dolfin {
            std::vector<double> broadcast(const MPI_Comm mpi_comm,
                                          const Array<double>& inarray,
                                          int from_process)
            {
                int this_process = dolfin::MPI::rank(mpi_comm);

                std::vector<double> outvector(inarray.size());
                if (this_process == from_process)
                {
                    for (int i = 0; i < inarray.size(); i++)
                    {
                        outvector[i] = inarray[i];
                    }
                }
                dolfin::MPI::barrier(mpi_comm);
                dolfin::MPI::broadcast(mpi_comm, outvector, from_process);

                return outvector;
            }
        }
        """
        cpp_module = df.compile_extension_module(
            cpp_code,
            additional_system_headers=["dolfin/common/MPI.h"],
        )
        broadcast.cpp_module = cpp_module

    cpp_module = broadcast.cpp_module

    if df.MPI.rank(df.mpi_comm_world()) == from_process:
        array = np.array(array, dtype=np.float_)
        shape = array.shape
        shape = np.array(shape, dtype=np.float_)
    else:
        array = np.array([], dtype=np.float_)
        shape = np.array([], dtype=np.float_)

    shape = cpp_module.broadcast(df.mpi_comm_world(), shape, from_process)
    array = array.flatten()

    out_array = cpp_module.broadcast(df.mpi_comm_world(), array, from_process)
    if len(shape) > 1:
        out_array = out_array.reshape(*shape)

    return out_array

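# A minimal usage sketch for broadcast() above -- an illustrative addition,
# not part of the original source. It assumes dolfin is imported as df and
# numpy as np (as in the helper itself) and that the script runs under MPI.
if df.MPI.rank(df.mpi_comm_world()) == 0:
    data = np.array([[1.0, 2.0], [3.0, 4.0]])
else:
    data = np.array([])  # contents on non-root ranks are ignored
data = broadcast(data, 0)  # afterwards every rank holds the 2x2 array
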
def _clean_files(self, fieldname, del_metadata):
    for k, v in del_metadata.items():
        for i in v.values():
            MPI.barrier(mpi_comm_world())
            try:
                i["filename"]
            except:
                continue

            fullpath = os.path.join(self._pp.get_savedir(fieldname),
                                    i['filename'])
            if on_master_process():
                os.remove(fullpath)
            MPI.barrier(mpi_comm_world())

def __init__(self, Ys, fs, init_t=0.0, init_dt=1.0, dt_max=2.0, tol=1e-3,
             verbose=False):
    # Fenics functions for each of the unknowns
    self.Ys = Ys
    # The number of unknowns
    self.num_unknowns = len(self.Ys)
    # Slope functions corresponding to each unknown
    self.fs = fs
    # Current time
    self.t = init_t
    # Initial time step
    self.dt = init_dt
    # Maximum time step
    self.dt_max = dt_max
    # Tolerance
    self.tol = tol
    # List of lists. Each sublist stores the slope functions evaluated at
    # the last 5 solutions for the ith unknown
    self.f_ns = [[] for i in range(self.num_unknowns)]
    # List of solution vectors at last time step
    self.prev_ys = None
    # Flag for first step
    self.first = True
    # Output stuff?
    self.verbose = verbose
    # Store the last five solution times
    self.ts = []
    # An object for computing integrals of basis functions for Lagrange
    # polynomials. This is used to determine the coefficients for the AB
    # and AM methods given the last five solution times
    self.l_int = LagrangeInt()
    # Process rank
    self.MPI_rank = MPI.rank(mpi_comm_world())

def load(scale, inner_size, curve_gen):
    '''Load meshes for 3d-2d-1d problems'''
    h5_file = generate(scale, inner_size)

    comm = df.mpi_comm_world()
    h5 = df.HDF5File(comm, h5_file, 'r')
    mesh = df.Mesh()
    h5.read(mesh, 'mesh', False)

    surfaces = df.MeshFunction('size_t', mesh, mesh.topology().dim() - 1)
    h5.read(surfaces, 'facet')

    volumes = df.MeshFunction('size_t', mesh, mesh.topology().dim())
    h5.read(volumes, 'physical')

    # The 3d mesh is just mesh
    mesh_3d = mesh

    # Mesh for 2d is EmbeddedMesh using tags (1, 2, 3, 4)
    mesh_2d = EmbeddedMesh(surfaces, (1, 2, 3, 4))

    # 1d mesh from tagged facets of 2d
    facet_f = curve_gen(mesh_2d)
    mesh_1d = EmbeddedMesh(facet_f, 1)

    return mesh_3d, mesh_2d, mesh_1d

def test_convert_diffpack_2d(self):
    from dolfin import Mesh, MPI, MeshFunction, mpi_comm_world

    if MPI.size(mpi_comm_world()) != 1:
        return

    fname = os.path.join("data", "diffpack_tri")
    dfname = fname + ".xml"

    # Read triangle file and convert to a dolfin xml mesh file
    meshconvert.diffpack2xml(fname + ".grid", dfname)

    # Read in dolfin mesh and check number of cells and vertices
    mesh = Mesh(dfname)

    self.assertEqual(mesh.num_vertices(), 41)
    self.assertEqual(mesh.num_cells(), 64)
    self.assertEqual(len(mesh.domains().markers(2)), 64)

    mf_basename = dfname.replace(".xml", "_marker_%d.xml")
    for marker, num in [(1, 10), (2, 5), (3, 5)]:
        mf_name = mf_basename % marker
        mf = MeshFunction("size_t", mesh, mf_name)
        self.assertEqual(sum(mf.array() == marker), num)
        os.unlink(mf_name)

    # Clean up
    os.unlink(dfname)

def save_tstep_solution_h5(tstep, q_, u_, newfolder, tstepfiles,
                           constrained_domain, output_timeseries_as_vector,
                           u_components, AssignedVectorFunction,
                           scalar_components, NS_parameters):
    """Store solution on current timestep to XDMF file."""
    timefolder = path.join(newfolder, 'Timeseries')
    if output_timeseries_as_vector:
        # project or store velocity to vector function space
        for comp, tstepfile in tstepfiles.iteritems():
            if comp == "u":
                V = q_['u0'].function_space()
                # First time around create vector function and assigners
                if not hasattr(tstepfile, 'uv'):
                    tstepfile.uv = AssignedVectorFunction(u_)

                # Assign solution to vector
                tstepfile.uv()

                # Store solution vector
                tstepfile << (tstepfile.uv, float(tstep))

            elif comp in q_:
                tstepfile << (q_[comp], float(tstep))

            else:
                tstepfile << (tstepfile.function, float(tstep))
    else:
        for comp, tstepfile in tstepfiles.iteritems():
            tstepfile << (q_[comp], float(tstep))

    if MPI.rank(mpi_comm_world()) == 0:
        if not path.exists(path.join(timefolder, "params.dat")):
            f = open(path.join(timefolder, 'params.dat'), 'w')
            cPickle.dump(NS_parameters, f)

def postprocessor(request):
    r_dens = 1.0e-2
    r_visc = 1.0e-0
    rank = df.MPI.rank(df.mpi_comm_world())
    scriptdir = os.path.dirname(os.path.realpath(__file__))
    outdir = os.path.join(scriptdir, __name__)
    proc = Postprocessor(r_dens, r_visc, outdir)

    # Decide what should be plotted
    proc.register_fixed_variables((
        ("r_dens", r_dens),
        ("r_visc", r_visc),
    ))

    # Dump empty postprocessor into a file for later use
    filename = "proc_{}.pickle".format(proc.basename)
    proc.dump_to_file(rank, filename)

    # Create plots if plotting is enabled, otherwise do nothing
    if not os.environ.get("DOLFIN_NOPLOT"):
        proc.create_plots(rank)
        #pyplot.show(); exit()  # uncomment to explore current layout of plots

    def fin():
        print("\nteardown postprocessor")

    request.addfinalizer(fin)
    return proc

def csr_to_petsc4py(csr_matrix):
    '''Convert Scipy's csr matrix to PETSc matrix.'''
    assert MPI.size(mpi_comm_world()) == 1, \
        'csr_to_petsc4py assumes single process'

    if isinstance(csr_matrix, list):
        return [csr_to_petsc4py(mat) for mat in csr_matrix]
    # None is zero block
    elif csr_matrix is None:
        return None
    else:
        A = csr_matrix
        csr = (A.indptr, A.indices, A.data)

        # Convert to PETSc
        n_rows, n_cols = A.shape
        A_petsc = PETSc.Mat().createAIJ(size=A.shape, csr=csr)

        # Now set local to global mapping for indices. This is supposed to
        # run in serial only so these are identities.
        row_lgmap = PETSc.LGMap().create(list(arange(n_rows, dtype=int)))
        if not n_rows == n_cols:
            col_lgmap = PETSc.LGMap().create(list(arange(n_cols, dtype=int)))
        else:
            col_lgmap = row_lgmap

        A_petsc.setLGMap(row_lgmap, col_lgmap)
        A_petsc.assemble()

        return A_petsc

def create_PETScMatrix(shape, mpi_comm=None, rows=None, cols=None,
                       values=None):
    """ Create and set up PETScMatrix of arbitrary size using petsc4py. """
    if df.has_petsc4py():
        from petsc4py import PETSc
    else:
        print('Dolfin is not compiled with petsc4py! '
              'Cannot create PETScMatrix of arbitrary size.')
        exit()

    if mpi_comm is None:
        mpi_comm = df.mpi_comm_world()

    mat = PETSc.Mat()
    mat.create(mpi_comm)
    mat.setSizes(shape)
    mat.setType('aij')
    mat.setUp()
    mat.setValues(rows, cols, values)
    mat.assemble()

    return mat

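# A minimal usage sketch for create_PETScMatrix() -- an illustrative addition,
# not from the original source. `values` is the flattened, row-major dense
# block with one entry per (row, col) pair.
A = create_PETScMatrix((2, 2), rows=[0, 1], cols=[0, 1],
                       values=[1.0, 2.0, 3.0, 4.0])
print(A.getValues([0, 1], [0, 1]))  # [[1. 2.], [3. 4.]]
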
def mat_to_csr(mat):
    '''Convert any dolfin.Matrix to csr matrix in scipy.'''
    assert MPI.size(mpi_comm_world()) == 1, 'mat_to_csr assumes single process'

    # We can handle blocks
    if isinstance(mat, (list, ndarray, block_mat)):
        return [mat_to_csr(mat_) for mat_ in mat]
    # A number block can only be zero, and for bmat these are None
    elif isinstance(mat, (int, float)):
        assert abs(mat) < 1E-15
        return None
    # Actual matrix
    else:
        rows = [0]
        cols = []
        values = []
        for row in range(mat.size(0)):
            cols_, values_ = mat.getrow(row)
            rows.append(len(cols_) + rows[-1])
            cols.extend(cols_)
            values.extend(values_)

        shape = mat.size(0), mat.size(1)

        return csr_matrix((asarray(values, dtype='float'),
                           asarray(cols, dtype='int'),
                           asarray(rows, dtype='int')),
                          shape=shape)

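# An illustrative usage sketch for mat_to_csr() -- added here, assuming a
# serial run and a standard `from dolfin import *` environment as in the
# helper: convert an assembled stiffness matrix and inspect its sparsity.
mesh = UnitSquareMesh(4, 4)
V = FunctionSpace(mesh, 'CG', 1)
u, v = TrialFunction(V), TestFunction(V)
A = assemble(inner(grad(u), grad(v)) * dx)
A_csr = mat_to_csr(A)
print(A_csr.shape, A_csr.nnz)
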
def save_to_h5(fname, mesh, *args, **kwargs):
    with df.HDF5File(df.mpi_comm_world(), fname, "w") as h5file:
        h5file.write(mesh, "mesh")
        for i in args:
            h5file.write(i, "/".join([i.label(), i.name()]))

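# An illustrative usage sketch for save_to_h5() -- added, assuming dolfin is
# imported as df: each extra argument is stored under "<label>/<name>".
mesh = df.UnitSquareMesh(8, 8)
V = df.FunctionSpace(mesh, 'CG', 1)
f = df.Function(V)
f.rename('pressure', 'fields')  # name and label determine the dataset path
save_to_h5('out.h5', mesh, f)   # writes "mesh" and "fields/pressure"
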
def __init__(self, V, sigma=1.25, s=0.0625, mean=None, rel_tol=1e-10,
             max_iter=100, **kwargs):
    self.V = V
    self.dim = V.dim()
    self.dof_coords = get_dof_coords(V)
    self.sigma = sigma
    self.s = s
    self.mean = mean
    self.mpi_comm = kwargs['mpi_comm'] if 'mpi_comm' in kwargs \
        else df.mpi_comm_world()

    # mass matrix and its inverse
    M_form = df.inner(df.TrialFunction(V), df.TestFunction(V)) * df.dx
    self.M = df.PETScMatrix()
    df.assemble(M_form, tensor=self.M)
    self.Msolver = df.PETScKrylovSolver("cg", "jacobi")
    self.Msolver.set_operator(self.M)
    self.Msolver.parameters["maximum_iterations"] = max_iter
    self.Msolver.parameters["relative_tolerance"] = rel_tol
    self.Msolver.parameters["error_on_nonconvergence"] = True
    self.Msolver.parameters["nonzero_initial_guess"] = False

    # square root of mass matrix
    self.rtM = self._get_rtmass()

    # kernel matrix and its square root
    self.K = self._get_ker()
    self.rtK = _get_sqrtm(self.K, 'K')

    # set solvers
    for op in ['K']:
        operator = getattr(self, op)
        solver = self._set_solver(operator, op)
        setattr(self, op + 'solver', solver)

    if mean is None:
        self.mean = df.Vector()
        self.init_vector(self.mean, 0)

def revert_casedir(self):
    "Use backup playlog to remove all (newly introduced) fields."
    if MPI.rank(mpi_comm_world()) == 0:
        current_playlog = dict(self.postproc.get_playlog('r'))

        casedir = self.postproc.get_casedir()
        playlog_file = os.path.join(casedir, "play.db")

        i = 0
        backup_file = playlog_file + ".bak" + str(i)
        while os.path.isfile(backup_file):
            i += 1
            backup_file = playlog_file + ".bak" + str(i)
        i -= 1
        backup_file = playlog_file + ".bak" + str(i)

        os.system("cp %s %s" % (backup_file, playlog_file))
        os.system("rm %s" % backup_file)

        backup_playlog = self.postproc.get_playlog('r')
        assert set(current_playlog.keys()) == set(backup_playlog.keys())

        current_fields = set()
        backup_fields = set()
        keys = [k for k in current_playlog.keys() if k.isdigit()]
        for k in keys:
            current_fields.update(current_playlog[k].get("fields",
                                                         dict()).keys())
            backup_fields.update(backup_playlog[k].get("fields",
                                                       dict()).keys())

        fields_to_remove = current_fields - backup_fields
        for field in fields_to_remove:
            os.system("rm -r %s/%s" % (casedir, field))

def setUp(self):
    np.random.seed(1)
    #self.dim = np.random.randint(1, high=5)
    self.dim = 1
    self.means = np.random.uniform(-10, high=10., size=self.dim)
    self.chol = np.tril(
        np.random.uniform(1, high=10, size=(self.dim, self.dim)))
    self.cov = np.dot(self.chol, self.chol.T)
    self.precision = np.linalg.inv(self.cov)

    mesh = dl.RectangleMesh(dl.mpi_comm_world(), dl.Point(0.0, 0.0),
                            dl.Point(3, 2), 6, 4)

    if self.dim > 1:
        self.Rn = dl.VectorFunctionSpace(mesh, "R", 0, dim=self.dim)
    else:
        self.Rn = dl.FunctionSpace(mesh, "R", 0)

    self.test_prior = GaussianRealPrior(self.Rn, self.cov)

    m = dl.Function(self.Rn)
    m.vector().zero()
    m.vector().set_local(self.means)
    self.test_prior.mean.axpy(1., m.vector())

def save_fields(fields, h5name):
    comm = dolfin.mpi_comm_world()
    fgroup = "microstructure"

    try:
        check_h5group(h5name, fgroup, delete=True, comm=comm)
    except RuntimeError:
        pass

    with dolfin.HDF5File(comm, h5name, 'a') as h5file:
        names = []
        for field in fields:
            label = field.label() \
                if field.label().rfind('a Function') == -1 else ""
            name = "_".join(filter(None, [str(field), label]))
            fsubgroup = "{}/{}".format(fgroup, name)
            h5file.write(field, fsubgroup)
            h5file.attributes(fsubgroup)['name'] = field.name()
            names.append(name)

        elm = field.function_space().ufl_element()
        family, degree = elm.family(), elm.degree()
        fspace = '{}_{}'.format(family, degree)
        h5file.attributes(fgroup)['space'] = fspace
        h5file.attributes(fgroup)['names'] = ":".join(names)

def compute(self, get):
    # Get field to probe
    u = get(self.valuename)

    # Evaluate in all points
    self.probes(u)

    # Fetch array with probe values at this timestep
    #results = self.probes.array(self._probetimestep)
    results = self.probes.array()
    if MPI.rank(mpi_comm_world()) != 0:
        results = np.array([], dtype=np.float_)
    if results.shape == ():
        results = results.reshape(1,)

    # Broadcast array to all processes
    if self.params.broadcast_results:
        results = broadcast(results, 0)

    self.probes.clear()

    # Return as list to store without 'array(...)' text.
    if u.value_rank() > 0:
        if len(results.shape) == 1:
            return list(results)
        return list(tuple(res) for res in results)
    elif results.size == 1:
        return float(results)
    else:
        return list(results)

def distribution(number):
    "Get distribution of number on all processes"
    if not hasattr(distribution, "cpp_module"):
        cpp_code = """
        namespace dolfin {
            std::vector<unsigned int> distribution(const MPI_Comm mpi_comm,
                                                   int number)
            {
                // Variables to help in synchronization
                int num_processes = dolfin::MPI::size(mpi_comm);
                int this_process = dolfin::MPI::rank(mpi_comm);

                std::vector<uint> distribution(num_processes);

                for (uint i = 0; i < num_processes; i++)
                {
                    if (i == this_process)
                    {
                        distribution[i] = number;
                    }
                    dolfin::MPI::barrier(mpi_comm);
                    dolfin::MPI::broadcast(mpi_comm, distribution, i);
                }
                return distribution;
            }
        }
        """
        distribution.cpp_module = df.compile_extension_module(
            cpp_code,
            additional_system_headers=["dolfin/common/MPI.h"])

    cpp_module = distribution.cpp_module
    return cpp_module.distribution(df.mpi_comm_world(), number)

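# An illustrative usage sketch for distribution() -- added, assuming dolfin is
# imported as df and the script is launched under MPI: every rank receives the
# full vector of per-rank values, here the number of locally owned cells.
mesh = df.UnitSquareMesh(16, 16)
cells_per_rank = distribution(mesh.num_cells())
if df.MPI.rank(df.mpi_comm_world()) == 0:
    print(list(cells_per_rank))  # e.g. [256, 256] on two processes
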
def error(self, ys1, ys2):
    errors = []
    for i in range(self.num_unknowns):
        err = MPI.max(mpi_comm_world(), (abs(ys2[i] - ys1[i])).max())
        errors.append(err)
    return errors

def create_initial_folders(folder, restart_folder, sys_comp, tstep, info_red,
                           scalar_components, output_timeseries_as_vector,
                           **NS_namespace):
    """Create necessary folders."""
    info_red("Creating initial folders")

    # To avoid writing over old data create a new folder for each run
    if MPI.rank(mpi_comm_world()) == 0:
        try:
            makedirs(folder)
        except OSError:
            pass

    MPI.barrier(mpi_comm_world())
    newfolder = path.join(folder, 'data')
    if restart_folder:
        newfolder = path.join(newfolder, restart_folder.split('/')[-2])
    else:
        if not path.exists(newfolder):
            newfolder = path.join(newfolder, '1')
        else:
            previous = listdir(newfolder)
            previous = max(map(eval, previous)) if previous else 0
            newfolder = path.join(newfolder, str(previous + 1))

    MPI.barrier(mpi_comm_world())
    if MPI.rank(mpi_comm_world()) == 0:
        if not restart_folder:
            #makedirs(path.join(newfolder, "Voluviz"))
            #makedirs(path.join(newfolder, "Stats"))
            #makedirs(path.join(newfolder, "VTK"))
            makedirs(path.join(newfolder, "Timeseries"))
            makedirs(path.join(newfolder, "Checkpoint"))

    tstepfolder = path.join(newfolder, "Timeseries")
    tstepfiles = {}
    comps = sys_comp
    if output_timeseries_as_vector:
        comps = ['p', 'u'] + scalar_components

    for ui in comps:
        tstepfiles[ui] = XDMFFile(mpi_comm_world(),
                                  path.join(tstepfolder,
                                            ui + '_from_tstep_{}.xdmf'.format(tstep)))
        tstepfiles[ui].parameters["rewrite_function_mesh"] = False
        tstepfiles[ui].parameters["flush_output"] = True

    return newfolder, tstepfiles

def _create_tempdir(request):
    # Get directory name of test_foo.py file
    testfile = request.module.__file__
    testfiledir = os.path.dirname(os.path.abspath(testfile))

    # Construct name test_foo_tempdir from name test_foo.py
    testfilename = os.path.basename(testfile)
    outputname = testfilename.replace(".py", "_tempdir")

    # Get function name test_something from test_foo.py
    function = request.function.__name__

    # Join all of these to make a unique path for this test function
    basepath = os.path.join(testfiledir, outputname)
    path = os.path.join(basepath, function)

    # Add a sequence number to avoid collisions when tests are
    # otherwise parameterized
    if MPI.rank(mpi_comm_world()) == 0:
        _create_tempdir._sequencenumber[path] += 1
        sequencenumber = _create_tempdir._sequencenumber[path]
        sequencenumber = MPI.sum(mpi_comm_world(), sequencenumber)
    else:
        sequencenumber = MPI.sum(mpi_comm_world(), 0)
    path += "__" + str(sequencenumber)

    # Delete and re-create directory on root node
    if MPI.rank(mpi_comm_world()) == 0:
        # First time visiting this basepath, delete the old and create a new
        if basepath not in _create_tempdir._basepaths:
            _create_tempdir._basepaths.add(basepath)
            #if os.path.exists(basepath):
            #    shutil.rmtree(basepath)
            # Make sure we have the base path test_foo_tempdir
            # for this test_foo.py file
            if not os.path.exists(basepath):
                os.mkdir(basepath)

        # Delete path from old test run
        #if os.path.exists(path):
        #    shutil.rmtree(path)
        # Make sure we have the path for this test execution:
        # e.g. test_foo_tempdir/test_something__3
        if not os.path.exists(path):
            os.mkdir(path)
    MPI.barrier(mpi_comm_world())

    return path

def GenerateH5(self, force=False):
    h5FilePath = self.GetFilePath('.h5')
    if not os.path.isfile(h5FilePath) or force:
        h5File = dolfin.HDF5File(dolfin.mpi_comm_world(), h5FilePath, 'w')
        mesh, bndry = self.LoadXMLMesh(force)
        h5File.write(mesh, 'mesh')
        h5File.write(bndry, 'bndry')

def test_GaussDivergence(mesh):
    dim = mesh.topology().dim()
    expr = ["%s*x[%s]" % (dim, i) for i in range(dim)]
    V = VectorFunctionSpace(mesh, "CG", 1)
    u = interpolate(Expression(tuple(expr), degree=1), V)
    divu = gauss_divergence(u)
    DIVU = divu.vector().array()
    point_0 = all(abs(DIVU - dim * dim) < 1e-13)
    if MPI.rank(mpi_comm_world()) == 0:
        assert point_0

def test_Probes_functionspace_2D(V2):
    u0 = interpolate(Expression('x[0]', degree=1), V2)
    x = array([[0.5, 0.5], [0.4, 0.4], [0.3, 0.3]])

    p = Probes(x.flatten(), V2)
    # Probe twice
    p(u0)
    p(u0)

    # Check both snapshots
    p0 = p.array(N=0)
    if MPI.rank(mpi_comm_world()) == 0:
        assert round(p0[0] - 0.5, 7) == 0
        assert round(p0[1] - 0.4, 7) == 0
        assert round(p0[2] - 0.3, 7) == 0
    p0 = p.array(N=1)
    if MPI.rank(mpi_comm_world()) == 0:
        assert round(p0[0] - 0.5, 7) == 0
        assert round(p0[1] - 0.4, 7) == 0
        assert round(p0[2] - 0.3, 7) == 0

def _compute_errors(problem, mesh_sizes):
    mesh_generator, solution, f, cell_type = problem()

    max_degree = 20
    if solution['degree'] > max_degree:
        warnings.warn(('Expression degree (%r) > maximum degree (%d). '
                       'Truncating.') % (solution['degree'], max_degree))
        degree = max_degree
    else:
        degree = solution['degree']

    sol = Expression((smp.printing.ccode(solution['value'][0]),
                      smp.printing.ccode(solution['value'][1])),
                     t=0.0,
                     degree=degree,
                     cell=cell_type)

    errors = numpy.empty(len(mesh_sizes))
    hmax = numpy.empty(len(mesh_sizes))
    for k, mesh_size in enumerate(mesh_sizes):
        mesh, dx, ds = mesh_generator(mesh_size)
        hmax[k] = MPI.max(mpi_comm_world(), mesh.hmax())
        V = FunctionSpace(mesh, 'CG', 1)
        # TODO don't hardcode Mu, Sigma, ...
        phi_approx = mcyl.solve_maxwell(V, dx,
                                        Mu={0: 1.0},
                                        Sigma={0: 1.0},
                                        omega=1.0,
                                        f_list=[{0: f['value']}],
                                        convections={},
                                        tol=1.0e-12,
                                        bcs=None,
                                        compute_residuals=False,
                                        verbose=False)
        #plot(sol0, mesh=mesh, title='sol')
        #plot(phi_approx[0][0], title='approx')
        ##plot(fenics_sol - theta_approx, title='diff')
        #interactive()
        #exit()
        errors[k] = errornorm(sol, phi_approx[0])

    # Compute the numerical order of convergence.
    order = numpy.empty(len(errors) - 1)
    for i in range(len(errors) - 1):
        order[i] = numpy.log(errors[i + 1] / errors[i]) \
            / numpy.log(hmax[i + 1] / hmax[i])

    return errors, order, hmax

def write_steady_file(self, output_file_name):
    output_file = HDF5File(mpi_comm_world(), output_file_name + ".hdf5", "w")

    ### Write variables
    output_file.write(self.mesh, "mesh")
    output_file.write(self.B, "B")
    output_file.write(self.H, "H")
    output_file.write(self.m, "m_0")
    output_file.write(self.u_b, "u_b_0")
    output_file.write(self.h, "h_0")
    output_file.write(self.boundaries, "boundaries")
    output_file.write(self.k, "k_0")

def __init__(self, S, mpi_comm=mpi_comm_world(), init_vector=None):
    self.S = S
    self.tmp = Vector(mpi_comm)

    self.my_init_vector = init_vector
    if self.my_init_vector is None:
        if hasattr(self.S, "init_vector"):
            self.my_init_vector = self.S.init_vector
        elif hasattr(self.S, "operator"):
            self.my_init_vector = self.S.operator().init_vector
        elif hasattr(self.S, "get_operator"):
            self.my_init_vector = self.S.get_operator().init_vector

def test_StatisticsProbes_vector_3D(VF3):
    u0 = interpolate(Expression(('x[0]', 'x[1]', 'x[2]'), degree=1), VF3)
    x = array([[0.5, 0.25, 0.25], [0.4, 0.4, 0.4], [0.3, 0.3, 0.3]])

    probes = StatisticsProbes(x.flatten(), VF3)
    for i in range(5):
        probes(u0)

    p = probes.array()
    if MPI.rank(mpi_comm_world()) == 0:
        assert round(p[0, 0] - 2.5, 7) == 0
        assert round(p[0, 4] - 0.3125, 7) == 0

# Note: the docstring below says the matrix is "filled in the with block";
# the yield-based body therefore relies on contextlib.contextmanager, which
# is assumed to have been imported at module level.
@contextmanager
def petsc_serial_matrix(test_space, trial_space, nnz=None):
    '''
    PETsc.Mat from trial_space to test_space to be filled in the with
    block. The spaces can be represented by integers meaning generic R^n.
    '''
    # Decide local to global map
    # For our custom case everything is serial
    if is_number(test_space) and is_number(trial_space):
        comm = mpi_comm_world().tompi4py()
        # Local same as global
        sizes = [[test_space, test_space], [trial_space, trial_space]]

        row_map = PETSc.IS().createStride(test_space, 0, 1, comm)
        col_map = PETSc.IS().createStride(trial_space, 0, 1, comm)
    # With function space this can be extracted
    else:
        mesh = test_space.mesh()
        comm = mesh.mpi_comm().tompi4py()

        row_map = test_space.dofmap()
        col_map = trial_space.dofmap()

        sizes = [[row_map.index_map().size(IndexMap.MapSize_OWNED),
                  row_map.index_map().size(IndexMap.MapSize_GLOBAL)],
                 [col_map.index_map().size(IndexMap.MapSize_OWNED),
                  col_map.index_map().size(IndexMap.MapSize_GLOBAL)]]

        row_map = map(int, row_map.tabulate_local_to_global_dofs())
        col_map = map(int, col_map.tabulate_local_to_global_dofs())

    assert comm.size == 1

    lgmap = lambda indices: (PETSc.LGMap().create(indices, comm=comm)
                             if isinstance(indices, list)
                             else PETSc.LGMap().createIS(indices))

    row_lgmap, col_lgmap = map(lgmap, (row_map, col_map))

    # Alloc
    mat = PETSc.Mat().createAIJ(sizes, nnz=nnz, comm=comm)
    mat.setUp()
    mat.setLGMap(row_lgmap, col_lgmap)

    mat.assemblyBegin()
    # Fill
    yield mat
    # Tear down
    mat.assemblyEnd()

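# An illustrative usage sketch for petsc_serial_matrix() -- added here, and
# depending on the contextmanager assumption noted above: fill a 3x2 matrix
# mapping a generic R^2 into R^3. Assembly is finalized when the with-block
# exits.
with petsc_serial_matrix(3, 2, nnz=2) as mat:
    for row in range(3):
        mat.setValues([row], [0, 1], [1.0, -1.0])
# mat is assembled here; wrap as df.PETScMatrix(mat) for use with dolfin
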
def test_StatisticsProbes_segregated_2D(V2):
    u0 = interpolate(Expression('x[0]', degree=1), V2)
    v0 = interpolate(Expression('x[1]', degree=1), V2)
    x = array([[0.5, 0.25], [0.4, 0.4], [0.3, 0.3]])

    probes = StatisticsProbes(x.flatten(), V2, True)
    for i in range(5):
        probes(u0, v0)

    p = probes.array()
    if MPI.rank(mpi_comm_world()) == 0:
        assert round(p[0, 0] - 2.5, 7) == 0
        assert round(p[0, 4] - 0.625, 7) == 0

def test_Probes_functionspace_2D():
    mesh = UnitSquareMesh(4, 4)
    V = FunctionSpace(mesh, 'CG', 1)
    u0 = interpolate(Expression('x[0]', degree=1), V)
    x = array([[0.5, 0.5], [0.4, 0.4], [0.3, 0.3]])

    p = Probes(x.flatten(), V)
    # Probe twice
    p(u0)
    p(u0)

    # Check both snapshots
    p0 = p.array(N=0)
    if MPI.rank(mpi_comm_world()) == 0:
        nose.tools.assert_almost_equal(p0[0], 0.5)
        nose.tools.assert_almost_equal(p0[1], 0.4)
        nose.tools.assert_almost_equal(p0[2], 0.3)
    p0 = p.array(N=1)
    if MPI.rank(mpi_comm_world()) == 0:
        nose.tools.assert_almost_equal(p0[0], 0.5)
        nose.tools.assert_almost_equal(p0[1], 0.4)
        nose.tools.assert_almost_equal(p0[2], 0.3)

def __init__(self, model):
    # Process number
    self.MPI_rank = MPI.rank(mpi_comm_world())

    ### Get a few fields and parameters from the model

    # Effective pressure
    N = model.N
    # Sliding speed
    u_b = model.u_b
    # Initial model time
    t0 = model.t
    # Rate factor
    A = model.pcs['A']
    # Distance between bumps
    l_r = model.pcs['l_r']
    # Bump height
    h_r = model.h_r
    # Initial sheet height
    h0 = model.h.vector().array()
    # Bump height vector
    h_r_n = h_r.vector().array()

    ### Set up the sheet height ODE

    # Right hand side for the gap height ODE
    def rhs(t, h_n):
        # Ensure that the sheet height is positive
        h_n[h_n < 0.0] = 0.0
        # Sheet opening term
        w_n = u_b.vector().array() * (h_r_n - h_n) / l_r
        # Ensure that the opening term is non-negative
        w_n[w_n < 0.0] = 0.0
        # Sheet closure term
        v_n = A * h_n * N.vector().array()**3
        # Return the time rate of change of the sheet
        dhdt = w_n - v_n
        return dhdt

    # Set up ODE solver
    ode_solver = ode(rhs).set_integrator('vode', method='adams',
                                         max_step=60.0 * 5.0)
    ode_solver.set_initial_value(h0, t0)

    ### Set local variables
    self.ode_solver = ode_solver
    self.model = model

def __init__(self, filename, functionspace):
    self.functionspace = functionspace
    self.h5filename = filename + '.h5'
    self.jsonfilename = filename + '.json'
    print("Debug: ({}/{}) opening file {}".format(mpi_rank, mpi_size,
                                                  self.h5filename))
    self.h5file = df.HDF5File(df.mpi_comm_world(), self.h5filename, 'w')
    self.field_index = 0
    self.t_array = []
    self.fieldsDict = OrderedDict()
    self.dump_metadata(self.jsonfilename, self.fieldsDict)

def save_checkpoint_solution_h5(tstep, q_, q_1, newfolder, u_components,
                                NS_parameters):
    """Overwrite solution in Checkpoint folder.

    For safety reasons, in case the solver is interrupted, take a backup of
    the solution first.

    Must be restarted using the same mesh-partitioning. This will be fixed
    soon. (MM)
    """
    checkpointfolder = path.join(newfolder, "Checkpoint")
    NS_parameters["num_processes"] = MPI.size(mpi_comm_world())
    if MPI.rank(mpi_comm_world()) == 0:
        if path.exists(path.join(checkpointfolder, "params.dat")):
            system('cp {0} {1}'.format(
                path.join(checkpointfolder, "params.dat"),
                path.join(checkpointfolder, "params_old.dat")))
        f = open(path.join(checkpointfolder, "params.dat"), 'w')
        cPickle.dump(NS_parameters, f)

    MPI.barrier(mpi_comm_world())
    for ui in q_:
        h5file = path.join(checkpointfolder, ui + '.h5')
        oldfile = path.join(checkpointfolder, ui + '_old.h5')
        # For safety reasons...
        if path.exists(h5file):
            if MPI.rank(mpi_comm_world()) == 0:
                system('cp {0} {1}'.format(h5file, oldfile))
        MPI.barrier(mpi_comm_world())
        ###
        newfile = HDF5File(mpi_comm_world(), h5file, 'w')
        newfile.flush()
        newfile.write(q_[ui].vector(), '/current')
        if ui in u_components:
            newfile.write(q_1[ui].vector(), '/previous')
        if path.exists(oldfile):
            if MPI.rank(mpi_comm_world()) == 0:
                system('rm {0}'.format(oldfile))
        MPI.barrier(mpi_comm_world())

    if MPI.rank(mpi_comm_world()) == 0 and \
            path.exists(path.join(checkpointfolder, "params_old.dat")):
        system('rm {0}'.format(path.join(checkpointfolder, "params_old.dat")))

def mesh(self):
    "Return the dolfin mesh"
    # If no mesh is stored, read it in from the UnstructuredGridData
    if self._mesh is None:
        self._mesh = vtk_ug_to_dolfin_mesh(self.reader.GetOutput())

    # Small sanity check; only run in serial
    if MPI.size(mpi_comm_world()) == 1:
        assert (self._mesh.num_vertices() ==
                self.reader.GetOutput().GetNumberOfPoints() and
                self._mesh.num_cells() ==
                self.reader.GetOutput().GetNumberOfCells())

    return self._mesh

def print_text(text, color=None, atrb=0, cls=None):
    """
    Print text ``text`` from calling class ``cls`` to the screen.

    :param text:  the text to print
    :param color: the color of the text to print
    :param atrb:  attributes to use with the ``colored`` package
    :param cls:   the calling class
    :type text:   string
    :type color:  string
    :type atrb:   int
    :type cls:    object
    """
    if MPI.rank(mpi_comm_world()) == 0:
        print(get_text(text, color, atrb, cls))

def init_from_restart(restart_folder, sys_comp, uc_comp, u_components,
                      q_, q_1, q_2, **NS_namespace):
    """Initialize solution from checkpoint files."""
    if restart_folder:
        for ui in sys_comp:
            filename = path.join(restart_folder, ui + '.h5')
            hdf5_file = HDF5File(mpi_comm_world(), filename, "r")
            hdf5_file.read(q_[ui].vector(), "/current", False)
            q_[ui].vector().apply('insert')
            # Check for the solution at a previous timestep as well
            if ui in uc_comp:
                q_1[ui].vector().zero()
                q_1[ui].vector().axpy(1., q_[ui].vector())
                q_1[ui].vector().apply('insert')
                if ui in u_components:
                    hdf5_file.read(q_2[ui].vector(), "/previous", False)
                    q_2[ui].vector().apply('insert')