Example #1
    def load(self, vec, filename="control.dat"):
        """
        Load a vector from a file.
        DumbCheckpoint requires that the mesh, FunctionSpace and parallel
        decomposition are identical between store and load.
        """
        viewer = PETSc.Viewer().createBinary(filename, mode="r")
        vec.vec_wo().load(viewer)
Example #2
    def store(self, vec, filename="control.dat"):
        """
        Store the vector to a file to be reused in a later computation.
        DumbCheckpoint requires that the mesh, FunctionSpace and parallel
        decomposition are identical between store and load.

        """
        viewer = PETSc.Viewer().createBinary(filename, mode="w")
        viewer.view(vec.vec_ro())
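
The store/load pair above is a thin wrapper around PETSc's binary viewer. A minimal stand-alone sketch of the same round trip with a plain petsc4py Vec (the filename and vector size are arbitrary, chosen only for illustration):

from petsc4py import PETSc

# Write a small sequential vector to a PETSc binary file.
v = PETSc.Vec().createSeq(4)
v.setValues(range(4), [1.0, 2.0, 3.0, 4.0])
v.assemble()
viewer = PETSc.Viewer().createBinary("example.dat", mode="w")
v.view(viewer)
viewer.destroy()

# Read it back into a vector with the same layout.
w = PETSc.Vec().createSeq(4)
viewer = PETSc.Viewer().createBinary("example.dat", mode="r")
w.load(viewer)
viewer.destroy()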
Example #3
def save_mesh(mesh, fname, fpath='.'):
    """
    :arg mesh: mesh to be saved in DMPlex format.
    :arg fname: file name (without '.h5' extension).
    :kwarg fpath: directory in which to store the file.
    """
    if COMM_WORLD.size > 1:
        raise IOError("Saving a mesh to HDF5 only works in serial.")
    try:
        plex = mesh.topology_dm
    except AttributeError:
        plex = mesh._topology_dm  # Backwards compatibility
    viewer = PETSc.Viewer().createHDF5(os.path.join(fpath, fname + '.h5'), 'w')
    viewer(plex)
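
A counterpart loading sketch, assuming the same serial setup (the helper name load_mesh is hypothetical; the pattern mirrors the plex loading in Example #7):

def load_mesh(fname, fpath='.'):
    """
    Load a mesh previously written by save_mesh.
    :arg fname: file name (without '.h5' extension).
    :kwarg fpath: directory in which the file is stored.
    """
    plex = PETSc.DMPlex().create()
    plex.createFromFile(os.path.join(fpath, fname + '.h5'))
    return Mesh(plex)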
Example #4
def export_final_state(inputdir, bathymetry_2d):
    """
    Export fields to be used in a subsequent simulation
    """
    if not os.path.exists(inputdir):
        os.makedirs(inputdir)
    print_output("Exporting fields for subsequent simulation")

    chk = DumbCheckpoint(inputdir + "/bathymetry", mode=FILE_CREATE)
    chk.store(bathymetry_2d, name="bathymetry")
    File(inputdir + '/bathout.pvd').write(bathymetry_2d)
    chk.close()
    plex = bathymetry_2d.function_space().mesh()._topology_dm
    viewer = PETSc.Viewer().createHDF5(inputdir + '/myplex.h5', 'w')
    viewer(plex)
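
Reading the stored field back in a later run is the mirror image of the export, assuming the mesh and FunctionSpace have been rebuilt identically first (a sketch only; bathymetry_2d must already be a Function on that space, and the pattern mirrors the checkpoint loading in Example #7):

chk = DumbCheckpoint(inputdir + "/bathymetry", mode=FILE_READ)
chk.load(bathymetry_2d, name="bathymetry")
chk.close()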
Example #5
def _from_gmsh(filename):
    """Read a Gmsh .msh file from `filename`"""

    # Create a read-only PETSc.Viewer
    gmsh_viewer = PETSc.Viewer().create()
    gmsh_viewer.setType("ascii")
    gmsh_viewer.setFileMode("r")
    gmsh_viewer.setFileName(filename)
    gmsh_plex = PETSc.DMPlex().createGmsh(gmsh_viewer)

    if gmsh_plex.hasLabel("Face Sets"):
        boundary_ids = gmsh_plex.getLabelIdIS("Face Sets").getIndices()
        gmsh_plex.createLabel("boundary_ids")
        for bid in boundary_ids:
            faces = gmsh_plex.getStratumIS("Face Sets", bid).getIndices()
            for f in faces:
                gmsh_plex.setLabelValue("boundary_ids", f, bid)

    return gmsh_plex
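
A hedged usage sketch: the returned DMPlex, including its "boundary_ids" label, can be handed straight to Firedrake's Mesh constructor (the .msh filename is illustrative):

gmsh_plex = _from_gmsh("channel.msh")
mesh = Mesh(gmsh_plex)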
Example #6
def export_final_state(
    inputdir,
    uv,
    elev,
):
    """
    Export fields to be used in a subsequent simulation
    """
    if not os.path.exists(inputdir):
        os.makedirs(inputdir)
    print_output("Exporting fields for subsequent simulation")
    chk = DumbCheckpoint(inputdir + "/velocity", mode=FILE_CREATE)
    chk.store(uv, name="velocity")
    File(inputdir + '/velocityout.pvd').write(uv)
    chk.close()
    chk = DumbCheckpoint(inputdir + "/elevation", mode=FILE_CREATE)
    chk.store(elev, name="elevation")
    File(inputdir + '/elevationout.pvd').write(elev)
    chk.close()

    plex = elev.function_space().mesh()._topology_dm
    viewer = PETSc.Viewer().createHDF5(inputdir + '/myplex.h5', 'w')
    viewer(plex)
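
Putting the pieces together, a later simulation can be warm-started from these files. A sketch of the restart, under the assumption that the saved plex reproduces the original mesh exactly; the function space choices below are placeholders and must match whatever the exporting run used:

# Rebuild the mesh from the stored plex (mirrors the loading in Example #7).
plex = PETSc.DMPlex().create()
plex.createFromFile(inputdir + '/myplex.h5')
mesh2d = Mesh(plex)

# Recreate the original function spaces (illustrative choices only).
V_uv = VectorFunctionSpace(mesh2d, "DG", 1)
V_elev = FunctionSpace(mesh2d, "DG", 1)
uv = Function(V_uv, name="velocity")
elev = Function(V_elev, name="elevation")

# Load the checkpointed fields back onto the identical spaces.
with DumbCheckpoint(inputdir + "/velocity", mode=FILE_READ) as chk:
    chk.load(uv, name="velocity")
with DumbCheckpoint(inputdir + "/elevation", mode=FILE_READ) as chk:
    chk.load(elev, name="elevation")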
Example #7
    def fixed_point_iteration(self, **parsed_args):
        """
        Apply a goal-oriented metric-based mesh adaptation
        fixed point iteration loop for a tidal farm
        modelling problem.
        """
        parsed_args = AttrDict(parsed_args)
        options = self.options
        expected = {
            "miniter",
            "maxiter",
            "load_index",
            "qoi_rtol",
            "element_rtol",
            "error_indicator",
            "approach",
            "h_min",
            "h_max",
            "turbine_h_min",
            "turbine_h_max",
            "a_max",
            "target_complexity",
            "base_complexity",
            "flux_form",
            "norm_order",
            "no_final_run",
        }
        if not expected.issubset(set(parsed_args.keys())):
            missing = expected.difference(set(parsed_args.keys()))
            raise AttributeError(f"Missing required arguments {missing}")
        output_dir = options.output_directory
        end_time = options.simulation_end_time
        dt = options.timestep
        approach = parsed_args.approach
        h_min = parsed_args.h_min
        h_max = parsed_args.h_max
        turbine_h_min = parsed_args.turbine_h_min
        turbine_h_max = parsed_args.turbine_h_max
        a_max = parsed_args.a_max
        num_timesteps = int(np.round(end_time / dt))
        target = num_timesteps * parsed_args.target_complexity
        base = num_timesteps * parsed_args.base_complexity
        num_subintervals = self.num_subintervals
        timesteps = [dt] * num_subintervals
        p = parsed_args.norm_order
        no_final_run = parsed_args.no_final_run
        if COMM_WORLD.size > 1:
            raise NotImplementedError(
                "Mmg2d only supports serial mesh adaptation")

        # Enter fixed point iteration
        miniter = parsed_args.miniter
        maxiter = parsed_args.maxiter
        if miniter > maxiter:
            print_output(
                f"miniter {miniter} and maxiter {maxiter} are incompatible")
            miniter = maxiter
            print_output(f"Setting miniter={miniter}, maxiter={maxiter}")
        qoi_rtol = parsed_args.qoi_rtol
        element_rtol = parsed_args.element_rtol
        converged_reason = None
        load_index = parsed_args.load_index
        if load_index > 0:
            self.keep_log = True
        fp_iteration = load_index
        msg = "Termination due to {:s} after {:d} iterations"
        cells = [] if load_index == 0 else list(
            np.load(f"{output_dir}/num_cells_progress.npy"))
        qois = [] if load_index == 0 else list(
            np.load(f"{output_dir}/J_progress.npy"))
        self.J = 0 if load_index == 0 else qois[-1]

        def check_cell_count_convergence():
            if len(cells) >= max(2, miniter):
                elements_converged = True
                for nc, nc_old in zip(cells[-1], cells[-2]):
                    if abs(nc - nc_old) > element_rtol * nc_old:
                        elements_converged = False
                        break
                if elements_converged:
                    return "converged element counts"

        def check_qoi_convergence():
            if len(qois) >= max(2, miniter):
                if abs(qois[-1] - qois[-2]) < qoi_rtol * qois[-2]:
                    return "converged quantity of interest"

        # Check for convergence (of loaded data)
        converged_reason = (check_qoi_convergence()
                            or check_cell_count_convergence())

        # Load meshes, if requested
        if load_index > 0:
            for i in range(num_subintervals):
                fname = f"{output_dir}/mesh_fp{fp_iteration}_{i}"
                if not os.path.exists(fname + ".h5"):
                    raise IOError(f"Cannot load mesh file {fname}.")
                print_output(f"\n--- Loading plex {i+1}\n{fname}")
                plex = PETSc.DMPlex().create()
                plex.createFromFile(fname + ".h5")
                self.meshes[i] = Mesh(plex)

        # Do final run, if loaded data has already converged
        if converged_reason is not None:
            if not no_final_run:
                print(f"converged_reason: {converged_reason}")
                self._final_run()
            print_output(msg.format(converged_reason, fp_iteration + 1))
            print_output(f"Energy output: {self.J/3.6e+09} MWh")
            return

        # Enter the fixed point iteration loop
        while fp_iteration <= maxiter:
            print_output(
                f"Start time for fp_iteration {fp_iteration}: {datetime.datetime.now()}"
            )
            outfiles = AttrDict({})
            if fp_iteration < miniter:
                converged_reason = None
            elif fp_iteration == maxiter:
                converged_reason = converged_reason or "maximum number of iterations reached"

            # Ramp up the target complexity
            target_ramp = ramp_complexity(base, target, fp_iteration)

            # Create metrics
            kw = dict(metric_parameters=dict(dm_plex_metric_verbosity=10))
            metrics = [RiemannianMetric(mesh, **kw) for mesh in self.meshes]
            metric_fns = [metric.function for metric in metrics]

            # Load metric data, if available
            loaded = False
            if fp_iteration == load_index:
                for i, metric in enumerate(metric_fns):
                    fpath = self.root_dir if load_index == 0 else output_dir
                    ext = "" if load_index == 0 else "_fp{fp_iteration}"
                    fname = f"{fpath}/metric{i}{ext}"
                    if os.path.exists(fname + ".h5"):
                        print_output(
                            f"\n--- Loading metric on mesh {i+1}\n{fname}")
                        try:
                            with DumbCheckpoint(fname, mode=FILE_READ) as chk:
                                chk.load(metric, name="Metric")
                            loaded = True
                        except Exception:
                            raise IOError(
                                f"Cannot load metric data on mesh {i+1}")
                    elif loaded:
                        raise IOError("Remove partial metric data")
            if not loaded:

                # Solve forward and adjoint on each subinterval
                if converged_reason is None:
                    print_output(
                        f"\n--- Forward-adjoint sweep {fp_iteration}\n")
                    solutions = self.solve_adjoint()
                else:
                    if not no_final_run:
                        print(f"converged_reason: {converged_reason}")
                        self._final_run()

                # Check for QoI convergence
                converged_reason = converged_reason or check_qoi_convergence()
                if converged_reason is not None:
                    if not no_final_run:
                        print(f"converged_reason: {converged_reason}")
                        self._final_run()
                qois.append(self.J)
                np.save(f"{output_dir}/J_progress.npy", qois)

                # Escape if converged
                if converged_reason is not None:
                    print_output(msg.format(converged_reason,
                                            fp_iteration + 1))
                    print_output(f"Energy output: {self.J/3.6e+09} MWh")
                    break

                # Create vtu output files
                outfiles.forward = File(f"{output_dir}/Forward2d.pvd")
                outfiles.forward_old = File(f"{output_dir}/ForwardOld2d.pvd")
                outfiles.adjoint_next = File(f"{output_dir}/AdjointNext2d.pvd")
                outfiles.adjoint = File(f"{output_dir}/Adjoint2d.pvd")

                # Construct metric
                with pyadjoint.stop_annotating():
                    print_output(f"\n--- Error estimation {fp_iteration}\n")
                    for i, mesh in enumerate(self.meshes):
                        options.rebuild_mesh_dependent_components(mesh)
                        options.get_bnd_conditions(
                            self.function_spaces.swe2d[i])
                        update_forcings = options.update_forcings

                        # Create error estimator
                        ee = ErrorEstimator(
                            options,
                            mesh=mesh,
                            metric=approach,
                            error_estimator=parsed_args.error_indicator,
                        )

                        # Loop over all exported timesteps
                        N = len(solutions.swe2d.adjoint[i])
                        for j in range(N):
                            if i < num_subintervals - 1 and j == N - 1:
                                continue

                            # Plot fields
                            args = []
                            for f in outfiles:
                                args.extend(solutions.swe2d[f][i][j].split())
                                name = "adjoint " if "adjoint" in f else ""
                                args[-2].rename(
                                    (name + "velocity").capitalize())
                                args[-1].rename(
                                    (name + "elevation").capitalize())
                                outfiles[f].write(*args[-2:])

                            # Construct metric at current timestep
                            t = i * end_time / num_subintervals + dt * (j + 1)
                            update_forcings(t)
                            metric_step = ee.metric(*args, **parsed_args)

                            # Apply trapezium rule
                            metric_step *= 0.5 * dt if j in (0, N - 1) else dt
                            metric_fns[i] += metric_step

                        # Stash metric data
                        print_output(
                            f"\n--- Storing metric data on mesh {i+1}\n")
                        fname = f"{output_dir}/metric{i}_fp{fp_iteration}"
                        with DumbCheckpoint(fname, mode=FILE_CREATE) as chk:
                            chk.store(metric_fns[i], name="Metric")
                        if fp_iteration == 0:
                            fname = f"{self.root_dir}/metric{i}"
                            with DumbCheckpoint(fname,
                                                mode=FILE_CREATE) as chk:
                                chk.store(metric_fns[i], name="Metric")

            # Apply space-time normalisation
            print_output(f"\n--- Metric processing {fp_iteration}\n")
            space_time_normalise(metric_fns, end_time, timesteps, target_ramp,
                                 p)

            # Enforce element constraints, accounting for turbines
            hmins = []
            hmaxs = []
            for mesh in self.meshes:
                P0 = get_functionspace(mesh, "DG", 0)
                hmin = Function(P0).assign(h_min)
                hmax = Function(P0).assign(h_max)
                for tag in self.qoi_farm_ids.flatten():
                    hmin.assign(turbine_h_min, subset=mesh.cell_subset(tag))
                    hmax.assign(turbine_h_max, subset=mesh.cell_subset(tag))
                hmins.append(hmin)
                hmaxs.append(hmax)
            enforce_element_constraints(metric_fns, hmins, hmaxs, a_max)

            # Plot metrics
            outfiles.metric = File(f"{output_dir}/Metric2d.pvd")
            for metric in metric_fns:
                outfiles.metric.write(metric)

            # Adapt meshes
            print_output(f"\n--- Mesh adaptation {fp_iteration}\n")
            outfiles.mesh = File(f"{output_dir}/Mesh2d.pvd")
            for i, metric in enumerate(metrics):
                self.meshes[i] = Mesh(adapt(self.meshes[i], metric))
                outfiles.mesh.write(self.meshes[i].coordinates)
            cells.append([mesh.num_cells() for mesh in self.meshes])
            np.save(f"{output_dir}/num_cells_progress.npy", cells)

            # Check for convergence of element count
            check_cell_count_convergence()

            # Save mesh data to disk
            for i, mesh in enumerate(self.meshes):
                fname = f"{output_dir}/mesh_fp{fp_iteration+1}_{i}.h5"
                viewer = PETSc.Viewer().createHDF5(fname, "w")
                viewer(mesh.topology_dm)

            # Increment
            print_output(
                f"End time for fp_iteration {fp_iteration}: {datetime.datetime.now()}"
            )
            fp_iteration += 1
        print_output(msg.format(converged_reason, fp_iteration + 1))
        print_output(f"Energy output: {self.J/3.6e+09} MWh")
Example #8
def output_time(start, end, **kwargs):
    """
    Used by ``explosive_source.py`` at the end of a run to record to file
    useful information.
    """
    verbose = kwargs.get('verbose', False)
    tofile = kwargs.get('tofile', False)
    meshid = kwargs.get('meshid', 'default_mesh')
    ntimesteps = kwargs.get('ntimesteps', 0)
    nloops = kwargs.get('nloops', 0)
    tile_size = kwargs.get('tile_size', 0)
    partitioning = kwargs.get('partitioning', 'chunk')
    extra_halo = 'yes' if kwargs.get('extra_halo', False) else 'no'
    explicit_mode = kwargs.get('explicit_mode', None)
    glb_maps = 'yes' if kwargs.get('glb_maps', False) else 'no'
    poly_order = kwargs.get('poly_order', -1)
    domain = kwargs.get('domain', 'default_domain')
    coloring = kwargs.get('coloring', 'default')
    prefetch = 'yes' if kwargs.get('prefetch', False) else 'no'
    function_spaces = kwargs.get('function_spaces', [])
    backend = os.environ.get("SLOPE_BACKEND", "SEQUENTIAL")

    name = os.path.splitext(os.path.basename(sys.argv[0]))[0]  # Cut away the extension

    avg = lambda v: (sum(v) / len(v)) if v else 0.0

    # Where do I store the output?
    output_dir = os.getcwd()

    # Find number of processes, and number of threads per process
    rank = MPI.COMM_WORLD.rank
    num_procs = MPI.COMM_WORLD.size
    num_threads = int(os.environ.get("OMP_NUM_THREADS", 1)) if backend == 'OMP' else 1

    # What execution mode is this?
    if num_procs == 1 and num_threads == 1:
        versions = ['sequential', 'openmp', 'mpi', 'mpi_openmp']
    elif num_procs == 1 and num_threads > 1:
        versions = ['openmp']
    elif num_procs > 1 and num_threads == 1:
        versions = ['mpi']
    else:
        versions = ['mpi_openmp']

    # Determine the total execution time (Python + kernel execution + MPI cost)
    if rank in range(1, num_procs):
        MPI.COMM_WORLD.isend([start, end], dest=0)
    elif rank == 0:
        starts, ends = [0]*num_procs, [0]*num_procs
        starts[0], ends[0] = start, end
        for i in range(1, num_procs):
            starts[i], ends[i] = MPI.COMM_WORLD.recv(source=i)
        min_start, max_end = min(starts), max(ends)
        tot = round(max_end - min_start, 3)
        print "Time stepping: ", tot, "s"

    # Exit if user doesn't want timings to be recorded
    if not tofile:
        return

    # Determine (on rank 0):
    # ACT - Average Compute Time (pure kernel execution)
    # ACCT - Average Compute and Communication Time (ACT + MPI cost)
    # For this, first dump the PETSc performance log to a temporary file, as
    # there is currently no other clean way of accessing the timings in petsc4py
    logfile = os.path.join(output_dir, 'seigenlog.py')
    vwr = PETSc.Viewer().createASCII(logfile)
    vwr.pushFormat(PETSc.Viewer().Format().ASCII_INFO_DETAIL)
    PETSc.Log().view(vwr)
    PETSc.Options().delValue('log_view')
    if rank == 0:
        with open(logfile, 'r') as f:
            content = f.read()
        exec(content, globals(), locals())
        compute_times = [Stages['Main Stage']['ParLoopCKernel'][i]['time'] for i in range(num_procs)]
        mpi_times = [Stages['Main Stage']['ParLoopHaloEnd'][i]['time'] for i in range(num_procs)]
        ACT = round(avg(compute_times), 3)
        AMT = round(avg(mpi_times), 3)
        ACCT = ACT + AMT
        print "Average Compute Time: ", ACT, "s"
        print "Average Compute and Communication Time: ", ACCT, "s"

    # Determine if a multi-node execution
    platform = os.environ.get('NODENAME', 'unknown')

    # Adjust /tile_size/ and /version/ based on the problem that was actually run
    assert nloops >= 0
    if nloops == 0:
        tile_size = 0
        mode = "untiled"
    elif explicit_mode:
        mode = "fs%d" % explicit_mode
    else:
        mode = "loops%d" % nloops

    ### Print timings to file ###

    def fix(values):
        new_values = []
        for v in values:
            try:
                new_v = int(v)
            except ValueError:
                try:
                    new_v = float(v)
                except ValueError:
                    new_v = v.strip()
            if new_v != '':
                new_values.append(new_v)
        return tuple(new_values)

    if rank == 0:
        for version in versions:
            timefile = os.path.join(output_dir, "times", name, "poly_%d" % poly_order, domain,
                                    meshid, version, platform, "np%d_nt%d.txt" % (num_procs, num_threads))
            # Create directory and file (if not exist)
            if not os.path.exists(os.path.dirname(timefile)):
                os.makedirs(os.path.dirname(timefile))
            if not os.path.exists(timefile):
                open(timefile, 'a').close()
            # Read the old content, add the new time value, order
            # everything based on <execution time, #loops tiled>, write
            # back to the file (overwriting existing content)
            with open(timefile, "r+") as f:
                lines = [line.split('|') for line in f if line.strip()][2:]
                lines = [fix(i) for i in lines]
                lines += [(tot, ACT, ACCT, ntimesteps, mode, tile_size, partitioning,
                           extra_halo, glb_maps, coloring, prefetch)]
                lines.sort(key=lambda x: x[0])
                template = "| " + "%9s | " * 11
                prepend = template % ('time', 'ACT', 'ACCT', 'timesteps', 'mode', 'tilesize',
                                      'partmode', 'extrahalo', 'glbmaps', 'coloring', 'prefetch')
                lines = "\n".join([prepend, '-'*133] + [template % i for i in lines]) + "\n"
                f.seek(0)
                f.write(lines)
                f.truncate()

    ### Print DoFs summary to file ###

    dofsfile = os.path.join(output_dir, "times", name, "dofs_summary.txt")
    if rank == 0 and not os.path.exists(dofsfile):
        with open(dofsfile, 'a') as f:
            f.write("poly:numprocs:[fs1_dofs;fs2_dofs;...]\n")
    tot_dofs = [MPI.COMM_WORLD.allreduce(fs.dof_count, op=mpi4py.MPI.SUM) for fs in function_spaces]
    if rank == 0:
        with open(dofsfile, "a") as f:
            f.write("%d:%d:%s\n" % (poly_order, num_procs, ';'.join([str(i) for i in tot_dofs])))

    ### Print summary output to screen ###

    if rank == 0 and verbose:
        for i in range(num_procs):
            fs_info = ", ".join(["%s=%d" % (fs.name, fs.dof_count) for fs in function_spaces])
            tot_time = compute_times[i] + mpi_times[i]
            offC = (ends[i] - starts[i]) - tot_time
            offCperc = (offC / (ends[i] - starts[i]))*100
            mpiPerc = (mpi_times[i] / (ends[i] - starts[i]))*100
            print "Rank %d: comp=%.2fs, mpi=%.2fs -- tot=%.2fs (py=%.2fs, %.2f%%; mpi_oh=%.2f%%; fs=[%s])" % \
                (i, compute_times[i], mpi_times[i], tot_time, offC, offCperc, mpiPerc, fs_info)
        sys.stdout.flush()
    MPI.COMM_WORLD.barrier()

    # Clean up
    if rank == 0:
        os.remove(logfile)
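
A hedged usage sketch: the timing endpoints are wall-clock measurements taken around the time-stepping loop, and every keyword value below is illustrative:

from time import time

t_start = time()
# ... run the time-stepping loop ...
t_end = time()

output_time(t_start, t_end,
            tofile=True, verbose=False,
            meshid="uniform_mesh", ntimesteps=1000, nloops=0,
            poly_order=2, domain="explosive_source",
            function_spaces=[])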
Example #9
solver_params = {
    'pyamg_tol': pyamg_tol,
    'pyamg_maxiter': pyamg_maxiter,
    'ksp_monitor': None,
}
solver = vs.LinearVariationalSolver(problem, solver_parameters=solver_params)

# Assemble the matrix and store it as a text file
A = assemble(a).M.handle
store_mat = True
if store_mat:
    dim = mesh.geometric_dimension()
    file_name = f"matrix_txt_files/helmholtz-{dim}D-h{h}.txt"
    from firedrake.petsc import PETSc
    myviewer = PETSc.Viewer().createASCII(
        file_name,
        mode=PETSc.Viewer.Format.ASCII_COMMON,
        comm=PETSc.COMM_WORLD)
    A.view(myviewer)

# set up pyamg preconditioner and solve
pc = solver.snes.getKSP().pc
pc.setType(pc.Type.PYTHON)
pc.setPythonContext(
    AMGTransmissionPreconditioner(wave_number,
                                  fspace,
                                  A,
                                  tol=pyamg_tol,
                                  maxiter=pyamg_maxiter,
                                  use_plane_waves=True))

solver.solve()
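
If the matrix needs to be read back by PETSc rather than inspected by eye, a binary viewer round-trips it exactly (a minimal sketch; the filename is illustrative):

bin_viewer = PETSc.Viewer().createBinary("helmholtz.dat", mode="w")
A.view(bin_viewer)
bin_viewer.destroy()

# Later: load it back into a new matrix with the same layout.
load_viewer = PETSc.Viewer().createBinary("helmholtz.dat", mode="r")
B = PETSc.Mat().create(comm=PETSc.COMM_WORLD)
B.load(load_viewer)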