def get_opencg_material(openmoc_material): """Return an OpenCG material corresponding to an OpenMOC material. Parameters ---------- openmoc_material : openmoc.Material OpenMOC material Returns ------- opencg_material : opencg.Material Equivalent OpenCG material """ cv.check_type('openmoc_material', openmoc_material, openmoc.Material) global OPENCG_MATERIALS material_id = openmoc_material.getId() # If this Material was already created, use it if material_id in OPENCG_MATERIALS: return OPENCG_MATERIALS[material_id] # Create an OpenCG Material to represent this OpenMOC Material name = openmoc_material.getName() opencg_material = opencg.Material(material_id=material_id, name=name) # Add the OpenMOC Material to the global collection of all OpenMOC Materials OPENMOC_MATERIALS[material_id] = openmoc_material # Add the OpenCG Material to the global collection of all OpenCG Materials OPENCG_MATERIALS[material_id] = opencg_material return opencg_material
def is_opencg_surface_compatible(opencg_surface): """Determine whether OpenCG surface is compatible with OpenMOC geometry. A surface is considered compatible if there is a one-to-one correspondence between OpenMOC and OpenCG surface types. Note that some OpenCG surfaces, e.g. SquarePrism, do not have a one-to-one correspondence with OpenMOC surfaces but can still be converted into an equivalent collection of OpenMOC surfaces. Parameters ---------- opencg_surface : opencg.Surface OpenCG surface Returns ------- bool Whether OpenCG surface is compatible with OpenMOC """ cv.check_type('opencg_surface', opencg_surface, opencg.Surface) if opencg_surface.type in ['z-squareprism']: return False else: return True
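# Illustrative usage sketch (not part of the module): a surface whose type has a
# one-to-one OpenMOC equivalent is reported as compatible, while a
# 'z-squareprism' is not. The OpenCG constructor arguments shown here are
# hypothetical and may differ from the actual opencg API.
#
#   >>> pin = opencg.ZCylinder(x0=0.0, y0=0.0, R=0.5)       # type 'z-cylinder'
#   >>> is_opencg_surface_compatible(pin)
#   True
#   >>> box = opencg.ZSquarePrism(x0=0.0, y0=0.0, R=0.63)   # type 'z-squareprism'
#   >>> is_opencg_surface_compatible(box)
#   False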
def get_opencg_cell(openmoc_cell): """Return an OpenCG cell corresponding to an OpenMOC cell. Parameters ---------- openmoc_cell : openmoc.Cell OpenMOC cell Returns ------- opencg_cell : opencg.Cell Equivalent OpenCG cell """ cv.check_type('openmoc_cell', openmoc_cell, openmoc.Cell) global OPENCG_CELLS cell_id = openmoc_cell.getId() # If this Cell was already created, use it if cell_id in OPENCG_CELLS: return OPENCG_CELLS[cell_id] # Create an OpenCG Cell to represent this OpenMOC Cell name = openmoc_cell.getName() opencg_cell = opencg.Cell(cell_id, name) if (openmoc_cell.getType() == openmoc.MATERIAL): fill = openmoc_cell.getFillMaterial() opencg_cell.fill = get_opencg_material(fill) elif (openmoc_cell.getType() == openmoc.FILL): fill = openmoc_cell.getFillUniverse() if isinstance(fill, openmoc.Lattice): opencg_cell.fill = get_opencg_lattice(fill) else: opencg_cell.fill = get_opencg_universe(fill) if openmoc_cell.isRotated(): rotation = openmoc_cell.getRotation(3) opencg_cell.rotation = rotation if openmoc_cell.isTranslated(): translation = openmoc_cell.getTranslation(3) opencg_cell.translation = translation surfaces = openmoc_cell.getSurfaces() for surf_id, surface_halfspace in surfaces.items(): halfspace = surface_halfspace._halfspace surface = surface_halfspace._surface opencg_cell.add_surface(get_opencg_surface(surface), halfspace) # Add the OpenMOC Cell to the global collection of all OpenMOC Cells OPENMOC_CELLS[cell_id] = openmoc_cell # Add the OpenCG Cell to the global collection of all OpenCG Cells OPENCG_CELLS[cell_id] = opencg_cell return opencg_cell
def get_openmoc_lattice(opencg_lattice): """Return an OpenMOC lattice corresponding to an OpenCG lattice. Parameters ---------- opencg_lattice : opencg.Lattice OpenCG lattice Returns ------- openmoc_lattice : openmoc.Lattice Equivalent OpenMOC lattice """ cv.check_type('opencg_lattice', opencg_lattice, opencg.Lattice) global OPENMOC_LATTICES lattice_id = opencg_lattice.id # If this Lattice was already created, use it if lattice_id in OPENMOC_LATTICES: return OPENMOC_LATTICES[lattice_id] name = str(opencg_lattice.name) dimension = opencg_lattice.dimension width = opencg_lattice.width offset = opencg_lattice.offset universes = opencg_lattice.universes # Initialize an empty array for the OpenMOC nested Universes in this Lattice universe_array = np.ndarray(tuple(dimension[::-1]), dtype=openmoc.Universe) # Create OpenMOC Universes for each unique nested Universe in this Lattice unique_universes = opencg_lattice.get_unique_universes() for universe_id, universe in unique_universes.items(): unique_universes[universe_id] = get_openmoc_universe(universe) # Build the nested Universe array for z in range(dimension[2]): for y in range(dimension[1]): for x in range(dimension[0]): universe_id = universes[z][y][x].id universe_array[z][dimension[1] - y - 1][x] = unique_universes[universe_id] openmoc_lattice = openmoc.Lattice(lattice_id, name) openmoc_lattice.setWidth(width[0], width[1], width[2]) openmoc_lattice.setUniverses(universe_array.tolist()) openmoc_lattice.setOffset(offset[0], offset[1], offset[2]) # Add the OpenMOC Lattice to the global collection of all OpenMOC Lattices OPENMOC_LATTICES[lattice_id] = openmoc_lattice # Add the OpenCG Lattice to the global collection of all OpenCG Lattices OPENCG_LATTICES[lattice_id] = opencg_lattice return openmoc_lattice
def get_mesh_cell_indices(self, point): """Get the mesh cell indices for a point within the geometry. Parameters ---------- point : openmoc.Point A point within the geometry Returns ------- indices : 2- or 3-tuple of Integral The mesh cell indices for the point. If the mesh is 2D then indices for x and y are returned; if the mesh is 3d indices for x, y, and z are returned. """ cv.check_type('point', point, openmoc.Point) # Extract the x,y,z coordinates from the OpenMOC Point x, y, z = point.getX(), point.getY(), point.getZ() # Translate the point with respect to the center of the mesh x -= (self.upper_right[0] + self.lower_left[0]) / 2. y -= (self.upper_right[1] + self.lower_left[1]) / 2. if len(self.dimension) != 2: z -= (self.upper_right[2] + self.lower_left[2]) / 2. # Compute the mesh cell indices mesh_x = (x + self.dimension[0] * self.width[0] * 0.5) / self.width[0] mesh_y = (y + self.dimension[1] * self.width[1] * 0.5) / self.width[1] if len(self.dimension) == 2: mesh_z = 0 else: mesh_z = (z + self.dimension[2] * self.width[2] * 0.5) / self.width[2] # Round the mesh cell indices down mesh_x = int(math.floor(mesh_x)) mesh_y = int(math.floor(mesh_y)) mesh_z = int(math.floor(mesh_z)) # Throw error if indices are outside of the Mesh if len(self.dimension) == 2: if (mesh_x < 0 or mesh_x >= self.dimension[0]) or \ (mesh_y < 0 or mesh_y >= self.dimension[1]): return np.nan, np.nan, np.nan else: if (mesh_x < 0 or mesh_x >= self.dimension[0]) or \ (mesh_y < 0 or mesh_y >= self.dimension[1]) or \ (mesh_z < 0 or mesh_z >= self.dimension[2]): return np.nan, np.nan, np.nan # Return mesh cell indices if len(self.dimension) == 2: return mesh_x, mesh_y else: return mesh_x, mesh_y, mesh_z
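# Illustrative usage sketch: locating the mesh cell that contains an OpenMOC
# point. The 2x2 mesh below is hypothetical, and the point setters (setX/setY)
# are assumed to mirror the getX/getY accessors used above.
#
#   >>> mesh = Mesh()
#   >>> mesh.dimension = [2, 2]
#   >>> mesh.lower_left = [-1., -1.]
#   >>> mesh.upper_right = [1., 1.]
#   >>> mesh.width = [1., 1.]
#   >>> point = openmoc.Point()
#   >>> point.setX(0.5)
#   >>> point.setY(-0.5)
#   >>> mesh.get_mesh_cell_indices(point)
#   (1, 0)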
def get_openmoc_cell(opencg_cell):
    """Return an OpenMOC cell corresponding to an OpenCG cell.

    Parameters
    ----------
    opencg_cell : opencg.Cell
        OpenCG cell

    Returns
    -------
    openmoc_cell : openmoc.Cell
        Equivalent OpenMOC cell

    """

    cv.check_type('opencg_cell', opencg_cell, opencg.Cell)

    global OPENMOC_CELLS
    cell_id = opencg_cell.id

    # If this Cell was already created, use it
    if cell_id in OPENMOC_CELLS:
        return OPENMOC_CELLS[cell_id]

    # Create an OpenMOC Cell to represent this OpenCG Cell
    name = str(opencg_cell.name)
    openmoc_cell = openmoc.Cell(cell_id, name)

    fill = opencg_cell.fill
    if opencg_cell.type == 'universe':
        openmoc_cell.setFill(get_openmoc_universe(fill))
    elif opencg_cell.type == 'lattice':
        openmoc_cell.setFill(get_openmoc_lattice(fill))
    else:
        openmoc_cell.setFill(get_openmoc_material(fill))

    if opencg_cell.rotation is not None:
        rotation = np.asarray(opencg_cell.rotation, dtype=np.float64)
        openmoc_cell.setRotation(rotation)
    if opencg_cell.translation is not None:
        translation = np.asarray(opencg_cell.translation, dtype=np.float64)
        openmoc_cell.setTranslation(translation)

    surfaces = opencg_cell.surfaces
    for surface_id in surfaces:
        surface = surfaces[surface_id][0]
        halfspace = int(surfaces[surface_id][1])
        openmoc_cell.addSurface(halfspace, get_openmoc_surface(surface))

    # Add the OpenMOC Cell to the global collection of all OpenMOC Cells
    OPENMOC_CELLS[cell_id] = openmoc_cell

    # Add the OpenCG Cell to the global collection of all OpenCG Cells
    OPENCG_CELLS[cell_id] = opencg_cell

    return openmoc_cell
def tally_fission_rates(self, solver, volume='integrated', nu=False):
    """Compute the fission rates in each mesh cell.

    NOTE: This method assumes that the mesh perfectly aligns with the flat
    source region mesh used in the OpenMOC calculation.

    NOTE: The user must supply 'fission' as well as 'nu-fission' multi-group
    cross sections to each material in the geometry. Although 'nu-fission'
    is all that is required for an MOC calculation, 'fission' is what is
    used to compute the fission rates.

    Parameters
    ----------
    solver : {openmoc.CPUSolver, openmoc.GPUSolver, openmoc.VectorizedSolver}
        The solver used to compute the flux
    volume : {'averaged', 'integrated'}
        Compute volume-averaged or volume-integrated fission rates
    nu : bool
        Find 'nu-fission' rates instead of 'fission' rates

    Returns
    -------
    tally : numpy.ndarray of Real
        A NumPy array of the fission rates tallied in each mesh cell

    """

    global solver_types
    cv.check_type('solver', solver, solver_types)
    cv.check_value('volume', volume, ('averaged', 'integrated'))

    geometry = solver.getGeometry()
    num_fsrs = int(geometry.getNumTotalFSRs())

    # Compute the volume- and energy-integrated fission rates for each FSR
    fission_rates = solver.computeFSRFissionRates(num_fsrs, nu)

    # Initialize a 2D or 3D NumPy array in which to tally
    tally = np.zeros(tuple(self.dimension), dtype=float)

    # Tally the fission rates in each FSR to the corresponding mesh cell
    for fsr in range(num_fsrs):
        point = geometry.getFSRPoint(fsr)
        mesh_indices = self.get_mesh_cell_indices(point)

        # Skip FSRs whose centroid falls outside of the mesh
        if not np.any(np.isnan(mesh_indices)):
            tally[mesh_indices] += fission_rates[fsr]

    # Average the fission rates by mesh cell volume if needed
    if volume == 'averaged':
        tally /= self.mesh_cell_volume

    return tally
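# Illustrative usage sketch: tallying pin-wise fission rates on a mesh built
# from an assembly lattice after the solver has converged. The `solver` and
# `assembly_lattice` objects are hypothetical and assumed to come from an
# earlier OpenMOC setup.
#
#   >>> mesh = Mesh.from_lattice(assembly_lattice)
#   >>> rates = mesh.tally_fission_rates(solver, volume='integrated')
#   >>> nu_rates = mesh.tally_fission_rates(solver, nu=True)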
def make_opencg_cells_compatible(opencg_universe): """Make all cells in an OpenCG universe compatible with OpenMOC. Parameters ---------- opencg_universe : opencg.Universe Universe to check """ if isinstance(opencg_universe, opencg.Lattice): return cv.check_type('opencg_universe', opencg_universe, opencg.Universe) # Check all OpenCG Cells in this Universe for compatibility with OpenMOC opencg_cells = opencg_universe.cells for cell_id, opencg_cell in opencg_cells.items(): # Check each of the OpenCG Surfaces for OpenMOC compatibility surfaces = opencg_cell.surfaces for surface_id in surfaces: surface = surfaces[surface_id][0] halfspace = surfaces[surface_id][1] # If this Surface is not compatible with OpenMOC, create compatible # OpenCG cells with a compatible version of this OpenCG Surface if not is_opencg_surface_compatible(surface): # Get the one or more OpenCG Cells compatible with OpenMOC # NOTE: This does not necessarily make OpenCG fully compatible. # It only removes the incompatible Surface and replaces it with # compatible OpenCG Surface(s). The recursive call at the end # of this block is necessary in the event that there are more # incompatible Surfaces in this Cell that are not accounted for. cells = \ get_compatible_opencg_cells(opencg_cell, surface, halfspace) # Remove the non-compatible OpenCG Cell from the Universe opencg_universe.remove_cell(opencg_cell) # Add the compatible OpenCG Cells to the Universe opencg_universe.add_cells(cells) # Make recursive call to look at the updated state of the # OpenCG Universe and return return make_opencg_cells_compatible(opencg_universe) # If all OpenCG Cells in the OpenCG Universe are compatible, return return
def __init__(self, moc_solver): """Initialize an IRAMSolver. Parameters ---------- moc_solver : openmoc.Solver The OpenMOC solver to use in the eigenmode calculation """ cv.check_type('moc_solver', moc_solver, openmoc.Solver) self._moc_solver = moc_solver # Determine the floating point precision for Solver if self._moc_solver.isUsingDoublePrecision(): self._precision = np.float64 else: self._precision = np.float32 # Determine if the user passed in a CUDA-enabled GPUSolver if 'GPUSolver' in type(moc_solver).__name__: self._with_cuda = True else: self._with_cuda = False # Allow solver to compute negative fluxes self._moc_solver.allowNegativeFluxes(True) # Compute the size of the LinearOperators used in the eigenvalue problem geometry = self._moc_solver.getGeometry() num_FSRs = geometry.getNumFSRs() num_groups = geometry.getNumEnergyGroups() self._op_size = num_FSRs * num_groups # Initialize solution-dependent class attributes to None self._num_modes = None self._interval = None self._outer_tol = None self._inner_tol = None self._A_op = None self._M_op = None self._F_op = None self._a_count = None self._m_count = None self._eigenvalues = None self._eigenvectors = None
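# Illustrative usage sketch: wrapping a CPUSolver in an IRAMSolver for
# eigenmode calculations. The `track_generator` object is hypothetical; the
# subsequent call that actually computes the eigenmodes belongs to this class
# but is not shown here since its signature may vary.
#
#   >>> moc_solver = openmoc.CPUSolver(track_generator)
#   >>> iram_solver = IRAMSolver(moc_solver)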
def get_openmoc_universe(opencg_universe): """Return an OpenMOC universe corresponding to an OpenCG universe. Parameters ---------- opencg_universe : opencg.Universe OpenCG universe Returns ------- openmoc_universe : openmoc.Universe Equivalent OpenMOC universe """ cv.check_type('opencg_universe', opencg_universe, opencg.Universe) global OPENMOC_UNIVERSES universe_id = opencg_universe.id # If this Universe was already created, use it if universe_id in OPENMOC_UNIVERSES: return OPENMOC_UNIVERSES[universe_id] # Make all OpenCG Cells and Surfaces in this Universe compatible with OpenMOC make_opencg_cells_compatible(opencg_universe) # Create an OpenMOC Universe to represent this OpenCG Universe name = str(opencg_universe.name) openmoc_universe = openmoc.Universe(universe_id, name) # Convert all OpenCG Cells in this Universe to OpenMOC Cells opencg_cells = opencg_universe.cells for cell_id, opencg_cell in opencg_cells.items(): openmoc_cell = get_openmoc_cell(opencg_cell) openmoc_universe.addCell(openmoc_cell) # Add the OpenMOC Universe to the global collection of all OpenMOC Universes OPENMOC_UNIVERSES[universe_id] = openmoc_universe # Add the OpenCG Universe to the global collection of all OpenCG Universes OPENCG_UNIVERSES[universe_id] = opencg_universe return openmoc_universe
def get_subdivided_universe(self, target):
    """Subdivide a universe along the specified mesh.

    Parameters
    ----------
    target : openmoc.Universe
        The target universe to subdivide

    Returns
    -------
    openmoc.Universe
        A universe filled with a cell containing `target` discretized along
        the Subdivider

    """

    cv.check_type("target", target, openmoc.Universe)

    self.setWidth(*self.deltas)

    if self.ndim == 2:
        return self._subdivide2d(target)
    else:
        return self._subdivide3d(target)
def from_lattice(cls, lattice, division=1): """Create a mesh from an existing lattice Parameters ---------- lattice : openmoc.Lattice Uniform rectangular lattice used as a template for this mesh. division : int Number of mesh cells per lattice cell. If not specified, there will be 1 mesh cell per lattice cell. Returns ------- openmoc.process.Mesh Mesh instance """ cv.check_type("lattice", lattice, openmoc.Lattice) cv.check_type("division", division, Integral) if lattice.getNonUniform(): raise ValueError("Lattice must be uniform.") shape = np.array((lattice.getNumX(), lattice.getNumY(), lattice.getNumZ())) width = np.array((lattice.getWidthX(), lattice.getWidthY(), lattice.getWidthZ())) lleft = np.array((lattice.getMinX(), lattice.getMinY(), lattice.getMinZ())) uright = lleft + shape*width uright[np.isinf(width)] = np.inf mesh = cls() mesh.width = width mesh.lower_left = lleft mesh.upper_right = uright mesh.dimension = [s*division for s in shape] return mesh
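# Illustrative usage sketch: building a pin-cell mesh from a uniform 17x17
# assembly lattice, with each lattice cell subdivided 2x2 (the `assembly`
# lattice object is hypothetical).
#
#   >>> mesh = Mesh.from_lattice(assembly, division=2)
#   >>> mesh.dimension      # [34, 34, 2] for a 17x17x1 lattice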
def get_opencg_geometry(openmoc_geometry): """Return an OpenCG geometry corresponding to an OpenMOC geometry. Parameters ---------- openmoc_geometry : openmoc.Geometry OpenMOC geometry Returns ------- opencg_geometry : opencg.Geometry Equivalent OpenCG geometry """ cv.check_type('openmoc_geometry', openmoc_geometry, openmoc.Geometry) # Clear dictionaries and auto-generated IDs OPENMOC_MATERIALS.clear() OPENCG_MATERIALS.clear() OPENMOC_SURFACES.clear() OPENCG_SURFACES.clear() OPENMOC_CELLS.clear() OPENCG_CELLS.clear() OPENMOC_UNIVERSES.clear() OPENCG_UNIVERSES.clear() OPENMOC_LATTICES.clear() OPENCG_LATTICES.clear() openmoc_root_universe = openmoc_geometry.getRootUniverse() opencg_root_universe = get_opencg_universe(openmoc_root_universe) opencg_geometry = opencg.Geometry() opencg_geometry.root_universe = opencg_root_universe opencg_geometry.initialize_cell_offsets() return opencg_geometry
def upper_right(self, upper_right): cv.check_type('mesh upper_right', upper_right, Iterable, Real) cv.check_length('mesh upper_right', upper_right, 2, 3) self._upper_right = upper_right
def restore_simulation_state(filename='simulation-state.h5',
                             directory='simulation-states'):
    """Restore all of the data for an OpenMOC simulation from a binary file
    for downstream data processing to a Python dictionary.

    This routine may import the simulation state from either an HDF5 or a
    Python pickle binary file created by the store_simulation_state(...)
    method. The method may be used to restore the following information:

        * type of Solver used
        * floating point precision
        * exponential evaluation method
        * number of FSRs
        * number of materials
        * number of energy groups
        * number of azimuthal angles
        * number of polar angles
        * track spacing
        * number of tracks
        * number of track segments
        * number of source iterations
        * source convergence tolerance
        * converged $k_{eff}$
        * total runtime [seconds]
        * number of OpenMP or CUDA threads

    Note: If the fission rates were stored in an HDF5 binary file, they are
    not restored and returned by this method.

    Parameters
    ----------
    filename : str
        The simulation state filename string
    directory : str
        The directory where to find the simulation state file

    Returns
    -------
    states : dict
        The dictionary of key/value pairs for simulation state data

    Examples
    --------
    This method may be called from Python as follows:

        >>> restore_simulation_state(filename='simulation-state-v1.3.h5')

    See Also
    --------
    store_simulation_state(...)

    """

    cv.check_type('filename', filename, basestring)
    cv.check_type('directory', directory, basestring)

    filename = directory + '/' + filename

    if not os.path.isfile(filename):
        py_printf('ERROR', 'Unable to restore simulation state since "%s" '
                  'is not an existing simulation state file', filename)

    # If using HDF5
    if '.h5' in filename or '.hdf5' in filename:

        import h5py

        # Create a file handle
        f = h5py.File(filename, 'r')

        states = {}

        # Loop over all simulation state timestamps by day
        for day in f.keys():

            # Create sub-dictionary for this day
            states[day] = {}

            # Loop over all simulation state timestamps by time of day
            for time in f[day]:

                # Create sub-dictionary for this simulation state
                dataset = f[day][time]
                states[day][time] = {}
                state = states[day][time]

                # Extract simulation state data
                solver_type = str(dataset['solver type'][...])
                num_FSRs = int(dataset['# FSRs'][...])
                num_materials = int(dataset['# materials'][...])
                num_groups = int(dataset['# energy groups'][...])
                num_tracks = int(dataset['# tracks'][...])
                num_segments = int(dataset['# segments'][...])
                spacing = float(dataset['track spacing [cm]'][...])
                num_azim = int(dataset['# azimuthal angles'][...])
                num_polar = int(dataset['# polar angles'][...])
                num_iters = int(dataset['# iterations'][...])
                thresh = float(dataset['convergence threshold'][...])
                method = str(dataset['exponential'][...])
                precision = str(dataset['floating point'][...])
                cmfd = bool(dataset['CMFD'][...])
                runtime = float(dataset['time [sec]'][...])
                keff = float(dataset['keff'][...])

                # Store simulation state data in sub-dictionary
                state['solver type'] = solver_type
                state['# FSRs'] = num_FSRs
                state['# materials'] = num_materials
                state['# energy groups'] = num_groups
                state['# tracks'] = num_tracks
                state['# segments'] = num_segments
                state['track spacing [cm]'] = spacing
                state['# azimuthal angles'] = num_azim
                state['# polar angles'] = num_polar
                state['# iterations'] = num_iters
                state['convergence threshold'] = thresh
                state['exponential'] = method
                state['floating point'] = precision
                state['CMFD'] = cmfd
                state['time [sec]'] = runtime
                state['keff'] = keff

                if solver_type == 'GPUSolver':
                    state['# threads per block'] = \
                        int(dataset['# threads per block'][...])
                    state['# thread blocks'] = \
                        int(dataset['# thread blocks'][...])
                else:
                    state['# threads'] = int(dataset['# threads'][...])

                if 'FSR scalar fluxes' in dataset:
                    state['FSR scalar fluxes'] = \
                        dataset['FSR scalar fluxes'][...]

                if 'FSR sources' in dataset:
                    state['FSR sources'] = dataset['FSR sources'][...]

                if 'note' in dataset.attrs:
                    state['note'] = str(dataset.attrs['note'])

                if 'fission-rates' in dataset:
                    py_printf('WARNING', 'The restore_simulation_state(...) '
                              'method does not yet support fission rates')

        return states

    # If using a Python pickled file
    elif '.pkl' in filename:
        states = pickle.load(open(filename, 'rb'))
        return states

    # If file does not have a recognizable extension
    else:
        py_printf('WARNING', 'Unable to restore the simulation states file '
                  '%s since it does not have a supported file extension. '
                  'Only *.h5, *.hdf5, and *.pkl files are supported', filename)
        return {}
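# Illustrative usage sketch: restoring stored states and printing the
# converged eigenvalue for each timestamped entry.
#
#   >>> states = restore_simulation_state(filename='simulation-state.h5')
#   >>> for day in states:
#   ...     for timestamp, state in states[day].items():
#   ...         print(timestamp, state['keff'])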
def dimension(self, dimension): cv.check_type('mesh dimension', dimension, Iterable, Integral) cv.check_length('mesh dimension', dimension, 2, 3) self._dimension = dimension
def get_openmoc_geometry(opencg_geometry):
    """Return an OpenMOC geometry corresponding to an OpenCG geometry.

    Parameters
    ----------
    opencg_geometry : opencg.Geometry
        OpenCG geometry

    Returns
    -------
    openmoc_geometry : openmoc.Geometry
        Equivalent OpenMOC geometry

    """

    cv.check_type('opencg_geometry', opencg_geometry, opencg.Geometry)

    # Deep copy the geometry since it may be modified to make all Surfaces
    # compatible with OpenMOC's specifications
    opencg_geometry.assign_auto_ids()
    opencg_geometry = copy.deepcopy(opencg_geometry)

    # Update Cell bounding boxes in Geometry
    opencg_geometry.update_bounding_boxes()

    # Clear dictionaries and auto-generated IDs
    OPENMOC_MATERIALS.clear()
    OPENCG_MATERIALS.clear()
    OPENMOC_SURFACES.clear()
    OPENCG_SURFACES.clear()
    OPENMOC_CELLS.clear()
    OPENCG_CELLS.clear()
    OPENMOC_UNIVERSES.clear()
    OPENCG_UNIVERSES.clear()
    OPENMOC_LATTICES.clear()
    OPENCG_LATTICES.clear()

    # Make the entire geometry "compatible" before assigning auto IDs
    universes = opencg_geometry.get_all_universes()
    for universe_id, universe in universes.items():
        make_opencg_cells_compatible(universe)

    opencg_geometry.assign_auto_ids()

    opencg_root_universe = opencg_geometry.root_universe
    openmoc_root_universe = get_openmoc_universe(opencg_root_universe)

    openmoc_geometry = openmoc.Geometry()
    openmoc_geometry.setRootUniverse(openmoc_root_universe)

    # Update OpenMOC's auto-generated object IDs (e.g., Surface, Material)
    # with the maximum of those created from the OpenCG objects
    all_materials = openmoc_geometry.getAllMaterials()
    all_surfaces = openmoc_geometry.getAllSurfaces()
    all_cells = openmoc_geometry.getAllCells()
    all_universes = openmoc_geometry.getAllUniverses()

    max_material_id = max(all_materials.keys())
    max_surface_id = max(all_surfaces.keys())
    max_cell_id = max(all_cells.keys())
    max_universe_id = max(all_universes.keys())

    openmoc.maximize_material_id(max_material_id + 1)
    openmoc.maximize_surface_id(max_surface_id + 1)
    openmoc.maximize_cell_id(max_cell_id + 1)
    openmoc.maximize_universe_id(max_universe_id + 1)

    return openmoc_geometry
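# Illustrative usage sketch: converting an OpenCG geometry to OpenMOC before
# ray tracing. The `opencg_geometry` object is assumed to have been built
# elsewhere; initializeFlatSourceRegions() is a typical next step in an
# OpenMOC workflow, though the exact setup sequence depends on the problem.
#
#   >>> openmoc_geometry = get_openmoc_geometry(opencg_geometry)
#   >>> openmoc_geometry.initializeFlatSourceRegions()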
def store_simulation_state(solver, fluxes=False, sources=False,
                           fission_rates=False, use_hdf5=False,
                           filename='simulation-state',
                           directory='simulation-states',
                           append=True, note=''):
    """Store all of the data for an OpenMOC simulation to a binary file for
    downstream data processing.

    This routine may be used to store the following:

        * type of Solver used
        * floating point precision
        * exponential evaluation method
        * number of FSRs
        * number of materials
        * number of energy groups
        * number of azimuthal angles
        * number of polar angles
        * track spacing
        * number of tracks
        * number of track segments
        * number of source iterations
        * source convergence tolerance
        * converged $k_{eff}$
        * total runtime [seconds]
        * number of OpenMP or CUDA threads

    In addition, the routine can optionally store the FSR scalar fluxes, FSR
    sources, and pin and assembly fission rates.

    The routine may export the simulation data to either an HDF5 or a Python
    pickle binary file. Users may tell the routine to either create a new
    binary output file, or append to an existing file using a timestamp to
    record multiple simulation states to the same file.

    Parameters
    ----------
    solver : openmoc.Solver
        The solver used to compute the flux
    fluxes : bool
        Whether to store FSR scalar fluxes (False by default)
    sources : bool
        Whether to store FSR sources (False by default)
    fission_rates : bool
        Whether to store fission rates (False by default)
    use_hdf5 : bool
        Whether to export to HDF5 (False by default) or a Python pickle file
    filename : str
        The filename to use (default is 'simulation-state')
    directory : str
        The directory to use (default is 'simulation-states')
    append : bool
        Append to an existing file or create a new one (True by default)
    note : str, optional
        An optional string note to include in state file

    Examples
    --------
    This routine may be called from Python as follows:

        >>> store_simulation_state(solver, fluxes=True, sources=True,
        ...                        fission_rates=True, use_hdf5=True)

    See Also
    --------
    restore_simulation_state(...)

    """

    global solver_types
    cv.check_type('solver', solver, solver_types)
    cv.check_type('fluxes', fluxes, bool)
    cv.check_type('sources', sources, bool)
    cv.check_type('fission_rates', fission_rates, bool)
    cv.check_type('use_hdf5', use_hdf5, bool)
    cv.check_type('filename', filename, basestring)
    cv.check_type('directory', directory, basestring)
    cv.check_type('append', append, bool)
    cv.check_type('note', note, basestring)

    # Make directory if it does not exist
    if not os.path.exists(directory):
        os.makedirs(directory)

    # Get the day and time to construct the appropriate groups in the file
    time = datetime.datetime.now()
    year = time.year
    month = time.month
    day = time.day
    hr = time.hour
    mins = time.minute
    sec = time.second

    # Determine the Solver type
    solver_type = ''

    if 'CPUSolver' in str(solver.__class__):
        solver_type = 'CPUSolver'
    elif 'VectorizedSolver' in str(solver.__class__):
        solver_type = 'VectorizedSolver'
    elif 'GPUSolver' in str(solver.__class__):
        solver_type = 'GPUSolver'

    # Determine the floating point precision level
    if solver.isUsingDoublePrecision():
        precision = 'double'
    else:
        precision = 'single'

    # Determine whether we are using the exponential
    # linear interpolation for exponential evaluations
    if solver.isUsingExponentialInterpolation():
        method = 'linear interpolation'
    else:
        method = 'exp intrinsic'

    # Determine whether the Solver has initialized Coarse Mesh Finite
    # Difference Acceleration (CMFD)
    if solver.getGeometry().getCmfd() is not None:
        cmfd = True
    else:
        cmfd = False

    # Get the Geometry and TrackGenerator from the solver
    geometry = solver.getGeometry()
    track_generator = solver.getTrackGenerator()

    # Retrieve useful data from the Solver, Geometry and TrackGenerator
    num_FSRs = geometry.getNumFSRs()
    num_materials = geometry.getNumMaterials()
    num_groups = geometry.getNumEnergyGroups()
    zcoord = track_generator.getZCoord()
    num_tracks = track_generator.getNumTracks()
    num_segments = track_generator.getNumSegments()
    spacing = track_generator.getDesiredAzimSpacing()
    num_azim = track_generator.getNumAzim()
    num_polar = solver.getNumPolarAngles()
    num_iters = solver.getNumIterations()
    thresh = solver.getConvergenceThreshold()
    tot_time = solver.getTotalTime()
    keff = solver.getKeff()

    if solver_type == 'GPUSolver':
        num_threads = solver.getNumThreadsPerBlock()
        num_blocks = solver.getNumThreadBlocks()
    else:
        num_threads = solver.getNumThreads()

    # If the user requested to store the FSR fluxes
    if fluxes:

        # Allocate array
        scalar_fluxes = np.zeros((num_FSRs, num_groups))

        # Get the scalar flux for each FSR and energy group
        for i in range(num_FSRs):
            for j in range(num_groups):
                scalar_fluxes[i, j] = solver.getFlux(i, j + 1)

    # If the user requested to store the FSR sources
    if sources:

        # Allocate array
        sources_array = np.zeros((num_FSRs, num_groups))

        # Get the source for each FSR and energy group
        for i in range(num_FSRs):
            for j in range(num_groups):
                sources_array[i, j] = solver.getFSRSource(i, j + 1)

    # If using HDF5
    if use_hdf5:

        if append:
            f = h5py.File(directory + '/' + filename + '.h5', 'a')
        else:
            f = h5py.File(directory + '/' + filename + '.h5', 'w')

        # Create groups for the day in the HDF5 file
        day_key = '{0:02}-{1:02}-{2:02}'.format(month, day, year)
        day_group = f.require_group(day_key)

        # Create group for the time - use counter in case two simulations
        # write simulation state at the exact same hour, minute, and second
        time_key = '{0:02}:{1:02}:{2:02}'.format(hr, mins, sec)
        counter = 0
        while time_key in day_group.keys():
            time_key = '{0:02}:{1:02}:{2:02}-{3}'.format(hr, mins, sec, counter)
            counter += 1

        time_group = day_group.require_group(time_key)

        # Store a note for this simulation state
        if note:
            time_group.attrs['note'] = note

        # Store simulation data to the HDF5 file
        time_group.create_dataset('solver type', data=solver_type)
        time_group.create_dataset('# FSRs', data=num_FSRs)
        time_group.create_dataset('# materials', data=num_materials)
        time_group.create_dataset('# energy groups', data=num_groups)
        time_group.create_dataset('z coord', data=zcoord)
        time_group.create_dataset('# tracks', data=num_tracks)
        time_group.create_dataset('# segments', data=num_segments)
        time_group.create_dataset('track spacing [cm]', data=spacing)
        time_group.create_dataset('# azimuthal angles', data=num_azim)
        time_group.create_dataset('# polar angles', data=num_polar)
        time_group.create_dataset('# iterations', data=num_iters)
        time_group.create_dataset('convergence threshold', data=thresh)
        time_group.create_dataset('exponential', data=method)
        time_group.create_dataset('floating point', data=precision)
        time_group.create_dataset('CMFD', data=cmfd)
        time_group.create_dataset('time [sec]', data=tot_time)
        time_group.create_dataset('keff', data=keff)

        if solver_type == 'GPUSolver':
            time_group.create_dataset('# threads per block', data=num_threads)
            time_group.create_dataset('# thread blocks', data=num_blocks)
        else:
            time_group.create_dataset('# threads', data=num_threads)

        if fluxes:
            time_group.create_dataset('FSR scalar fluxes', data=scalar_fluxes)

        if sources:
            time_group.create_dataset('FSR sources', data=sources_array)

        if fission_rates:
            compute_fission_rates(solver, use_hdf5=True)
            fission_rates_file = h5py.File('fission-rates/fission-rates.h5', 'r')
            f.copy(fission_rates_file, time_group, name='fission-rates')
            fission_rates_file.close()

        # Close the HDF5 file
        f.close()

    # If not using HDF5, we are pickling all of the data
    else:

        filename = directory + '/' + filename + '.pkl'
        if os.path.exists(filename) and append:
            sim_states = pickle.load(open(filename, 'rb'))
        else:
            sim_states = {}

        # Create strings for the day and time
        day = str(month).zfill(2) + '-' + str(day).zfill(2) + '-' + str(year)
        time = str(hr).zfill(2) + ':' + str(mins).zfill(2) + ':' + \
               str(sec).zfill(2)

        # Create dictionaries for this day and time within the pickled file
        if day not in sim_states.keys():
            sim_states[day] = {}

        sim_states[day][time] = {}
        state = sim_states[day][time]

        # Store a note for this simulation state
        if note:
            state['note'] = note

        # Store simulation data to a Python dictionary
        state['solver type'] = solver_type
        state['# FSRs'] = num_FSRs
        state['# materials'] = num_materials
        state['# energy groups'] = num_groups
        state['z coord'] = zcoord
        state['# tracks'] = num_tracks
        state['# segments'] = num_segments
        state['track spacing [cm]'] = spacing
        state['# azimuthal angles'] = num_azim
        state['# polar angles'] = num_polar
        state['# iterations'] = num_iters
        state['convergence threshold'] = thresh
        state['exponential'] = method
        state['floating point'] = precision
        state['CMFD'] = cmfd
        state['time [sec]'] = tot_time
        state['keff'] = keff

        if solver_type == 'GPUSolver':
            state['# threads per block'] = num_threads
            state['# thread blocks'] = num_blocks
        else:
            state['# threads'] = num_threads

        if fluxes:
            state['FSR scalar fluxes'] = scalar_fluxes

        if sources:
            state['FSR sources'] = sources_array

        if fission_rates:
            compute_fission_rates(solver, False)
            state['fission-rates'] = \
                pickle.load(open('fission-rates/fission-rates.pkl', 'rb'))

        # Pickle the simulation states to a file
        pickle.dump(sim_states, open(filename, 'wb'))
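# Illustrative usage sketch: storing a converged state to HDF5 with a short
# note, appending to any states already recorded in the same file.
#
#   >>> store_simulation_state(solver, fluxes=True, use_hdf5=True,
#   ...                        note='4x4 lattice, 32 azimuthal angles')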
def get_scalar_fluxes(solver, fsrs='all', groups='all'):
    """Return an array of scalar fluxes in one or more FSRs and groups.

    This routine builds a 2D NumPy array indexed by FSR and energy group for
    the corresponding scalar fluxes. The fluxes are organized in the array in
    order of increasing FSR and energy group if 'all' FSRs or energy groups
    are requested (the default). If the user requests fluxes for specific
    FSRs or energy groups, then the fluxes are returned in the order in which
    the FSRs and groups are enumerated in the associated parameters.

    Parameters
    ----------
    solver : openmoc.Solver
        The solver used to compute the flux
    fsrs : Iterable of Integral or 'all'
        A collection of integer FSR IDs or 'all' (default)
    groups : Iterable of Integral or 'all'
        A collection of integer energy groups or 'all' (default)

    Returns
    -------
    fluxes : ndarray
        The scalar fluxes indexed by FSR ID and energy group. Note that the
        energy group index starts at 0 rather than 1 for the highest energy
        in accordance with Python's 0-based indexing.

    """

    cv.check_type('solver', solver, openmoc.Solver)

    if isinstance(fsrs, basestring):
        cv.check_value('fsrs', fsrs, 'all')
    else:
        cv.check_type('fsrs', fsrs, Iterable, Integral)

    if isinstance(groups, basestring):
        cv.check_value('groups', groups, 'all')
    else:
        cv.check_type('groups', groups, Iterable, Integral)

    # Build a list of FSRs to iterate over
    if fsrs == 'all':
        num_fsrs = solver.getGeometry().getNumFSRs()
        fsrs = np.arange(num_fsrs)
    else:
        num_fsrs = len(fsrs)

    # Build a list of energy groups to iterate over
    if groups == 'all':
        num_groups = solver.getGeometry().getNumEnergyGroups()
        groups = np.arange(num_groups) + 1
    else:
        num_groups = len(groups)

    # Extract the FSR scalar fluxes in the order the FSRs/groups were given
    fluxes = np.zeros((num_fsrs, num_groups))
    for i, fsr in enumerate(fsrs):
        for j, group in enumerate(groups):
            fluxes[i, j] = solver.getFlux(int(fsr), int(group))

    return fluxes
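# Illustrative usage sketch: extracting the full flux array and a small subset
# of FSRs in the two highest energy groups (hypothetical `solver` object).
#
#   >>> fluxes = get_scalar_fluxes(solver)                     # all FSRs/groups
#   >>> subset = get_scalar_fluxes(solver, fsrs=[0, 1, 2], groups=[1, 2])
#   >>> subset.shape
#   (3, 2)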
def tally_on_mesh(self, solver, domains_to_coeffs, domain_type='fsr',
                  volume='integrated', energy='integrated'):
    """Compute arbitrary reaction rates in each mesh cell.

    NOTE: This method assumes that the mesh perfectly aligns with the flat
    source region mesh used in the OpenMOC calculation.

    Parameters
    ----------
    solver : {openmoc.CPUSolver, openmoc.GPUSolver, openmoc.VectorizedSolver}
        The solver used to compute the flux
    domains_to_coeffs : dict or numpy.ndarray of Real
        A mapping of spatial domains and energy groups to the coefficients
        to multiply the flux in each domain. If domain_type is 'material'
        or 'cell' then the coefficients must be a Python dictionary indexed
        by material/cell ID mapped to NumPy arrays indexed by energy group.
        If domain_type is 'fsr' then the coefficients may be a dictionary
        or NumPy array indexed by FSR ID and energy group. Note that the
        energy group indexing should start at 0 rather than 1 for the
        highest energy in accordance with Python's 0-based indexing.
    domain_type : {'fsr', 'cell', 'material'}
        The type of domain for which the coefficients are defined
    volume : {'averaged', 'integrated'}
        Compute volume-averaged or volume-integrated tallies
    energy : {'by_group', 'integrated'}
        Compute tallies by energy group or integrate across groups

    Returns
    -------
    tally : numpy.ndarray of Real
        A NumPy array of the reaction rates tallied in each mesh cell, with
        a trailing energy group axis if energy is 'by_group'

    """

    cv.check_type('solver', solver, openmoc.Solver)
    cv.check_value('domain_type', domain_type, ('fsr', 'cell', 'material'))
    cv.check_value('volume', volume, ('averaged', 'integrated'))
    cv.check_value('energy', energy, ('by_group', 'integrated'))

    # Extract parameters from the Geometry
    geometry = solver.getGeometry()
    num_groups = geometry.getNumEnergyGroups()
    num_fsrs = geometry.getNumFSRs()

    # Coefficients must be specified as a dict, ndarray or DataFrame
    if domain_type in ['material', 'cell']:
        cv.check_type('domains_to_coeffs', domains_to_coeffs, dict)
    else:
        cv.check_type('domains_to_coeffs',
                      domains_to_coeffs, (dict, np.ndarray))

    # Extract the FSR fluxes from the Solver
    fluxes = get_scalar_fluxes(solver)

    # Initialize a 2D or 3D NumPy array in which to tally
    tally_shape = tuple(self.dimension) + (num_groups,)
    tally = np.zeros(tally_shape, dtype=float)

    # Compute product of fluxes with domains-to-coeffs mapping by group, FSR
    for fsr in range(num_fsrs):
        point = geometry.getFSRPoint(fsr)
        mesh_indices = self.get_mesh_cell_indices(point)

        # Skip FSRs whose centroid falls outside of the mesh
        if np.any(np.isnan(mesh_indices)):
            continue

        # Note: the FSR volume must not shadow the 'volume' parameter
        fsr_volume = solver.getFSRVolume(fsr)
        fsr_tally = np.zeros(num_groups, dtype=float)

        # Determine domain ID (material, cell or FSR) for this FSR
        if domain_type == 'fsr':
            domain_id = fsr
        else:
            coords = \
                openmoc.LocalCoords(point.getX(), point.getY(), point.getZ())
            coords.setUniverse(geometry.getRootUniverse())
            cell = geometry.findCellContainingCoords(coords)

            if domain_type == 'cell':
                domain_id = cell.getId()
            else:
                domain_id = cell.getFillMaterial().getId()

        # Tally flux multiplied by coefficients by energy group
        for group in range(num_groups):
            fsr_tally[group] = \
                fluxes[fsr, group] * domains_to_coeffs[domain_id][group]

        # Increment mesh tally with volume-integrated FSR tally
        tally[mesh_indices] += fsr_tally * fsr_volume

    # Integrate the energy groups if needed
    if energy == 'integrated':
        tally = np.sum(tally, axis=len(self.dimension))

    # Average the reaction rates by mesh cell volume if needed
    if volume == 'averaged':
        tally /= self.mesh_cell_volume

    return tally
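# Illustrative usage sketch: tallying a volume-integrated reaction rate with
# material-wise group coefficients. The `fuel` and `water` materials and their
# 2-group coefficients are hypothetical.
#
#   >>> domains_to_coeffs = {fuel.getId():  np.array([0.010, 0.110]),
#   ...                      water.getId(): np.array([0.001, 0.020])}
#   >>> tally = mesh.tally_on_mesh(solver, domains_to_coeffs,
#   ...                            domain_type='material',
#   ...                            volume='integrated', energy='integrated')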
def get_compatible_opencg_cells(opencg_cell, opencg_surface, halfspace): """Generate OpenCG cells that are compatible with OpenMOC equivalent to an OpenCG cell that is not compatible. Parameters ---------- opencg_cell : opencg.Cell OpenCG cell opencg_surface : opencg.Surface OpenCG surface that causes the incompatibility, e.g. an instance of XSquarePrism halfspace : {-1, 1} Which halfspace defined by the surface is contained in the cell Returns ------- compatible_cells : list of opencg.Cell Collection of cells equivalent to the original one but compatible with OpenMC """ cv.check_type('opencg_cell', opencg_cell, opencg.Cell) cv.check_type('opencg_surface', opencg_surface, opencg.Surface) cv.check_value('halfspace', halfspace, (-1, +1)) # Initialize an empty list for the new compatible cells compatible_cells = list() # SquarePrism Surfaces if opencg_surface.type in ['x-squareprism', 'y-squareprism', 'z-squareprism']: # Get the compatible Surfaces (XPlanes and YPlanes) compatible_surfaces = get_compatible_opencg_surfaces(opencg_surface) opencg_cell.remove_surface(opencg_surface) # If Cell is inside SquarePrism, add "inside" of Surface halfspaces if halfspace == -1: opencg_cell.add_surface(compatible_surfaces[0], +1) opencg_cell.add_surface(compatible_surfaces[1], -1) opencg_cell.add_surface(compatible_surfaces[2], +1) opencg_cell.add_surface(compatible_surfaces[3], -1) compatible_cells.append(opencg_cell) # If Cell is outside the SquarePrism (positive halfspace), add "outside" # of Surface halfspaces. Since OpenMOC does not have a SquarePrism # Surface, individual Cells are created for the 8 Cells that make up the # outer region of a SquarePrism. # | | # 0 | 1 | 2 # ______|____________________|______ # | SquarePrism | # 7 | (-) halfspace | 3 # ______|____________________|______ # | | # 6 | 5 | 4 # | | else: # Create 8 Cell clones to represent each of the disjoint planar # Surface halfspace intersections num_clones = 8 for clone_id in range(num_clones): # Create a cloned OpenCG Cell with Surfaces compatible with OpenMOC clone = opencg_cell.clone() compatible_cells.append(clone) # Top left subcell (subcell 0) if clone_id == 0: clone.add_surface(compatible_surfaces[0], -1) clone.add_surface(compatible_surfaces[3], +1) # Top center subcell (subcell 1) elif clone_id == 1: clone.add_surface(compatible_surfaces[0], +1) clone.add_surface(compatible_surfaces[1], -1) clone.add_surface(compatible_surfaces[3], +1) # Top right subcell (subcell 2) elif clone_id == 2: clone.add_surface(compatible_surfaces[1], +1) clone.add_surface(compatible_surfaces[3], +1) # Right center subcell (subcell 3) elif clone_id == 3: clone.add_surface(compatible_surfaces[1], +1) clone.add_surface(compatible_surfaces[3], -1) clone.add_surface(compatible_surfaces[2], +1) # Bottom right subcell (subcell 4) elif clone_id == 4: clone.add_surface(compatible_surfaces[1], +1) clone.add_surface(compatible_surfaces[2], -1) # Bottom center subcell (subcell 5) elif clone_id == 5: clone.add_surface(compatible_surfaces[0], +1) clone.add_surface(compatible_surfaces[1], -1) clone.add_surface(compatible_surfaces[2], -1) # Bottom left subcell (subcell 6) elif clone_id == 6: clone.add_surface(compatible_surfaces[0], -1) clone.add_surface(compatible_surfaces[2], -1) # Left center subcell (subcell 7) elif clone_id == 7: clone.add_surface(compatible_surfaces[0], -1) clone.add_surface(compatible_surfaces[3], -1) clone.add_surface(compatible_surfaces[2], +1) # Remove redundant Surfaces from the Cells for cell in compatible_cells: 
cell.remove_redundant_surfaces() # Return the list of compatible OpenCG Cells return compatible_cells
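# Illustrative sketch (not part of the original API): shows how a cell bounded
# by the outside (+1 halfspace) of a square prism decomposes into the eight
# planar-halfspace subcells diagrammed in the function above. The
# opencg.ZSquarePrism and opencg.Cell keyword arguments below are assumptions
# about the OpenCG API; Cell.add_surface and the +/-1 halfspace convention are
# taken from the code above.
def _example_split_outside_prism_cell():
    prism = opencg.ZSquarePrism(x0=0.0, y0=0.0, r=0.63)
    moderator = opencg.Cell(name='moderator')
    moderator.add_surface(prism, +1)    # region outside the prism

    # One incompatible cell in, eight OpenMOC-compatible cells out
    new_cells = get_compatible_opencg_cells(moderator, prism, halfspace=+1)
    return new_cells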
def get_opencg_lattice(openmoc_lattice): """Return an OpenCG lattice corresponding to an OpenMOC lattice. Parameters ---------- openmoc_lattice : openmoc.Lattice OpenMOC lattice Returns ------- opencg_lattice : opencg.Lattice Equivalent OpenCG lattice """ cv.check_type('openmoc_lattice', openmoc_lattice, openmoc.Lattice) global OPENCG_LATTICES lattice_id = openmoc_lattice.getId() # If this Lattice was already created, use it if lattice_id in OPENCG_LATTICES: return OPENCG_LATTICES[lattice_id] # Create an OpenCG Lattice to represent this OpenMOC Lattice name = openmoc_lattice.getName() offset = openmoc_lattice.getOffset() dimension = [1, openmoc_lattice.getNumY(), openmoc_lattice.getNumX()] width = [1, openmoc_lattice.getWidthY(), openmoc_lattice.getWidthX()] lower_left = [-np.inf, width[1]*dimension[1]/2. + offset.getX(), width[2]*dimension[2] / 2. + offset.getY()] # Initialize an empty array for the OpenCG nested Universes in this Lattice universe_array = np.ndarray(tuple(np.array(dimension)[::-1]), \ dtype=opencg.Universe) # Create OpenCG Universes for each unique nested Universe in this Lattice unique_universes = openmoc_lattice.getUniqueUniverses() for universe_id, universe in unique_universes.items(): unique_universes[universe_id] = get_opencg_universe(universe) # Build the nested Universe array for y in range(dimension[1]): for x in range(dimension[0]): universe = openmoc_lattice.getUniverse(x, y) universe_id = universe.getId() universe_array[0][y][x] = unique_universes[universe_id] opencg_lattice = opencg.Lattice(lattice_id, name) opencg_lattice.dimension = dimension opencg_lattice.width = width opencg_lattice.universes = universe_array offset = np.array(lower_left, dtype=np.float64) - \ ((np.array(width, dtype=np.float64) * \ np.array(dimension, dtype=np.float64))) / -2.0 opencg_lattice.offset = offset # Add the OpenMOC Lattice to the global collection of all OpenMOC Lattices OPENMOC_LATTICES[lattice_id] = openmoc_lattice # Add the OpenCG Lattice to the global collection of all OpenCG Lattices OPENCG_LATTICES[lattice_id] = opencg_lattice return opencg_lattice
def load_from_hdf5(filename='mgxs.h5', directory='mgxs', geometry=None, domain_type='material', suffix=''): """This routine loads an HDF5 file of multi-group cross section data. The routine instantiates material with multi-group cross section data and returns a dictionary of each Material object keyed by its name or ID. An OpenMOC geometry may optionally be given and the routine will directly insert the multi-group cross sections into each material in the geometry. If a geometry is passed in, materials from the geometry will be used in place of those instantiated by this routine. Parameters ---------- filename : str Filename for cross sections HDF5 file (default is 'mgxs.h5') directory : str Directory for cross sections HDF5 file (default is 'mgxs') geometry : openmoc.Geometry, optional An optional geometry populated with materials, cells, etc. domain_type : str The domain type ('material' or 'cell') upon which the cross sections are defined (default is 'material') suffix : str, optional An optional string suffix to index the HDF5 file beyond the assumed domain_type/domain_id/mgxs_type group sequence (default is '') Returns ------- materials : dict A dictionary of Materials keyed by ID """ cv.check_type('filename', filename, basestring) cv.check_type('directory', directory, basestring) cv.check_value('domain_type', domain_type, ('material', 'cell')) cv.check_type('suffix', suffix, basestring) if geometry: cv.check_type('geometry', geometry, openmoc.Geometry) # Create a h5py file handle for the file import h5py filename = os.path.join(directory, filename) f = h5py.File(filename, 'r') # Check that the file has an 'energy groups' attribute if '# groups' not in f.attrs: py_printf( 'ERROR', 'Unable to load HDF5 file "%s" since it does ' 'not contain an \'# groups\' attribute', filename) if domain_type not in f.keys(): py_printf( 'ERROR', 'Unable to load HDF5 file "%s" since it does ' 'not contain domain type "%s"', filename, domain_type) # Instantiate dictionary to hold Materials to return to user materials = {} old_materials = {} num_groups = int(f.attrs['# groups']) # If a Geometry was passed in, extract all cells or materials from it if geometry: if domain_type == 'material': domains = geometry.getAllMaterials() elif domain_type == 'cell': domains = geometry.getAllMaterialCells() else: py_printf('ERROR', 'Domain type "%s" is not supported', domain_type) # Iterate over all domains (e.g., materials or cells) in the HDF5 file for domain_spec in sorted(f[domain_type]): py_printf('INFO', 'Importing cross sections for %s "%s"', domain_type, str(domain_spec)) # Create shortcut to HDF5 group for this domain domain_group = f[domain_type][domain_spec] # If domain_spec is an integer, it is an ID; otherwise a string name if domain_spec.isdigit(): domain_spec = int(domain_spec) else: domain_spec = str(domain_spec) # If using an OpenMOC Geometry, extract a Material from it if geometry: if domain_type == 'material': material = _get_domain(domains, domain_spec) elif domain_type == 'cell': cell = _get_domain(domains, domain_spec) material = cell.getFillMaterial() # If the user filled multiple Cells with the same Material, # the Material must be cloned for each unique Cell if material != None: if len(domains) > geometry.getNumMaterials(): old_materials[material.getId()] = material material = material.clone() # If the Cell does not contain a Material, create one for it else: if isinstance(domain_spec, int): material = openmoc.Material(id=domain_spec) else: # Reproducibly hash the domain name into an integer ID 
domain_id = hashlib.md5(domain_spec.encode('utf-8')) domain_id = int(domain_id.hexdigest()[:4], 16) material = \ openmoc.Material(id=domain_id, name=domain_spec) # Fill the Cell with the new Material cell.setFill(material) # If not Geometry, instantiate a new Material with the ID/name else: if isinstance(domain_spec, int): material = openmoc.Material(id=domain_spec) else: # Reproducibly hash the domain name into an integer ID domain_id = hashlib.md5(domain_spec.encode('utf-8')) domain_id = int(domain_id.hexdigest()[:4], 16) material = openmoc.Material(id=domain_id, name=domain_spec) # Add material to the collection materials[domain_spec] = material material.setNumEnergyGroups(num_groups) # Search for the total/transport cross section if 'transport' in domain_group: sigma = _get_numpy_array(domain_group, 'transport', suffix) material.setSigmaT(sigma) py_printf('DEBUG', 'Loaded "transport" MGXS for "%s %s"', domain_type, str(domain_spec)) elif 'total' in domain_group: sigma = _get_numpy_array(domain_group, 'total', suffix) material.setSigmaT(sigma) py_printf('DEBUG', 'Loaded "total" MGXS for "%s %s"', domain_type, str(domain_spec)) else: py_printf('WARNING', 'No "total" or "transport" MGXS found for' '"%s %s"', domain_type, str(domain_spec)) # Search for the fission production cross section if 'nu-fission' in domain_group: sigma = _get_numpy_array(domain_group, 'nu-fission', suffix) material.setNuSigmaF(sigma) py_printf('DEBUG', 'Loaded "nu-fission" MGXS for "%s %s"', domain_type, str(domain_spec)) else: py_printf('WARNING', 'No "nu-fission" MGXS found for' '"%s %s"', domain_type, str(domain_spec)) # Search for the scattering matrix cross section if 'consistent nu-scatter matrix' in domain_group: sigma = _get_numpy_array(domain_group, 'consistent nu-scatter matrix', suffix) material.setSigmaS(sigma) py_printf( 'DEBUG', 'Loaded "consistent nu-scatter matrix" MGXS for "%s %s"', domain_type, str(domain_spec)) elif 'nu-scatter matrix' in domain_group: sigma = _get_numpy_array(domain_group, 'nu-scatter matrix', suffix) material.setSigmaS(sigma) py_printf('DEBUG', 'Loaded "nu-scatter matrix" MGXS for "%s %s"', domain_type, str(domain_spec)) elif 'consistent scatter matrix' in domain_group: sigma = _get_numpy_array(domain_group, 'consistent scatter matrix', suffix) material.setSigmaS(sigma) py_printf('DEBUG', 'Loaded "consistent scatter matrix" MGXS for "%s %s"', domain_type, str(domain_spec)) elif 'scatter matrix' in domain_group: sigma = _get_numpy_array(domain_group, 'scatter matrix', suffix) material.setSigmaS(sigma) py_printf('DEBUG', 'Loaded "scatter matrix" MGXS for "%s %s"', domain_type, str(domain_spec)) else: py_printf('WARNING', 'No "scatter matrix" found for "%s %s"', domain_type, str(domain_spec)) # Search for chi (fission spectrum) if 'chi' in domain_group: chi = _get_numpy_array(domain_group, 'chi', suffix) material.setChi(chi) py_printf('DEBUG', 'Loaded "chi" MGXS for "%s %s"', domain_type, str(domain_spec)) else: py_printf('WARNING', 'No "chi" MGXS found for "%s %s"', domain_type, str(domain_spec)) # Search for optional cross sections if 'fission' in domain_group: sigma = _get_numpy_array(domain_group, 'fission', suffix) material.setSigmaF(sigma) py_printf('DEBUG', 'Loaded "fission" MGXS for "%s %s"', domain_type, str(domain_spec)) # Inform SWIG to garbage collect any old Materials from the Geometry for material_id in old_materials: old_materials[material_id].thisown = False # Return collection of materials return materials
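# Illustrative sketch (not part of the original API): typical calls to
# load_from_hdf5. The file and directory names are placeholders; `geometry`
# is assumed to be an openmoc.Geometry whose materials or cells match the
# domain IDs/names stored in the HDF5 file.
def _example_load_mgxs_library(geometry):
    # Insert the cross sections directly into the geometry's materials
    materials = load_from_hdf5(filename='mgxs.h5', directory='mgxs',
                               geometry=geometry, domain_type='material')

    # Or build standalone Materials keyed by name/ID when no geometry is given
    standalone = load_from_hdf5(filename='mgxs.h5', directory='mgxs')
    return materials, standalone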
def get_opencg_surface(openmoc_surface):
    """Return an OpenCG surface corresponding to an OpenMOC surface.

    Parameters
    ----------
    openmoc_surface : openmoc.Surface
        OpenMOC surface

    Returns
    -------
    opencg_surface : opencg.Surface
        Equivalent OpenCG surface

    """

    cv.check_type('openmoc_surface', openmoc_surface, openmoc.Surface)

    global OPENCG_SURFACES
    surface_id = openmoc_surface.getId()

    # If this Surface was already created, use it
    if surface_id in OPENCG_SURFACES:
        return OPENCG_SURFACES[surface_id]

    # Create an OpenCG Surface to represent this OpenMOC Surface
    name = openmoc_surface.getName()

    # Convert the OpenMOC boundary type to OpenCG's string convention
    boundary = openmoc_surface.getBoundaryType()
    if boundary == openmoc.VACUUM:
        boundary = 'vacuum'
    elif boundary == openmoc.REFLECTIVE:
        boundary = 'reflective'
    elif boundary == openmoc.BOUNDARY_NONE:
        boundary = 'interface'

    surface_type = openmoc_surface.getSurfaceType()

    if surface_type == openmoc.PLANE:
        openmoc_surface = openmoc.castSurfaceToPlane(openmoc_surface)
        A = openmoc_surface.getA()
        B = openmoc_surface.getB()
        C = openmoc_surface.getC()
        D = openmoc_surface.getD()
        opencg_surface = opencg.Plane(surface_id, name, boundary, A, B, C, D)

    elif surface_type == openmoc.XPLANE:
        openmoc_surface = openmoc.castSurfaceToXPlane(openmoc_surface)
        x0 = openmoc_surface.getX()
        opencg_surface = opencg.XPlane(surface_id, name, boundary, x0)

    elif surface_type == openmoc.YPLANE:
        openmoc_surface = openmoc.castSurfaceToYPlane(openmoc_surface)
        y0 = openmoc_surface.getY()
        opencg_surface = opencg.YPlane(surface_id, name, boundary, y0)

    elif surface_type == openmoc.ZPLANE:
        openmoc_surface = openmoc.castSurfaceToZPlane(openmoc_surface)
        z0 = openmoc_surface.getZ()
        opencg_surface = opencg.ZPlane(surface_id, name, boundary, z0)

    elif surface_type == openmoc.ZCYLINDER:
        openmoc_surface = openmoc.castSurfaceToZCylinder(openmoc_surface)
        x0 = openmoc_surface.getX0()
        y0 = openmoc_surface.getY0()
        R = openmoc_surface.getRadius()
        opencg_surface = opencg.ZCylinder(surface_id, name, boundary, x0, y0, R)

    else:
        msg = 'Unable to create an OpenCG Surface from an OpenMOC ' \
              'Surface of type {0} since it is not a compatible ' \
              'Surface type in OpenCG'.format(surface_type)
        raise ValueError(msg)

    # Add the OpenMOC Surface to the global collection of all OpenMOC Surfaces
    OPENMOC_SURFACES[surface_id] = openmoc_surface

    # Add the OpenCG Surface to the global collection of all OpenCG Surfaces
    OPENCG_SURFACES[surface_id] = opencg_surface

    return opencg_surface
def get_openmoc_surface(opencg_surface): """Return an OpenMOC surface corresponding to an OpenCG surface. Parameters ---------- opencg_surface : opencg.Surface OpenCG surface Returns ------- openmoc_surface : openmoc.Surface Equivalent OpenMOC surface """ cv.check_type('opencg_surface', opencg_surface, opencg.Surface) global OPENMOC_SURFACES surface_id = opencg_surface.id # If this Surface was already created, use it if surface_id in OPENMOC_SURFACES: return OPENMOC_SURFACES[surface_id] # Create an OpenMOC Surface to represent this OpenCG Surface name = str(opencg_surface.name) # Correct for OpenMOC's syntax for Surfaces dividing Cells boundary = opencg_surface.boundary_type if boundary == 'vacuum': boundary = openmoc.VACUUM elif boundary == 'reflective': boundary = openmoc.REFLECTIVE elif boundary == 'interface': boundary = openmoc.BOUNDARY_NONE if opencg_surface.type == 'plane': A = opencg_surface.a B = opencg_surface.b C = opencg_surface.c D = opencg_surface.d openmoc_surface = openmoc.Plane(A, B, C, D, surface_id, name) elif opencg_surface.type == 'x-plane': x0 = opencg_surface.x0 openmoc_surface = openmoc.XPlane(x0, int(surface_id), name) elif opencg_surface.type == 'y-plane': y0 = opencg_surface.y0 openmoc_surface = openmoc.YPlane(y0, surface_id, name) elif opencg_surface.type == 'z-plane': z0 = opencg_surface.z0 openmoc_surface = openmoc.ZPlane(z0, surface_id, name) elif opencg_surface.type == 'z-cylinder': x0 = opencg_surface.x0 y0 = opencg_surface.y0 R = opencg_surface.r openmoc_surface = openmoc.ZCylinder(x0, y0, R, surface_id, name) else: msg = 'Unable to create an OpenMOC Surface from an OpenCG ' \ 'Surface of type {0} since it is not a compatible ' \ 'Surface type in OpenMOC'.format(opencg_surface.type) raise ValueError(msg) # Set the boundary condition for this Surface openmoc_surface.setBoundaryType(boundary) # Add the OpenMOC Surface to the global collection of all OpenMOC Surfaces OPENMOC_SURFACES[surface_id] = openmoc_surface # Add the OpenCG Surface to the global collection of all OpenCG Surfaces OPENCG_SURFACES[surface_id] = opencg_surface return openmoc_surface
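# Illustrative sketch (not part of the original API): a round trip between the
# two surface representations. The opencg.XPlane keyword arguments and the
# 'reflective' boundary string are the same ones already used by the helpers
# above.
def _example_surface_round_trip():
    left = opencg.XPlane(name='left edge', boundary='reflective', x0=-0.63)

    moc_left = get_openmoc_surface(left)      # OpenCG -> OpenMOC
    cg_left = get_opencg_surface(moc_left)    # OpenMOC -> OpenCG (cached copy)
    return moc_left, cg_left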
def parse_convergence_data(filename, directory=''):
    """Parse an OpenMOC log file to obtain a simulation's convergence data.

    This method compiles the eigenvalue and source residuals from each
    iteration of an OpenMOC simulation. This data is inserted into a Python
    dictionary under the key names 'eigenvalues' and 'residuals', along with
    an integer '# iters', and returned to the user.

    Parameters
    ----------
    filename : str
        The OpenMOC log filename string
    directory : str
        The directory where to find the log file

    Returns
    -------
    convergence_data : dict
        A Python dictionary of key/value pairs for convergence data

    Examples
    --------
    This method may be called from Python as follows:

        >>> parse_convergence_data(filename='openmoc-XX-XX-XXXX--XX:XX:XX.log')

    """

    cv.check_type('filename', filename, basestring)
    cv.check_type('directory', directory, basestring)

    # If the user specified a directory
    if len(directory) > 0:
        filename = directory + '/' + filename

    if not os.path.isfile(filename):
        py_printf('ERROR', 'Unable to parse convergence data since "%s" is '
                  'not an existing OpenMOC log file', filename)

    # Compile regular expressions to find the residual and eigenvalue data
    res = re.compile(br'res = ([0-9]\.[0-9]+E[+-][0-9]+)')
    keff = re.compile(br'k_eff = ([0-9]+\.[0-9]+)')

    # Parse the eigenvalues
    with open(filename, 'r+') as f:
        data = mmap.mmap(f.fileno(), 0)
        eigenvalues = keff.findall(data)

    # Parse the source residuals
    with open(filename, 'r+') as f:
        data = mmap.mmap(f.fileno(), 0)
        residuals = res.findall(data)

    # Create NumPy arrays of the data
    eigenvalues = np.array([float(eigenvalue) for eigenvalue in eigenvalues])
    residuals = np.array([float(residual) for residual in residuals])

    # Find the total number of source iterations
    num_iters = len(residuals)

    # Store the data in a dictionary to return to the user
    convergence_data = dict()
    convergence_data['# iters'] = num_iters
    convergence_data['eigenvalues'] = eigenvalues
    convergence_data['residuals'] = residuals

    return convergence_data
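# Illustrative sketch (not part of the original API): plot the eigenvalue and
# residual histories returned by parse_convergence_data. The log filename is
# supplied by the caller; matplotlib is assumed to be available.
def _example_plot_convergence(log_filename):
    import matplotlib.pyplot as plt

    data = parse_convergence_data(filename=log_filename)

    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    ax1.plot(np.arange(1, len(data['eigenvalues']) + 1), data['eigenvalues'])
    ax1.set_ylabel('k-effective')
    ax2.semilogy(np.arange(1, len(data['residuals']) + 1), data['residuals'])
    ax2.set_ylabel('Source residual')
    ax2.set_xlabel('Iteration')
    fig.savefig('convergence.png')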
def get_openmoc_geometry(opencg_geometry):
    """Return an OpenMOC geometry corresponding to an OpenCG geometry.

    Parameters
    ----------
    opencg_geometry : opencg.Geometry
        OpenCG geometry

    Returns
    -------
    openmoc_geometry : openmoc.Geometry
        Equivalent OpenMOC geometry

    """

    cv.check_type('opencg_geometry', opencg_geometry, opencg.Geometry)

    # Deep copy the geometry since it may be modified to make all Surfaces
    # compatible with OpenMOC's specifications
    opencg_geometry.assign_auto_ids()
    opencg_geometry = copy.deepcopy(opencg_geometry)

    # Update Cell bounding boxes in Geometry
    opencg_geometry.update_bounding_boxes()

    # Clear dictionaries and auto-generated IDs
    OPENMOC_MATERIALS.clear()
    OPENCG_MATERIALS.clear()
    OPENMOC_SURFACES.clear()
    OPENCG_SURFACES.clear()
    OPENMOC_CELLS.clear()
    OPENCG_CELLS.clear()
    OPENMOC_UNIVERSES.clear()
    OPENCG_UNIVERSES.clear()
    OPENMOC_LATTICES.clear()
    OPENCG_LATTICES.clear()

    # Make the entire geometry "compatible" before assigning auto IDs
    universes = opencg_geometry.get_all_universes()
    for universe_id, universe in universes.items():
        make_opencg_cells_compatible(universe)

    opencg_geometry.assign_auto_ids()

    opencg_root_universe = opencg_geometry.root_universe
    openmoc_root_universe = get_openmoc_universe(opencg_root_universe)

    openmoc_geometry = openmoc.Geometry()
    openmoc_geometry.setRootUniverse(openmoc_root_universe)

    # Update OpenMOC's auto-generated object IDs (e.g., Surface, Material)
    # with the maximum of those created from the OpenCG objects
    all_materials = openmoc_geometry.getAllMaterials()
    all_surfaces = openmoc_geometry.getAllSurfaces()
    all_cells = openmoc_geometry.getAllCells()
    all_universes = openmoc_geometry.getAllUniverses()

    max_material_id = max(all_materials.keys())
    max_surface_id = max(all_surfaces.keys())
    max_cell_id = max(all_cells.keys())
    max_universe_id = max(all_universes.keys())

    openmoc.maximize_material_id(max_material_id+1)
    openmoc.maximize_surface_id(max_surface_id+1)
    openmoc.maximize_cell_id(max_cell_id+1)
    openmoc.maximize_universe_id(max_universe_id+1)

    return openmoc_geometry
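# Illustrative sketch (not part of the original API): convert an OpenCG
# geometry and inspect the result with the same accessors used above.
# `opencg_geometry` is assumed to be an opencg.Geometry with its root
# universe already defined.
def _example_convert_geometry(opencg_geometry):
    openmoc_geometry = get_openmoc_geometry(opencg_geometry)

    num_materials = len(openmoc_geometry.getAllMaterials())
    num_cells = len(openmoc_geometry.getAllCells())
    return openmoc_geometry, num_materials, num_cells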
def width(self, width): cv.check_type('mesh width', width, Iterable, Real) cv.check_length('mesh width', width, 2, 3) self._width = width
def compute_fission_rates(solver, use_hdf5=False): """Computes the fission rate in each FSR. This method combines the rates based on their hierarchical universe/lattice structure. The fission rates are then exported to a binary HDF5 or Python pickle file. This routine is intended to be called by the user in Python to compute fission rates. Typically, the fission rates will represent pin powers. The routine either exports fission rates to an HDF5 binary file or pickle file with each fission rate being indexed by a string representing the universe/lattice hierarchy. Parameters ---------- solver : openmoc.Solver The solver used to compute the flux use_hdf5 : bool Whether or not to export fission rates to an HDF5 file Examples -------- This routine may be called from a Python script as follows: >>> compute_fission_rates(solver, use_hdf5=True) """ global solver_types cv.check_type('solver', solver, solver_types) cv.check_type('use_hdf5', use_hdf5, bool) # Make directory if it does not exist directory = openmoc.get_output_directory() + '/fission-rates/' filename = 'fission-rates' if not os.path.exists(directory): os.makedirs(directory) # Get geometry geometry = solver.getGeometry() # Compute the volume-weighted fission rates for each FSR fsr_fission_rates = solver.computeFSRFissionRates(geometry.getNumFSRs()) # Initialize fission rates dictionary fission_rates_sum = {} # Loop over FSRs and populate fission rates dictionary for fsr in range(geometry.getNumFSRs()): if geometry.findFSRMaterial(fsr).isFissionable(): # Get the linked list of LocalCoords point = geometry.getFSRPoint(fsr) coords = openmoc.LocalCoords(point.getX(), point.getY(), point.getZ()) coords.setUniverse(geometry.getRootUniverse()) geometry.findCellContainingCoords(coords) coords = coords.getHighestLevel().getNext() # initialize dictionary key key = 'UNIV = 0 : ' # Parse through the linked list and create fsr key. # If lowest level sub dictionary already exists, then increment # fission rate; otherwise, set the fission rate. while True: if coords.getType() is openmoc.LAT: key += 'LAT = ' + str(coords.getLattice().getId()) + ' (' + \ str(coords.getLatticeX()) + ', ' + \ str(coords.getLatticeY()) + ', ' + \ str(coords.getLatticeZ()) + ') : ' else: key += 'UNIV = ' + str( coords.getUniverse().getId()) + ' : ' # Remove trailing ' : ' on end of key if at last univ/lat if coords.getNext() is None: key = key[:-3] break else: coords = coords.getNext() # Increment or set fission rate if key in fission_rates_sum: fission_rates_sum[key] += fsr_fission_rates[fsr] else: fission_rates_sum[key] = fsr_fission_rates[fsr] # Write the fission rates to the HDF5 file if use_hdf5: f = h5py.File(directory + filename + '.h5', 'w') fission_rates_group = f.create_group('fission-rates') for key, value in fission_rates_sum.items(): fission_rates_group.attrs[key] = value f.close() # Pickle the fission rates to a file else: pickle.dump(fission_rates_sum, open(directory + filename + '.pkl', 'wb'))
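# Illustrative sketch (not part of the original API): read back the pickled
# fission rates written by compute_fission_rates. The keys encode the
# universe/lattice hierarchy built above, e.g.
# 'UNIV = 0 : LAT = 4 (1, 2, 0) : UNIV = 7'.
def _example_read_fission_rates():
    import pickle

    path = openmoc.get_output_directory() + '/fission-rates/fission-rates.pkl'
    with open(path, 'rb') as handle:
        pin_powers = pickle.load(handle)

    for key in sorted(pin_powers):
        print(key, pin_powers[key])
    return pin_powers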
def get_compatible_opencg_surfaces(opencg_surface):
    """Generate OpenCG surfaces compatible with OpenMOC that are equivalent
    to an OpenCG surface which is not itself compatible with OpenMOC.

    For example, this method may be used to convert a ZSquarePrism OpenCG
    surface into a collection of equivalent XPlane and YPlane OpenCG surfaces.

    Parameters
    ----------
    opencg_surface : opencg.Surface
        OpenCG surface that is incompatible with OpenMOC

    Returns
    -------
    surfaces : list of opencg.Surface
        Collection of surfaces equivalent to the original one but
        compatible with OpenMOC

    """

    cv.check_type('opencg_surface', opencg_surface, opencg.Surface)

    global OPENMOC_SURFACES
    surface_id = opencg_surface.id

    # If this Surface was already created, use it
    if surface_id in OPENMOC_SURFACES:
        return OPENMOC_SURFACES[surface_id]

    # Create compatible OpenCG Surfaces to represent this OpenCG Surface
    name = str(opencg_surface.name)

    # Preserve the boundary condition of the original Surface
    boundary = opencg_surface.boundary_type

    if opencg_surface.type == 'x-squareprism':
        y0 = opencg_surface.y0
        z0 = opencg_surface.z0
        R = opencg_surface.r

        # Create a list of the four planes we need
        min_y = opencg.YPlane(y0=y0-R, name=name)
        max_y = opencg.YPlane(y0=y0+R, name=name)
        min_z = opencg.ZPlane(z0=z0-R, name=name)
        max_z = opencg.ZPlane(z0=z0+R, name=name)

        # Set the boundary conditions for each Surface
        min_y.boundary_type = boundary
        max_y.boundary_type = boundary
        min_z.boundary_type = boundary
        max_z.boundary_type = boundary

        surfaces = [min_y, max_y, min_z, max_z]

    elif opencg_surface.type == 'y-squareprism':
        x0 = opencg_surface.x0
        z0 = opencg_surface.z0
        R = opencg_surface.r

        # Create a list of the four planes we need
        min_x = opencg.XPlane(name=name, boundary=boundary, x0=x0-R)
        max_x = opencg.XPlane(name=name, boundary=boundary, x0=x0+R)
        min_z = opencg.ZPlane(name=name, boundary=boundary, z0=z0-R)
        max_z = opencg.ZPlane(name=name, boundary=boundary, z0=z0+R)

        # Set the boundary conditions for each Surface
        min_x.boundary_type = boundary
        max_x.boundary_type = boundary
        min_z.boundary_type = boundary
        max_z.boundary_type = boundary

        surfaces = [min_x, max_x, min_z, max_z]

    elif opencg_surface.type == 'z-squareprism':
        x0 = opencg_surface.x0
        y0 = opencg_surface.y0
        R = opencg_surface.r

        # Create a list of the four planes we need
        min_x = opencg.XPlane(name=name, boundary=boundary, x0=x0-R)
        max_x = opencg.XPlane(name=name, boundary=boundary, x0=x0+R)
        min_y = opencg.YPlane(name=name, boundary=boundary, y0=y0-R)
        max_y = opencg.YPlane(name=name, boundary=boundary, y0=y0+R)

        # Set the boundary conditions for each Surface
        min_x.boundary_type = boundary
        max_x.boundary_type = boundary
        min_y.boundary_type = boundary
        max_y.boundary_type = boundary

        surfaces = [min_x, max_x, min_y, max_y]

    else:
        msg = 'Unable to create compatible OpenMOC Surfaces for an OpenCG ' \
              'Surface of type "{0}" since it is already a compatible ' \
              'Surface type in OpenMOC'.format(opencg_surface.type)
        raise ValueError(msg)

    # Add the compatible Surface(s) to global collection of all OpenMOC Surfaces
    OPENMOC_SURFACES[surface_id] = surfaces

    # Add the OpenCG Surface to the global collection of all OpenCG Surfaces
    OPENCG_SURFACES[surface_id] = opencg_surface

    return surfaces
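# Illustrative sketch (not part of the original API): a ZSquarePrism of
# half-width r centered at (x0, y0) decomposes into the planes x = x0 -/+ r
# and y = y0 -/+ r, in the order returned by the function above. The
# opencg.ZSquarePrism constructor keywords are assumptions about the OpenCG
# API; the boundary_type assignment mirrors the code above.
def _example_decompose_square_prism():
    prism = opencg.ZSquarePrism(x0=0.0, y0=0.0, r=0.63)
    prism.boundary_type = 'reflective'

    min_x, max_x, min_y, max_y = get_compatible_opencg_surfaces(prism)
    return min_x, max_x, min_y, max_y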
def get_scalar_fluxes(solver, fsrs='all', groups='all'):
    """Return an array of scalar fluxes in one or more FSRs and groups.

    This routine builds a 2D NumPy array indexed by FSR and energy group for
    the corresponding scalar fluxes. The fluxes are organized in the array in
    order of increasing FSR and energy group if 'all' FSRs or energy groups
    are requested (the default). If the user requests fluxes for specific
    FSRs or energy groups, then the fluxes are returned in the order in which
    the FSRs and groups are enumerated in the associated parameters.

    Parameters
    ----------
    solver : openmoc.Solver
        The solver used to compute the flux
    fsrs : Iterable of Integral or 'all'
        A collection of integer FSR IDs or 'all' (default)
    groups : Iterable of Integral or 'all'
        A collection of integer energy groups or 'all' (default)

    Returns
    -------
    fluxes : ndarray
        The scalar fluxes indexed by FSR ID and energy group. Note that the
        energy group index starts at 0 rather than 1 for the highest energy
        in accordance with Python's 0-based indexing.

    """

    global solver_types
    cv.check_type('solver', solver, solver_types)

    if isinstance(fsrs, basestring):
        cv.check_value('fsrs', fsrs, 'all')
    else:
        cv.check_type('fsrs', fsrs, Iterable, Integral)

    if isinstance(groups, basestring):
        cv.check_value('groups', groups, 'all')
    else:
        cv.check_type('groups', groups, Iterable, Integral)

    # Extract all of the FSR scalar fluxes
    if groups == 'all' and fsrs == 'all':
        num_fsrs = solver.getGeometry().getNumFSRs()
        num_groups = solver.getGeometry().getNumEnergyGroups()
        num_fluxes = num_groups * num_fsrs
        fluxes = solver.getFluxes(num_fluxes)
        fluxes = np.reshape(fluxes, (num_fsrs, num_groups))
        return fluxes

    # Build a list of FSRs to iterate over
    if fsrs == 'all':
        num_fsrs = solver.getGeometry().getNumFSRs()
        fsrs = np.arange(num_fsrs)
    else:
        num_fsrs = len(fsrs)

    # Build a list of energy groups to iterate over
    if groups == 'all':
        num_groups = solver.getGeometry().getNumEnergyGroups()
        groups = np.arange(num_groups) + 1
    else:
        num_groups = len(groups)

    # Extract the requested FSR scalar fluxes in the order the FSRs and
    # energy groups were enumerated by the user
    fluxes = np.zeros((num_fsrs, num_groups))
    for i, fsr in enumerate(fsrs):
        for j, group in enumerate(groups):
            fluxes[i, j] = solver.getFlux(int(fsr), int(group))

    return fluxes
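# Illustrative sketch (not part of the original API): group-collapse the FSR
# scalar fluxes and pull a small subset of FSRs/groups. `solver` is assumed
# to hold a converged OpenMOC solution.
def _example_collapse_fluxes(solver):
    fluxes = get_scalar_fluxes(solver)        # shape (num FSRs, num groups)
    total_flux = np.sum(fluxes, axis=1)       # integrate over energy

    # Fluxes for three FSRs in the two highest-energy groups
    fast_fluxes = get_scalar_fluxes(solver, fsrs=[0, 1, 2], groups=[1, 2])
    return total_flux, fast_fluxes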
def lower_left(self, lower_left): cv.check_type('mesh lower_left', lower_left, Iterable, Real) cv.check_length('mesh lower_left', lower_left, 2, 3) self._lower_left = lower_left
def tally_on_mesh(self, solver, domains_to_coeffs, domain_type='fsr',
                  volume='integrated', energy='integrated'):
    """Compute arbitrary reaction rates in each mesh cell.

    NOTE: This method assumes that the mesh perfectly aligns with the flat
    source region mesh used in the OpenMOC calculation.

    Parameters
    ----------
    solver : {openmoc.CPUSolver, openmoc.GPUSolver, openmoc.VectorizedSolver}
        The solver used to compute the flux
    domains_to_coeffs : dict or numpy.ndarray of Real
        A mapping of spatial domains and energy groups to the coefficients
        to multiply the flux in each domain. If domain_type is 'material'
        or 'cell' then the coefficients must be a Python dictionary indexed
        by material/cell ID mapped to NumPy arrays indexed by energy group.
        If domain_type is 'fsr' then the coefficients may be a dictionary
        or NumPy array indexed by FSR ID and energy group. Note that the
        energy group indexing should start at 0 rather than 1 for the
        highest energy in accordance with Python's 0-based indexing.
    domain_type : {'fsr', 'cell', 'material'}
        The type of domain for which the coefficients are defined
    volume : {'averaged', 'integrated'}
        Compute volume-averaged or volume-integrated tallies
    energy : {'by_group', 'integrated'}
        Compute tallies by energy group or integrate across groups

    Returns
    -------
    tally : numpy.ndarray of Real
        A NumPy array of the reaction rates tallied in each mesh cell,
        indexed by mesh cell and energy group (if energy is 'by_group')

    """

    global solver_types
    cv.check_type('solver', solver, solver_types)
    cv.check_value('domain_type', domain_type, ('fsr', 'cell', 'material'))
    cv.check_value('volume', volume, ('averaged', 'integrated'))
    cv.check_value('energy', energy, ('by_group', 'integrated'))

    # Extract parameters from the Geometry
    geometry = solver.getGeometry()
    num_groups = geometry.getNumEnergyGroups()
    num_fsrs = geometry.getNumFSRs()

    # Coefficients must be specified as a dict, ndarray or DataFrame
    if domain_type in ['material', 'cell']:
        cv.check_type('domains_to_coeffs', domains_to_coeffs, dict)
    else:
        cv.check_type('domains_to_coeffs',
                      domains_to_coeffs, (dict, np.ndarray))

    # Extract the FSR fluxes from the Solver
    fluxes = get_scalar_fluxes(solver)

    # Initialize a 2D or 3D NumPy array in which to tally
    tally_shape = tuple(self.dimension) + (num_groups,)
    tally = np.zeros(tally_shape, dtype=np.float64)

    # Compute product of fluxes with domains-to-coeffs mapping by group, FSR
    for fsr in range(num_fsrs):
        point = geometry.getFSRPoint(fsr)
        mesh_indices = self.get_mesh_cell_indices(point)

        # Skip FSRs which fall outside of the mesh
        if np.nan in mesh_indices:
            continue

        # Use a distinct name for the FSR volume so the 'volume' keyword
        # argument ('averaged'/'integrated') is not overwritten
        fsr_volume = solver.getFSRVolume(fsr)
        fsr_tally = np.zeros(num_groups, dtype=np.float64)

        # Determine domain ID (material, cell or FSR) for this FSR
        if domain_type == 'fsr':
            domain_id = fsr
        else:
            coords = \
                openmoc.LocalCoords(point.getX(), point.getY(), point.getZ())
            coords.setUniverse(geometry.getRootUniverse())
            cell = geometry.findCellContainingCoords(coords)

            if domain_type == 'cell':
                domain_id = cell.getId()
            else:
                domain_id = cell.getFillMaterial().getId()

        # Tally flux multiplied by coefficients by energy group
        for group in range(num_groups):
            fsr_tally[group] = \
                fluxes[fsr, group] * domains_to_coeffs[domain_id][group]

        # Increment mesh tally with volume-integrated FSR tally
        tally[mesh_indices] += fsr_tally * fsr_volume

    # Integrate the energy groups if needed
    if energy == 'integrated':
        tally = np.sum(tally, axis=len(self.dimension))

    # Average the tallies by mesh cell volume if needed
    if volume == 'averaged':
        tally /= self.mesh_cell_volume

    return tally
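# Illustrative sketch (not part of the original API): tally an arbitrary
# reaction rate on a 17x17 mesh. The surrounding Mesh class is assumed to be
# instantiable with no arguments and configured through the attributes used
# above (dimension, width, lower_left, upper_right); the material IDs and
# per-group coefficient values are placeholders for a two-group problem.
def _example_mesh_tally(solver):
    mesh = Mesh()
    mesh.dimension = [17, 17]
    mesh.lower_left = [-10.71, -10.71]
    mesh.upper_right = [10.71, 10.71]
    mesh.width = [21.42 / 17., 21.42 / 17.]

    # One NumPy array of per-group coefficients per material ID
    domains_to_coeffs = {1: np.array([0.1, 1.2]),
                         2: np.array([0.2, 0.8])}

    rates = mesh.tally_on_mesh(solver, domains_to_coeffs,
                               domain_type='material', energy='integrated')
    return rates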