Example #1
    def __init__(self, filename, autolink=True):
        self._f = h5py.File(filename, 'r')
        self._meshes = {}
        self._filters = {}
        self._tallies = {}
        self._derivs = {}

        # Check filetype and version
        cv.check_filetype_version(self._f, 'statepoint', _VERSION_STATEPOINT)

        # Set flags for what data has been read
        self._meshes_read = False
        self._filters_read = False
        self._tallies_read = False
        self._summary = None
        self._global_tallies = None
        self._sparse = False
        self._derivs_read = False

        # Automatically link in a summary file if one exists
        if autolink:
            path_summary = os.path.join(os.path.dirname(filename),
                                        'summary.h5')
            if os.path.exists(path_summary):
                su = openmc.Summary(path_summary)
                self.link_with_summary(su)

            path_volume = os.path.join(os.path.dirname(filename),
                                       'volume_*.h5')
            for path_i in glob.glob(path_volume):
                if re.search(r'volume_\d+\.h5', path_i):
                    vol = openmc.VolumeCalculation.from_hdf5(path_i)
                    self.add_volume_information(vol)

    def _get_results(self, hash_output=False):
        """Digest info in the statepoint and return as a string."""

        # Read the statepoint file.
        statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))[0]
        sp = openmc.StatePoint(statepoint)

        # Read the summary file.
        summary = glob.glob(os.path.join(os.getcwd(), 'summary.h5'))[0]
        su = openmc.Summary(summary)
        sp.link_with_summary(su)

        # Load the MGXS library from the statepoint
        self.mgxs_lib.load_from_statepoint(sp)

        # Build a string from Pandas Dataframe for each MGXS
        outstr = ''
        for domain in self.mgxs_lib.domains:
            for mgxs_type in self.mgxs_lib.mgxs_types:
                mgxs = self.mgxs_lib.get_mgxs(domain, mgxs_type)
                df = mgxs.get_pandas_dataframe()
                outstr += df.to_string()

        # Hash the results if necessary
        if hash_output:
            sha512 = hashlib.sha512()
            sha512.update(outstr.encode('utf-8'))
            outstr = sha512.hexdigest()

        return outstr
Example #3
    def _set_updated_summary(self, path=os.getcwd()):

        updated_summary = openmc.Summary(os.path.join(path, 'summary.h5'))

        # openmc.Summary does not close the HDF5 file it opens. When OpenBU later
        # tries to shutil.rmtree the pre_run folder, it fails because a handle to
        # summary.h5 is still open, so we close it here.
        # NOTE: this should be fixed in OpenMC at some point.
        updated_summary._f.close()

        self._updated_summary = updated_summary
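The comments above describe a workaround for openmc.Summary keeping its HDF5 handle open, which blocks deleting the directory that holds summary.h5. A minimal standalone sketch of the same pattern (the pre_run path is hypothetical):

import shutil
import openmc

summary = openmc.Summary('pre_run/summary.h5')
# ... read whatever is needed from the summary ...
summary._f.close()        # close the h5py handle that Summary leaves open
shutil.rmtree('pre_run')  # the directory can now be removed without an open-file error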
Example #4
    def _compare_results(self):
        super()._compare_results()

        # load the summary file
        summary = openmc.Summary('summary.h5')

        # get the summary cells
        cells = summary.geometry.get_all_cells()

        # for the 2 by 2 lattice, each cell should have 4
        # temperature values set to 300 K
        for cell in cells.values():
            if isinstance(cell.fill, openmc.Material):
                assert len(cell.temperature) == 4
                assert_allclose(cell.temperature, 300.0)
Example #5
    def _get_results(self, hash_output=False):
        """Digest info in the statepoint and return as a string."""

        # Read the statepoint file.
        statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))[0]
        sp = openmc.StatePoint(statepoint)

        # Read the summary file.
        summary = glob.glob(os.path.join(os.getcwd(), 'summary.h5'))[0]
        su = openmc.Summary(summary)
        sp.link_with_summary(su)

        # Load the MGXS library from the statepoint
        self.mgxs_lib.load_from_statepoint(sp)

        # Export the MGXS Library to an HDF5 file
        self.mgxs_lib.build_hdf5_store(directory='.')

        # Open the MGXS HDF5 file
        f = h5py.File('mgxs.h5', 'r')

        # Build a string from the datasets in the HDF5 file
        outstr = ''
        for domain in self.mgxs_lib.domains:
            for mgxs_type in self.mgxs_lib.mgxs_types:
                outstr += 'domain={0} type={1}\n'.format(domain.id, mgxs_type)
                key = 'material/{0}/{1}/average'.format(domain.id, mgxs_type)
                outstr += str(f[key][...]) + '\n'
                key = 'material/{0}/{1}/std. dev.'.format(domain.id, mgxs_type)
                outstr += str(f[key][...]) + '\n'

        # Close the MGXS HDF5 file
        f.close()

        # Hash the results if necessary
        if hash_output:
            sha512 = hashlib.sha512()
            sha512.update(outstr.encode('utf-8'))
            outstr = sha512.hexdigest()

        return outstr
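The loop above reads specific datasets by key; when the domain IDs or cross-section types are not known in advance, the same file can be walked generically. A minimal sketch, assuming the 'material/<id>/<type>' layout that the snippet above reads:

import h5py

with h5py.File('mgxs.h5', 'r') as f:
    for domain_id in f['material']:                  # one group per material ID
        for mgxs_type in f['material'][domain_id]:   # one group per cross-section type
            mean = f['material'][domain_id][mgxs_type]['average'][...]
            print(domain_id, mgxs_type, mean.shape)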
Example #6
    def __init__(self, filename, autolink=True):
        import h5py
        if h5py.__version__ == '2.6.0':
            raise ImportError("h5py 2.6.0 has a known bug which makes it "
                              "incompatible with OpenMC's HDF5 files. "
                              "Please switch to a different version.")

        self._f = h5py.File(filename, 'r')

        # Ensure filetype and revision are correct
        try:
            if 'filetype' not in self._f or self._f['filetype'].value.decode(
            ) != 'statepoint':
                raise IOError('{} is not a statepoint file.'.format(filename))
        except AttributeError:
            raise IOError('Could not read statepoint file. This most likely '
                          'means the statepoint file was produced by a '
                          'different version of OpenMC than the one you are '
                          'using.')
        if self._f['revision'].value != 15:
            raise IOError('Statepoint file has a file revision of {} '
                          'which is not consistent with the revision this '
                          'version of OpenMC expects ({}).'.format(
                              self._f['revision'].value, 15))

        # Set flags for what data has been read
        self._meshes_read = False
        self._tallies_read = False
        self._summary = None
        self._global_tallies = None
        self._sparse = False

        # Automatically link in a summary file if one exists
        if autolink:
            path_summary = os.path.join(os.path.dirname(filename),
                                        'summary.h5')
            if os.path.exists(path_summary):
                su = openmc.Summary(path_summary)
                self.link_with_summary(su)
Example #7
    def _get_results(self):
        outstr = super(MultipoleTestHarness, self)._get_results()
        su = openmc.Summary('summary.h5')
        outstr += str(su.get_cell_by_id(11))
        return outstr

    def _get_results(self):
        outstr = super(MultipoleTestHarness, self)._get_results()
        su = openmc.Summary('summary.h5')
        outstr += str(su.geometry.get_all_cells()[11])
        return outstr
Example #9
    def unpack_tallies_and_normalize(self, filename, settings):
        """ Unpack tallies from OpenMC

        This function reads the tallies generated by OpenMC (from the tally.xml
        file generated in generate_tally_xml), normalizes them so that the total
        power generated is new_power, and then stores them in the reaction rate
        database.

        Parameters
        ----------
        filename : str
            The statepoint file to read from.
        settings : Settings
            The settings object that contains the target power in MeV/cm and
            FET booleans.

        Returns
        -------
        keff : float
            Eigenvalue of the last simulation.

        Todo
        ----
            Provide units for new_power
        """
        import openmc.statepoint as sp

        statepoint = sp.StatePoint(filename)

        # Link with summary file so that cell IDs work.
        su = openmc.Summary('summary.h5')
        statepoint.link_with_summary(su)

        keff = statepoint.k_combined[0]

        # Generate new power dictionary

        self.power = OrderedDict()

        # ---------------------------------------------------------------------
        # Unpack depletion list

        mp = zernike.num_poly(settings.fet_order)

        # Zero out reaction_rates
        self.reaction_rates[:, :, :, :] = 0.0

        # For each cell to be burned
        
        for cell_str in self.burn_cell_to_ind:
            cell = int(cell_str)

            # For each nuclide that was tallied
            for nuc in self.burn_nuc_to_ind:

                # If density = 0, there was no tally
                if nuc not in self.total_number[cell]:
                    continue

                nuclide = self.chain.nuc_by_ind(nuc)

                # For each reaction pathway
                for j in range(nuclide.n_reaction_paths):

                    tally_type = nuclide.reaction_type[j]

                    k = self.reaction_rates.react_to_ind[tally_type]
                    # print("k={0}".format(k))
                    # We have to get the tally at the inner loop because
                    # the tally number will vary based on reaction
                    if settings.fet_order is not None:
                        # print('Getting tally ' + str(k+1))
                        # print(cell, nuc)
                        tally_dep = statepoint.get_tally(id=k+1)
                        fet_tally_type = 'micro-' + nuclide.reaction_type[j] + '-zn'
                        
                    else:
                        tally_dep = statepoint.get_tally(id=1)

                    df = tally_dep.get_pandas_dataframe()
                    
                    # modified by Jiankai 
                    con_cell = np.ravel(df['cell'] == cell)
                    df_cell = df[con_cell]
                    con_nuclide = np.ravel(df_cell['nuclide'] == nuc)
                    df_nuclide = df_cell[con_nuclide]
                    # print(df_nuclide) 
                    # 
                    if settings.fet_order is not None:
                        # print('fet_Tally_type = ' + fet_tally_type)
                        # print(df_nuclide[df_nuclide["score"] ==
                        #                   fet_tally_type]["mean"].values)
                        #print(df_nuclide)
                        # Create FET
                        con_score = np.ravel(df_nuclide["score"] == fet_tally_type)
                        # 
                        fet_coeffs = df_nuclide[con_score]["mean"].values
                        fet = zernike.ZernikePolynomial(settings.fet_order, fet_coeffs,
                                                        0.4096, sqrt_normed=True)
                        
                        # Remove the sqrt normalization in OpenMC
                        fet.remove_fet_sqrt_normalization()

                        # Scale the FET microrate from barn x cm to cm^3
                        # ???
                        fet.scale_coefficients(
                            self.number_density[cell][nuc].coeffs[0] * 1e-24
                            / self.total_number[cell][nuc].coeffs[0])

                        # Store the FET and reaction rates values
                        self.reaction_rates.set_fet([cell_str,nuclide.name,k], fet)

                    else:
                        value = df_nuclide[df_nuclide["score"] ==
                                           tally_type]["mean"].values[0]

                        # The reaction rates are normalized to total number of
                        # atoms in the simulation.
                        self.reaction_rates[cell_str, nuclide.name, k, 0] = value \
                            / self.total_number[cell][nuc].coeffs[0]

                    # Calculate power if fission
                    if settings.fet_order is None and tally_type == "fission":
                        power = value * nuclide.fission_power
                        if cell not in self.power:
                            self.power[cell] = power
                        else:
                            self.power[cell] += power
                    elif tally_type == "fission":
                        # modified by Jiankai
                        con_score = np.ravel(df_nuclide["score"] == fet_tally_type)
                        value = self.number_density[cell][nuc].product_integrate(df_nuclide[con_score]["mean"].values * 1e-24)
                        #~ value = self.number_density[cell][nuc].coeffs[0] * 1e-24 * \
                        #~     df_nuclide[df_nuclide["score"] == fet_tally_type]["mean"].values[0]
                        power = value * nuclide.fission_power
                        if cell not in self.power:
                            self.power[cell] = power
                        else:
                            self.power[cell] += power

        # ---------------------------------------------------------------------
        # Normalize to power
        original_power = sum(self.power.values())

        print("ORIGINAL POWER!!!! ,", original_power)

        self.reaction_rates[:, :, :, :] *= (settings.power / original_power)

        return keff  # modified by Jiankai 
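The final normalization step above rescales every tallied reaction rate by the ratio of the requested power to the power reconstructed from the fission tallies. A minimal self-contained sketch of just that step, with hypothetical numbers standing in for the class attributes used above:

import numpy as np

# Hypothetical tallied rates (cells x nuclides x reactions x moments) and per-cell powers
rates = np.ones((2, 3, 4, 1))
power_by_cell = {10: 1.2e-11, 11: 0.9e-11}   # e.g. MeV per source particle

target_power = 1.0e-10                       # requested power, same units as above
original_power = sum(power_by_cell.values())

# One global factor brings the total power up to the target; relative rates are unchanged
rates *= target_power / original_power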
Example #10
import openmc
import openmc.mgxs

# Load the last statepoint file
sp = openmc.StatePoint('statepoint.100.h5')
su = openmc.Summary('summary.h5')
sp.link_with_summary(su)

# Instantiate a 1-group EnergyGroups object
groups = openmc.mgxs.EnergyGroups()
groups.group_edges = [0., 20.]

# Initialize a 1-group MGXS Library for OpenMOC
mgxs_lib = openmc.mgxs.Library(su.openmc_geometry)
mgxs_lib.energy_groups = groups
mgxs_lib.mgxs_types = ['total', 'nu-fission', 'nu-scatter matrix', 'chi']
mgxs_lib.domain_type = 'cell'
mgxs_lib.correction = None
mgxs_lib.build_library()
mgxs_lib.load_from_statepoint(sp)

# Store library and its MGXS objects in a pickled binary file
mgxs_lib.dump_to_file(filename='mgxs', directory='.')
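Continuing from the script above, the pickled library can be reloaded in a later session. A short sketch, assuming openmc.mgxs.Library.load_from_file is available in the OpenMC version being used:

# Reload the pickled MGXS library and pull one cross section back out
mgxs_lib = openmc.mgxs.Library.load_from_file(filename='mgxs', directory='.')
domain = mgxs_lib.domains[0]
total_xs = mgxs_lib.get_mgxs(domain, 'total')
print(total_xs.get_pandas_dataframe())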