def main():
    """Build an ENDF/B-VII.1 depletion chain and export it to XML.

    Looks for the ENDF sublibraries via $OPENMC_ENDF_DATA, then in the
    current directory, and finally downloads and unpacks them from URLS.
    """
    env_dir = os.environ.get("OPENMC_ENDF_DATA")
    if env_dir is not None:
        endf_dir = Path(env_dir)
    elif all(os.path.isdir(name) for name in ("neutrons", "decay", "nfy")):
        # Sublibrary directories already present in the working directory
        endf_dir = Path(".")
    else:
        # Fetch and unpack every archive into the working directory
        for url in URLS:
            archive = download(url)
            with ZipFile(archive, 'r') as zf:
                print('Extracting {}...'.format(archive))
                zf.extractall()
        endf_dir = Path(".")

    # Collect the evaluations from each sublibrary directory
    decay_files = tuple((endf_dir / "decay").glob("*endf"))
    neutron_files = tuple((endf_dir / "neutrons").glob("*endf"))
    nfy_files = tuple((endf_dir / "nfy").glob("*endf"))

    # Fail fast if any sublibrary turned up empty
    checks = [
        (decay_files, "decay"),
        (neutron_files, "neutron"),
        (nfy_files, "neutron fission product yield"),
    ]
    for files, label in checks:
        if not files:
            raise IOError("No {} endf files found in {}".format(
                label, endf_dir))

    chain = openmc.deplete.Chain.from_endf(decay_files, nfy_files,
                                           neutron_files)
    chain.export_to_xml('chain_endfb71.xml')
def download_photos(run_in_tmpdir):
    """Exercise _utils.download() three times on the same picture: twice to
    obtain distinct local names, and a third time to hit the
    already-downloaded code path."""
    unique_url = "https://i.ibb.co/HhKFc8x/small.jpg"
    repeat_url = "https://tinyurl.com/y4t38ugb"
    _utils.download(unique_url)
    _utils.download(repeat_url)
    _utils.download(repeat_url, as_browser=True)
def test_checksum_error(run_in_tmpdir):
    """Verify download() raises OSError when the MD5 checksum mismatches."""
    expected = "MD5 checksum for y4t38ugb"
    with pytest.raises(OSError, match=expected):
        _utils.download("https://tinyurl.com/y4t38ugb",
                        checksum="not none",
                        as_browser=True)
# Example #4
# ==============================================================================
# DOWNLOAD FILES FROM IAEA SITE

if args.download:
    print(download_warning)

    for particle in args.particles:
        # Create a directory to hold the downloads
        particle_download_path = download_path / particle
        particle_download_path.mkdir(parents=True, exist_ok=True)
        os.chdir(particle_download_path)

        # Fetch every file listed for this particle in the selected release.
        # An unverified SSL context is used, so the server certificate is
        # deliberately not validated here.
        particle_details = release_details[args.release][particle]
        for f in particle_details['files']:
            download(urljoin(particle_details['base_url'], f),
                     as_browser=True,
                     context=ssl._create_unverified_context())

    # Restore the original working directory after downloading
    os.chdir(cwd)

# ==============================================================================
# EXTRACT FILES FROM ZIP
if args.extract:
    for particle in args.particles:
        os.chdir(download_path / particle)

        particle_details = release_details[args.release][particle]
        # Some releases need special-case handling during extraction
        special_cases = check_special_case(particle_details, 'extract')

        # NOTE(review): this if-block is truncated in this chunk; the rest
        # of the extraction logic is not visible here.
        if particle_details['file_type'] == "ace":
            extraction_dir = ace_files_dir
# Example #5
# (truncated fragment:)
#            release_details[args.release]['uncompressed_file_size'])

# Confirm with the user before downloading, unless running unattended
if args.batch:
    response = 'y'
else:
    response = input(download_warning)
if response.lower().startswith('n'):
    sys.exit()

# Chunk size used when streaming downloads
block_size = 16384

# ==============================================================================
# DOWNLOAD FILES FROM IAEA SITE

details = release_details[args.release]
files_complete = []
for f in details['files']:
    # Establish connection to URL
    url = details['base_url'] + f
    downloaded_file = download(url, context=ssl._create_unverified_context())
    files_complete.append(downloaded_file)

# ==============================================================================
# EXTRACT FILES FROM TGZ

for f in details['files']:
    # Skip anything that did not download successfully
    if f not in files_complete:
        continue

    # The FENDL release was compressed with zip "type 9" compression, which
    # the standard zipfile module cannot read, so the system unzip tool is
    # invoked instead.
    subprocess.call(['unzip', '-o', f, '-d', ace_files_dir])
def main():
    """Build a CASL-specific depletion chain from ENDF/B-VII.1 sublibraries.

    Reads the decay, incident-neutron, and fission-product-yield
    sub-libraries, keeps only nuclides listed in CASL_CHAIN, assembles an
    openmc.deplete.Chain, and writes it to 'chain_casl.xml'.
    """
    # Locate the ENDF data: current directory first, then $OPENMC_ENDF_DATA,
    # otherwise download and unpack the archives listed in URLS.
    if os.path.isdir('./decay') and os.path.isdir('./nfy') and os.path.isdir('./neutrons'):
        endf_dir = '.'
    elif 'OPENMC_ENDF_DATA' in os.environ:
        endf_dir = os.environ['OPENMC_ENDF_DATA']
    else:
        for url in URLS:
            basename = download(url)
            with ZipFile(basename, 'r') as zf:
                print('Extracting {}...'.format(basename))
                zf.extractall()
        endf_dir = '.'

    decay_files = glob.glob(os.path.join(endf_dir, 'decay', '*.endf'))
    fpy_files = glob.glob(os.path.join(endf_dir, 'nfy', '*.endf'))
    neutron_files = glob.glob(os.path.join(endf_dir, 'neutrons', '*.endf'))

    # Create a Chain
    chain = openmc.deplete.Chain()

    print('Reading ENDF nuclear data from "{}"...'.format(os.path.abspath(endf_dir)))

    # Create dictionary mapping target to filename
    print('Processing neutron sub-library files...')
    # reactions[nuclide][MT] -> Q value read from the MF=3 CONT record
    reactions = {}
    for f in neutron_files:
        evaluation = openmc.data.endf.Evaluation(f)
        nuc_name = evaluation.gnd_name
        if nuc_name in CASL_CHAIN:
            reactions[nuc_name] = {}
            for mf, mt, nc, mod in evaluation.reaction_list:
                # Q value for each reaction is given in MF=3
                if mf == 3:
                    file_obj = StringIO(evaluation.section[3, mt])
                    openmc.data.endf.get_head_record(file_obj)
                    q_value = openmc.data.endf.get_cont_record(file_obj)[1]
                    reactions[nuc_name][mt] = q_value

    # Determine what decay and FPY nuclides are available
    print('Processing decay sub-library files...')
    decay_data = {}
    for f in decay_files:
        decay_obj = openmc.data.Decay(f)
        nuc_name = decay_obj.nuclide['name']
        if nuc_name in CASL_CHAIN:
            decay_data[nuc_name] = decay_obj

    # Warn about chain nuclides for which no decay evaluation was found
    for nuc_name in CASL_CHAIN:
        if nuc_name not in decay_data:
            print('WARNING: {} has no decay data!'.format(nuc_name))

    print('Processing fission product yield sub-library files...')
    fpy_data = {}
    for f in fpy_files:
        fpy_obj = openmc.data.FissionProductYields(f)
        name = fpy_obj.nuclide['name']
        if name in CASL_CHAIN:
            fpy_data[name] = fpy_obj

    print('Creating depletion_chain...')
    # Problems collected during the build and reported together at the end
    missing_daughter = []
    missing_rx_product = []
    missing_fpy = []

    # Nuclides are added in ZAM order so indices are reproducible
    for idx, parent in enumerate(sorted(decay_data, key=openmc.data.zam)):
        data = decay_data[parent]

        nuclide = Nuclide()
        nuclide.name = parent

        chain.nuclides.append(nuclide)
        chain.nuclide_dict[parent] = idx

        # CASL_CHAIN[parent][0] presumably flags nuclides forced to be
        # treated as stable -- verify against the CASL_CHAIN definition.
        if not CASL_CHAIN[parent][0] and \
           not data.nuclide['stable'] and data.half_life.nominal_value != 0.0:
            nuclide.half_life = data.half_life.nominal_value
            nuclide.decay_energy = sum(E.nominal_value for E in
                                       data.average_energies.values())
            # NOTE(review): dead store -- sum_br is never accumulated in
            # the loop and is recomputed from nuclide.decay_modes below.
            sum_br = 0.0
            for mode in data.modes:
                decay_type = ','.join(mode.modes)
                if mode.daughter in decay_data:
                    target = mode.daughter
                else:
                    # Daughter has no decay data: drop the mode, warn later
                    missing_daughter.append((parent, mode))
                    continue

                # Append decay mode
                br = mode.branching_ratio.nominal_value
                nuclide.decay_modes.append(DecayTuple(decay_type, target, br))

            # Ensure sum of branching ratios is unity by slightly modifying last
            # value if necessary
            sum_br = sum(m.branching_ratio for m in nuclide.decay_modes)
            if sum_br != 1.0 and nuclide.decay_modes and parent not in UNMODIFIED_DECAY_BR:
                decay_type, target, br = nuclide.decay_modes.pop()
                br = 1.0 - sum(m.branching_ratio for m in nuclide.decay_modes)
                nuclide.decay_modes.append(DecayTuple(decay_type, target, br))

        # If nuclide has incident neutron data, we need to list what
        # transmutation reactions are possible
        if parent in reactions:
            reactions_available = reactions[parent].keys()
            for name, mts, changes in _REACTIONS:
                if mts & reactions_available:
                    # Derive the reaction product from the (dA, dZ) change
                    delta_A, delta_Z = changes
                    A = data.nuclide['mass_number'] + delta_A
                    Z = data.nuclide['atomic_number'] + delta_Z
                    daughter = '{}{}'.format(openmc.data.ATOMIC_SYMBOL[Z], A)

                    if name not in chain.reactions:
                        chain.reactions.append(name)

                    # Product without decay data transmutes to 'Nothing'
                    if daughter not in decay_data:
                        missing_rx_product.append((parent, name, daughter))
                        daughter = 'Nothing'

                    # Store Q value -- use sorted order so we get summation
                    # reactions (e.g., MT=103) first
                    for mt in sorted(mts):
                        if mt in reactions[parent]:
                            q_value = reactions[parent][mt]
                            break
                    else:
                        q_value = 0.0

                    nuclide.reactions.append(ReactionTuple(
                        name, daughter, q_value, 1.0))

            # Check for fission reactions
            if any(mt in reactions_available for mt in [18, 19, 20, 21, 38]):
                if parent in fpy_data:
                    q_value = reactions[parent][18]
                    # NOTE(review): target is 0 here rather than a nuclide
                    # name -- confirm the Chain accepts 0 for fission.
                    nuclide.reactions.append(
                        ReactionTuple('fission', 0, q_value, 1.0))

                    if 'fission' not in chain.reactions:
                        chain.reactions.append('fission')
                else:
                    missing_fpy.append(parent)

        # Attach fission product yields, honoring each product's preferred
        # yield type recorded in CASL_CHAIN
        if parent in fpy_data:
            fpy = fpy_data[parent]

            if fpy.energies is not None:
                yield_energies = fpy.energies
            else:
                yield_energies = [0.0]

            yield_data = {}
            for E, table_yd, table_yc in zip(yield_energies, fpy.independent, fpy.cumulative):
                yields = defaultdict(float)
                for product in table_yd:
                    if product in decay_data:
                        # identifier
                        ifpy = CASL_CHAIN[product][2]
                        # 1 for independent
                        if ifpy == 1:
                            if product not in table_yd:
                                print('No independent fission yields found for {} in {}'.format(product, parent))
                            else:
                                yields[product] += table_yd[product].nominal_value
                        # 2 for cumulative
                        elif ifpy == 2:
                            if product not in table_yc:
                                print('No cumulative fission yields found for {} in {}'.format(product, parent))
                            else:
                                yields[product] += table_yc[product].nominal_value
                        # 3 for special treatment with weight fractions
                        elif ifpy == 3:
                            for name_i, weight_i, ifpy_i in CASL_CHAIN[product][3]:
                                if name_i not in table_yd:
                                    print('No fission yields found for {} in {}'.format(name_i, parent))
                                else:
                                    if ifpy_i == 1:
                                        yields[product] += weight_i * table_yd[name_i].nominal_value
                                    elif ifpy_i == 2:
                                        yields[product] += weight_i * table_yc[name_i].nominal_value

                yield_data[E] = yields

            nuclide.yield_data = FissionYieldDistribution(yield_data)

    # Display warnings
    if missing_daughter:
        print('The following decay modes have daughters with no decay data:')
        for parent, mode in missing_daughter:
            print('  {} -> {} ({})'.format(parent, mode.daughter, ','.join(mode.modes)))
        print('')

    if missing_rx_product:
        print('The following reaction products have no decay data:')
        for vals in missing_rx_product:
            print('{} {} -> {}'.format(*vals))
        print('')

    if missing_fpy:
        print('The following fissionable nuclides have no fission product yields:')
        for parent in missing_fpy:
            print('  ' + parent)
        print('')

    chain.export_to_xml('chain_casl.xml')
    action='store_false',
    help='Do not extract .tgz file if it has already been extracted')
parser.add_argument('--libver',
                    choices=['earliest', 'latest'],
                    default='earliest',
                    help="Output HDF5 versioning. Use "
                    "'earliest' for backwards compatibility or 'latest' for "
                    "performance")
parser.set_defaults(download=True, extract=True)
args = parser.parse_args()

# Download JEFF 3.3 library
filename = 'JEFF33-n_tsl-ace.tgz'
# NOTE(review): '(unknown)' looks like a placeholder that replaced the
# real filename interpolation in this f-string -- confirm against the
# original script.
url = f'http://www.oecd-nea.org/dbdata/jeff/jeff33/downloads/(unknown)'
if args.download:
    download(url)

# Extract tar file
if args.extract:
    with tarfile.open(filename, 'r') as tgz:
        # NOTE(review): '(unknown)' also appears here in place of the
        # interpolated filename.
        print(f'Extracting (unknown)...')
        tgz.extractall()

# Create output directory if it doesn't exist
args.destination.mkdir(parents=True, exist_ok=True)

# Get a list of all ACE files
paths = sorted(Path('ace').glob('*.[Aa][Cc][Ee]'))

lib = openmc.data.DataLibrary()
# NOTE(review): paths is already sorted, so sorted(paths) is redundant;
# the loop body is truncated in this chunk.
for p in sorted(paths):
# Temperatures (K) for the multi-temperature library
temperatures = [293.6, 500.0, 750.0, 1000.0, 1250.0]
pwd = Path.cwd()
output_dir = pwd / 'endf71_multitemp'

# Photon data gets its own subdirectory inside the output tree
os.makedirs(output_dir / 'photon', exist_ok=True)

with tempfile.TemporaryDirectory() as tmpdir:
    # Save current working directory and temporarily change dir
    os.chdir(tmpdir)
    library = openmc.data.DataLibrary()

    # =========================================================================
    # Download files from NNDC server
    # 'files' is defined elsewhere; the unpacking implies (base_url, name,
    # checksum) triples -- verify against its definition.
    for base, fname, checksum in files:
        # NOTE(review): checksum is passed positionally -- confirm that
        # download()'s second positional parameter is the checksum.
        download(urljoin(base, fname), checksum)

    # =========================================================================
    # EXTRACT FROM ZIP FILES

    for _, f, _ in files:
        print(f'Extracting {f}...')
        zipfile.ZipFile(f).extractall()

    # =========================================================================
    # PROCESS INCIDENT NEUTRON DATA IN PARALLEL

    with Pool() as pool:
        neutron_files = sorted(glob.glob('neutrons/*.endf'))
        results = []
        # NOTE(review): the loop body is truncated in this chunk.
        for f in neutron_files:
# Example #9
# (truncated fragment -- tail of a download_warning prompt string:)
# Are you sure you want to continue? ([y]/n)
# """.format(release_details[args.release]['compressed_file_size'],
#            release_details[args.release]['uncompressed_file_size'])

# Confirm with the user before downloading, unless --batch was given
if args.batch:
    response = 'y'
else:
    response = input(download_warning)
if response.lower().startswith('n'):
    sys.exit()

# ==============================================================================
# DOWNLOAD FILES FROM WEBSITE

details = release_details[args.release]
files_complete = []
for f in details['files']:
    # Establish connection to URL
    url = details['base_url'] + f
    downloaded_file = download(url)
    files_complete.append(downloaded_file)

# ==============================================================================
# EXTRACT FILES FROM TGZ

for f in details['files']:
    # Skip anything that did not download successfully
    if f not in files_complete:
        continue

    # Unpack each archive under the ENDF files directory
    suffix = ''
    with tarfile.open(f, 'r') as tgz:
        print('Extracting {0}...'.format(f))
        tgz.extractall(path=os.path.join(endf_files_dir, suffix))
# Example #10
# (truncated fragment -- tail of a download_warning string:)
# """.format(release_details[args.release]['compressed_file_size'],
#            release_details[args.release]['uncompressed_file_size'])

# Confirm with the user before downloading, unless running unattended
if args.batch:
    response = 'y'
else:
    response = input(download_warning)
if response.lower().startswith('n'):
    sys.exit()

# ==============================================================================
# DOWNLOAD FILES FROM IAEA SITE

details = release_details[args.release]
files_complete = []
for f in details['files']:
    # Establish connection to URL
    url = details['base_url'] + f
    downloaded_file = download(url, as_browser=True,
                               context=ssl._create_unverified_context())
    files_complete.append(downloaded_file)

# ==============================================================================
# EXTRACT FILES FROM TGZ

for f in details['files']:
    # Skip anything that did not download successfully
    if f not in files_complete:
        continue

    # The FENDL release was compressed with zip "type 9" compression, which
    # the standard zipfile module cannot read, so the system unzip tool is
    # invoked instead.
    subprocess.call(['unzip', '-o', f, '-d', ace_files_dir])
# Example #11
# Prompt text shown before a large multi-temperature download; the sizes
# are filled in from the release metadata.
download_warning = """
WARNING: This script will download up to {} GB of data. Extracting and
processing the data may require as much as {} GB of additional free disk
space. Note that if you don't need all 11 temperatures, you can used the 
--temperature argument to download only the temperatures you want.
""".format(release_details[args.release]['compressed_file_size'],
           release_details[args.release]['uncompressed_file_size'])

# ==============================================================================
# DOWNLOAD FILES FROM OECD SITE

if args.download:
    print(download_warning)
    for f in release_details[args.release]['files']:
        download(urljoin(release_details[args.release]['base_url'], f))

# ==============================================================================
# EXTRACT FILES FROM TGZ

if args.extract:
    for f in release_details[args.release]['files']:
        # Extract files
        if f.endswith('.zip'):
            with zipfile.ZipFile(f, 'r') as zipf:
                print('Extracting {}...'.format(f))
                zipf.extractall(ace_files_dir)

        else:
            # 293 K archives unpack into a dedicated subdirectory
            suffix = 'ACEs_293K' if '293' in f else ''
            # NOTE(review): this with-block is truncated in this chunk;
            # its body is not visible here.
            with tarfile.open(f, 'r') as tgz:
from tempfile import TemporaryDirectory
from pathlib import Path
import zipfile

from openmc._utils import download

# Output filenames for the concatenated sublibraries
filename = {'decay': 'sss_endfb71.dec', 'nfy': 'sss_endfb71.nfy'}

# Known MD5 checksums used to verify the downloads
md5s = {
    'decay': 'ad346888d55427c3f25d39f7fed3e659',
    'nfy': '0420894a296871cd08f68def84526d0e',
}

with TemporaryDirectory() as tmpdir:
    for sublib in ('decay', 'nfy'):
        # download and extract from zip file
        download(
            f'https://www.nndc.bnl.gov/endf/b7.1/zips/ENDF-B-VII.1-{sublib}.zip',
            checksum=md5s[sublib])
        with zipfile.ZipFile(f'ENDF-B-VII.1-{sublib}.zip') as z:
            z.extractall(path=tmpdir)

        # Concatenate the individual ENDF files into one sublibrary file.
        # Path.read_text() opens and closes each input file, fixing the
        # leaked file handles from the previous bare open(f).read().
        with open(filename[sublib], 'w') as out:
            for f in sorted(Path(tmpdir).joinpath(sublib).glob('*.endf')):
                out.write(f.read_text())
from tempfile import TemporaryDirectory
from pathlib import Path
import zipfile

from openmc._utils import download

# Output filenames for the concatenated sublibraries
filename = {'decay': 'sss_endfb71.dec', 'nfy': 'sss_endfb71.nfy'}

with TemporaryDirectory() as tmpdir:
    for sublib in ('decay', 'nfy'):
        # download and extract from zip file
        download(
            f'https://www.nndc.bnl.gov/endf/b7.1/zips/ENDF-B-VII.1-{sublib}.zip'
        )
        with zipfile.ZipFile(f'ENDF-B-VII.1-{sublib}.zip') as z:
            z.extractall(path=tmpdir)

        # Concatenate the individual ENDF files into one sublibrary file.
        # Path.read_text() opens and closes each input file, fixing the
        # leaked file handles from the previous bare open(f).read().
        with open(filename[sublib], 'w') as out:
            for f in sorted(Path(tmpdir).joinpath(sublib).glob('*.endf')):
                out.write(f.read_text())
# Example #14
# (truncated fragment -- tail of a release_details dict:)
# }

details = release_details[args.release]

# Prompt text shown before downloading; sizes come from release metadata
download_warning = """
WARNING: This script will download {} of data.
Extracting and processing the data requires {} of additional free disk space.
""".format(details['compressed_file_size'],
           details['uncompressed_file_size'])

# ==============================================================================
# DOWNLOAD FILES FROM WEBSITE

if args.download:
    print(download_warning)
    for f in details['files']:
        # Establish connection to URL; certificate validation is skipped
        download(urljoin(details['base_url'], f),
                 context=ssl._create_unverified_context())

# ==============================================================================
# EXTRACT FILES FROM TGZ
if args.extract:
    for f in details['files']:
        # Unpack each archive into the ENDF files directory
        with tarfile.open(f, 'r') as tgz:
            print('Extracting {0}...'.format(f))
            tgz.extractall(path=endf_files_dir)

# ==============================================================================
# GENERATE HDF5 LIBRARY -- NEUTRON FILES

# Get a list of all ACE files
neutron_files = details['neutron_files']