Example #1
def test_iselement_U235():
    are = [92, "U"]
    arent = [922350, "U235"]
    for nuc in are:
        yield assert_true, nucname.iselement(nuc)
    for nuc in arent:
        yield assert_false, nucname.iselement(nuc)
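For reference, the behaviour asserted by these nose-style tests can be reproduced directly; a minimal sketch, assuming PyNE is installed and pyne.nucname is importable:

from pyne import nucname

# bare proton numbers and element symbols are recognized as elements...
print(nucname.iselement(92))      # True
print(nucname.iselement("U"))     # True
# ...while fully specified nuclides are not
print(nucname.iselement(922350))  # False (zzzaaam form of U-235)
print(nucname.iselement("U235"))  # False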
Example #2
def test_iselement_H1():
    are = [1, "H"]
    arent = [1001, "H1"]
    for nuc in are:
        yield assert_true, nucname.iselement(nuc)
    for nuc in arent:
        yield assert_false, nucname.iselement(nuc)
Example #3
def test_iselement():
    are = [92, 'U']
    arent = [922350, 'U235']
    for nuc in are:
        yield assert_true, nucname.iselement(nuc)
    for nuc in arent:
        yield assert_false, nucname.iselement(nuc)
Example #4
def parse_csv_abundances(csvy_data):
    """
    A parser for the csv data part of a csvy model file. This function filters out columns that are not abundances.

    Parameters
    ----------

    csvy_data : pandas.DataFrame

    Returns
    -------

    index : ~np.ndarray
    abundances : ~pandas.DataFrame
    isotope_abundance : ~pandas.MultiIndex
    """

    abundance_col_names = [
        name for name in csvy_data.columns
        if nucname.iselement(name) or nucname.isnuclide(name)
    ]
    df = csvy_data.loc[:, abundance_col_names]

    df = df.transpose()

    abundance = pd.DataFrame(
        columns=np.arange(df.shape[1]),
        index=pd.Index([], name="atomic_number"),
        dtype=np.float64,
    )

    isotope_index = pd.MultiIndex([[]] * 2, [[]] * 2,
                                  names=["atomic_number", "mass_number"])
    isotope_abundance = pd.DataFrame(columns=np.arange(df.shape[1]),
                                     index=isotope_index,
                                     dtype=np.float64)

    for element_symbol_string in df.index[0:]:
        if element_symbol_string in nucname.name_zz:
            z = nucname.name_zz[element_symbol_string]
            abundance.loc[z, :] = df.loc[element_symbol_string].tolist()
        else:
            z = nucname.znum(element_symbol_string)
            mass_no = nucname.anum(element_symbol_string)
            isotope_abundance.loc[(
                z, mass_no), :] = df.loc[element_symbol_string].tolist()

    return abundance.index, abundance, isotope_abundance
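A minimal usage sketch for parse_csv_abundances; the DataFrame below is made up for illustration and assumes the function is importable from the TARDIS CSVY parser module:

import pandas as pd

# hypothetical CSVY data block: one non-abundance column plus two abundances
csvy_data = pd.DataFrame({
    "velocity": [1.1e4, 1.2e4, 1.3e4],
    "Si": [0.6, 0.6, 0.6],     # elemental abundance column
    "Ni56": [0.4, 0.4, 0.4],   # isotopic abundance column
})

index, abundance, isotope_abundance = parse_csv_abundances(csvy_data)
# "velocity" is filtered out; "Si" ends up in `abundance` (indexed by
# atomic number) and "Ni56" in `isotope_abundance` (indexed by
# (atomic_number, mass_number)).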
Example #5
    def from_csvy(cls, config):
        """
        Create a new Radial1DModel instance from a Configuration object.

        Parameters
        ----------
        config : tardis.io.config_reader.Configuration

        Returns
        -------
        Radial1DModel

        """
        CSVY_SUPPORTED_COLUMNS = {
            'velocity', 'density', 't_rad', 'dilution_factor'
        }

        if os.path.isabs(config.csvy_model):
            csvy_model_fname = config.csvy_model
        else:
            csvy_model_fname = os.path.join(config.config_dirname,
                                            config.csvy_model)
        csvy_model_config, csvy_model_data = load_csvy(csvy_model_fname)
        base_dir = os.path.abspath(os.path.dirname(__file__))
        schema_dir = os.path.join(base_dir, '..', 'io', 'schemas')
        csvy_schema_file = os.path.join(schema_dir, 'csvy_model.yml')
        csvy_model_config = Configuration(
            validate_dict(csvy_model_config, schemapath=csvy_schema_file))

        if hasattr(csvy_model_data, 'columns'):
            abund_names = set([
                name for name in csvy_model_data.columns
                if nucname.iselement(name) or nucname.isnuclide(name)
            ])
            unsupported_columns = set(
                csvy_model_data.columns) - abund_names - CSVY_SUPPORTED_COLUMNS

            field_names = set(
                [field['name'] for field in csvy_model_config.datatype.fields])
            assert set(csvy_model_data.columns) - field_names == set(),\
                'CSVY columns exist without field descriptions'
            assert field_names - set(csvy_model_data.columns) == set(),\
                'CSVY field descriptions exist without corresponding csv data'
            if unsupported_columns != set():
                logger.warning(
                    "The following columns are specified in the csvy"
                    "model file, but are IGNORED by TARDIS: %s" %
                    (str(unsupported_columns)))

        time_explosion = config.supernova.time_explosion.cgs

        electron_densities = None
        temperature = None

        #if hasattr(csvy_model_config, 'v_inner_boundary'):
        #    v_boundary_inner = csvy_model_config.v_inner_boundary
        #else:
        #    v_boundary_inner = None

        #if hasattr(csvy_model_config, 'v_outer_boundary'):
        #    v_boundary_outer = csvy_model_config.v_outer_boundary
        #else:
        #    v_boundary_outer = None

        if hasattr(config, 'model'):
            if hasattr(config.model, 'v_inner_boundary'):
                v_boundary_inner = config.model.v_inner_boundary
            else:
                v_boundary_inner = None

            if hasattr(config.model, 'v_outer_boundary'):
                v_boundary_outer = config.model.v_outer_boundary
            else:
                v_boundary_outer = None
        else:
            v_boundary_inner = None
            v_boundary_outer = None

        if hasattr(csvy_model_config, 'velocity'):
            velocity = quantity_linspace(csvy_model_config.velocity.start,
                                         csvy_model_config.velocity.stop,
                                         csvy_model_config.velocity.num +
                                         1).cgs
        else:
            velocity_field_index = [
                field['name'] for field in csvy_model_config.datatype.fields
            ].index('velocity')
            velocity_unit = u.Unit(
                csvy_model_config.datatype.fields[velocity_field_index]
                ['unit'])
            velocity = csvy_model_data['velocity'].values * velocity_unit
            velocity = velocity.to('cm/s')

        if hasattr(csvy_model_config, 'density'):
            homologous_density = HomologousDensity.from_csvy(
                config, csvy_model_config)
        else:
            time_0 = csvy_model_config.model_density_time_0
            density_field_index = [
                field['name'] for field in csvy_model_config.datatype.fields
            ].index('density')
            density_unit = u.Unit(
                csvy_model_config.datatype.fields[density_field_index]['unit'])
            density_0 = csvy_model_data['density'].values * density_unit
            density_0 = density_0.to('g/cm^3')[1:]
            density_0 = density_0.insert(0, 0)
            homologous_density = HomologousDensity(density_0, time_0)

        no_of_shells = len(velocity) - 1

        # TODO -- implement t_radiative
        #t_radiative = None
        if temperature:
            t_radiative = temperature
        elif hasattr(csvy_model_data, 'columns'):
            if 't_rad' in csvy_model_data.columns:
                t_rad_field_index = [
                    field['name']
                    for field in csvy_model_config.datatype.fields
                ].index('t_rad')
                t_rad_unit = u.Unit(
                    csvy_model_config.datatype.fields[t_rad_field_index]
                    ['unit'])
                t_radiative = csvy_model_data['t_rad'].iloc[
                    0:].values * t_rad_unit
            else:
                t_radiative = None
        elif config.plasma.initial_t_rad > 0 * u.K:
            t_radiative = np.ones(no_of_shells) * config.plasma.initial_t_rad
        else:
            t_radiative = None

        dilution_factor = None
        if hasattr(csvy_model_data, 'columns'):
            if 'dilution_factor' in csvy_model_data.columns:
                dilution_factor = csvy_model_data['dilution_factor'].iloc[
                    0:].to_numpy()

        if config.plasma.initial_t_inner < 0.0 * u.K:
            luminosity_requested = config.supernova.luminosity_requested
            t_inner = None
        else:
            luminosity_requested = None
            t_inner = config.plasma.initial_t_inner

        if hasattr(csvy_model_config, 'abundance'):
            abundances_section = csvy_model_config.abundance
            abundance, isotope_abundance = read_uniform_abundances(
                abundances_section, no_of_shells)
        else:
            index, abundance, isotope_abundance = parse_csv_abundances(
                csvy_model_data)

        abundance = abundance.replace(np.nan, 0.0)
        abundance = abundance[abundance.sum(axis=1) > 0]
        abundance = abundance.loc[:, 1:]
        abundance.columns = np.arange(abundance.shape[1])

        norm_factor = abundance.sum(axis=0) + isotope_abundance.sum(axis=0)

        if np.any(np.abs(norm_factor - 1) > 1e-12):
            logger.warning("Abundances have not been normalized to 1."
                           " - normalizing")
            abundance /= norm_factor
            isotope_abundance /= norm_factor

        #isotope_abundance = IsotopeAbundances(isotope_abundance)
        isotope_abundance = IsotopeAbundances(
            isotope_abundance, time_0=csvy_model_config.model_isotope_time_0)
        #isotope_abundance.time_0 = csvy_model_config.model_isotope_time_0

        return cls(velocity=velocity,
                   homologous_density=homologous_density,
                   abundance=abundance,
                   isotope_abundance=isotope_abundance,
                   time_explosion=time_explosion,
                   t_radiative=t_radiative,
                   t_inner=t_inner,
                   luminosity_requested=luminosity_requested,
                   dilution_factor=dilution_factor,
                   v_boundary_inner=v_boundary_inner,
                   v_boundary_outer=v_boundary_outer,
                   electron_densities=electron_densities)
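A usage sketch for from_csvy; the YAML file name is a placeholder for a TARDIS configuration whose csvy_model key points at a CSVY model file:

from tardis.io.config_reader import Configuration
from tardis.model import Radial1DModel

# 'tardis_csvy_example.yml' is hypothetical; it must reference a csvy_model file
config = Configuration.from_yaml('tardis_csvy_example.yml')
model = Radial1DModel.from_csvy(config)

print(model.no_of_shells)
print(model.abundance)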
Example #6
def tape9_to_sparse(tape9s,
                    phi,
                    format='csr',
                    decaylib='decay.lib',
                    include_fission=True,
                    alpha_as_He4=False):
    """Converts a TAPE9 file to a sparse matrix.

    Parameters
    ----------
    tape9s : list of str
        The filenames of the TAPE9 files to convert.
    phi : float
        The neutron flux in [n / cm^2 / sec]
    format : str, optional
        Format of the sparse matrix created.
    decaylib : str, optional
        A path to a TAPE9 file containing the decay data libraries if the
        decay libraries are not present in tape9. If this is a relative path,
        it is taken relative to the given tape9 location.
    include_fission : bool, optional
        Flag for whether or not the fission data should be included in the
        resultant matrix.
    alpha_as_He4 : bool, optional
        Flag for whether alpha particles should be tracked as He4 nuclides.

    Returns
    -------
    mats : list of scipy.sparse matrices
        Sparse matrices in the specified format, one per TAPE9 file.
    nucs : list
        The list of nuclide names in canonical order.
    """
    all_decays_consts, all_gammas, all_sigma_ij = [], [], []
    all_sigma_fission, all_fission_product_yields = [], []
    all_alpha_ij, all_gamma_alphas = [], []
    nucs = set()
    mats = []
    # seed initial nucs with known atomic masses
    data.atomic_mass('u235')
    for tape9 in tape9s:
        print("Getting data for", tape9)
        t9 = parse_tape9(tape9)
        decay = find_decaylib(t9, tape9, decaylib)

        for i in data.atomic_mass_map.keys():
            if nucname.iselement(i):
                continue
            try:
                nucs.add(nucname.name(i))
            except RuntimeError:
                pass

        # get the tape 9 data
        nucs, decays_consts, gammas, gammas_alphas = decay_data(decay,
                                                                nucs=nucs)
        nucs, sigma_ij, sigma_fission, fission_product_yields, alpha_ij = cross_section_data(
            t9, nucs=nucs)

        if not include_fission:
            sigma_fission = {}
            fission_product_yields = {}
        if not alpha_as_He4:
            gammas_alphas = {}
            alpha_ij = {}
        all_decays_consts.append(decays_consts)
        all_gammas.append(gammas)
        all_sigma_ij.append(sigma_ij)
        all_sigma_fission.append(sigma_fission)
        all_fission_product_yields.append(fission_product_yields)
        all_alpha_ij.append(alpha_ij)
        all_gamma_alphas.append(gammas_alphas)

    nucs = sort_nucs(nucs)
    for i in range(len(tape9s)):
        dok = create_dok(phi, nucs, all_decays_consts[i], all_gammas[i],
                         all_sigma_ij[i], all_sigma_fission[i],
                         all_fission_product_yields[i], all_alpha_ij[i],
                         all_gamma_alphas[i])
        rows, cols, vals, shape = dok_to_sparse_info(nucs, dok)
        mats.append(SPMAT_FORMATS[format]((vals, (rows, cols)), shape=shape))
    return mats, nucs
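A usage sketch; the TAPE9 file names and flux value below are placeholders:

# hypothetical ORIGEN TAPE9 libraries and a nominal flux
tape9s = ['pwr_cycle1.tape9', 'pwr_cycle2.tape9']
phi = 4e14  # n / cm^2 / s

mats, nucs = tape9_to_sparse(tape9s, phi, format='csr',
                             decaylib='decay.lib',
                             include_fission=True)
# one sparse matrix per TAPE9 file, all built over the same sorted nuclide list
assert len(mats) == len(tape9s)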