Example #1
def cdsd2df(fname,
            version="hitemp",
            count=-1,
            cache=False,
            verbose=True,
            drop_non_numeric=True):
    """Convert a CDSD-HITEMP [1]_ or CDSD-4000 [2]_ file to a Pandas dataframe

    Parameters
    ----------

    fname: str
        CDSD file name

    version: str ('4000', 'hitemp')
        CDSD version

    count: int
        number of items to read (-1 means the whole file)

    cache: boolean, or ``'regen'``
        if ``True``, a pandas-readable HDF5 file is generated on first access,
        and later reused. This saves on the datatype cast and conversion and
        improves performance a lot (but changes in the database are not
        taken into account). If ``False``, no cache file is used. If ``'regen'``,
        the cache file is rebuilt. Default ``False``.

    Other Parameters
    ----------------

    drop_non_numeric: boolean
        if ``True``, non-numeric columns are dropped. This improves performance,
        but make sure all the columns you need have been converted to numeric
        formats beforehand. Default ``True``. Note that if a cache file is loaded
        it will be left untouched.

    Returns
    -------

    df: pandas Dataframe
        dataframe containing all lines and parameters

    Notes
    -----

    CDSD-4000 Database can be downloaded from [3]_

    Performances: I had huge performance troubles with this function, because the
    files are huge (500k lines) and the format is too special (no space between
    numbers...) to apply optimized methods such as pandas'. A line-by-line reading
    isn't so bad, using struct to parse each line, but we waste time determining
    what every line is. I ended up using numpy's ``fromfile``, no longer treating
    *\\n* (line return) as a special character, followed by a second numpy call to
    cast to the correct format. That ended up being twice as fast (a sketch of this
    two-pass read follows the timings below).

        - initial:                      20s / loop
        - with mmap:                    worse
        - w/o readline().rstrip('\\n'):  still 20s
        - numpy fromfiles:              17s
        - no more readline, 2x fromfile 9s
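
    Sketch of that two-pass read (the field names and widths below are
    illustrative, not the actual CDSD record layout)::

        import numpy as np

        # 1st pass: read fixed-width records as raw byte strings; the trailing
        # field absorbs the end-of-line character
        raw = np.dtype([("wav", "S12"), ("int", "S10"), ("rest", "S57"), ("eol", "S1")])
        records = np.fromfile("cdsd_fragment.txt", dtype=raw)  # hypothetical file name
        # 2nd pass: cast only the fields that are needed to numeric types
        wav = records["wav"].astype(np.float64)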

    Think about using cache mode too:

        - no cache mode                 9s
        - cache mode, first time        22s
        - cache mode, then              2s

    Moving to HDF5:

    On cdsd_02069_02070 (56 Mb)

    Reading::

        cdsd2df(): 9.29 s
        cdsd2df(cache=True [old .txt version]): 2.3s
        cdsd2df(cache=True [new h5 version, table]): 910ms
        cdsd2df(cache=True [new h5 version, fixed]): 125ms

    Storage::

        %timeit df.to_hdf("cdsd_02069_02070.h5", "df", format="fixed")  337ms
        %timeit df.to_hdf("cdsd_02069_02070.h5", "df", format="table")  1.03s

    References
    ----------

    Note that CDSD-HITEMP is used as the line database for CO2 in HITEMP 2010

    .. [1] `HITEMP 2010, Rothman et al., 2010 <https://www.sciencedirect.com/science/article/pii/S002240731000169X>`_

    .. [2] `CDSD-4000 article, Tashkun et al., 2011 <https://www.sciencedirect.com/science/article/pii/S0022407311001154>`_

    .. [3] `CDSD-4000 database <ftp://ftp.iao.ru/pub/CDSD-4000/>`_

    See Also
    --------

    :func:`~radis.io.hitran.hit2df`

    """
    metadata = {}
    metadata["last_modification"] = time.ctime(getmtime(fname))

    if verbose >= 2:
        print("Opening file {0} (format=CDSD {1}, cache={2})".format(
            fname, version, cache))
        print("Last Modification time: {0}".format(
            metadata["last_modification"]))

    if version == "hitemp":
        columns = columns_hitemp
    elif version == "4000":
        columns = columns_4000
    else:
        raise ValueError("Unknown CDSD version: {0}".format(version))

    # Use cache file if possible
    fcache = cache_file_name(fname)
    if cache and exists(fcache):
        df = load_h5_cache_file(
            fcache,
            cache,
            metadata=metadata,
            current_version=radis.__version__,
            last_compatible_version=OLDEST_COMPATIBLE_VERSION,
            verbose=verbose,
        )
        if df is not None:
            return df

    # %% Start reading the full file

    df = parse_hitran_file(fname, columns, count)

    # Remove non numerical attributes
    if drop_non_numeric:
        replace_PQR_with_m101(df)
        df = drop_object_format_columns(df, verbose=verbose)

    # Cache mode requested, but the cache file doesn't exist yet (else we would have returned above)
    if cache:
        if verbose:
            print("Generating cached file: {0}".format(fcache))
        try:
            save_to_hdf(
                df,
                fcache,
                metadata=metadata,
                version=radis.__version__,
                key="df",
                overwrite=True,
                verbose=verbose,
            )
        except Exception:
            if verbose:
                print(
                    "An error occurred in cache file generation. Check file access rights"
                )

    return df
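

# Hypothetical usage sketch (the file name below is illustrative): with
# cache=True, the first call parses the text file and writes an HDF5 cache next
# to it, so later calls reload it in a fraction of the time.
if __name__ == "__main__":
    df = cdsd2df("cdsd_02069_02070", version="hitemp", cache=True)  # parse + write cache
    df = cdsd2df("cdsd_02069_02070", version="hitemp", cache=True)  # fast reload from cache
    print("{0} lines loaded".format(len(df)))
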
Example #2
    def __init__(
        self,
        energy_levels,
        isotope,
        levelsfmt,  # ='cdsd-pc',
        use_cached=True,
        use_json=None,
        verbose=True,
    ):

        # %% Init

        # Initialize PartitionFunctionCalculator for this electronic state
        ElecState = ElectronicState("CO2", isotope, "X", "1Σu+")
        super(PartFuncCO2_CDSDcalc, self).__init__(ElecState)

        # Check inputs ('return' is not mentioned in the signature: it just
        # returns once the cache file name has been set)
        assert use_cached in [True, False, "regen", "force", "return"]
        if isotope not in [1, 2]:
            raise ValueError(
                "CDSD Energies not defined for isotope: {0}".format(isotope)
            )
        if use_json is not None:
            warn(
                DeprecationWarning(
                    "use_json replaced with faster HDF5-based use_cached"
                )
            )
        # Get vibrational level definitions that match Energy Database (in particular
        # how Evib and Erot are calculated)
        # This is needed to be able to match the levels in the Line Database and
        # the levels in the Energy database
        if levelsfmt == "cdsd-p":
            viblvl_label = "p"
        elif levelsfmt == "cdsd-pc":
            viblvl_label = "pc"
        elif levelsfmt == "cdsd-pcN":
            viblvl_label = "pcN"
        elif levelsfmt == "cdsd-hamil":
            viblvl_label = "pcJN"
        elif levelsfmt is None:
            # don't label the levels. We won't be able to use the EnergyDatabase
            # to fetch vibrational energies for lines, but it can still be used to
            # calculate partition functions independently of a Spectrum calculation
            viblvl_label = None
        else:
            raise ValueError(
                "Unknown Energy database format: levelsfmt = `{0}`".format(levelsfmt)
                + ". Use one of: `cdsd-p`, `cdsd-pc`, `cdsd-pcN`,`cdsd-hamil`"
            )

        # Store defaults
        self.verbose = verbose
        self.use_cached = use_cached
        self.levelsfmt = levelsfmt
        self.viblvl_label = viblvl_label

        # Get variables to store in metadata  (after default values have been set)
        molecule = "CO2"  # will be stored in cache file metadata
        _discard = [
            "self",
            "energy_levels",
            "verbose",
            "ElecState",
            "electronic_state",
            "use_json",
            "use_cached",
        ]
        # (dev) locals() automatically stores all variables: levelsfmt, viblvl_label, etc.
        metadata = filter_metadata(locals(), discard_variables=_discard)

        # %% Get levels

        # Depending on the use_cached value:
        # ... if True, use the cache file (and generate it if it doesn't exist).
        # ... if 'regen', regenerate the cache file.
        # ... if 'force', raise an error if the cache file doesn't exist.
        # If the file is deprecated, regenerate it unless 'force' was used

        # Load cache file if exists

        cachefile = energy_levels + ".h5"
        self.cachefile = cachefile

        # If 'return', return once the cache file name has been set (used for tests)
        if use_cached == "return":
            return

        df = load_h5_cache_file(
            cachefile,
            use_cached,
            metadata=metadata,
            current_version=radis.__version__,
            last_compatible_version=OLDEST_COMPATIBLE_VERSION,
            verbose=verbose,
        )

        if df is None:  # Read normal file
            df = pd.read_csv(energy_levels, comment="#", delim_whitespace=True)
            df = self._add_degeneracies(df)
            df = self._add_levels(df)

        self.df = df  # Store

        if use_cached and not exists(cachefile):
            save_to_hdf(
                self.df,
                cachefile,
                metadata=metadata,
                version=radis.__version__,
                key="df",
                overwrite=True,
                verbose=verbose,
            )
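

# Hypothetical usage sketch (the energy-level file path is illustrative): build
# the partition function calculator with levels labelled by their (p, c)
# vibrational quanta, caching the parsed levels to an HDF5 file.
if __name__ == "__main__":
    parfunc = PartFuncCO2_CDSDcalc(
        energy_levels="CO2_i1_levels_cdsd.levels",  # hypothetical path
        isotope=1,
        levelsfmt="cdsd-pc",
        use_cached=True,
    )
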
Example #3
def hit2df(fname, count=-1, cache=False, verbose=True, drop_non_numeric=True):
    """Convert a HITRAN/HITEMP [1]_ file to a Pandas dataframe

    Parameters
    ----------

    fname: str
        HITRAN-HITEMP file name

    count: int
        number of items to read (-1 means the whole file)

    cache: boolean, or ``'regen'`` or ``'force'``
        if ``True``, a pandas-readable HDF5 file is generated on first access,
        and later reused. This saves on the datatype cast and conversion and
        improves performance a lot (but changes in the database are not
        taken into account). If ``False``, no cache file is used. If ``'regen'``,
        the cache file is rebuilt. Default ``False``.

    Other Parameters
    ----------------

    drop_non_numeric: boolean
        if ``True``, non-numeric columns are dropped. This improves performance,
        but make sure all the columns you need have been converted to numeric
        formats beforehand. Default ``True``. Note that if a cache file is loaded
        it will be left untouched.

    Returns
    -------

    df: pandas Dataframe
        dataframe containing all lines and parameters

    Notes
    -----

    Performances: see CDSD-HITEMP parser

    References
    ----------

    .. [1] `HITRAN 1996, Rothman et al., 1998 <https://www.sciencedirect.com/science/article/pii/S0022407398000788>`__

    See Also
    --------

    :func:`~radis.io.cdsd.cdsd2df`

    """
    metadata = {}
    metadata["last_modification"] = time.ctime(getmtime(fname))
    if verbose >= 2:
        print("Opening file {0} (cache={1})".format(fname, cache))
        print("Last modification time: {0}".format(
            metadata["last_modification"]))

    columns = columns_2004

    # Use cache file if possible
    fcache = cache_file_name(fname)
    if cache and exists(fcache):
        df = load_h5_cache_file(
            fcache,
            cache,
            metadata=metadata,
            current_version=radis.__version__,
            last_compatible_version=OLDEST_COMPATIBLE_VERSION,
            verbose=verbose,
        )
        if df is not None:
            return df

    # Detect the molecule by reading the start of the file
    try:
        with open(fname) as f:
            mol = get_molecule(int(f.read(2)))
    except UnicodeDecodeError as err:
        raise ValueError(
            "You're trying to read a binary file {0} ".format(fname) +
            "instead of an HITRAN file") from err

    # %% Start reading the full file

    df = parse_hitran_file(fname, columns, count)

    # %% Post processing

    # Assert there is only one molecule per database, else the group-based
    # data reading doesn't make sense
    nmol = len(set(df["id"]))
    if nmol == 0:
        raise ValueError("Databank looks empty")
    elif nmol != 1:
        # Crash with an explicit error message
        try:
            secondline = df.iloc[1]
        except IndexError:
            secondline = ""
        raise ValueError(
            "Multiple molecules in database ({0}). Current ".format(nmol) +
            "spectral code only computes 1 species at the time. Use MergeSlabs. "
            +
            "Verify the parsing was correct by looking at the first row below: "
            + "\n{0}".format(df.iloc[0]) +
            "\n----------------\nand the second row " +
            "below: \n{0}".format(secondline))

    # Add local quanta attributes, based on the HITRAN group
    df = parse_local_quanta(df, mol)

    # Add global quanta attributes, based on the HITRAN class
    df = parse_global_quanta(df, mol)

    # Remove non numerical attributes
    if drop_non_numeric:
        if "branch" in df:
            replace_PQR_with_m101(df)
        df = drop_object_format_columns(df, verbose=verbose)

    # Cache mode requested, but the cache file doesn't exist yet (else we would have returned above)
    if cache:
        if verbose:
            print("Generating cached file: {0}".format(fcache))
        try:
            save_to_hdf(
                df,
                fcache,
                metadata=metadata,
                version=radis.__version__,
                key="df",
                overwrite=True,
                verbose=verbose,
            )
        except Exception:
            if verbose:
                print(sys.exc_info())
                print(
                    "An error occurred in cache file generation. Check file access rights"
                )

    return df
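

# Hypothetical usage sketch (the .par file name is illustrative): parse a
# HITRAN/HITEMP 2004-format file once, caching the result to HDF5 for fast reloads.
if __name__ == "__main__":
    df = hit2df("02_2500-3000_HITEMP2010.par", cache=True)  # hypothetical file name
    print("Read {0} lines for molecule id {1}".format(len(df), df["id"].iloc[0]))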