Example #1
0
def test_pdwrire_cols(tmpdir):
    """Verify pdwrite_cols accepts a variable number of data columns."""
    out_path = str(tmpdir.join("pd_multicol_test.dat"))

    first = range(5)
    second = range(5, 10)

    # pdwrite_cols signals a successful write by returning 0.
    assert io.pdwrite_cols(out_path, first, second, first) == 0
    assert io.pdwrite_cols(out_path, first) == 0
    assert (
        io.pdwrite_cols(
            out_path,
            first,
            second,
            first,
            second,
            header=["headers", "for", "column", "labels"],
        )
        == 0
    )
    assert io.pdwrite_cols(out_path, first, second, sep=",", index=True) == 0

    # Remove the temporary output file.
    utils.silent_remove(out_path)
Example #2
0
def test_write_col_errors_bad_keyword(tmpdir):
    """An unknown keyword argument to pdwrite_cols raises TypeError."""
    out_path = str(tmpdir.join("pd_multicol_test.dat"))
    values = range(5)

    with pytest.raises(TypeError):
        io.pdwrite_cols(out_path, values, bad="keyword")
Example #3
0
def test_write_col_errors_different_length(tmpdir):
    """Columns of unequal length make pdwrite_cols raise ValueError."""
    out_path = str(tmpdir.join("pd_multicol_test.dat"))

    col_a = range(5)
    col_b = range(5, 10)
    mismatched = range(6)  # One element longer than the other columns.

    # Uneven data lengths must be rejected.
    with pytest.raises(ValueError):
        io.pdwrite_cols(out_path, col_a, col_b, mismatched)
Example #4
0
def test_write_col_errors(tmpdir):
    """A header that does not match the data columns raises ValueError."""
    out_path = str(tmpdir.join("pd_multicol_test.dat"))

    col_a = range(5)
    col_b = range(5, 10)
    mismatched = range(6)  # Different length from the other columns.

    # A bad header must be rejected.
    with pytest.raises(ValueError):
        io.pdwrite_cols(
            out_path,
            col_a,
            col_b,
            mismatched,
            header=["too", "many", "values"],
        )
Example #5
0
    def to_file(self,
                fname: str,
                header: Optional[List[str]] = None,
                fmt: str = "%11.8f"):
        """Save the atmospheric model to a txt file.

        Converts micron back into nanometers to be consistent with from_file().

        Parameters
        ----------
        fname: str
            Name of atmosphere file to save to.
        header:
            Header lines to add. Defaults to the standard four-column labels.
        fmt: str
             String formatting applied to the float columns.
        """
        # Fall back to the canonical column labels when none are supplied.
        column_labels = header
        if column_labels is None:
            column_labels = ["# atm_wav(nm)", "atm_flux", "atm_std_flux", "atm_mask"]
        # wl is stored in micron; multiply by 1000 to write nanometers.
        return io.pdwrite_cols(
            fname,
            self.wl * 1000,
            self.transmission,
            self.std,
            self.mask.astype(int),
            header=column_labels,
            float_format=fmt,
        )
Example #6
0
def test_pdwrire_cols():
    """Test writer that can take variable column numbers.

    Fix: the final clean-up previously ran only when every assertion
    passed, so a failing run left ``pd_multicol_test.dat`` behind in the
    repository's data directory.  The body is now wrapped in
    ``try``/``finally`` so the file is always removed, and the output
    directory is created up-front so the test does not depend on the
    checkout layout.
    """
    filedir = "data/test_data"
    os.makedirs(filedir, exist_ok=True)  # Robustness: directory may not exist.
    pd_multicol_name = os.path.join(filedir, "pd_multicol_test.dat")

    data1 = range(5)
    data2 = range(5, 10)
    bad_data = range(6)  # Different length

    try:
        # 0 means successful write
        assert 0 == io.pdwrite_cols(pd_multicol_name, data1, data2, data1)
        assert 0 == io.pdwrite_cols(pd_multicol_name, data1)
        assert 0 == io.pdwrite_cols(
            pd_multicol_name,
            data1,
            data2,
            data1,
            data2,
            header=["headers", "for", "column", "labels"],
        )
        assert 0 == io.pdwrite_cols(pd_multicol_name,
                                    data1,
                                    data2,
                                    sep=",",
                                    index=True)

        # test uneven data lengths
        with pytest.raises(ValueError):
            io.pdwrite_cols(pd_multicol_name, data1, data2, bad_data)

        # test bad header
        with pytest.raises(ValueError):
            io.pdwrite_cols(pd_multicol_name,
                            data1,
                            data2,
                            bad_data,
                            header=["too", "many", "values"])

        with pytest.raises(TypeError):
            io.pdwrite_cols(pd_multicol_name, data1, bad="keyword")
    finally:
        # clean-up always runs, even when an assertion above failed
        utils.silent_remove(pd_multicol_name)
Example #7
0
def main(bands="J", use_unshifted=False, save=False, snr=100, ref_band="J"):
    """Main function that calls calc_precision.

    Parameters
    ----------
    bands: str or list of str or None, Default="J"
        Band letters to use. None (or including "ALL") does the bands
        Z through K.
    use_unshifted: bool default=False
        Flag to start with the un-Doppler shifted atmmodel.
    save: bool
        Save results to file.
    snr: int, default=100
        Signal-to-noise level passed through to calculate_prec.
    ref_band: str, default="J"
        Reference band for the SNR normalization; also part of the saved
        results filename.

    """
    os.makedirs(eniric.paths["precision"], exist_ok=True)

    spectral_types = ["M0", "M3", "M6", "M9"]
    # Fix: the docstring allows bands=None, but `"ALL" in None` raised
    # TypeError; treat None the same as "ALL".
    if bands is None or "ALL" in bands:
        bands = ["Z", "Y", "J", "H", "K"]
    elif isinstance(bands, str):
        bands = [bands]

    vsini = ["1.0", "5.0", "10.0"]
    resolution = ["60k", "80k", "100k"]
    sampling = ["3"]

    results = calculate_prec(
        spectral_types,
        bands,
        vsini,
        resolution,
        sampling,
        plot_atm=False,
        plot_ste=False,
        plot_flux=False,
        paper_plots=False,
        rv_offset=0.0,
        use_unshifted=use_unshifted,
        snr=snr,
        ref_band=ref_band,
    )

    # Fix: header literal previously began with a stray "{" left over from
    # an unformatted format string ("{Combination...").
    print("Combination\t\tPrec_1\t\tPrec_2\t\tPrec_3")
    print("-" * 20)
    for key in results:
        print(
            "{0:s}\t\t{1:0.4f}\t{2:0.4f}\t{3:0.4f}".format(
                key, results[key][0], results[key][1], results[key][2]
            )
        )
    # Save precision results
    if save:
        output_filename = os.path.join(
            eniric.paths["precision"],
            "precision_results_2017_ref_band-{0}_snr-{1}.dat".format(ref_band, snr),
        )
        ids = []
        prec_1s = []
        prec_2s = []
        prec_3s = []
        for star in spectral_types:
            for band in bands:
                for vel in vsini:
                    for res in resolution:
                        # Must match the key format produced by calculate_prec.
                        id_string = "{0:s}-{1:s}-{2:.1f}-{3:s}".format(
                            star, band, float(vel), res
                        )
                        ids.append(id_string)
                        prec_1s.append(results[id_string][0].value)
                        prec_2s.append(results[id_string][1].value)
                        prec_3s.append(results[id_string][2].value)

        io.pdwrite_cols(
            output_filename,
            ids,
            prec_1s,
            prec_2s,
            prec_3s,
            header=["# id", r"prec_1 [m/s]", r"prec_2 [m/s]", r"prec_3 [m/s]"],
            float_format="%5.01f",
        )
        print("saved results to {}".format(output_filename))
0
def main(
    startype,
    temp,
    logg,
    metallicity,
    alpha,
    flux_type="photon",
    data_dir=None,
    phoenix_dir=None,
    replace=False,
):
    """Prepare datafiles for phoenix models that match the input parameters.

    This add the wavelength information to each spectra and converts
    to microns/photons if the flux_type="photons" is given.
    We do realise that this is a waste of space and it would be more
    storage efficient to just read in the phoenix raw fits files and
    wavelength file when needed.

    Parameters
    ----------
    startype : iterable of str
        Spectral-type labels ("M0", "M3", "M6", "M9"); each is mapped to an
        effective temperature which is appended to ``temp``.
    temp : list of float
        Effective temperatures to select. NOTE: mutated in place (stellar-type
        temperatures are appended).
    logg : container of float
        Surface gravities to select.
    metallicity : container of float
        Metallicity ([Fe/H]) values to select.
    alpha : container of float
        Alpha-enhancement values to select (only checked for filenames
        containing "Alpha=").
    flux_type : str, default "photon"
        If "photon", flux is converted to photon counts; otherwise energy
        flux per micron is written.
    data_dir : str, optional
        Output directory; defaults to ``eniric.paths["phoenix_dat"]``.
    phoenix_dir : str, optional
        Directory of raw PHOENIX fits files; defaults to
        ``eniric.paths["phoenix_raw"]``.
    replace : bool, default False
        Overwrite existing output files when True.

    Returns
    -------
    int
        Always 0.
    """
    if data_dir is None:
        data_dir = eniric.paths["phoenix_dat"]
    os.makedirs(data_dir, exist_ok=True)

    if phoenix_dir is None:
        phoenix_dir = eniric.paths["phoenix_raw"]

    # Get Phoenix wavelength data (shared by every spectrum file).
    wavelength_file = "WAVE_PHOENIX-ACES-AGSS-COND-2011.fits"
    wavelength = fits.getdata(os.path.join(phoenix_dir, wavelength_file))

    if flux_type == "photon":
        file_suffix = "_wave_photon.dat"  # For saving output
    else:
        file_suffix = "_wave.dat"

    # Convert stellar types into effective temperatures (Kelvin).
    stellar_dict = {"M0": 3900.0, "M3": 3500.0, "M6": 2800.0, "M9": 2600.0}
    # Add temperature of stellar_type to temp list
    for star in startype:
        try:
            temp.append(stellar_dict[star])
        except KeyError:
            print(
                "Stellar type {0} is not implemented here (yet), submit and issue."
                .format(star))

    # Get all phoenix fits files we want to convert
    for (path, dirs, files) in os.walk(phoenix_dir):

        phoenix_files = []
        for f in files:
            # Test if filename meets conditions
            end_cond = f.endswith("PHOENIX-ACES-AGSS-COND-2011-HiRes.fits")

            try:
                # Parse temperature/logg/[Fe/H] (and optionally alpha) from
                # the PHOENIX filename, e.g. "lte03900-4.50-0.0.PHOENIX-...".
                if "Alpha=" in f:
                    (
                        match_temp, match_logg, match_feh, match_alpha
                    ) = re.search(
                        r"(\d{5})-(\d\.\d\d)([+\-]\d\.\d)\.Alpha=([+\-]\d\.\d\d)\.",
                        f).groups()
                    alpha_cond = float(match_alpha) in alpha
                else:
                    (match_temp, match_logg,
                     match_feh) = re.search(r"(\d{5})-(\d\.\d\d)([+\-]\d\.\d)",
                                            f).groups()
                    alpha_cond = True  # To make work
            except AttributeError:
                # This file doesn't match what we want so continue with next loop
                continue

            temp_cond = float(match_temp) in temp
            feh_cond = float(match_feh) in metallicity
            logg_cond = float(match_logg) in logg

            if np.all([end_cond, temp_cond, feh_cond, logg_cond,
                       alpha_cond]):  # All conditions met
                # Matching file found
                phoenix_files.append(f)
            else:
                pass

        for phoenix_file in phoenix_files:
            # Mirror the metallicity sub-folder structure in the output dir.
            z_folder = path.split(os.sep)[-1]
            os.makedirs(os.path.join(data_dir, z_folder), exist_ok=True)
            output_filename = os.path.join(data_dir, z_folder,
                                           phoenix_file[:-5] +
                                           file_suffix)  # Name of .dat file
            if os.path.exists(output_filename) and not replace:
                print("Skipping as {0} already exists (use -r to replace)".
                      format(output_filename))
                continue
            spectra = fits.getdata(os.path.join(path, phoenix_file))

            # Need to add conversions pedro preformed to flux!
            """The energy units of Phoenix fits files is erg/s/cm**2/cm
            We transform the flux into photons in the read_spectrum()
            function by multiplying the flux result by the wavelength (lambda)

                Flux_photon = Flux_energy/Energy_photon
            with
                Energy_photon = h*c/lambda
            Flux_photon = Flux_energy * lambda / (h * c)

            Here we convert the flux into erg/s/cm**2/\mum by multiplying by 10**-4 cm/\mum
            Flux_e(erg/s/cm**2/\mum)  = Flux_e(erg/s/cm**2/cm) * (1 cm) / (10000 \mum)
            """

            spectra_micron = spectra * 10**-4  # Convert   /cm    to  /micron

            if flux_type == "photon":
                wavelength_micron = (wavelength * 10**-4
                                     )  # Convert Angstrom to   micron

                spectra_photon = (
                    spectra_micron * wavelength_micron
                )  # Ignoring constants h*c in photon energy equation

                result = io.pdwrite_cols(
                    output_filename,
                    wavelength_micron,
                    spectra_photon,
                    header=["# Wavelength (micron)", r"Flux (photon/s/cm^2)"],
                    float_format="%.7f",
                )

            else:
                result = io.pdwrite_cols(
                    output_filename,
                    wavelength,
                    spectra_micron,
                    header=[
                        "# Wavelength (Angstrom)", r"Flux (erg/s/cm^2/micron)"
                    ],
                    float_format=None,
                )

            # pdwrite_cols returns 0 on success (falsy), non-zero on failure.
            if not result:
                print("Successfully wrote to ", output_filename)
            else:
                print("Failed to write to ", output_filename)

    print("Done")
    return 0