Example 1
def write_file_to_molecfit(molecfit_folder,
                           name,
                           headers,
                           waves,
                           spectra,
                           ii,
                           plot=False):
    """This is a wrapper for writing a spectrum from a list to molecfit format.
    name is the filename of the fits file that is the output.
    headers is the list of astropy header objects associated with the list of spectra
    in the spectra variable. ii is the number from that list that needs to be written (meaning
    that this routine is expected to be called as part of a loop).

    The user must make sure that the wavelength axes of these spectra are in air, in the observatory
    rest frame (meaning not BERV_corrected). Tayphs read_e2ds() function should have done this
    automatically.
    """
    import astropy.io.fits as fits
    from scipy import stats
    import copy
    import tayph.functions as fun
    import astropy.constants as const
    import astropy.units as u
    import numpy as np
    from tayph.vartests import typetest
    import tayph.util as ut
    import sys
    import matplotlib.pyplot as plt
    typetest(ii, int, 'ii in write_file_to_molecfit()')
    molecfit_folder = ut.check_path(molecfit_folder, exists=True)
    wave = waves[int(ii)]
    spectrum = copy.deepcopy(spectra[int(ii)])  #Copy so that flagging non-positive values
    #as NaN below does not alter the caller's array in place.
    npx = len(spectrum)

    #Need to un-BERV-correct the s1d spectra to go back to the frame of the Earth's atmosphere.
    #This is no longer necessary as of Feb 17, because read_e2ds now uncorrects HARPS, ESPRESSO
    #and UVES spectra by default.
    # if mode == 'HARPS':
    #     berv = headers[ii]['HIERARCH ESO DRS BERV']
    # elif mode == 'HARPSN':
    #     berv = headers[ii]['HIERARCH TNG DRS BERV']
    # elif mode in ['ESPRESSO','UVES-red','UVES-blue']:
    #     berv = headers[ii]['HIERARCH ESO QC BERV']
    # wave = copy.deepcopy(wave*(1.0-(berv*u.km/u.s/const.c).decompose().value))
    spectrum[spectrum <= 0] = np.nan
    err = np.sqrt(spectrum)
    # spectrum[np.isnan(spectrum)]=0
    # err[np.isnan(err)]=0
    if plot:
        plt.plot(wave, spectrum)
        plt.xlabel('Wavelength')
        plt.ylabel('Flux')
        plt.show()
        plt.plot(wave, err)
        plt.xlabel('Wavelength')
        plt.ylabel('Error')
        plt.show()
    #Write out the s1d spectrum in a format that molecfit eats.
    #This is a fits file with an empty primary extension that contains the header of the original s1d file.
    #Plus an extension that contains a binary table with 3 columns.
    #The names of these columns need to be indicated in the molecfit parameter file,
    #as well as the name of the file itself. This is currently hardcoded.
    col1 = fits.Column(name='wavelength', format='1D', array=wave)
    col2 = fits.Column(name='flux', format='1D', array=spectrum)
    col3 = fits.Column(name='err_flux', format='1D', array=err)
    cols = fits.ColDefs([col1, col2, col3])
    tbhdu = fits.BinTableHDU.from_columns(cols)
    prihdr = copy.deepcopy(headers[ii])
    prihdu = fits.PrimaryHDU(header=prihdr)
    thdulist = fits.HDUList([prihdu, tbhdu])
    thdulist.writeto(molecfit_folder / name, overwrite=True)
    ut.tprint(f'Spectrum {ii} written to {str(molecfit_folder/name)}')
    return (0)
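
A minimal usage sketch (not part of the original source), writing two synthetic spectra; the folder and file names are hypothetical:

import numpy as np
import astropy.io.fits as fits
from pathlib import Path

molecfit_folder = Path('molecfit_input')  #Hypothetical folder; check_path requires it to exist.
molecfit_folder.mkdir(exist_ok=True)
hdr = fits.Header()
hdr['OBJECT'] = 'dummy'
waves = [np.linspace(500.0, 600.0, 1000) for _ in range(2)]  #Wavelengths in nm.
spectra = [np.random.poisson(1000.0, 1000).astype(float) for _ in range(2)]  #Fake counts.
headers = [hdr.copy() for _ in range(2)]
for ii in range(len(spectra)):
    write_file_to_molecfit(molecfit_folder, f'spectrum_{ii}.fits', headers, waves, spectra, ii)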
Example 2
def inject_model(list_of_wls,
                 list_of_orders,
                 dp,
                 modelname,
                 model_library='library/models'):
    """This function takes a list of spectral orders and injects a model with library
    identifier modelname, and system parameters as defined in dp. The model is blurred taking into
    account spectral resolution and rotation broadening (with an LSF as per Brogi et al.) and
    finite-exposure broadening (with a box LSF).

    It returns a copy of the list of orders with the model injected."""

    import tayph.util as ut
    import tayph.system_parameters as sp
    import tayph.models
    import astropy.constants as const
    import numpy as np
    import scipy
    import tayph.operations as ops
    from tayph.vartests import typetest, dimtest
    import pdb
    import copy
    import matplotlib.pyplot as plt

    # dimtest(order,[0,len(wld)])
    dp = ut.check_path(dp)
    typetest(modelname, str, 'modelname in models.inject_model()')
    typetest(model_library, str, 'model_library in models.inject_model()')

    c = const.c.to('km/s').value  #In km/s
    Rd = sp.paramget('resolution', dp)
    planet_radius = sp.paramget('Rp', dp)
    inclination = sp.paramget('inclination', dp)
    P = sp.paramget('P', dp)
    transit = sp.transit(dp)
    n_exp = len(transit)
    vsys = sp.paramget('vsys', dp)
    rv = sp.RV(dp) + vsys
    dRV = sp.dRV(dp)
    phi = sp.phase(dp)
    dimtest(transit, [n_exp])
    dimtest(rv, [n_exp])
    dimtest(phi, [n_exp])
    dimtest(dRV, [n_exp])

    mask = (transit - 1.0) / (np.min(transit - 1.0))

    wlm, fxm = get_model(modelname, library=model_library)
    if wlm[-1] <= wlm[0]:  #Reverse the wl axis if it's sorted the wrong way.
        wlm = np.flipud(wlm)
        fxm = np.flipud(fxm)

    #With the model and the relevant parameters in hand, now select only that
    #part of the model that covers the wavelengths of the orders provided.
    #A larger wavelength range would cost much extra time because the convolution
    #is a slow operation.

    N_orders = len(list_of_wls)
    if N_orders != len(list_of_orders):
        raise RuntimeError(
            f'in models.inject_model: List_of_wls and list_of_orders are not of the '
            f'same length ({N_orders} vs {len(list_of_orders)})')

    if np.min(wlm) > np.min(list_of_wls) - 1.0 or np.max(
            wlm) < np.max(list_of_wls) + 1.0:
        ut.tprint(
            'WARNING in model injection: Data grid falls (partly) outside of model range. '
            'Setting the missing area to 1.0 (meaning no planet absorption).')

    modelsel = (wlm >= np.min(list_of_wls) - 1.0) & (wlm <= np.max(list_of_wls) + 1.0)

    wlm = wlm[modelsel]
    fxm = fxm[modelsel]

    fxm_b = ops.blur_rotate(wlm, fxm, c / Rd, planet_radius, P,
                            inclination)  #Only do this once per dataset.

    oversampling = 2.5
    wlm_cv, fxm_bcv, vstep = ops.constant_velocity_wl_grid(
        wlm, fxm_b, oversampling=oversampling)

    if np.min(dRV) < c / Rd / 10.0:
        dRV_min = c / Rd / 10.0  #If the minimum dRV is less than 10% of the spectral
        #resolution, we introduce a lower limit to when we are going to blur, because the effect
        #becomes insignificant.
    else:
        dRV_min = np.min(dRV)

    if dRV_min / vstep < 3:  #Meaning, if the smoothing is going to be undersampled by this choice
        #in v_step, it means that the oversampling parameter in ops.constant_velocity_wl_grid was
        #not high enough. Then we adjust it. I choose a value of 3 here to be safe, even though
        #ops.smooth below accepts values as low as 2.
        oversampling_new = 3.0 / (
            dRV_min / vstep) * oversampling  #scale up the oversampling factor.
        wlm_cv, fxm_bcv, vstep = ops.constant_velocity_wl_grid(
            wlm, fxm_b, oversampling=oversampling_new)

    list_of_orders_injected = copy.deepcopy(list_of_orders)

    for i in range(n_exp):
        if dRV[i] >= c / Rd / 10.0:
            fxm_b2 = ops.smooth(fxm_bcv, dRV[i] / vstep, mode='box')
        else:
            fxm_b2 = copy.deepcopy(fxm_bcv)
        shift = 1.0 + rv[i] / c
        fxm_i = scipy.interpolate.interp1d(
            wlm_cv * shift, fxm_b2, fill_value=1.0,
            bounds_error=False)  #This is a class that can be called.
        #Fill_value = 1 because if the model does not fully cover the order, it will be padded with 1.0s,
        #assuming that we are dealing with a model that is in transmission.
        for j in range(len(list_of_orders)):
            list_of_orders_injected[j][i] *= (
                1.0 + mask[i] * (fxm_i(list_of_wls[j]) - 1.0))  #This assumes
            #that the model is in transit radii. This can definitely be vectorised!

        #These are for checking that the broadening worked as expected:
        # injection_total[i,:]= scipy.interpolate.interp1d(wlm_cv*shift,fxm_b2)(wld)
        # injection_rot_only[i,:]=scipy.interpolate.interp1d(wlm*shift,fxm_b)(wld)
        # injection_pure[i,:]=scipy.interpolate.interp1d(wlm*shift,fxm)(wld)

    # ut.save_stack('test.fits',[injection_pure,injection_rot_only,injection_total])
    # pdb.set_trace()
    # ut.writefits('test.fits',injection)
    # pdb.set_trace()

    return (list_of_orders_injected)
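
A call sketch rather than a runnable script (not part of the original source), because dp must point at a prepared tayph data folder whose config file defines resolution, Rp, inclination, P and vsys, and because the model identifier must exist in the library; all names below are hypothetical:

#list_of_wls and list_of_orders as returned by tayph's read routines (one 2D array per
#order), a hypothetical data folder and a hypothetical model identifier:
orders_injected = inject_model(list_of_wls, list_of_orders, 'data/WASP-121/night1',
                               'W121_FeI', model_library='models/library')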
Example 3
def set_molecfit_config(configpath):
    from pathlib import Path
    import tayph.system_parameters as sp
    import tayph.util as ut

    #Prepare for making formatted output.
    # terminal_height,terminal_width = subprocess.check_output(['stty', 'size']).split()

    Q1 = (
        'In what folder are parameter files defined and should (intermediate) molecfit output be '
        'written to?')
    Q2 = 'In what folder is the molecfit binary located?'
    Q3 = 'What is your python 3.x alias?'

    # configpath=get_molecfit_config()
    configpath = Path(configpath)
    if configpath.exists():
        ut.tprint(
            f'Molecfit configuration file already exists at {configpath}.')
        print('Overwriting existing values.')
        current_molecfit_input_folder = sp.paramget('molecfit_input_folder',
                                                    configpath,
                                                    full_path=True)
        current_molecfit_prog_folder = sp.paramget('molecfit_prog_folder',
                                                   configpath,
                                                   full_path=True)
        current_python_alias = sp.paramget('python_alias',
                                           configpath,
                                           full_path=True)

        ut.tprint(Q1)
        ut.tprint(
            f'Currently: {current_molecfit_input_folder} (leave empty to keep current '
            'value).')

        new_input_folder_input = str(input())
        if len(new_input_folder_input) == 0:
            new_molecfit_input_folder = ut.check_path(
                current_molecfit_input_folder, exists=True)
        else:
            new_molecfit_input_folder = ut.check_path(new_input_folder_input,
                                                      exists=True)
        print('')
        ut.tprint(Q2)
        ut.tprint(f'Currently: {current_molecfit_prog_folder}')
        new_prog_folder_input = str(input())
        if len(new_prog_folder_input) == 0:
            new_molecfit_prog_folder = ut.check_path(
                current_molecfit_prog_folder, exists=True)
        else:
            new_molecfit_prog_folder = ut.check_path(new_prog_folder_input,
                                                     exists=True)
        print('')
        ut.tprint(Q3)
        ut.tprint(f'Currently: {current_python_alias}')
        new_python_alias_input = str(input())
        if len(new_python_alias_input) == 0:
            new_python_alias = current_python_alias
        else:
            new_python_alias = new_python_alias_input
    else:  #This is actually the default mode of using this, because this function is generally
        #only called when tel.molecfit() is run for the first time and the config file doesn't exist yet.
        ut.tprint(Q1)
        new_molecfit_input_folder = ut.check_path(str(input()), exists=True)
        print('')
        ut.tprint(Q2)
        new_molecfit_prog_folder = ut.check_path(str(input()), exists=True)
        print('')
        ut.tprint(Q3)
        new_python_alias = str(input())

    with open(configpath, "w") as f:
        f.write(f'molecfit_input_folder   {str(new_molecfit_input_folder)}\n')
        f.write(f'molecfit_prog_folder   {str(new_molecfit_prog_folder)}\n')
        f.write(f'python_alias   {str(new_python_alias)}\n')

    ut.tprint(
        f'New molecfit configuration file successfully written to {configpath}')
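
For reference, the configuration file written above contains exactly the three whitespace-separated keyword-value lines produced by the f.write calls; a sketch of its contents with hypothetical paths:

molecfit_input_folder   /home/user/molecfit_input
molecfit_prog_folder   /home/user/molecfit/bin
python_alias   python3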
Example 4
def test_molecfit_config(molecfit_config):
    """This tests the existence and integrity of the system-wide molecfit configuration folder."""
    import tayph.util as ut
    import tayph.system_parameters as sp
    from pathlib import Path
    import sys

    try:
        molecfit_input_folder = Path(
            sp.paramget('molecfit_input_folder',
                        molecfit_config,
                        full_path=True))
        molecfit_prog_folder = Path(
            sp.paramget('molecfit_prog_folder',
                        molecfit_config,
                        full_path=True))
        python_alias = sp.paramget('python_alias',
                                   molecfit_config,
                                   full_path=True)
    except Exception:  #Catch missing-keyword errors from paramget.
        err_msg = (
            f'ERROR in initialising Molecfit. The molecfit configuration file '
            f'({molecfit_config}) exists, but it does not contain the right keywords. The required '
            'parameters are molecfit_input_folder, molecfit_prog_folder and python_alias'
        )
        ut.tprint(err_msg)
        sys.exit()

    if not molecfit_input_folder.exists():
        err_msg = (
            f"ERROR in initialising Molecfit. The molecfit configuration file "
            f"({molecfit_config}) exists and it has the correct parameter keywords "
            f"(molecfit_input_folder, molecfit_prog_folder and python_alias), but the "
            f"molecfit_input_folder path ({molecfit_input_folder}) does not exist. Please run "
            f"tayph.tellurics.set_molecfit_config() to resolve this.")
        ut.tprint(err_msg)
        sys.exit()

    if not molecfit_prog_folder.exists():
        err_msg = (
            f"ERROR in initialising Molecfit. The molecfit configuration file "
            f"({molecfit_config}) exists and it has the correct parameter keywords "
            f"(molecfit_input_folder, molecfit_prog_folder and python_alias), but the "
            f"molecfit_prog_folder path ({molecfit_prog_folder}) does not exist. Please run "
            f"tayph.tellurics.set_molecfit_config() to resolve this.")
        ut.tprint(err_msg)
        sys.exit()
    binarypath = molecfit_prog_folder / 'molecfit'
    guipath = molecfit_prog_folder / 'molecfit_gui'

    if not binarypath.exists():
        err_msg = (
            f"ERROR in initialising Molecfit. The molecfit configuration file "
            f"({molecfit_config}) exists and it has the correct parameter keywords "
            f"(molecfit_input_folder, molecfit_prog_folder and python_alias), but the molecfit "
            f"binary ({binarypath}) does not exist. Please run "
            f"tayph.tellurics.set_molecfit_config() to resolve this.")
        ut.tprint(err_msg)
        sys.exit()

    if not guipath.exists():
        err_msg = (
            f"ERROR in initialising Molecfit. The molecfit configuration file "
            f"({molecfit_config}) exists and it has the correct parameter keywords "
            f"(molecfit_input_folder, molecfit_prog_folder and python_alias), but the molecfit "
            f"gui binary ({guipath}) does not exist. Please run "
            f"tayph.tellurics.set_molecfit_config() to resolve this.")
        ut.tprint(err_msg)
        sys.exit()

    if ut.test_alias(python_alias) == False:  #test_alias returns an explicit True/False.
        err_msg = (
            f'ERROR in initialising Molecfit. The molecfit configuration file '
            f'({molecfit_config}) exists and it has the correct parameter keywords '
            f'(molecfit_input_folder, molecfit_prog_folder and python_alias), but the python '
            f'alias ({python_alias}) does not exist. Please run '
            f'tayph.tellurics.set_molecfit_config() to resolve this.')
        ut.tprint(err_msg)
        sys.exit()
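
A minimal call sketch (not part of the original source; the config path is hypothetical). Note that on any failure this function terminates the interpreter via sys.exit() rather than raising:

configpath = '/home/user/.tayph/molecfit_config.dat'  #Hypothetical location.
set_molecfit_config(configpath)   #Interactive; prompts for the three parameters.
test_molecfit_config(configpath)  #Validates folders, binaries and the python alias.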
Example 5
def interpolate_over_NaNs(list_of_orders, cutoff=0.2, quiet=False):
    """
    This is a helper function that mostly deals with healing NaNs and is otherwise
    unrelated to the GUI in which it lives. It loops through a list of orders, over the
    individual spectra in each order, and interpolates over the NaNs. It uses the recipe
    provided at
    https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
    which I duplicated in tayph.functions.

    If there are too many NaNs in a column, instead of interpolating, the entire column is
    set to NaN. A column that is entirely NaN doesn't need to be healed because the
    cross-correlation never sees it, and the pixel never contributes. It becomes as if the
    column were beyond the edge of the wavelength range of the data.

    Parameters
    ----------
    list_of_orders : list
        The list of 2D orders for which NaNs need to be removed.

    cutoff : float
        If a column contains more NaNs than this value times its length, instead
        of interpolating over those NaNs, the entire column is set to NaN.

    Returns
    -------
    list_of_healed_orders : list
        The corrected 2D orders.
    """
    import numpy as np
    import tayph.functions as fun
    import tayph.util as ut
    from tayph.vartests import typetest
    import astropy.io.fits as fits

    typetest(cutoff,float,'cutoff in masking.interpolate_over_NaNs()')
    if cutoff <= 0 or cutoff > 1:
        raise Exception('Runtime Error in interpolate_over_NaNs: cutoff should be between 0 and 1 (not including 0).')

    N = len(list_of_orders)
    if N == 0:
        raise Exception('Runtime Error in interpolate_over_NaNs: List of orders is empty.')

    # N_nans_total = 0
    N_nans_columns = 0
    N_nans_isolated = 0
    N_healed = 0
    N_pixels = 0
    list_of_healed_orders = []
    for i in range(len(list_of_orders)):
        order = list_of_orders[i]*1.0#Multiply by 1.0 to copy, otherwise the input would be altered in place.
        shape  = np.shape(order)
        nexp = shape[0]
        npx = shape[1]
        N_pixels += nexp*npx
        list_of_masked_columns = []#This will contain the column numbers to mask completely at the end.
        if np.sum(np.isnan(order)) > 0:
            # N_nans_total+=np.sum(np.isnan(order))
            #So this order contains NaNs.
            #First we loop over all columns to try to find columns where the number
            #of NaNs is greater than CUTOFF.
            for j in range(npx):
                column = order[:,j]
                N_nans = np.sum(np.isnan(column))
                if N_nans > cutoff*nexp:
                    list_of_masked_columns.append(j)
                    N_nans_columns+=nexp
                else:
                    N_nans_isolated+=N_nans
            # fits.writeto('test.fits',order,overwrite=True)
            for k in range(nexp):
                spectrum = order[k,:]
                nans,x= fun.nan_helper(spectrum)
                if np.sum(nans) > 0:
                    spectrum_healed = spectrum*1.0
                    #There are nans in this spectrum.
                    N_healed += np.sum(nans)
                    if len(x(~nans)) > 0:
                        spectrum_healed[nans]= np.interp(x(nans), x(~nans), spectrum[~nans])
                        #This heals all the NaNs, including the ones in all-NaN columns.
                        #These will be set back to NaN below.
                    else:#This happens if an entire order is masked.
                        spectrum_healed[nans]=0
                    order[k,:] = spectrum_healed

        if len(list_of_masked_columns) > 0:
            for l in list_of_masked_columns:
                order[:,l]=np.nan#Set the columns that were erroneously healed back to NaN.
        list_of_healed_orders.append(order)
    if not quiet:
        ut.tprint(f'------Total number of pixels in {N} orders: {N_pixels}')
        ut.tprint(f'------Number of NaNs in columns identified as bad (or previously masked): {N_nans_columns} ({np.round(N_nans_columns/N_pixels*100,2)}% of total)')
        ut.tprint(f'------Number of NaNs in isolated pixels: {N_nans_isolated} ({np.round(N_nans_isolated/N_pixels*100,2)}% of total)')
        ut.tprint(f'------Number of bad pixels identified: {N_nans_isolated+N_nans_columns} ({np.round((N_nans_isolated+N_nans_columns)/N_pixels*100,2)}% of total)')
    return(list_of_healed_orders)
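
A self-contained usage sketch (not part of the original source) on synthetic data, showing the two healing regimes:

import numpy as np
#Two synthetic orders of 5 exposures x 100 pixels, with one bad column and one isolated NaN.
order = np.ones((5, 100))
order[:, 10] = np.nan   #Whole column bad: exceeds cutoff, so it is set to NaN, not healed.
order[2, 50] = np.nan   #Isolated NaN: below cutoff, so it is interpolated over.
healed = interpolate_over_NaNs([order, order.copy()], cutoff=0.2)
print(np.isnan(healed[0][:, 10]).all(), np.isnan(healed[0][2, 50]))  #True False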
Example 6
def blur_rotate(wl, order, dv, Rp, P, inclination, status=False, fast=False):
    """This function takes a spectrum and blurs it using a rotation x Gaussian
    kernel which has a FWHM width of dv km/s everywhere. Meaning that its width changes
    dynamically.
    Because the kernel needs to be recomputed on each element of the wavelength axis
    individually, this operation is much slower than convolution with a constant kernel,
    in which a simple shifting of the array, rather than a recomputation of the rotation
    profile is sufficient. By setting the fast keyword, the input array will first
    be oversampled onto a constant-velocity grid to enable the usage of a constant kernel,
    after which the result is interpolated back to the original grid.

    Input:
    The wavelength axis wl.
    The spectral axis order.
    The FWHM of the resolution element in km/s.
    The radius of the rigid body in Rj.
    The period of the rigid-body rotation in days.
    The inclination of the spin axis in degrees.

    Wavelength and order need to be numpy arrays and have the same number of elements.
    Rp, P and inclination need to be scalar floats.

    Output:
    The blurred spectral axis, with the same dimensions as wl and order.


    WARNING: THIS FUNCTION HANDLES NANS POORLY. I HAVE THEREFORE DECIDED CURRENTLY
    TO REQUIRE NON-NAN INPUT.

    Parameters
    ----------
    wl : list, np.ndarray
        The wavelength array.

    order : list, np.ndarray.
        The spectral axis.

    dv: float
        The FWHM of a resolution element in km/s.

    Rp: float
        The radius of the planet in jupiter radii.

    P: float
        The rotation period of the planet. For tidally locked planets, this is equal
        to the orbital period.

    inclination: float
        The inclination of the spin axis in degrees. Presumed to be close to 90 degrees
        for transiting planets.

    status: bool
        Output a statusbar, but only if fast == False.

    fast: bool
        Re-interpolate the input on a constant-v grid in order to speed up the computation
        of the convolution by eliminating the need to re-interpolate the kernel every step.



    Returns
    -------
    order_blurred : np.array
        The rotation-broadened spectrum on the same wavelength grid as the input.

    Example
    -------
    >>> import tayph.functions as fun
    >>> wl = fun.findgen(4000)*0.001+500.0
    >>> fx = wl*0.0
    >>> fx[2000] = 1.0
    >>> fx_blurred1 = blur_rotate(wl,fx,3.0,1.5,0.8,90.0,status=False,fast=False)
    >>> fx_blurred2 = blur_rotate(wl,fx,3.0,1.5,0.8,90.0,status=False,fast=True)
    """

    import numpy as np
    import tayph.util as ut
    import tayph.functions as fun
    from tayph.vartests import typetest, nantest, dimtest
    from matplotlib import pyplot as plt
    import astropy.constants as const
    import astropy.units as u
    import time
    import sys
    import pdb
    from scipy import interpolate
    typetest(dv, float, 'dv in blur_rotate()')
    typetest(wl, [list, np.ndarray], 'wl in blur_rotate()')
    typetest(order, [list, np.ndarray], 'order in blur_rotate()')
    typetest(P, float, 'P in blur_rotate()')
    typetest(Rp, float, 'Rp in blur_rotate()')
    typetest(inclination, float, 'inclination in blur_rotate()')
    typetest(status, bool, 'status in blur_rotate()')
    typetest(fast, bool, 'fast in blur_rotate()')
    nantest(wl, 'wl in blur_rotate()')
    nantest(order, 'order in blur_rotate()')
    dimtest(wl, [0], 'wl in blur_rotate()')
    dimtest(order, [len(wl)],
            'order in blur_rotate()')  #Test that wl and order are 1D, and that
    #they have the same length.

    if np.min(np.array([dv, P, Rp])) <= 0.0:
        raise Exception(
            "ERROR in blur_rotate: dv, P and Rp should be strictly positive.")

    #ut.typetest_array('wl',wl,np.float64)
    #ut.typetest_array('order',order,np.float64)
    #This is not possible because order may be 2D...
    #And besides, you can have floats, np.float32 and np.float64... All of these would
    #need to pass. Need to fix typetest_array some day.

    order_blurred = order * 0.0  #init the output.
    truncsize = 5.0  #The gaussian is truncated at 5 sigma beyond the extreme points of the RV amplitude.
    sig_dv = dv / (2 * np.sqrt(2.0 * np.log(2))
                   )  #Transform FWHM to Gaussian sigma. In km/s.
    deriv = derivative(wl)
    if np.min(deriv) <= 0:
        raise Exception(
            "ERROR in ops.blur_rotate: WL derivative is zero or negative. Sort wl in ascending order."
        )
    sig_wl = wl * sig_dv / (const.c.to('km/s').value)  #in nm
    sig_px = sig_wl / deriv

    n = 1000.0
    a = fun.findgen(n) / (n - 1) * np.pi
    rv = np.cos(a) * np.sin(
        np.radians(inclination)) * (2.0 * np.pi * Rp * const.R_jup /
                                    (P * u.day)).to('km/s').value  #in km/s
    trunc_dist = np.round(sig_px * truncsize + np.max(rv) * wl /
                          (const.c.to('km/s').value) / deriv).astype(int)
    # print('Maximum rotational rv: %s' % max(rv))
    # print('Sigma_px: %s' % np.nanmean(np.array(sig_px)))

    rvgrid_max = (np.max(trunc_dist) + 1.0) * sig_dv + np.max(rv)
    rvgrid_n = rvgrid_max / dv * 100.0  #100 samples per lsf fwhm.
    rvgrid = (
        fun.findgen(2 * rvgrid_n + 1) - rvgrid_n
    ) / rvgrid_n * rvgrid_max  #Need to make sure that this is wider than the truncation bin and more finely sampled than wl - everywhere.

    lsf = rvgrid * 0.0
    #We loop through velocities in the velocity grid to build up the sum of Gaussians
    #that is the LSF.
    for v in rv:
        lsf += fun.gaussian(
            rvgrid, 1.0, v, sig_dv
        )  #This defines the LSF on a velocity grid with high fidelity.
    if fast:
        wlt, fxt, dv_step = constant_velocity_wl_grid(wl, order, 4)  #dv_step is the
        #velocity spacing of this new grid; renamed so that it does not shadow the dv
        #(FWHM) argument.

        len_rv_grid_low = int(max(rvgrid) / dv_step * 2 - 2)
        # print(len_rv_grid_low)
        # print(len(fun.findgen(len_rv_grid_low)))
        # print(len_rv_grid_low%2)
        if len_rv_grid_low % 2 == 0:
            len_rv_grid_low -= 1
        rvgrid_low = fun.findgen(
            len_rv_grid_low) * dv_step  #Slightly smaller than the original grid.
        rvgrid_low -= 0.5 * np.max(rvgrid_low)
        lsf_low = interpolate.interp1d(rvgrid, lsf)(rvgrid_low)
        lsf_low /= np.sum(
            lsf_low
        )  #This is now an LSF on a grid with the same spacing as the data has.
        #This means I can use it directly as a convolution kernel:
        fxt_blurred = convolve(fxt, lsf_low, edge_degree=1, fit_width=1)
        #And interpolate back to where it came from:
        order_blurred = interpolate.interp1d(wlt,
                                             fxt_blurred,
                                             bounds_error=False)(wl)
        #I can use interp1d because after blurring, we are now oversampled.
        # order_blurred2 = bin_avg(wlt,fxt_blurred,wl)
        return (order_blurred)

    #Now we loop through the wavelength grid to place this LSF at each wavelength position.
    for i in range(0, len(wl)):
        binstart = max([0, i - trunc_dist[i]])
        binend = i + trunc_dist[i]
        wlbin = wl[binstart:binend]

        wlgrid = wl[i] * rvgrid / (const.c.to('km/s').value) + wl[
            i]  #This converts the velocity grid to a d-wavelength grid centered on wl[i].
        #print([np.min(wlbin),np.min(wlgrid),np.max(wlbin),np.max(wlgrid)])

        i_wl = interpolate.interp1d(
            wlgrid, lsf, bounds_error=False, fill_value='extrapolate'
        )  #Extrapolate should not be necessary but sometimes there is a minute mismatch between the
        #start and end wavelengths of the constructed grid and the bin.
        try:
            lsf_wl = i_wl(wlbin)
        except:
            ut.tprint(
                'Error in interpolating LSF onto wlbin. Pausing to debug.')
            pdb.set_trace()
        k_n = lsf_wl / np.sum(
            lsf_wl
        )  #Normalize at each instance of the interpolation to make sure flux is conserved exactly.
        order_blurred[i] = np.sum(k_n * order[binstart:binend])
        if status:
            ut.statusbar(i, len(wl))
    return (order_blurred)
Example 7
def read_espresso(inpath, filelist, read_s1d=True):
    import numpy as np
    import astropy.io.fits as fits
    import astropy.constants as const
    import astropy.units as u
    import copy
    from pathlib import Path
    import tayph.util as ut
    #The following variables define lists in which all the necessary data will be stored.
    framename = []
    header = []
    s1dhdr = []
    obstype = []
    texp = np.array([])
    date = []
    mjd = np.array([])
    s1dmjd = np.array([])
    npx = np.array([])
    norders = np.array([])
    e2ds = []
    s1d = []
    wave1d = []
    airmass = np.array([])
    berv = np.array([])
    wave = []
    catkeyword = 'EXTNAME'
    bervkeyword = 'HIERARCH ESO QC BERV'
    airmass_keyword1 = 'HIERARCH ESO TEL'
    airmass_keyword2 = ' AIRM '
    airmass_keyword3_start = 'START'
    airmass_keyword3_end = 'END'
    for i in range(len(filelist)):
        if filelist[i].endswith('S2D_BLAZE_A.fits'):
            hdul = fits.open(inpath / filelist[i])
            data = copy.deepcopy(hdul[1].data)
            hdr = hdul[0].header
            hdr2 = hdul[1].header
            wavedata = copy.deepcopy(hdul[5].data)
            hdul.close()
            del hdul

            if hdr2[catkeyword] == 'SCIDATA':
                # print('science keyword found')
                print(f'------{filelist[i]}', end="\r")
                framename.append(filelist[i])
                header.append(hdr)
                obstype.append('SCIENCE')
                texp = np.append(texp, hdr['EXPTIME'])
                date.append(hdr['DATE-OBS'])
                mjd = np.append(mjd, hdr['MJD-OBS'])
                npx = np.append(npx, hdr2['NAXIS1'])
                norders = np.append(norders, hdr2['NAXIS2'])
                e2ds.append(data)
                berv = np.append(berv, hdr[bervkeyword])  #in km/s.
                telescope = hdr['TELESCOP'][-1]
                airmass = np.append(
                    airmass,
                    0.5 * (hdr[airmass_keyword1 + telescope + ' AIRM START'] +
                           hdr[airmass_keyword1 + telescope + ' AIRM END']))
                wave.append(
                    wavedata / 10.0
                )  #*(1.0-(hdr[bervkeyword]*u.km/u.s/const.c).decompose().value))
                #Ok.! So unlike HARPS, ESPRESSO wavelengths are actually BERV corrected in the S2Ds.
                #WHY!!!?. WELL SO BE IT. IN ORDER TO HAVE E2DSes THAT ARE ON THE SAME GRID, AS REQUIRED, WE UNDO THE BERV CORRECTION HERE.
                #WHEN COMPARING WAVE[0] WITH WAVE[1], YOU SHOULD SEE THAT THE DIFFERENCE IS NILL.
                #THATS WHY LATER WE CAN JUST USE WAVE[0] AS THE REPRESENTATIVE GRID FOR ALL.
                #BUT THAT IS SILLY. JUST SAVE THE WAVELENGTHS!

                if read_s1d:
                    s1d_path = inpath / Path(
                        str(filelist[i]).replace('_S2D_BLAZE_A.fits',
                                                 '_S1D_A.fits'))
                    #Need the blazed files. Not the S2D_A's by themselves.
                    ut.check_path(
                        s1d_path,
                        exists=True)  #Crash if the S1D doesn't exist.
                    hdul = fits.open(s1d_path)
                    data_table = copy.deepcopy(hdul[1].data)
                    hdr1d = hdul[0].header
                    hdul.close()
                    del hdul
                    s1d.append(data_table.field(2))

                    berv1d = hdr1d[bervkeyword]
                    if berv1d != hdr[bervkeyword]:
                        wrn_msg = (
                            'WARNING in read_espresso(): BERV correction of S1D file is not '
                            f'equal to that of the S2D file. {berv1d} vs {hdr[bervkeyword]}'
                        )
                        ut.tprint(wrn_msg)
                    gamma = (1.0 -
                             (berv1d * u.km / u.s / const.c).decompose().value)
                    wave1d.append(data_table.field(1) *
                                  gamma)  #This is in angstroms.
                    #We need to check to which UT ESPRESSO was connected, so that we can read
                    #the weather information (which is UT-specific) and parse them into the
                    #header using UT-agnostic keywords that are in the ESPRESSO.par file.
                    TELESCOP = hdr1d['TELESCOP'].split('U')[
                        1]  #This is the number of the UT, either 1, 2, 3 or 4.
                    if TELESCOP not in ['1', '2', '3', '4']:
                        raise ValueError(
                            f"in read_e2ds when reading ESPRESSO data. The UT telescope is not recognised. (TELESCOP={hdr['TELESCOP']})"
                        )
                    else:
                        hdr1d['TELALT'] = hdr1d[f'ESO TEL{TELESCOP} ALT']
                        hdr1d['RHUM'] = hdr1d[f'ESO TEL{TELESCOP} AMBI RHUM']
                        hdr1d['PRESSURE'] = (
                            hdr1d[f'ESO TEL{TELESCOP} AMBI PRES START'] +
                            hdr1d[f'ESO TEL{TELESCOP} AMBI PRES END']) / 2.0
                        hdr1d['AMBITEMP'] = hdr1d[
                            f'ESO TEL{TELESCOP} AMBI TEMP']
                        hdr1d['M1TEMP'] = hdr1d[
                            f'ESO TEL{TELESCOP} TH M1 TEMP']
                    s1dhdr.append(hdr1d)
                    s1dmjd = np.append(s1dmjd, hdr1d['MJD-OBS'])
    if read_s1d:
        output = {
            'wave': wave,
            'e2ds': e2ds,
            'header': header,
            'wave1d': wave1d,
            's1d': s1d,
            's1dhdr': s1dhdr,
            'mjd': mjd,
            'date': date,
            'texp': texp,
            'obstype': obstype,
            'framename': framename,
            'npx': npx,
            'norders': norders,
            'berv': berv,
            'airmass': airmass,
            's1dmjd': s1dmjd
        }
    else:
        output = {
            'wave': wave,
            'e2ds': e2ds,
            'header': header,
            'mjd': mjd,
            'date': date,
            'texp': texp,
            'obstype': obstype,
            'framename': framename,
            'npx': npx,
            'norders': norders,
            'berv': berv,
            'airmass': airmass
        }
    return (output)
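
A call sketch (not part of the original source; the path is hypothetical). filelist contains bare filenames inside inpath, and only files ending in S2D_BLAZE_A.fits are read:

from pathlib import Path
inpath = Path('data/WASP-121/espresso_night1')  #Hypothetical folder of ESPRESSO products.
filelist = [f.name for f in inpath.glob('*.fits')]
data = read_espresso(inpath, filelist, read_s1d=True)
print(f"Read {len(data['e2ds'])} science exposures.")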
Example 8
def read_harpslike(inpath, filelist, mode, read_s1d=True):
    """
    This reads a folder of HARPS or HARPSN data. Input is a list of filepaths and the mode (HARPS
    or HARPSN).
    """

    if mode == 'HARPS':
        catkeyword = 'HIERARCH ESO DPR CATG'
        bervkeyword = 'HIERARCH ESO DRS BERV'
        thfilekeyword = 'HIERARCH ESO DRS CAL TH FILE'
        Zstartkeyword = 'HIERARCH ESO TEL AIRM START'
        Zendkeyword = 'HIERARCH ESO TEL AIRM END'
    elif mode == 'HARPSN':
        catkeyword = 'OBS-TYPE'
        bervkeyword = 'HIERARCH TNG DRS BERV'
        thfilekeyword = 'HIERARCH TNG DRS CAL TH FILE'
        Zstartkeyword = 'AIRMASS'
        Zendkeyword = 'AIRMASS'  #These are the same because HARPSN doesn't have start and end keywords.
        #Below, the airmass is averaged, so there is no harm in taking the average of the same number.
    else:
        raise ValueError(
            f"Error in read_harpslike: mode should be set to HARPS or HARPSN ({mode})"
        )

    #The following variables define lists in which all the necessary data will be stored.
    framename = []
    header = []
    s1dhdr = []
    obstype = []
    texp = np.array([])
    date = []
    mjd = np.array([])
    s1dmjd = np.array([])
    npx = np.array([])
    norders = np.array([])
    e2ds = []
    s1d = []
    wave1d = []
    airmass = np.array([])
    berv = np.array([])
    wave = []
    # wavefile_used = []
    for i in range(len(filelist)):
        if filelist[i].endswith('e2ds_A.fits'):
            print(f'------{filelist[i]}', end="\r")
            hdul = fits.open(inpath / filelist[i])
            data = copy.deepcopy(hdul[0].data)
            hdr = hdul[0].header
            hdul.close()
            del hdul[0].data
            if hdr[catkeyword] == 'SCIENCE':
                framename.append(filelist[i])
                header.append(hdr)
                obstype.append(hdr[catkeyword])
                texp = np.append(texp, hdr['EXPTIME'])
                date.append(hdr['DATE-OBS'])
                mjd = np.append(mjd, hdr['MJD-OBS'])
                npx = np.append(npx, hdr['NAXIS1'])
                norders = np.append(norders, hdr['NAXIS2'])
                e2ds.append(data)
                berv = np.append(berv, hdr[bervkeyword])
                airmass = np.append(
                    airmass, 0.5 * (hdr[Zstartkeyword] + hdr[Zendkeyword])
                )  #This is an approximation where we take the mean airmass.
                # if nowave == True:
                # wavefile_used.append(hdr[thfilekeyword])
                #Record which wavefile was used by the pipeline to
                #create the wavelength solution.
                wavedata = ut.read_wave_from_e2ds_header(
                    hdr, mode=mode) / 10.0  #convert to nm.
                wave.append(wavedata)
                # if filelist[i].endswith('wave_A.fits'):
                #     print(filelist[i]+' (wave)')
                #     if nowave == True:
                #         warnings.warn(" in read_e2ds: nowave was set to True but a wave_A file was detected. This wave file is now ignored in favor of the header.",RuntimeWarning)
                #     else:
                #         wavedata=fits.getdata(inpath/filelist[i])
                #         wave.append(wavedata)

                if read_s1d:
                    s1d_path = inpath / Path(
                        str(filelist[i]).replace('e2ds_A.fits', 's1d_A.fits'))
                    ut.check_path(
                        s1d_path,
                        exists=True)  #Crash if the S1D doesn't exist.
                    # if filelist[i].endswith('s1d_A.fits'):
                    hdul = fits.open(s1d_path)
                    data_1d = copy.deepcopy(hdul[0].data)
                    hdr1d = hdul[0].header
                    hdul.close()
                    del hdul
                    # if hdr[catkeyword] == 'SCIENCE':
                    s1d.append(data_1d)
                    if mode == 'HARPSN':  #In the case of HARPS-N we need to convert the units of the
                        #elevation and provide a UTC keyword.
                        hdr1d['TELALT'] = np.degrees(float(hdr1d['EL']))
                        hdr1d['UTC'] = (float(hdr1d['MJD-OBS']) %
                                        1.0) * 86400.0
                    s1dhdr.append(hdr1d)
                    s1dmjd = np.append(s1dmjd, hdr1d['MJD-OBS'])
                    berv1d = hdr1d[bervkeyword]
                    if berv1d != hdr[bervkeyword]:
                        wrn_msg = (
                            'WARNING in read_harpslike(): BERV correction of s1d file is not '
                            f'equal to that of the e2ds file. {berv1d} vs {hdr[bervkeyword]}'
                        )
                        ut.tprint(wrn_msg)
                    gamma = (1.0 -
                             (berv1d * u.km / u.s / const.c).decompose().value
                             )  #Doppler factor BERV.
                    wave1d.append(
                        (hdr1d['CDELT1'] * fun.findgen(len(data_1d)) +
                         hdr1d['CRVAL1']) * gamma)

    #The disabled block below would check that all exposures have the same number of pixels, and clip s1ds if needed.
    # min_npx1d = int(np.min(np.array(npx1d)))
    # if np.sum(np.abs(np.array(npx1d)-npx1d[0])) != 0:
    #     warnings.warn("in read_e2ds when reading HARPS data: Not all s1d files have the same number of pixels. This could have happened if the pipeline has extracted one or two extra pixels in some exposures but not others. The s1d files will be clipped to the smallest length.",RuntimeWarning)
    #     for i in range(len(s1d)):
    #         wave1d[i]=wave1d[i][0:min_npx1d]
    #         s1d[i]=s1d[i][0:min_npx1d]
    #         npx1d[i]=min_npx1d
    output = {
        'wave': wave,
        'e2ds': e2ds,
        'header': header,
        'wave1d': wave1d,
        's1d': s1d,
        's1dhdr': s1dhdr,
        'mjd': mjd,
        'date': date,
        'texp': texp,
        'obstype': obstype,
        'framename': framename,
        'npx': npx,
        'norders': norders,
        'berv': berv,
        'airmass': airmass,
        's1dmjd': s1dmjd
    }
    return (output)
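
A call sketch analogous to the ESPRESSO reader (not part of the original source; the path is hypothetical). Only files ending in e2ds_A.fits are read, and mode selects the HARPS or HARPSN header keywords:

from pathlib import Path
inpath = Path('data/WASP-121/harps_night1')  #Hypothetical folder of HARPS DRS products.
filelist = [f.name for f in inpath.glob('*.fits')]
data = read_harpslike(inpath, filelist, 'HARPS', read_s1d=True)
print(f"Read {len(data['e2ds'])} science exposures.")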
Example 9
def build_template(templatename, binsize=1.0, maxfrac=0.01, mode='top', resolution=0.0,
                   c_subtract=True, twopass=False, template_library='models/library',
                   verbose=False):
    """This routine reads a specified model from the library and turns it into a
    cross-correlation template by subtracting the top-envelope (or bottom envelope),
    if c_subtract is set to True. Returns the wavelength axis and flux axis of the template,
    and whether the template is a binary mask (True) or a spectrum (False)."""

    import tayph.util as ut
    from tayph.vartests import typetest,postest,notnegativetest
    import numpy as np
    import tayph.operations as ops
    import astropy.constants as const
    from astropy.io import fits
    from matplotlib import pyplot as plt
    from scipy import interpolate
    from pathlib import Path
    typetest(templatename,str,'templatename in mod.build_template()')
    typetest(binsize,[int,float],'binsize in mod.build_template()')
    typetest(maxfrac,[int,float],'maxfrac in mod.build_template()')
    typetest(mode,str,'mode in mod.build_template()')
    typetest(resolution,[int,float],'resolution in mod.build_template()')
    typetest(twopass,bool,'twopass in mod.build_template()')

    binsize=float(binsize)
    maxfrac=float(maxfrac)
    resolution=float(resolution)
    postest(binsize,'binsize in mod.build_template()')
    postest(maxfrac,'maxfrac in mod.build_template()')
    notnegativetest(resolution,'resolution in mod.build_template()')
    template_library=ut.check_path(template_library,exists=True)

    c=const.c.to('km/s').value

    if mode not in ['top','bottom']:
        raise Exception(f'RuntimeError in build_template: Mode should be set to "top" or "bottom" ({mode}).')
    wlt,fxt=get_model(templatename,library=template_library)

    if wlt[-1] <= wlt[0]:#Reverse the wl axis if it's sorted the wrong way.
        wlt=np.flipud(wlt)
        fxt=np.flipud(fxt)

    if get_model(templatename,library=template_library,is_binary=True):#Bypass all template-specific operations.
        return(wlt,fxt,True)

    if c_subtract:
        wle,fxe=ops.envelope(wlt,fxt-np.median(fxt),binsize,selfrac=maxfrac,mode=mode)#These are binpoints of the chosen envelope.
        #The median of fxm is first removed to decrease numerical errors, because the spectrum may
        #have values that are large (~1.0) while the variations are small (~1e-5).
        e_i = interpolate.interp1d(wle,fxe,fill_value='extrapolate')
        envelope=e_i(wlt)
        T = fxt-np.median(fxt)-envelope
        absT = np.abs(T)
        T[(absT < 1e-4 * np.max(absT))] = 0.0 #This is now continuum-subtracted and binary-like.
        #Any values that are small are taken out.
        #This therefore assumes that the model has lines that are deep compared to the numerical
        #error of envelope subtraction (!).
    else:
        T = fxt*1.0

    if resolution != 0.0:
        dRV = c/resolution
        if verbose:
            ut.tprint(f'------Blurring template to resolution of data ({round(resolution,0)}, {round(dRV,2)} km/s)')
        wlt_cv,T_cv,vstep=ops.constant_velocity_wl_grid(wlt,T,oversampling=2.0)
        if verbose:
            ut.tprint(f'---------v_step is {np.round(vstep,3)} km/s')
            ut.tprint(f'---------So the resolution blurkernel has an avg width of {np.round(dRV/vstep,3)} px.')
        T_b=ops.smooth(T_cv,dRV/vstep,mode='gaussian')
        wlt = wlt_cv*1.0
        T = T_b*1.0
    return(wlt,T,False)
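
A call sketch (not part of the original source; the template name and library path are hypothetical), building a continuum-subtracted template blurred to the resolving power of the data:

wlt, T, is_binary = build_template('W121_FeI', binsize=1.0, maxfrac=0.01, mode='top',
                                   resolution=120000.0, template_library='models/library',
                                   verbose=True)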