Example #1
def running_MAD_2D(z, w, verbose=False, parallel=False):
    """Computers a running standard deviation of a 2-dimensional array z.
    The stddev is evaluated over the vertical block with width w pixels.
    The output is a 1D array with length equal to the width of z.
    This is very slow on arrays that are wide in x (hundreds of thousands of points)."""
    import astropy.stats as stats
    import numpy as np
    from tayph.vartests import typetest, dimtest, postest
    import tayph.util as ut
    if parallel: from joblib import Parallel, delayed  #Note: joblib is imported here but the parallel path is not implemented below.
    typetest(z, np.ndarray, 'z in fun.running_MAD_2D()')
    dimtest(z, [0, 0], 'z in fun.running_MAD_2D()')
    typetest(w, [int, float], 'w in fun.running_MAD_2D()')
    postest(w, 'w in fun.running_MAD_2D()')
    size = np.shape(z)
    ny = size[0]
    nx = size[1]
    s = np.zeros(nx)  #Output array: one robust stddev per column.
    dx1 = int(0.5 * w)
    dx2 = int(int(0.5 * w) + (w % 2))  #To deal with odd windows.
    for i in range(nx):
        minx = max([0, i - dx1])  #This here is only a 3% slowdown.
        maxx = min([nx, i + dx2])
        s[i] = stats.mad_std(
            z[:, minx:maxx],
            ignore_nan=True)  #This is what takes 97% of the time.
        if verbose: ut.statusbar(i, nx)
    return (s)
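
A minimal usage sketch, assuming the tayph package is installed: running_MAD_2D returns one robust stddev per pixel column, which can then drive a sigma-clipping selection as in mask_orders (Example #3). The array contents here are synthetic.

import numpy as np
import tayph.functions as fun

rng = np.random.default_rng(0)
res = rng.normal(0.0, 0.01, size=(20, 500))  #20 exposures x 500 pixels of residuals.
res[:, 250] += 0.2  #Inject one outlier column.
sigma = fun.running_MAD_2D(res, 40)  #Robust stddev per column, window of 40 px.
sel = np.abs(res) >= 5.0 * sigma  #Sigma-clipping selection, as in mask_orders.
print(f'{sel.sum()} pixels flagged as outliers.')
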
Example #2
def construct_KpVsys(rv, ccf, ccf_e, dp, kprange=[0, 300], dkp=1.0):
    """The name says it all. Do good tests."""
    import tayph.functions as fun
    import numpy as np
    import tayph.system_parameters as sp
    import tayph.util as ut
    Kp = fun.findgen((kprange[1] - kprange[0]) / dkp + 1) * dkp + kprange[0]
    n_exp = np.shape(ccf)[0]
    KpVsys = np.zeros((len(Kp), len(rv)))
    KpVsys_e = np.zeros((len(Kp), len(rv)))
    transit = sp.transit(dp) - 1.0
    transit /= np.nansum(transit)
    transitblock = fun.rebinreform(transit, len(rv)).T

    ccfs = []
    for j, Kp_i in enumerate(Kp):
        dRV = sp.RV(dp, vorb=Kp_i) * (-1.0)
        ccf_shifted = shift_ccf(rv, ccf, dRV)
        ccf_e_shifted = shift_ccf(rv, ccf_e, dRV)
        ccfs.append(ccf_shifted)
        KpVsys[j, :] = np.nansum(transitblock * ccf_shifted, axis=0)
        KpVsys_e[j, :] = (np.nansum((transitblock * ccf_e_shifted)**2.0,
                                    axis=0))**0.5
        ut.statusbar(Kp_i, Kp)
    return (Kp, KpVsys, KpVsys_e)
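
A hedged call sketch for construct_KpVsys, mirroring how run_instance (Example #4) uses it: the CCF arrays are read back from a previous cross-correlation run. The paths below are hypothetical placeholders.

import astropy.io.fits as fits
from pathlib import Path

outpath = Path('output/mydataset/mylibrary/mytemplate')  #Hypothetical output folder.
dp = Path('data/mydataset')  #Hypothetical data folder with the system parameters.
rv = fits.getdata(outpath / 'RV.fits')
ccf = fits.getdata(outpath / 'ccf_cleaned.fits')
ccf_e = fits.getdata(outpath / 'ccf_cleaned_error.fits')
Kp, KpVsys, KpVsys_e = construct_KpVsys(rv, ccf, ccf_e, dp, kprange=[0, 300], dkp=1.0)
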
Example #3
def mask_orders(list_of_wls,list_of_orders,dp,maskname,w,c_thresh,manual=False):
    """
    This code takes the list of orders and masks out bad pixels.
    It combines two steps, a simple sigma clipping step and a manual step, where
    the user can interactively identify bad pixels in each order. The sigma
    clipping is done at a threshold of c_thresh, using a rolling standard deviation
    with a window width of w pixels. Manual masking is handled by a separate routine
    that provides a GUI for interactive selection.

    If c_thresh is set to zero, sigma clipping is skipped. If manual=False, the
    manual selection of masking regions (which is manual labour) is turned off.
    If both are turned off, the list_of_orders is returned unchanged.

    If either or both are active, the routine will output 1 or 2 FITS files that
    contain a stack (cube) of the masks for each order. The first file is the mask
    that was computed automatically, the second is the mask that was constructed
    manually. This is done so that the manual mask can be transplanted onto another
    dataset, or saved under a different file-name, to limit repetition of work.

    At the end of the routine, the two masks are merged into a single list, and
    applied to the list of orders.
    """
    import tayph.operations as ops
    import numpy as np
    import tayph.functions as fun
    import tayph.plotting as plotting
    import sys
    import matplotlib.pyplot as plt
    import tayph.util as ut
    import warnings
    from tayph.vartests import typetest,dimtest,postest
    ut.check_path(dp)
    typetest(maskname,str,'maskname in mask_orders()')
    typetest(w,[int,float],'w in mask_orders()')
    typetest(c_thresh,[int,float],'c_thresh in mask_orders()')
    postest(w,'w in mask_orders()')
    postest(c_thresh,'c_thresh in mask_orders()')
    typetest(list_of_wls,list,'list_of_wls in mask_orders()')
    typetest(list_of_orders,list,'list_of_orders in mask_orders()')
    typetest(manual,bool,'manual keyword in mask_orders()')
    dimtest(list_of_wls,[0,0],'list_of_wls in mask_orders()')
    dimtest(list_of_orders,[len(list_of_wls),0,0],'list_of_orders in mask_orders()')

    if c_thresh <= 0 and manual == False:
        print('---WARNING in mask_orders: c_thresh is set to zero and manual masking is turned off.')
        print('---Returning orders unmasked.')
        return(list_of_orders)

    N = len(list_of_orders)
    void = fun.findgen(N)

    list_of_orders = ops.normalize_orders(list_of_orders,list_of_orders)[0]#First normalize. We don't want outliers to
    #affect the colour correction later on, so colour correction can't be done before masking, meaning
    #that this needs to be done twice, as colour correction is also needed for proper masking. The second argument is
    #a dummy to replace the expected list_of_sigmas input.
    N_NaN = 0
    list_of_masked_orders = []

    for i in range(N):
        list_of_masked_orders.append(list_of_orders[i])

    list_of_masks = []

    if c_thresh > 0:#Check that c_thresh is positive. If not, skip sigma clipping.
        print('------Sigma-clipping mask')
        for i in range(N):
            order = list_of_orders[i]
            N_exp = np.shape(order)[0]
            N_px = np.shape(order)[1]
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", category=RuntimeWarning)
                meanspec = np.nanmean(order,axis = 0)
            meanblock = fun.rebinreform(meanspec,N_exp)
            res = order / meanblock - 1.0
            sigma = fun.running_MAD_2D(res,w)
            with np.errstate(invalid='ignore'):#https://stackoverflow.com/questions/25345843/inequality-comparison-of-numpy-array-with-nan-to-a-scalar
                sel = np.abs(res) >= c_thresh*sigma
                N_NaN += np.sum(sel)#This is interesting because True values count as 1, and False as zero.
                order[sel] = np.nan
            list_of_masks.append(order*0.0)
            ut.statusbar(i,void)

        print(f'------{N_NaN} outliers identified and set to NaN ({round(N_NaN/np.size(list_of_masks)*100.0,3)}% of all pixels).')
    else:
        print('------Skipping sigma-clipping (c_thresh <= 0)')
        #Do nothing to list_of_masks; it remains an empty list.
        #We proceed directly to manual masking, because at this point it has
        #already been established that manual masking is enabled (otherwise we
        #would have returned above).


    list_of_masks_manual = []
    if manual == True:


        previous_list_of_masked_columns = load_columns_from_file(dp,maskname,mode='relaxed')
        list_of_masked_columns = manual_masking(list_of_wls,list_of_orders,list_of_masks,saved = previous_list_of_masked_columns)
        print('------Successfully concluded manual mask.')
        write_columns_to_file(dp,maskname,list_of_masked_columns)

        print('------Building manual mask from selected columns')
        for i in range(N):
            order = list_of_orders[i]
            N_exp = np.shape(order)[0]
            N_px = np.shape(order)[1]
            list_of_masks_manual.append(np.zeros((N_exp,N_px)))
            for j in list_of_masked_columns[i]:
                list_of_masks_manual[i][:,int(j)] = np.nan

    #We write 1 or 2 mask files here. The list of manual masks
    #and list_of_masks (auto) are either filled, or remain empty lists if
    #c_thresh was set to zero or manual was set to False (because they were defined
    #as empty lists initially, and then not filled with anything).
    write_mask_to_file(dp,maskname,list_of_masks,list_of_masks_manual)
    return(0)
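
A hedged call sketch for mask_orders, following the way run_instance (Example #4) invokes it; list_of_wls and list_of_orders are assumed to have been loaded from wave_*.fits and order_*.fits files as in that example, and the mask name is hypothetical.

import tayph.masking as masking

masking.mask_orders(list_of_wls,     #List of 1D wavelength axes.
                    list_of_orders,  #List of matching 2D (n_exp x n_px) orders.
                    dp,              #Path to the data folder.
                    'mask_v1',       #Hypothetical mask name for the output files.
                    40.0,            #Running-MAD window width w, in pixels.
                    5.0,             #Sigma-clipping threshold c_thresh.
                    manual=False)    #Skip the interactive GUI step.
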
Example #4
def run_instance(p):
    """This runs the entire cross correlation analysis cascade."""
    import numpy as np
    from astropy.io import fits
    import astropy.constants as const
    import astropy.units as u
    from matplotlib import pyplot as plt
    import os.path
    import os
    import sys
    import scipy.interpolate as interp
    from pathlib import Path

    import tayph.util as ut
    import tayph.operations as ops
    import tayph.functions as fun
    import tayph.system_parameters as sp
    import tayph.tellurics as telcor
    import tayph.masking as masking
    import tayph.models as models
    from tayph.ccf import xcor, clean_ccf, filter_ccf, construct_KpVsys
    from tayph.vartests import typetest, notnegativetest, nantest, postest, typetest_array, dimtest
    import tayph.shadow as shadow
    # from lib import analysis
    # from lib import cleaning
    # from lib import masking as masking
    # from lib import shadow as shadow
    # from lib import molecfit as telcor

    #First parse the parameter dictionary into required variables and test them.
    typetest(p, dict, 'params in run_instance()')

    dp = Path(p['dp'])
    ut.check_path(dp, exists=True)

    modellist = p['modellist']
    templatelist = p['templatelist']
    model_library = p['model_library']
    template_library = p['template_library']
    typetest(modellist, [str, list], 'modellist in run_instance()')
    typetest(templatelist, [str, list], 'templatelist in run_instance()')
    typetest(model_library, str, 'model_library in run_instance()')
    typetest(template_library, str, 'template_library in run_instance()')
    ut.check_path(model_library, exists=True)
    ut.check_path(template_library, exists=True)
    if type(modellist) == str:
        modellist = [modellist]  #Force this to be a list
    if type(templatelist) == str:
        templatelist = [templatelist]  #Force this to be a list
    typetest_array(modellist, str, 'modellist in run_instance()')
    typetest_array(templatelist, str, 'templatelist in run_instance()')

    shadowname = p['shadowname']
    maskname = p['maskname']
    typetest(shadowname, str, 'shadowname in run_instance()')
    typetest(maskname, str, 'maskname in run_instance()')

    RVrange = p['RVrange']
    drv = p['drv']
    f_w = p['f_w']
    resolution = sp.paramget('resolution', dp)
    typetest(RVrange, [int, float], 'RVrange in run_instance()')
    typetest(drv, [int, float], 'drv in run_instance()')
    typetest(f_w, [int, float], 'f_w in run_instance()')
    typetest(resolution, [int, float], 'resolution in run_instance()')
    nantest(RVrange, 'RVrange in run_instance()')
    nantest(drv, 'drv in run_instance()')
    nantest(f_w, 'f_w in run_instance()')
    nantest(resolution, 'resolution in run_instance()')
    postest(RVrange, 'RVrange in run_instance()')
    postest(drv, 'drv in run_instance()')
    postest(resolution, 'resolution in run_instance()')
    notnegativetest(f_w, 'f_w in run_instance()')

    do_colour_correction = p['do_colour_correction']
    do_telluric_correction = p['do_telluric_correction']
    do_xcor = p['do_xcor']
    plot_xcor = p['plot_xcor']
    make_mask = p['make_mask']
    apply_mask = p['apply_mask']
    c_subtract = p['c_subtract']
    do_berv_correction = p['do_berv_correction']
    do_keplerian_correction = p['do_keplerian_correction']
    make_doppler_model = p['make_doppler_model']
    skip_doppler_model = p['skip_doppler_model']
    typetest(do_colour_correction, bool,
             'do_colour_correction in run_instance()')
    typetest(do_telluric_correction, bool,
             'do_telluric_correction in run_instance()')
    typetest(do_xcor, bool, 'do_xcor in run_instance()')
    typetest(plot_xcor, bool, 'plot_xcor in run_instance()')
    typetest(make_mask, bool, 'make_mask in run_instance()')
    typetest(apply_mask, bool, 'apply_mask in run_instance()')
    typetest(c_subtract, bool, 'c_subtract in run_instance()')
    typetest(do_berv_correction, bool, 'do_berv_correction in run_instance()')
    typetest(do_keplerian_correction, bool,
             'do_keplerian_correction in run_instance()')
    typetest(make_doppler_model, bool, 'make_doppler_model in run_instance()')
    typetest(skip_doppler_model, bool, 'skip_doppler_model in run_instance()')

    #We start by defining constants and preparing for generating output.
    c = const.c.value / 1000.0  #in km/s
    colourdeg = 3  #A fitting degree for the colour correction.

    print(
        f'---Passed parameter input tests. Initiating output folder tree in {Path("output")/dp}.'
    )
    libraryname = str(template_library).split('/')[-1]
    if str(dp).split('/')[0] == 'data':
        dataname = str(dp).replace('data/', '')
        print(
            f'------Data is located in data/ folder. Assuming output name for this dataset as {dataname}'
        )
    else:
        dataname = dp
        print(
            f'------Data is NOT located in data/ folder. Assuming output name for this dataset as {dataname}'
        )

    list_of_wls = []  #This will store all the data.
    list_of_orders = []  #All of it needs to be loaded into your memory.
    list_of_sigmas = []

    trigger2 = 0  #These triggers are used to limit the generation of output in the forloop.
    trigger3 = 0
    n_negative_total = 0  #This will hold the total number of pixels that were set to NaN because they were zero when reading in the data.
    air = sp.paramget('air', dp)  #Read bool from str in config file.
    typetest(air, bool, 'air in run_instance()')

    filelist_orders = [str(i) for i in Path(dp).glob('order_*.fits')]
    if len(filelist_orders) == 0:
        raise Exception(
            f'Runtime error: No order_*.fits files were found in {dp}.')
    try:
        order_numbers = [
            int(i.split('order_')[1].split('.')[0]) for i in filelist_orders
        ]
    except (ValueError, IndexError):
        raise Exception(
            'Runtime error: Failed casting fits filename numerals to ints. Are the filenames of the spectral orders correctly formatted?'
        )
    order_numbers.sort()  #This is the ordered list of numerical order IDs.
    n_orders = len(order_numbers)
    if n_orders == 0:
        raise Exception(
            f'Runtime error: n_orders may not have ended up as zero. ({n_orders})'
        )

#Loading the data from the datafolder.
    if do_xcor == True or plot_xcor == True or make_mask == True:
        print(f'---Loading orders from {dp}.')

        # for i in range(startorder,endorder+1):
        for i in order_numbers:
            wavepath = dp / f'wave_{i}.fits'
            orderpath = dp / f'order_{i}.fits'
            sigmapath = dp / f'sigma_{i}.fits'
            ut.check_path(wavepath, exists=True)
            ut.check_path(orderpath, exists=True)
            ut.check_path(sigmapath, exists=False)
            wave_axis = fits.getdata(wavepath)
            dimtest(wave_axis, [0], 'wavelength grid in run_instance()')
            n_px = len(wave_axis)  #Pixel width of the spectral order.
            if air == False:
                if i == np.min(order_numbers):
                    print("------Assuming wavelengths are in vaccuum.")
                list_of_wls.append(1.0 * wave_axis)
            else:
                if i == np.min(order_numbers):
                    print("------Applying airtovac correction.")
                list_of_wls.append(ops.airtovac(wave_axis))

            order_i = fits.getdata(orderpath)
            if i == np.min(order_numbers):
                dimtest(
                    order_i, [0, n_px], f'order {i} in run_instance()'
                )  #For the first order, check that it is 2D and that is has a width equal to n_px.
                n_exp = np.shape(
                    order_i
                )[0]  #then fix n_exp. All other orders should have the same n_exp.
                print(f'------{n_exp} exposures recognised.')
            else:
                dimtest(order_i, [n_exp, n_px], f'order {i} in run_instance()')

            #Now test for negatives, set them to NaN and track them.
            n_negative = len(order_i[order_i <= 0])
            if trigger3 == 0 and n_negative > 0:
                print("------Setting negative values to NaN.")
                trigger3 = -1
            n_negative_total += n_negative
            order_i[order_i <= 0] = np.nan
            postest(order_i, f'order {i} in run_instance().'
                    )  #make sure whatever comes out here is strictly positive.
            list_of_orders.append(order_i)

            try:  #Try to get a sigma file. If it doesn't exist, we raise a warning. If it does, we test its dimensions and append it.
                sigma_i = fits.getdata(sigmapath)
                dimtest(sigma_i, [n_exp, n_px],
                        f'order {i} in run_instance().')
                list_of_sigmas.append(sigma_i)
            except FileNotFoundError:
                if trigger2 == 0:
                    print(
                        '------WARNING: Sigma (flux error) files not provided. Assuming sigma = sqrt(flux). This is standard practice for HARPS data, but e.g. ESPRESSO has a pipeline that computes standard errors on each pixel for you.'
                    )
                    trigger2 = -1
                list_of_sigmas.append(np.sqrt(order_i))
        print(
            f"------{n_negative_total} negative values set to NaN ({np.round(100.0*n_negative_total/n_exp/n_px/len(order_numbers),2)}% of total spectral pixels in dataset.)"
        )

    if len(list_of_orders) != n_orders:
        raise Exception(
            'Runtime error: n_orders is not equal to the length of list_of_orders. Something went wrong when reading them in?'
        )

    print('---Finished loading dataset to memory.')

    #Apply telluric correction file or not.
    # plt.plot(list_of_wls[60],list_of_orders[60][10],color='red')
    # plt.plot(list_of_wls[60],list_of_orders[60][10]+list_of_sigmas[60][10],color='red',alpha=0.5)#plot corrected spectra
    # plt.plot(list_of_wls[60],list_of_orders[60][10]/list_of_sigmas[60][10],color='red',alpha=0.5)#plot SNR
    if do_telluric_correction == True and n_orders > 0:
        print('---Applying telluric correction')
        telpath = dp / 'telluric_transmission_spectra.pkl'
        list_of_orders, list_of_sigmas = telcor.apply_telluric_correction(
            telpath, list_of_wls, list_of_orders, list_of_sigmas)

    # plt.plot(list_of_wls[60],list_of_orders[60][10],color='blue')
    # plt.plot(list_of_wls[60],list_of_orders[60][10]+list_of_sigmas[60][10],color='blue',alpha=0.5)#plot corrected spectra

    # plt.plot(list_of_wls[60],list_of_orders[60][10]/list_of_sigmas[60][10],color='blue',alpha=0.5) #plot SNR
    # plt.show()
    # pdb.set_trace()

#Do velocity correction of wl-solution. Explicitly after telluric correction
#but before masking. Because the cross-correlation relies on columns being masked.
#Then if you start to move the CCFs around before removing the time-average,
#each masked column becomes slanted. Bad deal.
    rv_cor = 0
    if do_berv_correction == True:
        rv_cor += sp.berv(dp)
    if do_keplerian_correction == True:
        rv_cor -= sp.RV_star(dp)

    if not isinstance(rv_cor, int) and len(list_of_orders) > 0:  #rv_cor is still the int 0 if no velocity correction was applied.
        print('---Reinterpolating data to correct velocities')
        list_of_orders_cor = []
        list_of_sigmas_cor = []
        for i in range(len(list_of_wls)):
            order = list_of_orders[i]
            sigma = list_of_sigmas[i]
            order_cor = order * 0.0
            sigma_cor = sigma * 0.0
            for j in range(len(list_of_orders[0])):
                wl_i = interp.interp1d(list_of_wls[i],
                                       order[j],
                                       bounds_error=False)
                si_i = interp.interp1d(list_of_wls[i],
                                       sigma[j],
                                       bounds_error=False)
                wl_cor = list_of_wls[i] * (
                    1.0 - (rv_cor[j] * u.km / u.s / const.c)
                )  #The minus sign was tested on a slow-rotator.
                order_cor[j] = wl_i(wl_cor)
                sigma_cor[j] = si_i(
                    wl_cor
                )  #I checked that this works because it doesn't affect the SNR, apart from wavelength-shifting it.
            list_of_orders_cor.append(order_cor)
            list_of_sigmas_cor.append(sigma_cor)
            ut.statusbar(i, fun.findgen(len(list_of_wls)))
        # plt.plot(list_of_wls[60],list_of_orders[60][10]/list_of_sigmas[60][10],color='blue')
        # plt.plot(list_of_wls[60],list_of_orders_cor[60][10]/list_of_sigmas_cor[60][10],color='red')
        # plt.show()
        # sys.exit()
        list_of_orders = list_of_orders_cor
        list_of_sigmas = list_of_sigmas_cor

    if len(list_of_orders) != n_orders:
        raise RuntimeError(
            'n_orders is no longer equal to the length of list_of_orders, though it was before. Something went wrong during telluric correction or velocity correction.'
        )

#Compute / create a mask and save it to file (or not)
    if make_mask == True and len(list_of_orders) > 0:
        if do_colour_correction == True:
            print(
                '---Constructing mask with intra-order colour correction applied'
            )
            masking.mask_orders(list_of_wls,
                                ops.normalize_orders(list_of_orders,
                                                     list_of_sigmas,
                                                     colourdeg)[0],
                                dp,
                                maskname,
                                40.0,
                                5.0,
                                manual=True)
        else:
            print(
                '---Constructing mask WITHOUT intra-order colour correction applied.'
            )
            print(
                '---Switch on colour correction if you see colour variations in the 2D spectra.'
            )
            masking.mask_orders(list_of_wls,
                                list_of_orders,
                                dp,
                                maskname,
                                40.0,
                                5.0,
                                manual=True)
        if apply_mask == False:
            print(
                '---WARNING in run_instance: Mask was made but is not applied to data (apply_mask == False)'
            )

#Apply the mask that was previously created and saved to file.
    if apply_mask == True:
        print('---Applying mask')
        list_of_orders = masking.apply_mask_from_file(dp, maskname,
                                                      list_of_orders)
        list_of_sigmas = masking.apply_mask_from_file(dp, maskname,
                                                      list_of_sigmas)
#Interpolate over all isolated NaNs and set bad columns to NaN (so that they are ignored in the CCF)
    if do_xcor == True:
        print('---Healing NaNs')
        list_of_orders = masking.interpolate_over_NaNs(
            list_of_orders
        )  #THERE IS AN ISSUE HERE: INTERPOLATION SHOULD ALSO HAPPEN ON THE SIGMAS ARRAY!
        list_of_sigmas = masking.interpolate_over_NaNs(list_of_sigmas)

#Normalize the orders to their average flux in order to effectively apply a broad-band colour correction (colour is typically a function of airmass and seeing).
    if do_colour_correction == True:
        print('---Normalizing orders to common flux level')
        # plt.plot(list_of_wls[60],list_of_orders[60][10]/list_of_sigmas[60][10],color='blue',alpha=0.4)
        list_of_orders_normalised, list_of_sigmas_normalised, meanfluxes = ops.normalize_orders(
            list_of_orders, list_of_sigmas, colourdeg
        )  #I tested that this works because it doesn't alter the SNR.

        meanfluxes_norm = meanfluxes / np.nanmean(meanfluxes)
    else:
        meanfluxes_norm = fun.findgen(len(list_of_orders[0])) * 0.0 + 1.0  #All unity.
        list_of_orders_normalised = list_of_orders  #Without colour correction, the
        list_of_sigmas_normalised = list_of_sigmas  #uncorrected arrays are passed to xcor below.

    if len(list_of_orders) != n_orders:
        raise RuntimeError(
            'n_orders is no longer equal to the length of list_of_orders, though it was before. Something went wrong during masking or colour correction.'
        )

#Construct the cross-correlation templates in case we will be computing or plotting the CCF.
    list_of_wlts = []  #Initialise these outside the if-block, so that the
    list_of_templates = []  #cross-correlation loop below simply runs over
    outpaths = []  #empty lists if neither do_xcor nor plot_xcor is set.
    if do_xcor == True or plot_xcor == True:

        for templatename in templatelist:
            print(f'---Building template {templatename}')
            wlt, T = models.build_template(templatename,
                                           binsize=0.5,
                                           maxfrac=0.01,
                                           resolution=resolution,
                                           template_library=template_library,
                                           c_subtract=c_subtract)
            T *= (-1.0)
            if np.mean(wlt) < 50.0:  #This is likely in microns:
                print(
                    '------WARNING: The loaded template has a mean wavelength less than 50.0, meaning that it is very likely not in nm, but in microns. I have multiplied by 1,000 now and hope for the best...'
                )
                wlt *= 1000.0
            list_of_wlts.append(wlt)
            list_of_templates.append(T)

            outpath = Path('output') / Path(dataname) / Path(
                libraryname) / Path(templatename)

            if not os.path.exists(outpath):
                print(
                    f"------The output location ({outpath}) didn't exist, I made it now."
                )
                os.makedirs(outpath)
            outpaths.append(outpath)

#Perform the cross-correlation on the entire list of orders.
    for i in range(len(list_of_wlts)):
        templatename = templatelist[i]
        wlt = list_of_wlts[i]
        T = list_of_templates[i]
        outpath = outpaths[i]
        if do_xcor == True:
            print(
                f'---Cross-correlating spectra with template {templatename}.')
            t1 = ut.start()
            rv, ccf, ccf_e, Tsums = xcor(
                list_of_wls,
                list_of_orders_normalised,
                wlt,
                T,
                drv,
                RVrange,
                list_of_errors=list_of_sigmas_normalised)
            ut.end(t1)
            print(f'------Writing CCFs to {str(outpath)}')
            ut.writefits(outpath / 'ccf.fits', ccf)
            ut.writefits(outpath / 'ccf_e.fits', ccf_e)
            ut.writefits(outpath / 'RV.fits', rv)
            ut.writefits(outpath / 'Tsum.fits', Tsums)
        else:
            print(
                f'---Reading CCFs with template {templatename} from {str(outpath)}.'
            )
            if os.path.isfile(outpath / 'ccf.fits') == False:
                raise FileNotFoundError(
                    f'CCF output not located at {outpath}. Rerun with do_xcor=True to create these files?'
                )
        rv = fits.getdata(outpath / 'RV.fits')
        ccf = fits.getdata(outpath / 'ccf.fits')
        ccf_e = fits.getdata(outpath / 'ccf_e.fits')
        Tsums = fits.getdata(outpath / 'Tsum.fits')

        ccf_cor = ccf * 1.0
        ccf_e_cor = ccf_e * 1.0

        print('---Cleaning CCFs')
        ccf_n, ccf_ne, ccf_nn, ccf_nne = clean_ccf(rv, ccf_cor, ccf_e_cor, dp)

        if make_doppler_model == True and skip_doppler_model == False:
            shadow.construct_doppler_model(rv,
                                           ccf_nn,
                                           dp,
                                           shadowname,
                                           xrange=[-200, 200],
                                           Nxticks=20.0,
                                           Nyticks=10.0)
            make_doppler_model = False  # This sets it to False after it's been run once, for the first template.
        if skip_doppler_model == False:
            print('---Reading doppler shadow model from ' + shadowname)
            doppler_model, dsmask = shadow.read_shadow(
                dp, shadowname, rv, ccf
            )  #This returns both the model evaluated on the rv,ccf grid, as well as the mask that blocks the planet trace.
            ccf_clean, matched_ds_model = shadow.match_shadow(
                rv, ccf_nn, dsmask, dp, doppler_model
            )  #THIS IS AN ADDITIVE CORRECTION, SO CCF_NNE DOES NOT NEED TO BE ALTERED AND IS STILL VALID FOR CCF_CLEAN
        else:
            print('---Not performing shadow correction')
            ccf_clean = ccf_nn * 1.0
            matched_ds_model = ccf_clean * 0.0

        if f_w > 0.0:
            print('---Performing high-pass filter on the CCF')
            ccf_clean_filtered, wiggles = filter_ccf(
                rv, ccf_clean, v_width=f_w
            )  #THIS IS ALSO AN ADDITIVE CORRECTION, SO CCF_NNE IS STILL VALID.
        else:
            print('---Skipping high-pass filter')
            ccf_clean_filtered = ccf_clean * 1.0
            wiggles = ccf_clean * 0.0  #This filtering is additive so setting to zero is accurate.

        print('---Weighing CCF rows by mean fluxes that were normalised out')
        ccf_clean_weighted = np.transpose(
            np.transpose(ccf_clean_filtered) * meanfluxes_norm
        )  #MULTIPLYING THE AVERAGE FLUXES BACK IN! NEED TO CHECK THAT THIS ALSO GOES PROPERLY WITH THE ERRORS!
        ccf_nne = np.transpose(np.transpose(ccf_nne) * meanfluxes_norm)

        ut.save_stack(outpath / 'cleaning_steps.fits', [
            ccf, ccf_cor, ccf_nn, ccf_clean, matched_ds_model,
            ccf_clean_filtered, wiggles, ccf_clean_weighted
        ])
        ut.writefits(outpath / 'ccf_cleaned.fits', ccf_clean_weighted)
        ut.writefits(outpath / 'ccf_cleaned_error.fits', ccf_nne)

        print('---Constructing KpVsys')
        Kp, KpVsys, KpVsys_e = construct_KpVsys(rv, ccf_clean_weighted,
                                                ccf_nne, dp)
        ut.writefits(outpath / 'KpVsys.fits', KpVsys)
        ut.writefits(outpath / 'KpVsys_e.fits', KpVsys_e)
        ut.writefits(outpath / 'Kp.fits', Kp)

    return
    #----------------------------------------------------------------------
    #Everything below this return is legacy model-injection code: it is
    #unreachable as written, and references names (inject_model, analysis,
    #cleaning, maskHW) that are not defined in this scope.
    #----------------------------------------------------------------------

    if plot_xcor == True and inject_model == False:
        print('---Plotting KpVsys')
        analysis.plot_KpVsys(rv, Kp, KpVsys, dp)

    #Now repeat it all for the model injection.
    if inject_model == True:
        for modelname in modellist:
            outpath_i = outpath + modelname + '/'
            if do_xcor == True:
                print('---Injecting model ' + modelname)
                list_of_orders_injected = models.inject_model(
                    list_of_wls,
                    list_of_orders,
                    dp,
                    modelname,
                    model_library=model_library
                )  #Start with the unnormalised orders from before.
                #Normalize the orders to their average flux in order to effectively apply
                #a broad-band colour correction (colour is a function of airmass and seeing).
                if do_colour_correction == True:
                    print(
                        '------Normalizing injected orders to common flux level'
                    )
                    list_of_orders_injected, list_of_sigmas_injected, meanfluxes_injected = ops.normalize_orders(
                        list_of_orders_injected, list_of_sigmas, colourdeg)
                    meanfluxes_norm_injected = meanfluxes_injected / np.mean(
                        meanfluxes_injected)
                else:
                    meanfluxes_norm_injected = fun.findgen(
                        len(list_of_orders_injected[0])
                    ) * 0.0 + 1.0  #All unity.

                print('------Cross-correlating injected orders')
                rv_i, ccf_i, ccf_e_i, Tsums_i = analysis.xcor(
                    list_of_wls,
                    list_of_orders_injected,
                    np.flipud(np.flipud(wlt)),
                    T,
                    drv,
                    RVrange,
                    list_of_errors=list_of_sigmas_injected)
                print('------Writing injected CCFs to ' + outpath_i)
                if not os.path.exists(outpath_i):
                    print("---------That path didn't exist, I made it now.")
                    os.makedirs(outpath_i)
                ut.writefits(outpath_i + '/' + 'ccf_i_' + modelname + '.fits',
                             ccf_i)
                ut.writefits(
                    outpath_i + '/' + 'ccf_e_i_' + modelname + '.fits',
                    ccf_e_i)
            else:
                print('---Reading injected CCFs from ' + outpath_i)
                if os.path.isfile(outpath_i + 'ccf_i_' + modelname +
                                  '.fits') == False:
                    print('------ERROR: Injected CCF not located at ' +
                          outpath_i + 'ccf_i_' + modelname + '.fits' +
                          '. Set do_xcor and inject_model to True?')
                    sys.exit()
                if os.path.isfile(outpath_i + 'ccf_e_i_' + modelname +
                                  '.fits') == False:
                    print('------ERROR: Injected CCF error not located at ' +
                          outpath_i + 'ccf_e_i_' + modelname + '.fits' +
                          '. Set do_xcor and inject_model to True?')
                    sys.exit()
                # f.close()
                # f2.close()
                ccf_i = fits.getdata(outpath_i + 'ccf_i_' + modelname +
                                     '.fits')
                ccf_e_i = fits.getdata(outpath_i + 'ccf_e_i_' + modelname +
                                       '.fits')

            print('---Cleaning injected CCFs')
            ccf_n_i, ccf_ne_i, ccf_nn_i, ccf_nne_i = cleaning.clean_ccf(
                rv, ccf_i, ccf_e_i, dp)
            ut.writefits(outpath_i + 'ccf_normalized_i.fits', ccf_nn_i)
            ut.writefits(outpath_i + 'ccf_ne_i.fits', ccf_ne_i)

            # if make_doppler_model == True and skip_doppler_model == False:
            # shadow.construct_doppler_model(rv,ccf_nn,dp,shadowname,xrange=[-200,200],Nxticks=20.0,Nyticks=10.0)
            if skip_doppler_model == False:
                # print('---Reading doppler shadow model from '+shadowname)
                # doppler_model,maskHW = shadow.read_shadow(dp,shadowname,rv,ccf)
                ccf_clean_i, matched_ds_model_i = shadow.match_shadow(
                    rv, ccf_nn_i, dp, doppler_model, maskHW)
            else:
                print(
                    '---Not performing shadow correction on injected spectra either.'
                )
                ccf_clean_i = ccf_nn_i * 1.0
                matched_ds_model_i = ccf_clean_i * 0.0

            if f_w > 0.0:
                ccf_clean_i_filtered, wiggles_i = cleaning.filter_ccf(
                    rv, ccf_clean_i, v_width=f_w)
            else:
                ccf_clean_i_filtered = ccf_clean_i * 1.0

            ut.writefits(outpath_i + 'ccf_cleaned_i.fits',
                         ccf_clean_i_filtered)
            ut.writefits(outpath + 'ccf_cleaned_i_error.fits', ccf_nne)

            print(
                '---Weighing injected CCF rows by mean fluxes that were normalised out'
            )
            ccf_clean_i_filtered = np.transpose(
                np.transpose(ccf_clean_i_filtered) * meanfluxes_norm_injected
            )  #MULTIPLYING THE AVERAGE FLUXES BACK IN! NEED TO CHECK THAT THIS ALSO GOES PROPERLY WITH THE ERRORS!
            ccf_nne_i = np.transpose(
                np.transpose(ccf_nne_i) * meanfluxes_norm_injected)

            print('---Constructing injected KpVsys')
            Kp, KpVsys_i, KpVsys_e_i = analysis.construct_KpVsys(
                rv, ccf_clean_i_filtered, ccf_nne_i, dp)
            ut.writefits(outpath_i + 'KpVsys_i.fits', KpVsys_i)
            # ut.writefits(outpath+'KpVsys_e_i.fits',KpVsys_e_i)
            if plot_xcor == True:
                print('---Plotting KpVsys with ' + modelname + ' injected.')
                analysis.plot_KpVsys(rv, Kp, KpVsys, dp, injected=KpVsys_i)
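
A hedged sketch of the parameter dictionary that run_instance expects, restricted to the keys the function actually reads above; all paths and names are hypothetical placeholders.

p = {
    'dp': 'data/mydataset',  #Folder containing order_*.fits, wave_*.fits and the config.
    'modellist': 'mymodel',  #A str or list of model names.
    'templatelist': 'mytemplate',  #A str or list of template names.
    'model_library': 'models/library',  #Hypothetical library paths.
    'template_library': 'templates/library',
    'shadowname': 'shadow_v1',
    'maskname': 'mask_v1',
    'RVrange': 300.0,  #km/s.
    'drv': 1.0,  #km/s.
    'f_w': 150.0,  #High-pass filter width in km/s; 0 skips filtering.
    'do_colour_correction': True,
    'do_telluric_correction': False,
    'do_xcor': True,
    'plot_xcor': False,
    'make_mask': False,
    'apply_mask': True,
    'c_subtract': True,
    'do_berv_correction': True,
    'do_keplerian_correction': True,
    'make_doppler_model': False,
    'skip_doppler_model': True,
}
run_instance(p)
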
Example #5
def apply_telluric_correction(inpath, list_of_wls, list_of_orders,
                              list_of_sigmas):
    """
    This applies a set of telluric transmission spectra (one per exposure in the time
    series, computed by molecfit) that were written to a pickle file by
    write_telluric_transmission_to_file.

    A list of errors is provided to propagate the telluric correction into the error
    array as well.

    Parameters
    ----------
    inpath : str, path like
        The path to the pickled transmission spectra.

    list_of_wls : list
        List of wavelength axes.

    list_of_orders : list
        List of 2D spectral orders, matching the wavelength axes in dimensions and in number.

    list_of_sigmas : list
        List of 2D error matrices, matching the dimensions and number of list_of_orders.

    Returns
    -------
    list_of_orders_corrected : list
        List of 2D spectral orders, telluric corrected.

    list_of_sigmas_corrected : list
        List of 2D error matrices, telluric corrected.

    """
    import scipy.interpolate as interp
    import numpy as np
    import tayph.util as ut
    import tayph.functions as fun
    from tayph.vartests import dimtest, postest, typetest, nantest
    import copy

    T = read_telluric_transmission_from_file(inpath)
    wlT = T[0]
    fxT = T[1]

    typetest(list_of_wls, list, 'list_of_wls in apply_telluric_correction()')
    typetest(list_of_orders, list,
             'list_of_orders in apply_telluric_correction()')
    typetest(list_of_sigmas, list,
             'list_of_sigmas in apply_telluric_correction()')
    typetest(wlT, list,
             'list of telluric wave-axes in apply_telluric_correction()')
    typetest(
        fxT, list,
        'list of telluric transmission spectra in apply_telluric_correction()')

    No = len(list_of_wls)  #Number of orders.
    x = fun.findgen(No)
    Nexp = len(wlT)

    #Test dimensions
    if No != len(list_of_orders):
        raise Exception(
            'Runtime error in telluric correction: List of wavelength axes and List '
            'of orders do not have the same length.')
    if Nexp != len(fxT):
        raise Exception(
            'Runtime error in telluric correction: List of telluric wls and telluric '
            'spectra read from file do not have the same length.')
    if Nexp != len(list_of_orders[0]):
        raise Exception(
            f'Runtime error in telluric correction: List of telluric spectra and data'
            f'spectra read from file do not have the same length ({Nexp} vs {len(list_of_orders[0])}).'
        )

    #Corrected orders will be stored here.
    list_of_orders_cor = []
    list_of_sigmas_cor = []

    #Do the correction order by order:
    for i in range(No):
        order = list_of_orders[i]
        order_cor = order * 0.0
        error = list_of_sigmas[i]
        error_cor = error * 0.0
        wl = copy.deepcopy(list_of_wls[i])  #input wl axis, either 1D or 2D.
        #If it is 1D, we make it 2D by tiling it vertically:
        if wl.ndim == 1: wl = np.tile(wl, (Nexp, 1))  #Tile it into a 2D thing.

        #If read (2D) or tiled (1D) correctly, wl and order should have the same shape:
        dimtest(wl, np.shape(order),
                f'Wl axis of order {i}/{No} in apply_telluric_correction()')
        dimtest(error, np.shape(order),
                f'errors {i}/{No} in apply_telluric_correction()')
        for j in range(Nexp):
            T_i = interp.interp1d(wlT[j], fxT[j],
                                  fill_value="extrapolate")(wl[j])
            postest(T_i,
                    f'T-spec of exposure {j} in apply_telluric_correction()')
            nantest(T_i,
                    f'T-spec of exposure {j} in apply_telluric_correction()')
            order_cor[j] = order[j] / T_i
            error_cor[j] = error[
                j] / T_i  #I checked that this works because the SNR before and after
            #telluric correction is identical.
        list_of_orders_cor.append(order_cor)
        list_of_sigmas_cor.append(error_cor)
        ut.statusbar(i, x)
    return (list_of_orders_cor, list_of_sigmas_cor)
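
The heart of the loop above is an interpolation of each telluric transmission spectrum onto the data wavelength grid, followed by a division of both flux and errors (which leaves the SNR unchanged). A self-contained sketch of that single step, on synthetic arrays:

import numpy as np
import scipy.interpolate as interp

wl = np.linspace(500.0, 510.0, 2048)  #Data wavelength axis (nm).
spec = np.full(2048, 100.0)  #One synthetic exposure.
err = np.sqrt(spec)
wlT_j = np.linspace(499.0, 511.0, 4096)  #Telluric model wavelength axis.
fxT_j = 1.0 - 0.3 * np.exp(-0.5 * ((wlT_j - 505.0) / 0.05)**2)  #One telluric line.

T_i = interp.interp1d(wlT_j, fxT_j, fill_value="extrapolate")(wl)
spec_cor = spec / T_i  #Telluric-corrected flux.
err_cor = err / T_i  #Dividing the errors by the same factor preserves the SNR.
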
Example #6
def clean_block(wl,
                block,
                deg=0,
                w=200,
                nsigma=5.0,
                verbose=False,
                renorm=True):
    """This quickly cleans a spectral block by performing trimming of zeroes at the edges,
    setting zeroes and negative values to NaN, normalising the flux along both axes, rejecting
    outlier values by using a running MAD, and optionally detrending using polynomials. This is
    intended for quick-look cross-correlation when reading in the data.

    The output is the trimmed wavelength axis, the outlier-rejected sequence of spectra (block),
    and the residuals after outlier rejection. If deg is set greater than 0, polynomials will be
    fit, and the residuals after polyfitting are also returned. So setting deg > 0 increases the
    number of output variables from 3 to 4.

    I've gone mad trying to make this faster, but medians simply don't collapse into nice matrix
    operations. What remains is to parallelise, but doing that right the first time (I don't have
    experience here) in a platform-agnostic way seems unlikely.

    Set the renorm keyword to restore the average flux of each spectrum after cleaning. If set to
    False, the spectra are returned normalised, with row averages equal to unity.
    """
    import numpy as np
    import tayph.functions as fun
    import warnings
    import copy
    import tayph.util as ut
    block[np.abs(block) < 1e-10 *
          np.nanmedian(block)] = 0.0  #First set very small values to zero.
    colsum = np.nansum(
        np.abs(block), axis=0
    )  #Anything that is zero in this sum (i.e. the all-zero columns) will be identified.
    npx = len(colsum)
    leftsize = npx - len(np.trim_zeros(
        colsum, 'f'))  #Size of zero-padding on the left.
    rightsize = npx - len(np.trim_zeros(
        colsum, 'b'))  #Size of zero-padding on the right.

    wl_trimmed = wl[leftsize:npx - rightsize - 1]
    block = block[:, leftsize:npx - rightsize - 1]
    block[block <=
          0] = np.nan  #We get rid of remaining zeroes and negative values:

    #Compute residuals of s1d spectra by double-normalisation:
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        avg_flux = np.nanmean(block, axis=1)
        block = np.transpose(np.transpose(block) / avg_flux)
        block_avg = np.nanmean(block, axis=0)
        r = block / block_avg

        if renorm == False:
            avg_flux = avg_flux * 0.0 + 1.0

        #Outlier rejection in the 2D residuals via a running MAD:
        MAD = fun.running_MAD_2D(r, w, verbose=verbose)  #This can take long.
        sel = np.abs(r - np.nanmean(r)) / MAD > nsigma  #Compute the outlier selection once,
        r[sel] = np.nan  #so that the same pixels are masked in the residuals...
        block[sel] = np.nan  #...and in the data block.
    if deg > 0:
        r2 = copy.deepcopy(r)
        for j, s in enumerate(r):
            nansel = np.isnan(s)
            p = np.polyfit(wl_trimmed[~nansel], s[~nansel], deg)
            block[j] /= np.polyval(p, wl_trimmed)
            r2[j] /= np.polyval(p, wl_trimmed)
            ut.statusbar(j, len(r))
        return (wl_trimmed, np.transpose(np.transpose(block) * avg_flux), r, r2
                )  #Avg_flux is 1 unless renorm=True.
    return (wl_trimmed, np.transpose(np.transpose(block) * avg_flux), r)
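
A synthetic usage sketch for clean_block, assuming tayph is installed: the block carries zero padding at both edges and one hot pixel, which are exactly the artefacts the routine is built to handle.

import numpy as np

rng = np.random.default_rng(1)
wl = np.linspace(500.0, 510.0, 1000)
block = rng.normal(1000.0, 10.0, size=(15, 1000))  #15 exposures x 1000 pixels.
block[:, :20] = 0.0  #Zero padding on the left edge...
block[:, -20:] = 0.0  #...and on the right edge.
block[7, 500] = 5000.0  #One hot pixel.

wl_t, block_t, r = clean_block(wl, block, w=200, nsigma=5.0)
print(np.isnan(block_t[7, 500 - 20]))  #The hot pixel is NaN in the trimmed block.
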
Example #7
def blur_rotate(wl, order, dv, Rp, P, inclination, status=False, fast=False):
    """This function takes a spectrum and blurs it using a rotation x Gaussian
    kernel which has a FWHM width of dv km/s everywhere. Meaning that its width changes
    dynamically.
    Because the kernel needs to be recomputed on each element of the wavelength axis
    individually, this operation is much slower than convolution with a constant kernel,
    in which a simple shifting of the array, rather than a recomputation of the rotation
    profile is sufficient. By setting the fast keyword, the input array will first
    be oversampled onto a constant-velocity grid to enable the usage of a constant kernel,
    after which the result is interpolated back to the original grid.

    Input:
    The wavelength axis wl.
    The spectral axis order.
    The FWHM of the resolution element in km/s.
    The Radius of the rigid body in Rj.
    The periodicity of the rigid body rotation in days.
    The inclination of the spin axis in degrees.

    Wavelength and order need to be numpy arrays and have the same number of elements.
    Rp, P and i need to be scalar floats.

    Output:
    The blurred spectral axis, with the same dimensions as wl and order.


    WARNING: THIS FUNCTION HANDLES NANS POORLY. I HAVE THEREFORE DECIDED CURRENTLY
    TO REQUIRE NON-NAN INPUT.

    Parameters
    ----------
    wl : list, np.ndarray
        The wavelength array.

    order : list, np.ndarray.
        The spectral axis.

    dv: float
        The FWHM of a resolution element in km/s.

    Rp: float
        The radius of the planet in jupiter radii.

    P: float
        The rotation period of the planet. For tidally locked planets, this is equal
        to the orbital period.

    inclination:
        The inclination of the spin axis in degrees. Presumed to be close to 90 degrees
        for transiting planets

    status: bool
        Output a statusbar, but only if fast == False.

    fast: bool
        Re-interpolate the input on a constant-v grid in order to speed up the computation
        of the convolution by eliminating the need to re-interpolate the kernel every step.



    Returns
    -------
    order_blurred : np.array
        The rotation-broadened spectrum on the same wavelength grid as the input.

    Example
    -------
    >>> import tayph.functions as fun
    >>> wl = fun.findgen(4000)*0.001+500.0
    >>> fx = wl*0.0
    >>> fx[2000] = 1.0
    >>> fx_blurred1 = blur_rotate(wl,fx,3.0,1.5,0.8,90.0,status=False,fast=False)
    >>> fx_blurred2 = blur_rotate(wl,fx,3.0,1.5,0.8,90.0,status=False,fast=True)
    """

    import numpy as np
    import tayph.util as ut
    import tayph.functions as fun
    from tayph.vartests import typetest, nantest, dimtest
    from matplotlib import pyplot as plt
    import astropy.constants as const
    import astropy.units as u
    import time
    import sys
    import pdb
    from scipy import interpolate
    typetest(dv, float, 'dv in blur_rotate()')
    typetest(wl, [list, np.ndarray], 'wl in blur_rotate()')
    typetest(order, [list, np.ndarray], 'order in blur_rotate()')
    typetest(P, float, 'P in blur_rotate()')
    typetest(Rp, float, 'Rp in blur_rotate()')
    typetest(inclination, float, 'inclination in blur_rotate()')
    typetest(status, bool, 'status in blur_rotate()')
    typetest(fast, bool, 'fast in blur_rotate()')
    nantest(wl, 'wl in blur_rotate()')
    nantest(order, 'order in blur_rotate()')
    dimtest(wl, [0], 'wl in blur_rotate()')
    dimtest(order, [len(wl)],
            'order in blur_rotate()')  #Test that wl and order are 1D, and that
    #they have the same length.

    if np.min(np.array([dv, P, Rp])) <= 0.0:
        raise Exception(
            "ERROR in blur_rotate: dv, P and Rp should be strictly positive.")

    #ut.typetest_array('wl',wl,np.float64)
    #ut.typetest_array('order',order,np.float64)
    #This is not possible because order may be 2D...
    #And besides, you can have floats, np.float32 and np.float64... All of these would
    #need to pass. Need to fix typetest_array some day.

    order_blurred = order * 0.0  #init the output.
    truncsize = 5.0  #The Gaussian is truncated at 5 sigma from the most extreme points of the RV amplitude.
    sig_dv = dv / (2 * np.sqrt(2.0 * np.log(2))
                   )  #Transform FWHM to Gaussian sigma. In km/s.
    deriv = derivative(wl)
    if min(deriv) <= 0:
        raise Exception(
            "ERROR in ops.blur_rotate: WL derivative is not strictly positive. Sort wl in ascending order."
        )
    sig_wl = wl * sig_dv / (const.c.to('km/s').value)  #in nm
    sig_px = sig_wl / deriv

    n = 1000.0
    a = fun.findgen(n) / (n - 1) * np.pi
    rv = np.cos(a) * np.sin(
        np.radians(inclination)) * (2.0 * np.pi * Rp * const.R_jup /
                                    (P * u.day)).to('km/s').value  #in km/s
    trunc_dist = np.round(sig_px * truncsize + np.max(rv) * wl /
                          (const.c.to('km/s').value) / deriv).astype(int)
    # print('Maximum rotational rv: %s' % max(rv))
    # print('Sigma_px: %s' % np.nanmean(np.array(sig_px)))

    rvgrid_max = (np.max(trunc_dist) + 1.0) * sig_dv + np.max(rv)
    rvgrid_n = rvgrid_max / dv * 100.0  #100 samples per lsf fwhm.
    rvgrid = (
        fun.findgen(2 * rvgrid_n + 1) - rvgrid_n
    ) / rvgrid_n * rvgrid_max  #Need to make sure that this is wider than the truncation bin and more finely sampled than wl - everywhere.

    lsf = rvgrid * 0.0
    #We loop through velocities in the velocity grid to build up the sum of Gaussians
    #that is the LSF.
    for v in rv:
        lsf += fun.gaussian(
            rvgrid, 1.0, v, sig_dv
        )  #This defines the LSF on a velocity grid with high fidelity.
    if fast:
        wlt, fxt, dv = constant_velocity_wl_grid(wl, order, 4)
        dv_grid = rvgrid[1] - rvgrid[0]

        len_rv_grid_low = int(max(rvgrid) / dv * 2 - 2)
        # print(len_rv_grid_low)
        # print(len(fun.findgen(len_rv_grid_low)))
        # print(len_rv_grid_low%2)
        if len_rv_grid_low % 2 == 0:
            len_rv_grid_low -= 1
        rvgrid_low = fun.findgen(
            len_rv_grid_low) * dv  #Slightly smaller than the original grid.
        rvgrid_low -= 0.5 * np.max(rvgrid_low)
        lsf_low = interpolate.interp1d(rvgrid, lsf)(rvgrid_low)
        lsf_low /= np.sum(
            lsf_low
        )  #This is now an LSF on a grid with the same spacing as the data has.
        #This means I can use it directly as a convolution kernel:
        fxt_blurred = convolve(fxt, lsf_low, edge_degree=1, fit_width=1)
        #And interpolate back to where it came from:
        order_blurred = interpolate.interp1d(wlt,
                                             fxt_blurred,
                                             bounds_error=False)(wl)
        #I can use interp1d because after blurring, we are now oversampled.
        # order_blurred2 = bin_avg(wlt,fxt_blurred,wl)
        return (order_blurred)

    #Now we loop through the wavelength grid to place this LSF at each wavelength position.
    for i in range(0, len(wl)):
        binstart = max([0, i - trunc_dist[i]])
        binend = i + trunc_dist[i]
        wlbin = wl[binstart:binend]

        wlgrid = wl[i] * rvgrid / (const.c.to('km/s').value) + wl[
            i]  #This converts the velocity grid to a d-wavelength grid centered on wl[i].
        #print([np.min(wlbin),np.min(wlgrid),np.max(wlbin),np.max(wlgrid)])

        i_wl = interpolate.interp1d(
            wlgrid, lsf, bounds_error=False, fill_value='extrapolate'
        )  #Extrapolate should not be necessary but sometimes there is a minute mismatch between the
        #start and end wavelengths of the constructed grid and the bin.
        try:
            lsf_wl = i_wl(wlbin)
        except:
            ut.tprint(
                'Error in interpolating LSF onto wlbin. Pausing to debug.')
            pdb.set_trace()
        k_n = lsf_wl / np.sum(
            lsf_wl
        )  #Normalize at each instance of the interpolation to make sure flux is conserved exactly.
        order_blurred[i] = np.sum(k_n * order[binstart:binend])
        if status == True:
            ut.statusbar(i, len(wl))
    return (order_blurred)
Example #8
def apply_telluric_correction(inpath, list_of_wls, list_of_orders,
                              list_of_sigmas):
    """
    This applies a set of telluric transmission spectra (one per exposure in the time
    series, computed by molecfit) that were written to a pickle file by
    write_telluric_transmission_to_file.

    A list of errors is provided to propagate the telluric correction into the error
    array as well.

    Parameters
    ----------
    inpath : str, path like
        The path to the pickled transmission spectra.

    list_of_wls : list
        List of wavelength axes.

    list_of_orders : list
        List of 2D spectral orders, matching the wavelength axes in dimensions and in number.

    list_of_sigmas : list
        List of 2D error matrices, matching the dimensions and number of list_of_orders.

    Returns
    -------
    list_of_orders_corrected : list
        List of 2D spectral orders, telluric corrected.

    list_of_sigmas_corrected : list
        List of 2D error matrices, telluric corrected.

    """
    import scipy.interpolate as interp
    import numpy as np
    import tayph.util as ut
    import tayph.functions as fun
    from tayph.vartests import dimtest, postest, typetest, nantest
    wlT, fxT = read_telluric_transmission_from_file(inpath)
    typetest(list_of_wls, list, 'list_of_wls in apply_telluric_correction()')
    typetest(list_of_orders, list,
             'list_of_orders in apply_telluric_correction()')
    typetest(list_of_sigmas, list,
             'list_of_sigmas in apply_telluric_correction()')
    typetest(wlT, list,
             'list of telluric wave-axes in apply_telluric_correction()')
    typetest(
        fxT, list,
        'list of telluric transmission spectra in apply_telluric_correction()')

    No = len(list_of_wls)
    x = fun.findgen(No)

    if No != len(list_of_orders):
        raise Exception(
            'Runtime error in telluric correction: List of data wls and List of orders do not have the same length.'
        )

    Nexp = len(wlT)

    if Nexp != len(fxT):
        raise Exception(
            'Runtime error in telluric correction: List of telluric wls and telluric spectra read from file do not have the same length.'
        )

    if Nexp != len(list_of_orders[0]):
        raise Exception(
            f'Runtime error in telluric correction: List of telluric spectra and data spectra read from file do not have the same length ({Nexp} vs {len(list_of_orders[0])}).'
        )
    list_of_orders_cor = []
    list_of_sigmas_cor = []

    for i in range(No):  #Do the correction order by order.
        order = list_of_orders[i]
        order_cor = order * 0.0
        error = list_of_sigmas[i]
        error_cor = error * 0.0
        wl = list_of_wls[i]
        dimtest(order, [0, len(wl)],
                f'order {i}/{No} in apply_telluric_correction()')
        dimtest(error, np.shape(order),
                f'errors {i}/{No} in apply_telluric_correction()')

        for j in range(Nexp):
            T_i = interp.interp1d(wlT[j], fxT[j], fill_value="extrapolate")(wl)
            postest(T_i,
                    f'T-spec of exposure {j} in apply_telluric_correction()')
            nantest(T_i,
                    f'T-spec of exposure {j} in apply_telluric_correction()')
            order_cor[j] = order[j] / T_i
            error_cor[j] = error[
                j] / T_i  #I checked that this works because the SNR before and after telluric correction is identical.
        list_of_orders_cor.append(order_cor)
        list_of_sigmas_cor.append(error_cor)
        ut.statusbar(i, x)
    return (list_of_orders_cor, list_of_sigmas_cor)
Example #9
def normalize_orders(list_of_orders,
                     list_of_sigmas,
                     deg=0,
                     nsigma=4,
                     sinusoid=False):
    """
    If deg is set to 0, this function will normalise based on the mean flux in each order.
    If set higher, it will remove the average spectrum in each order and fit a polynomial
    to the residual. This means that in the presence of spectral lines, the fluxes will be
    slightly lower than if deg=0 is used. nsigma is only used if deg > 0, and is used to
    throw away outliers from the polynomial fit. The program also computes the total
    mean flux of each exposure in the time series - totalled over all orders. These
    are important to correctly weigh the cross-correlation functions later. The
    inter-order colour correction is assumed to be an insignificant modification to
    these weights.

    Parameters
    ----------
    list_of_orders : list
        The list of 2D orders that need to be normalised.

    list_of_sigmas : list
        The list of 2D error matrices corresponding to the 2D orders that need to be normalised.

    deg : int
        The polynomial degree to remove. If set to 0, only the average flux is removed. If higher,
        polynomial fits are made to the residuals after removal of the average spectrum.

    nsigma : int, float
        The number of sigmas beyond which outliers are rejected from the polynomial fit.
        Only used when deg > 0.

    sinusoid : bool
        If True (and deg > 0), fit a polynomial plus a sinusoid (via fun.polysinfit) instead of
        a plain polynomial.

    Returns
    -------
    out_list_of_orders : list
        The normalised 2D orders.
    out_list_of_sigmas : list
        The corresponding errors.
    meanfluxes : np.array
        The mean flux of each exposure in the time series, averaged over all orders.
    """
    import numpy as np
    import tayph.functions as fun
    from tayph.vartests import dimtest, postest, typetest
    import tayph.util as ut
    import warnings
    import pdb
    typetest(list_of_orders, list, 'list_of_orders in ops.normalize_orders()')
    typetest(list_of_sigmas, list, 'list_of_sigmas in ops.normalize_orders()')

    dimtest(list_of_orders[0], [0, 0])  #Test that the first order is 2D.
    dimtest(list_of_sigmas[0],
            [0, 0])  #And that the corresponding sigma array is, as well.
    n_exp = np.shape(list_of_orders[0])[0]  #Get the number of exposures.
    for i in range(len(list_of_orders)):  #Should be the same for all orders.
        dimtest(list_of_orders[i], [n_exp, 0])
        dimtest(list_of_sigmas[i], np.shape(list_of_orders[i]))
    typetest(deg, int, 'degree in ops.normalize_orders()')
    typetest(nsigma, [int, float], 'nsigma in ops.normalize_orders()')
    # postest(deg,'degree in ops.normalize_orders()')
    postest(nsigma, 'nsigma in ops.normalize_orders()')

    N = len(list_of_orders)
    out_list_of_orders = []
    out_list_of_sigmas = []

    #First compute the exposure-to-exposure flux variations to be used as weights.
    meanfluxes = np.zeros(n_exp)  #fun.findgen(n_exp)*0.0

    ### suggestions for improvement
    """
    m = [np.nanmedian(list_of_orders[i], axis=1) for i in range(N)]
    skipped = np.where(np.sum(np.isnan(m)) > 0)[0]
    meanfluxes = np.sum(m[np.sum(np.isnan(m)) <= 0]) / len(m[np.sum(np.isnan(m)) <= 0])
    """

    N_i = 0
    for i in range(N):
        m = np.nanmedian(list_of_orders[i], axis=1)  #Median or mean?
        if np.sum(np.isnan(m)) > 0:
            print(
                '---Warning in normalize_orders: Skipping order %s because many NaNs are present.'
                % i)
        else:
            N_i += 1
            meanfluxes += m  #These contain the exposure-to-exposure variability of the time-series.
    meanfluxes /= N_i  #These are the weights.

    if deg == 0:

        ### suggestion for improvement: (no loop needed)!
        """
        #meanflux = m # we already did that above!
        meanblock = m / np.nanmean(meanflux)
        out_list_of_orders.append((list_of_orders[i].T/meanblock).T)
        """
        for i in range(N):
            #What I'm doing here is probably stupid and numpy division will probably work just fine without
            #IDL-relics.
            n_px = np.shape(list_of_orders[i])[1]
            meanflux = np.nanmedian(
                list_of_orders[i],
                axis=1)  #Average flux in each order. Median or mean?
            meanblock = fun.rebinreform(
                meanflux / np.nanmean(meanflux), n_px
            ).T  #This is a slow operation. Row-by-row division is better done using a double-transpose...
            out_list_of_orders.append(list_of_orders[i] / meanblock)
            out_list_of_sigmas.append(list_of_sigmas[i] / meanblock)
    else:
        for i in range(N):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", category=RuntimeWarning)
                meanspec = np.nanmean(list_of_orders[i],
                                      axis=0)  #Average spectrum in each order.
            x = np.array(range(len(meanspec)))
            poly_block = list_of_orders[
                i] * 0.0  #Array that will host the polynomial fits.
            colour = list_of_orders[
                i] / meanspec  #What if there are zeroes? I.e. padding around the edges of the order?
            for j, s in enumerate(list_of_orders[i]):
                idx = np.isfinite(colour[j])
                if np.sum(idx) > 0:
                    p = np.poly1d(np.polyfit(x[idx], colour[j][idx], deg))(
                        x)  #Polynomial fit to the colour variation.
                    res = colour[
                        j] / p - 1.0  #The residual, which is flat around zero if it's a good fit. This has all sorts of line residuals that we need to throw out.
                    #We do that using the weight keyword of polyfit, and just set all those weights to zero.
                    sigma = np.nanstd(res)
                    w = x * 0.0 + 1.0  #Start with a weight function that is 1.0 everywhere.
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore",
                                              category=RuntimeWarning)
                        w[np.abs(res) > nsigma * sigma] = 0.0
                    if sinusoid == False:
                        p2 = np.poly1d(
                            np.polyfit(x[idx], colour[j][idx], deg, w=w[idx])
                        )(
                            x
                        )  #Second, weighted polynomial fit to the colour variation.
                    else:
                        p2 = fun.polysin(
                            x,
                            *fun.polysinfit(x[idx],
                                            colour[j][idx],
                                            deg,
                                            stepsize=1,
                                            polyprimer=True,
                                            lmfit=True,
                                            w=w[idx]))
                    poly_block[j] = p2

            out_list_of_orders.append(list_of_orders[i] / poly_block)
            out_list_of_sigmas.append(list_of_sigmas[i] / poly_block)
            ut.statusbar(i, N)
    return (out_list_of_orders, out_list_of_sigmas, meanfluxes)
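
A synthetic sketch for normalize_orders with deg=0 (mean-flux normalisation only), assuming tayph is installed: the recovered meanfluxes trace the injected exposure-to-exposure flux trend.

import numpy as np
import tayph.operations as ops

rng = np.random.default_rng(2)
n_exp, n_px = 10, 400
trend = np.linspace(1.0, 0.7, n_exp)  #Fake exposure-to-exposure flux variation.
order = rng.normal(1000.0, 5.0, size=(n_exp, n_px)) * trend[:, None]
sigma = np.sqrt(order)

orders_n, sigmas_n, meanfluxes = ops.normalize_orders([order], [sigma], deg=0)
print(meanfluxes / np.nanmean(meanfluxes))  #Traces the injected trend, up to noise.
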