Example #1
def spline_interpolation(pointx, pointy, wave, wave_temp, flux, flux_temp, axis,
                         leg_instance, con_instance, linewidth=2.0, endpoints='y',
                         endpoint_order=4):
    """Sort spline points and interpolate between marked continuum points"""
    import numpy as np
    from numpy.polynomial import chebyshev
    from scipy.interpolate import splrep, splev
    
    # Insert endpoints
    if endpoints == 'y':
        sort_array = np.argsort(pointx)
        x = np.array(pointx)[sort_array]
        y = np.array(pointy)[sort_array]
        chebfit = chebyshev.chebfit(x, y, deg=endpoint_order)
        chebfitval = chebyshev.chebval(wave, chebfit)
        i = wave[150], wave[-150]
        window1 = ((i[0] - 70) <= wave) & (wave <= (i[0] + 70))
        window2 = ((i[1] - 70) <= wave) & (wave <= (i[1] + 70))
        y_val = (np.median(chebfitval[window1]).astype(np.float64),
                 np.median(chebfitval[window2]).astype(np.float64))
        pointx = np.concatenate([pointx, i])
        pointy = np.concatenate([pointy, y_val])
        # f8 (defined elsewhere in the module) is assumed to return the indices of unique values
        ind_uni = f8(pointx)
        pointx = np.array(pointx)[ind_uni]
        pointy = np.array(pointy)[ind_uni]
        try:
            leg_instance.remove()
        except AttributeError:
            pass
        finally:
            leg_instance, = axis.plot(wave, chebfitval, color='black', lw=linewidth,
                                      label='chebyshev fit', zorder=10)


    # print(pointx,pointy)
    #Sort numerically
    sort_array = np.argsort(pointx)
    x = np.array(pointx)[sort_array]
    y = np.array(pointy)[sort_array]


    from gen_methods import smooth
    # Interpolate: cubic spline through the sorted continuum points
    spline = splrep(x, y, k=3)
    continuum = splev(wave, spline)
    # window_len=1 leaves the spline essentially unsmoothed; raise it to smooth the continuum
    continuum = smooth(continuum, window_len=1, window='hanning')
    
    # Plot
    try:
        con_instance.remove()
    except AttributeError:
        pass
    finally:
        con_instance, = axis.plot(wave, continuum, color='red', lw=linewidth,
                                  label='continuum', zorder=10)

    return continuum, leg_instance, con_instance
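A minimal usage sketch, assuming the gen_methods module used inside the function is importable (and, for endpoints='y', an f8 helper returning unique-value indices); the synthetic spectrum, continuum points and Matplotlib axis below are illustrative only:

import numpy as np
import matplotlib.pyplot as plt

wave = np.linspace(3000.0, 10000.0, 2000)           # wavelength grid [AA]
flux = 1.0 + 0.05 * np.random.randn(wave.size)      # flat synthetic spectrum
pointx = [3500.0, 4800.0, 6000.0, 7500.0, 9500.0]   # user-marked continuum points
pointy = [1.00, 1.02, 0.99, 1.01, 0.98]

fig, ax = plt.subplots()
ax.plot(wave, flux, lw=0.5)
continuum, leg, con = spline_interpolation(pointx, pointy, wave, None, flux, None,
                                           ax, None, None, endpoints='n')
plt.show()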
Example #2
def main():
    """Main script to prepare x-shooter observations for combination"""
    from astropy.io import fits
    import glob
    import matplotlib.pyplot as pl
    from methods import latexify
    latexify()
    import numpy as np
    from xshoo.combine import inter_arm_cut

    #Files
    obj_name = 'SDSS1437-0147'
    root_dir = '/Users/jselsing/Work/X-Shooter/CompositeRedQuasar/processed_data/'+obj_name
    object_files = glob.glob(root_dir+'/OBJECT/*IDP*.fits')
    transmission_files = glob.glob(root_dir+'/transmission*.fits')
    arms = ['UVB', 'VIS', 'NIR']
    wl_out = []
    flux_out = []
    flux_uncorr_out = []
    err_out = []
    start = []
    end = []

    for n in arms:
        print('In arm: '+n)


        #Read in object spectrum
        obser = [k for k in object_files if n in k]
        ob = fits.open(obser[0])
        wl = 10.0*ob[1].data.field('WAVE')[0]
        flux = ob[1].data.field('FLUX')[0]
        err = ob[1].data.field('ERR')[0]

        wl_tmp, flux_uncorr, err_tmp, start_tmp, end_tmp = inter_arm_cut(wl, flux, err, n, start, end)
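        # Divide out the telluric transmission for the VIS and NIR arms; transmission
        # values near zero are reset to 1 so the division does not blow up in
        # saturated telluric bands.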
        if n == 'VIS' or n == 'NIR':
            transmission = fits.open([k for k in transmission_files if n in k][0])[0].data
            for j, k in enumerate(transmission):
                if k <= 1e-10:
                    transmission[j] = 1
            flux /= transmission
            err /= transmission
        wl, flux, err, start, end = inter_arm_cut(wl, flux, err, n, start, end)

        wl_out.append(wl)
        flux_out.append(flux)
        err_out.append(err)
        flux_uncorr_out.append(flux_uncorr)

    wl_out = np.hstack(wl_out)
    flux_out = np.hstack(flux_out)
    err_out = np.hstack(err_out)
    flux_uncorr_out = np.hstack(flux_uncorr_out)

    bp_map = []
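    # Build a crude bad-pixel map: flag pixels that jump more than ~10 per cent
    # above or below the previous pixel, or that are negative.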
    for j, (k, l) in enumerate(zip(flux_out[:-1], err_out[:-1])):
        if k > 1.1 * flux_out[j-1] or k < 0:
            bp_map.append(1)
        elif k < 0.90 * flux_out[j-1]:
            bp_map.append(1)
        else:
            bp_map.append(0)
    bp_map.append(1)

    import json
    import urllib2

    query_terms = dict()
    query_terms["ra"] = str(ob[0].header['RA'])+'d' #"185.1d"
    query_terms["dec"] = str(ob[0].header['DEC'])  #"56.78"
    query_terms["radius"] = "5.0"

    url = "http://api.sdss3.org/spectrumQuery?" + '&'.join(["{0}={1}".format(key, value) for key, value in query_terms.items()])
    print(url)
    # make call to API
    response = urllib2.urlopen(url)

    # read response, converting JSON format to Python list
    matching_ids = json.loads(response.read())
    print(json.dumps(matching_ids, indent=4))

    # get the first id
    spec_id = matching_ids[0]

    url = "http://api.sdss3.org/spectrum?id={0}&format=json".format(spec_id)

    response = urllib2.urlopen(url)
    result = json.loads(response.read())
    SDSS_spectrum = result[spec_id]

    wl_sdss = np.array(SDSS_spectrum["wavelengths"])
    flux_sdss =  np.array(SDSS_spectrum["flux"])
    z_sdss = np.array(SDSS_spectrum["z"])
    z_sdss_err = np.array(SDSS_spectrum["z_err"])

    # Pad the SDSS arrays with zeros so they match the length of the X-shooter arrays
    wl_sdss = np.concatenate([wl_sdss,np.zeros(len(wl_out) - len(wl_sdss))])
    flux_sdss = np.concatenate([flux_sdss,np.zeros(len(flux_out) - len(flux_sdss))])

    # Load linelist
    fit_line_positions = np.genfromtxt('data/fitlinelist.txt', dtype=None)
    linelist = []
    for n in fit_line_positions:
        linelist.append(n[1])
    linelist = np.array(linelist)

    from methods import wavelength_conversion
    linelist = wavelength_conversion(linelist, conversion='vacuum_to_air')

    #Cut out fitting region
    mask = np.logical_and(wl_out > 11350, (wl_out < 11750))
    wl_fit = wl_out[mask]
    flux_fit = flux_out[mask]
    fluxerr_fit = err_out[mask]

    fluxerr_new = []
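    # Down-weight sharp spikes in the fitting window by inflating their errors
    # by a factor of 50; the error array is then smoothed below.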
    for j, (k, l) in enumerate(zip(flux_fit,fluxerr_fit)):
        if k > 1.5 * flux_fit[j-2] and k > 0:
            fluxerr_new.append(l*50)
        elif k < 0.75 * flux_fit[j-2] and k > 0:
            fluxerr_new.append(l*50)
        else:
            fluxerr_new.append(l)
    from gen_methods import smooth
    fluxerr_fit = smooth(np.array(fluxerr_new), window_len=15, window='hanning')

    #Fit continuum and subtract
    from methods import continuum_fit
    from numpy.polynomial import chebyshev
    cont, chebfit = continuum_fit(wl_fit, flux_fit, fluxerr_fit, edge_mask_len=20)
    chebfitval = chebyshev.chebval(wl_fit, chebfit)


    #Define models to use
    from methods import voigt, gauss

    def model1(t, amp2, sig22g, sig22l, z):
        return voigt(t, abs(amp2), (1+z)*linelist[2], sig22g, sig22l)

    def model2(t, amp2, sig22g, z):
        return gauss(t, abs(amp2), (1+z)*linelist[2], sig22g)

    #Initial parameters
    init_vals = [6e-12,100, z_sdss]
    y_fit_guess = model2(wl_fit, *init_vals) + cont

    #Fit
    import scipy.optimize as op
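    # Monte Carlo error estimate: resample the spectrum 10000 times from its error
    # array, refit the continuum and the Gaussian each time, and take the 16th/84th
    # percentiles of the fitted redshifts as the uncertainty.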
    np.random.seed(12345)
    y_op = []
    vals = []
    for i in np.arange(10000):
        print('Iteration: ', i)
        resampled_spec = np.random.normal(flux_fit, abs(fluxerr_fit))

        cont, chebfit = continuum_fit(wl_fit, resampled_spec, fluxerr_fit, edge_mask_len=20)
        chebfitval = chebyshev.chebval(wl_fit, chebfit)

        best_vals, covar = op.curve_fit(model2, wl_fit, resampled_spec - cont, sigma=fluxerr_fit, absolute_sigma=True, p0=init_vals)
        vals.append(best_vals)


    up = (np.percentile(vals, 84, axis = 0)[2] - np.mean(vals, axis = 0)[2])
    down = (np.percentile(vals, 16, axis = 0)[2] - np.mean(vals, axis = 0)[2])



    v_bary = ob[0].header['HIERARCH ESO QC VRAD BARYCOR']  # barycentric velocity correction [km/s]
    c_km = 2.99792458e8 / 1000.0  # speed of light in km/s

    print("""Curve_fit results:
        Redshift = {0} + {1} - {2} (SDSS: {3} +- {4})
    """.format(np.mean(vals, axis = 0)[2] + v_bary /c_km, up, down, z_sdss, z_sdss_err))

    z_op = np.mean(vals, axis = 0)[2] + v_bary /c_km



    # Correct for Lyman-alpha forest absorption. Only objects with z > 3000/1216 - 1 ~ 1.5 have data blueward of Lyman-alpha
    mask = (wl_out < (1 + z_op)*1216)
    wave = wl_out[mask]
    flux = flux_out[mask]
    import continuum_mark.interactive
    normalise = continuum_mark.interactive.continuum_mark(wl_out[mask], flux_out[mask], err_out[mask])
    normalise.endpoint = 'n' #str(raw_input('Insert endpoint before interpolation(y/n)? '))

    normalise.run()
    pl.show()
    cont_out = np.concatenate([normalise.continuum,flux_out[~mask]])



    #Flag whether to use estimated redshift
    flag = 1


    from astroquery.irsa_dust import IrsaDust
    import astropy.coordinates as coord
    import astropy.units as u
    C = coord.SkyCoord(ob[0].header['RA']*u.deg, ob[0].header['DEC']*u.deg, frame='fk5')
    dust_image = IrsaDust.get_images(C, radius=2 *u.deg, image_type='ebv')[0]
    ebv = np.mean(dust_image[0].data[40:42,40:42])


    # Saving telluric uncorrected data to .dat file
    dt = [("wl", np.float64), ("flux", np.float64), ("error", np.float64), ("bp map", np.float64),
          ("wl_sdss", np.float64), ("flux_sdss", np.float64) , ("flux_cont", np.float64) ]
    data = np.array(zip(wl_out, flux_uncorr_out, err_out, bp_map, wl_sdss, flux_sdss, cont_out), dtype=dt)
    file_name = "Telluric_uncorrected_science"
    np.savetxt(root_dir+"/"+file_name+".dat", data, header="wl flux fluxerror bp_map wl_sdss flux_sdss cont")#, fmt = ['%5.1f', '%2.15E'] )



    #Saving telluric corrected data to .dat file
    dt = [("wl", np.float64), ("flux", np.float64), ("error", np.float64), ("bp map", np.float64),
          ("wl_sdss", np.float64), ("flux_sdss", np.float64) , ("flux_cont", np.float64) ]
    data = np.array(zip(wl_out, flux_out, err_out, bp_map, wl_sdss, flux_sdss, cont_out), dtype=dt)
    file_name = "Telluric_corrected_science"
    np.savetxt(root_dir+"/"+file_name+".dat", data, header="wl flux fluxerror bp_map wl_sdss flux_sdss cont")#, fmt = ['%5.1f', '%2.15E'] )


    #Saving info to .dat file
    dt = [("z_op", np.float64), ("z_sdss", np.float64), ("flag", np.float64), ("ebv", np.float64)]
    data = np.array(zip([z_op], [z_sdss], [flag], [ebv]), dtype=dt)
    file_name = "Object_info"
    np.savetxt(root_dir+"/"+file_name+".dat", data, header="z_op z_sdss flag ebv ") #, fmt = ['%5.1f', '%2.15E'] )
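The .dat files written above can be read back later with np.genfromtxt, using the column names from the headers; a minimal sketch:

import numpy as np

root_dir = '/Users/jselsing/Work/X-Shooter/CompositeRedQuasar/processed_data/SDSS1437-0147'
data = np.genfromtxt(root_dir + "/Telluric_corrected_science.dat", names=True)
wl, flux, err = data["wl"], data["flux"], data["fluxerror"]
info = np.genfromtxt(root_dir + "/Object_info.dat", names=True)
z_op, ebv = float(info["z_op"]), float(info["ebv"])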
Example #3
def main():
    """Fit power-law slopes to the composite quasar spectrum and plot the result"""
    import numpy as np
    import matplotlib.pyplot as pl
    from scipy.signal import medfilt
    from gen_methods import smooth
    from methods import latexify, format_axes  # format_axes assumed to live in the local methods module
    import lineid_plot

    root_dir = '/Users/jselsing/Work/X-Shooter/CompositeRedQuasar/processed_data/'


    data_file = np.genfromtxt(root_dir+'Composite.dat')

    wl = data_file[:,0]
    mean = data_file[:,1]
    err_mean = data_file[:,2]
    wmean = data_file[:,3]
    err_wmean = data_file[:,4]
    geo_mean = data_file[:,5]
    median = data_file[:,6]
    n_spec = data_file[:,7]
    std = data_file[:,8]
    std_norm = data_file[:,9]
    wmean_cont = data_file[:,10]




    #Fitting power laws
    from scipy import optimize

    def power_law(x_tmp, a_tmp, k_tmp):

        tmp = a_tmp * x_tmp ** k_tmp
        return tmp

    def power_law2(x_tmp, a1_tmp, x_c, k1_tmp, k2_tmp):
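        # Broken power law: below x_c use (a1, k1); above x_c the amplitude a2 is
        # set to a1 * x_c**(k1 - k2) so the two segments join continuously at x_c.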

        tmp1 = power_law(x_tmp, a1_tmp, k1_tmp)[x_tmp < x_c]
        scale2loc = np.argmin(np.abs(x_tmp - x_c))
        a2_tmp = power_law(x_tmp[scale2loc], a1_tmp, (k1_tmp - k2_tmp))

        tmp2 = power_law(x_tmp, a2_tmp, k2_tmp)[x_tmp >= x_c]

        return np.concatenate((tmp1,tmp2))

    def power_law3(x_tmp, a_tmp, k_tmp, b_tmp):

        tmp = a_tmp * x_tmp ** (k_tmp + b_tmp * x_tmp)
        return tmp

    par_guess = [1, -1.70]
    par_guess2 = [1, 5000, -1.7, -1.7]
    wmean[np.isnan(wmean)] = 0

    mask = (wl > 1300) & (wl < 1350) | (wl > 1425) & (wl < 1475) | (wl > 5500) & (wl < 5800) | (wl > 7300) & (wl < 7500)
    err = ((std)[std != 0])[mask]
    popt, pcov = optimize.curve_fit(power_law, wl[mask], wmean_cont[mask], p0=par_guess, sigma=np.sqrt(err**2 + err_wmean[mask]**2), absolute_sigma=True, maxfev=5000)
    popt2, pcov2 = optimize.curve_fit(power_law2, wl[mask], wmean_cont[mask], p0=par_guess2, sigma=np.sqrt(err**2 + err_wmean[mask]**2), absolute_sigma=True, maxfev=5000)

    print(*popt)
    print(*popt2)


    par_guess = [1, -1.7]

    wl_new = wl
    wm = []
    m = []
    geo = []
    med = []
    #Fit
    np.random.seed(12345)
    mask = (wl_new > 1300) & (wl_new < 1350) | (wl_new > 1425) & (wl_new < 1475) | (wl_new > 5500) & (wl_new < 5800) | (wl_new > 7300) & (wl_new < 7500)
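    # Resample each composite (wmean, mean, median, geometric mean) from its error
    # estimate and refit the power law each time to get a spread of slopes.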


    for i in np.arange(10):
        print('Iteration: ', i)
        err = ((std)[std != 0])[mask]
        resampled_spec = np.random.normal((wmean_cont)[mask], np.sqrt(err**2 + err_wmean[mask]**2))
        popt_wmean, pcov_wmean = optimize.curve_fit(power_law, wl_new[mask], resampled_spec, p0=par_guess,
                                                    sigma=np.sqrt(err**2 + err_wmean[mask]**2), absolute_sigma=True)
        wm.append(popt_wmean)

        err = ((std)[std != 0])[mask]
        resampled_spec = np.random.normal((mean)[mask], np.sqrt(err**2 + err_mean[mask]**2))
        popt_mean, pcov_mean = optimize.curve_fit(power_law, wl_new[mask], resampled_spec, p0=par_guess,
                                                    sigma=np.sqrt(err**2 + err_mean[mask]**2), absolute_sigma=True)
        m.append(popt_mean)

        err = ((std)[std != 0])[mask]
        resampled_spec = np.random.normal((median[std != 0])[mask], err)
        popt_median, pcov_median = optimize.curve_fit(power_law, wl_new[mask], resampled_spec, p0=par_guess,
                                                    sigma=err, absolute_sigma=True, maxfev=600)

        med.append(popt_median)


        err = ((std)[std != 0])[mask]
        resampled_spec = np.random.normal((geo_mean[std != 0])[mask], err)
        # pl.plot(wl_new[mask], resampled_spec)
        popt_geo, pcov_geo = optimize.curve_fit(power_law, wl_new[mask], resampled_spec, p0=par_guess,
                                                    sigma=err, absolute_sigma=True, maxfev=600)


        geo.append(popt_geo)
    # pl.plot(wl, geo_mean)
    # pl.show()


    print("""Composite fit slope wmean...{0} +- {1}""".format(np.mean(wm, axis=0)[1],np.std(wm, axis=0)[1]))
    print("""Composite fit slope mean...{0} +- {1}""".format(np.mean(m, axis=0)[1], np.std(m, axis=0)[1]))
    print("""Composite fit slope median...{0} +- {1}""".format(np.mean(med, axis=0)[1], np.std(med, axis=0)[1]))
    print("""Composite fit slope geo...{0} +- {1}""".format(np.mean(geo, axis=0)[1], np.std(geo, axis=0)[1]))


    # Plotting
    latexify(columns=2, fig_height=7)
    import matplotlib.gridspec as gridspec

    fig = pl.figure()

    gs = gridspec.GridSpec(4, 1, height_ratios=[2,1,1,1])

    ax1 = pl.subplot(gs[0])
    ax2 = pl.subplot(gs[1])
    ax3 = pl.subplot(gs[2])
    ax4 = pl.subplot(gs[3])

    #Plotting
    # ax1.plot(wl, power_law2(wl, *popt2) , 'b--')
    # ax1.plot(wl, power_law(wl, *popt) , 'b--')
    ax1.plot(wl, wmean_cont, lw = 0.5, linestyle = 'steps-mid', label='X-shooter wmean composite')

    nbins = len(wl)
    from methods import hist
    log_binned_wl = np.array(hist(wl, [min(wl), max(wl)], int(2*nbins), 'log'))
    from scipy.interpolate import InterpolatedUnivariateSpline
    sps = InterpolatedUnivariateSpline(wl, std_norm)
    std_plot = smooth(medfilt(sps(log_binned_wl), 9), window='hanning', window_len=15)
    wave_std = log_binned_wl

    ax2.plot(wave_std, std_plot, lw = 0.5, linestyle = 'steps-mid', label='Normalised variability')

    ax3.plot(wl, wmean_cont/medfilt(err_wmean, 5), lw = 0.5, linestyle = 'steps-mid', label = 'Signal-to-noise')


    ax4.plot(wl, medfilt(n_spec, 1), label='Number of spectra', lw=0.5)  # kernel size 1 leaves n_spec unfiltered





    #Overplot lines
    fit_line_positions = np.genfromtxt('data/plotlinelist.txt', dtype=None)

    linelist = []
    linenames = []
    for n in fit_line_positions:
        linelist.append(n[1])
        linenames.append(n[0])

    pl.xlabel(r'Rest Wavelength  [$\AA$]')
    ax1.set_ylabel(r'Normalised flux density F$_{\lambda}$')
    ax2.set_ylabel(r'Normalised Variability  $\delta$F$_{\lambda}$')
    ax3.set_ylabel(r'S/N Ratio')
    ax4.set_ylabel(r'Number of spectra')

    ax1.semilogy()
    ax1.semilogx()
    ax1.set_xlim((1000, 11500 ))
    ax1.set_ylim((0.1, 750))


    ax2.semilogy()
    ax2.semilogx()
    ax2.set_xlim((1000, 11500 ))
    ax2.set_ylim((0.001, 90))

    ax3.semilogx()
    ax3.set_xlim((1000, 11500 ))

    ax4.semilogx()
    ax4.set_xlim((1000, 11500 ))
    ax4.set_ylim((0, 9))



    # Formatting axes
    import matplotlib as mpl

    ax4.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax4.set_xticks([1000, 2000, 3000, 5000, 9000])
    ax1.xaxis.set_major_locator(mpl.ticker.NullLocator())
    ax2.xaxis.set_major_locator(mpl.ticker.NullLocator())
    ax3.xaxis.set_major_locator(mpl.ticker.NullLocator())


    ax1.yaxis.set_minor_locator(mpl.ticker.NullLocator())
    ax2.yaxis.set_minor_locator(mpl.ticker.NullLocator())

    ax1.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax1.set_yticks([0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500])
    ax2.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax2.set_yticks([0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30])
    ax3.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax3.set_yticks([50, 100, 150, 200, 250, 300])
    ax4.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax4.set_yticks([0, 1, 2, 3, 4, 5, 6, 7, 8])


    pl.tight_layout()
    fig.subplots_adjust(hspace=0)

    format_axes(ax1)
    format_axes(ax2)
    format_axes(ax3)
    format_axes(ax4)



    val = []
    for p in range(len(linelist)):
        xcoord = linelist[p]
        mask = (wl > xcoord - 1) & (wl < xcoord + 1)
        y_val = np.mean(wmean_cont[mask])
        val.append(1.5 * y_val)
    arrow_tips = val
    lineid_plot.plot_line_ids(wl, wmean_cont, linelist, linenames, arrow_tip=arrow_tips, ax=ax1)
    for i in ax1.lines:
        if '$' in i.get_label():
            i.set_alpha(0.3)
            i.set_linewidth(0.75)

    for p in range(len(linelist)):
         xcoord = linelist[p]
         mask = (wl > xcoord - 1) & (wl < xcoord + 1)
         y_val = np.mean(wmean_cont[mask])
         ax1.vlines(xcoord, ax1.get_ylim()[0], y_val, color='black',linestyle='dashed', lw=0.75, alpha=0.3)
         ax2.axvline(x=xcoord, color='black',linestyle='dashed', lw=0.75, alpha=0.3)
         ax3.axvline(x=xcoord, color='black',linestyle='dashed', lw=0.75, alpha=0.3)
         ax4.axvline(x=xcoord, color='black',linestyle='dashed', lw=0.75, alpha=0.3)

    a = ax1.findobj(mpl.text.Annotation)
    for i in a:
        if '$' in i.get_label():
            i.set_size(10)



    fig.savefig("../documents/figs/Combined.pdf", rasterized=True, dpi=600)
    pl.show()
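For reference, the continuity condition built into power_law2 above can be checked in isolation; a minimal, self-contained restatement (using np.where rather than the boolean-index concatenation of the original, purely for illustration):

import numpy as np

def power_law(x, a, k):
    return a * x ** k

def power_law2(x, a1, x_c, k1, k2):
    # The second amplitude is fixed by continuity at the break x_c
    a2 = power_law(x_c, a1, k1 - k2)
    return np.where(x < x_c, power_law(x, a1, k1), power_law(x, a2, k2))

x = np.linspace(1000.0, 11500.0, 5000)
y = power_law2(x, 1.0, 5000.0, -1.7, -1.5)
i = np.searchsorted(x, 5000.0)
print(y[i-1], y[i])  # the two segments agree at the break to within one grid step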
Example #4
    #Cut out fitting region
    mask = ((wl >= 4975) & (wl <= 5100)) & (flux.mask == 0.)
    wl_fit = wl[mask]
    flux_fit = flux[mask]
    fluxerr_fit = fluxerror[mask]

    fluxerr_new = []
    for j, (k, l) in enumerate(zip(flux_fit,fluxerr_fit)):
        if k > 1.5 * flux_fit[j-2] and k > 0:
            fluxerr_new.append(l*50)
        elif k < 0.75 * flux_fit[j-2] and k > 0:
            fluxerr_new.append(l*50)
        else:
            fluxerr_new.append(l)
    from gen_methods import smooth
    fluxerr_fit = smooth(np.array(fluxerr_new), window_len=15, window='hanning')


    #Fit continuum and subtract
    from methods import continuum_fit
    from numpy.polynomial import chebyshev
    cont, chebfit = continuum_fit(wl_fit, flux_fit, fluxerr_fit, edge_mask_len=20)
    chebfitval = chebyshev.chebval(wl, chebfit)


    #Define models to use
    from methods import gauss

    def model2(t, amp2, sig22g, z):
        return gauss(t, abs(amp2), (1+z)*linelist[2], sig22g)
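The fragment ends before the fit itself; in Example #2 the same model2 is passed to scipy.optimize.curve_fit, so a hypothetical continuation (z_guess and the initial amplitude/width are placeholders) might look like:

    # Hypothetical continuation, mirroring the curve_fit call in Example #2:
    import scipy.optimize as op
    init_vals = [1e-15, 100, z_guess]  # amp2, sig22g, z -- placeholder values
    best_vals, covar = op.curve_fit(model2, wl_fit, flux_fit - cont, sigma=fluxerr_fit,
                                    absolute_sigma=True, p0=init_vals)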