Example #1
def main():
    """Main script to prepare x-shooter observations for combination"""
    from astropy.io import fits
    import glob
    import matplotlib.pyplot as pl
    from methods import latexify
    latexify()
    import numpy as np
    from xshoo.combine import inter_arm_cut

    #Files
    obj_name = 'SDSS1437-0147'
    root_dir = '/Users/jselsing/Work/X-Shooter/CompositeRedQuasar/processed_data/'+obj_name
    object_files = glob.glob(root_dir+'/OBJECT/*IDP*.fits')
    transmission_files = glob.glob(root_dir+'/transmission*.fits')
    arms = ['UVB', 'VIS', 'NIR']
    wl_out = []
    flux_out = []
    flux_uncorr_out = []
    err_out = []
    start = []
    end = []

    for n in arms:
        print('In arm: '+n)


        #Read in object spectrum
        obser = [k for k in object_files if n in k]
        ob = fits.open(obser[0])
        wl = 10.0 * ob[1].data.field('WAVE')[0]  # convert wavelength from nm to Angstrom
        flux = ob[1].data.field('FLUX')[0]
        err = ob[1].data.field('ERR')[0]

        # Keep a telluric-uncorrected copy of this arm before applying the transmission
        wl_tmp, flux_uncorr, err_tmp, start_tmp, end_tmp = inter_arm_cut(wl, flux, err, n, start, end)
        if n == 'VIS' or n == 'NIR':
            # Divide out the telluric transmission; floor near-zero values to avoid blow-ups
            transmission = fits.open([k for k in transmission_files if n in k][0])[0].data
            transmission[transmission <= 1e-10] = 1.0
            flux /= transmission
            err /= transmission
        wl, flux, err, start, end = inter_arm_cut(wl, flux, err, n, start, end)

        wl_out.append(wl)
        flux_out.append(flux)
        err_out.append(err)
        flux_uncorr_out.append(flux_uncorr)

    wl_out = np.hstack(wl_out)
    flux_out = np.hstack(flux_out)
    err_out = np.hstack(err_out)
    flux_uncorr_out = np.hstack(flux_uncorr_out)

    # Flag bad pixels: mark points that jump by more than +10% / -10% relative to
    # the previous pixel, or that are negative (note j-1 wraps at the first pixel)
    bp_map = []
    for j, k in enumerate(flux_out[:-1]):
        if k > 1.1 * flux_out[j-1] or k < 0.90 * flux_out[j-1] or k < 0:
            bp_map.append(1)
        else:
            bp_map.append(0)
    bp_map.append(1)
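    # A vectorized sketch of the same flagging (an equivalent formulation, not a
    # project helper); np.roll reproduces the j-1 wrap-around at the first pixel.
    def bad_pixel_map(flux):
        prev = np.roll(flux, 1)                              # flux[j-1], wrapping at j=0
        jump = (flux > 1.1 * prev) | (flux < 0.90 * prev) | (flux < 0)
        bp = jump.astype(int)
        bp[-1] = 1                                           # keep the trailing flag set above
        return bp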

    import json
    import urllib2

    query_terms = dict()
    query_terms["ra"] = str(ob[0].header['RA'])+'d' #"185.1d"
    query_terms["dec"] = str(ob[0].header['DEC'])  #"56.78"
    query_terms["radius"] = "5.0"

    url = "http://api.sdss3.org/spectrumQuery?" + '&'.join(["{0}={1}".format(key, value) for key, value in query_terms.items()])
    print(url)
    # make call to API
    response = urllib2.urlopen(url)

    # read response, converting JSON format to Python list
    matching_ids = json.loads(response.read())
    print(json.dumps(matching_ids, indent=4))

    # get the first id
    spec_id = matching_ids[0]

    url = "http://api.sdss3.org/spectrum?id={0}&format=json".format(spec_id)

    response = urllib2.urlopen(url)
    result = json.loads(response.read())
    SDSS_spectrum = result[spec_id]

    wl_sdss = np.array(SDSS_spectrum["wavelengths"])
    flux_sdss =  np.array(SDSS_spectrum["flux"])
    z_sdss = np.array(SDSS_spectrum["z"])
    z_sdss_err = np.array(SDSS_spectrum["z_err"])
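    # This script targets Python 2 (urllib2 above). A Python 3 sketch of the same
    # two API calls, assuming the api.sdss3.org endpoints behave as used above:
    def fetch_sdss_spectrum_py3(ra_deg, dec_deg, radius=5.0):
        from urllib.request import urlopen                   # Python 3 replacement for urllib2
        q = "ra={0}d&dec={1}&radius={2}".format(ra_deg, dec_deg, radius)
        ids = json.loads(urlopen("http://api.sdss3.org/spectrumQuery?" + q).read())
        url = "http://api.sdss3.org/spectrum?id={0}&format=json".format(ids[0])
        return json.loads(urlopen(url).read())[ids[0]]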

    # Pad the SDSS arrays with zeros so all output columns share the X-shooter length
    wl_sdss = np.concatenate([wl_sdss, np.zeros(len(wl_out) - len(wl_sdss))])
    flux_sdss = np.concatenate([flux_sdss, np.zeros(len(flux_out) - len(flux_sdss))])

    # Load linelist
    fit_line_positions = np.genfromtxt('data/fitlinelist.txt', dtype=None)
    linelist = []
    for n in fit_line_positions:
        linelist.append(n[1])
    linelist = np.array(linelist)

    from methods import wavelength_conversion
    linelist = wavelength_conversion(linelist, conversion='vacuum_to_air')
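    # methods.wavelength_conversion is project-local; a sketch of the standard
    # Morton (1991) vacuum-to-air relation (wavelengths in Angstrom) it may implement:
    def vacuum_to_air(vac):
        vac = np.asarray(vac, dtype=float)
        return vac / (1.0 + 2.735182e-4 + 131.4182 / vac**2 + 2.76249e8 / vac**4)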

    #Cut out fitting region
    mask = np.logical_and(wl_out > 11350, wl_out < 11750)
    wl_fit = wl_out[mask]
    flux_fit = flux_out[mask]
    fluxerr_fit = err_out[mask]

    # Inflate the error on pixels that deviate strongly from the local level,
    # so they carry little weight in the line fit
    fluxerr_new = []
    for j, (k, l) in enumerate(zip(flux_fit, fluxerr_fit)):
        if k > 1.5 * flux_fit[j-2] and k > 0:
            fluxerr_new.append(l * 50)
        elif k < 0.75 * flux_fit[j-2] and k > 0:
            fluxerr_new.append(l * 50)
        else:
            fluxerr_new.append(l)
    from gen_methods import smooth
    fluxerr_fit = smooth(np.array(fluxerr_new), window_len=15, window='hanning')
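    # gen_methods.smooth is also project-local; a NumPy sketch of a comparable
    # Hanning smoother (reflect-padded moving average -- an assumption about it):
    def hanning_smooth(x, window_len=15):
        w = np.hanning(window_len)
        pad = window_len // 2
        xp = np.pad(x, pad, mode='reflect')                  # pad edges to keep output length
        return np.convolve(xp, w / w.sum(), mode='same')[pad:-pad]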

    #Fit continuum and subtract
    from methods import continuum_fit
    from numpy.polynomial import chebyshev
    cont, chebfit = continuum_fit(wl_fit, flux_fit, fluxerr_fit, edge_mask_len=20)
    chebfitval = chebyshev.chebval(wl_fit, chebfit)  # continuum model on the fit grid


    #Define models to use
    from methods import voigt,gauss
    def model1(t, amp2, sig22g, sig22l, z):
        return voigt(t, abs(amp2), (1 + z) * linelist[2], sig22g, sig22l)

    def model2(t, amp2, sig22g, z):
        return gauss(t, abs(amp2), (1 + z) * linelist[2], sig22g)

    # Initial parameters: amplitude, Gaussian width, redshift (from SDSS)
    init_vals = [6e-12, 100, z_sdss]
    y_fit_guess = model2(wl_fit, *init_vals) + cont

    #Fit
    import scipy.optimize as op
    np.random.seed(12345)
    y_op = []
    vals = []
    for i in np.arange(10000):
        print('Iteration: ', i)
        resampled_spec = np.random.normal(flux_fit, abs(fluxerr_fit))

        cont, chebfit = continuum_fit(wl_fit, resampled_spec, fluxerr_fit, edge_mask_len=20)
        chebfitval = chebyshev.chebval(wl_fit, chebfit)

        best_vals, covar = op.curve_fit(model2, wl_fit, resampled_spec - cont, sigma=fluxerr_fit, absolute_sigma=True, p0=init_vals)
        vals.append(best_vals)


    # Asymmetric 1-sigma errors on z from the 16th/84th percentiles of the samples
    up = np.percentile(vals, 84, axis=0)[2] - np.mean(vals, axis=0)[2]
    down = np.percentile(vals, 16, axis=0)[2] - np.mean(vals, axis=0)[2]
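    # The loop above is a parametric bootstrap: perturb the spectrum within its
    # errors, refit, and read asymmetric errors off the 16th/84th percentiles.
    # A generic sketch of the same recipe (hypothetical helper, any 1D model):
    def bootstrap_errors(x, y, yerr, model, p0, n_iter=1000, seed=0):
        rng = np.random.RandomState(seed)
        samples = []
        for _ in range(n_iter):
            best, _ = op.curve_fit(model, x, rng.normal(y, yerr), p0=p0)
            samples.append(best)
        samples = np.array(samples)
        mid = np.mean(samples, axis=0)
        return mid, np.percentile(samples, 84, axis=0) - mid, np.percentile(samples, 16, axis=0) - mid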



    v_bary = ob[0].header['HIERARCH ESO QC VRAD BARYCOR']  # barycentric velocity [km/s]
    c_km = 2.99792458e5  # speed of light [km/s]

    print("""Curve_fit results:
        Redshift = {0} + {1} - {2} (SDSS: {3} +- {4})
    """.format(np.mean(vals, axis = 0)[2] + v_bary /c_km, up, down, z_sdss, z_sdss_err))

    z_op = np.mean(vals, axis = 0)[2] + v_bary /c_km



    # Correct for Lyman-alpha forest absorption. Only objects with
    # z > 3000/1216 - 1 ~ 1.5 have coverage blueward of Ly-alpha in the UVB arm.
    mask = (wl_out < (1 + z_op)*1216)
    wave = wl_out[mask]
    flux = flux_out[mask]
    import continuum_mark.interactive
    normalise = continuum_mark.interactive.continuum_mark(wl_out[mask], flux_out[mask], err_out[mask])
    normalise.endpoint = 'n' #str(raw_input('Insert endpoint before interpolation(y/n)? '))

    normalise.run()
    pl.show()
    cont_out = np.concatenate([normalise.continuum,flux_out[~mask]])



    #Flag whether to use estimated redshift
    flag = 1


    from astroquery.irsa_dust import IrsaDust
    import astropy.coordinates as coord
    import astropy.units as u
    C = coord.SkyCoord(ob[0].header['RA']*u.deg, ob[0].header['DEC']*u.deg, frame='fk5')
    dust_image = IrsaDust.get_images(C, radius=2 *u.deg, image_type='ebv')[0]
    ebv = np.mean(dust_image[0].data[40:42, 40:42])  # E(B-V) at the target position (central pixels)


    # Saving telluric uncorrected data to .dat file
    dt = [("wl", np.float64), ("flux", np.float64), ("error", np.float64), ("bp map", np.float64),
          ("wl_sdss", np.float64), ("flux_sdss", np.float64) , ("flux_cont", np.float64) ]
    data = np.array(list(zip(wl_out, flux_uncorr_out, err_out, bp_map, wl_sdss, flux_sdss, cont_out)), dtype=dt)
    file_name = "Telluric_uncorrected_science"
    np.savetxt(root_dir+"/"+file_name+".dat", data, header="wl flux fluxerror bp_map wl_sdss flux_sdss cont")#, fmt = ['%5.1f', '%2.15E'] )



    #Saving telluric corrected data to .dat file
    dt = [("wl", np.float64), ("flux", np.float64), ("error", np.float64), ("bp map", np.float64),
          ("wl_sdss", np.float64), ("flux_sdss", np.float64) , ("flux_cont", np.float64) ]
    data = np.array(list(zip(wl_out, flux_out, err_out, bp_map, wl_sdss, flux_sdss, cont_out)), dtype=dt)
    file_name = "Telluric_corrected_science"
    np.savetxt(root_dir+"/"+file_name+".dat", data, header="wl flux fluxerror bp_map wl_sdss flux_sdss cont")#, fmt = ['%5.1f', '%2.15E'] )


    #Saving info to .dat file
    dt = [("z_op", np.float64), ("z_sdss", np.float64), ("flag", np.float64), ("ebv", np.float64)]
    data = np.array(list(zip([z_op], [z_sdss], [flag], [ebv])), dtype=dt)
    file_name = "Object_info"
    np.savetxt(root_dir+"/"+file_name+".dat", data, header="z_op z_sdss flag ebv ") #, fmt = ['%5.1f', '%2.15E'] )
Example #2
def main():
    # latexify()
    import numpy as np
    import matplotlib.pyplot as pl
    import lineid_plot                                   # line-labelling package used below
    import seaborn as sns
    from scipy.signal import medfilt                     # stand-in for the project's gen_methods.medfilt
    from methods import latexify, format_axes            # project-local plotting helpers (assumed)
    cmap = sns.color_palette("cubehelix", 6)             # palette, as defined in Example #4

    root_dir = '/Users/jselsing/Work/X-Shooter/CompositeRedQuasar/processed_data/'
    data_file = np.genfromtxt(root_dir+'Composite.dat')

    wl = data_file[:,0]
    mean = data_file[:,1]
    err_mean = data_file[:,2]
    wmean = data_file[:,3]
    err_wmean = data_file[:,4]
    geo_mean = data_file[:,5]
    median = data_file[:,6]
    n_spec = data_file[:,7]
    std = data_file[:,8]
    std_norm = data_file[:,9]
    wmean_cont = data_file[:,10]

    from scipy.interpolate import InterpolatedUnivariateSpline
    mask = np.where(err_wmean != 0)
    f = InterpolatedUnivariateSpline(wl[mask], wmean_cont[mask], w=err_wmean[mask], k=5)
    wmean_cont = f(wl)
    f = InterpolatedUnivariateSpline(wl[mask], err_wmean[mask], k=5)
    err_wmean = f(wl)

    #Saving to .dat file
    dt = [("wl", np.float64), ("wmean_cont", np.float64), ("err_wmean", np.float64) ]
    data = np.array(list(zip(wl, wmean_cont, err_wmean)), dtype=dt)
    file_name = "data/templates/Selsing2015_interpolated.dat"
    np.savetxt(file_name, data, header="wl	weighted mean	error of weighted mean", fmt = ['%5.1f', '%1.4f', '%1.4f' ])
    pl.plot(wl, wmean_cont)
    pl.semilogy()
    pl.show()
    exit()  # NOTE: everything below this early exit is skipped when the script runs as-is

    #Fitting power laws
    from scipy import optimize

    def power_law(x_tmp, a_tmp, k_tmp):
        return a_tmp * x_tmp ** k_tmp

    def power_law2(x_tmp, a1_tmp, x_c, k1_tmp, k2_tmp):
        # Broken power law: rescale the second segment so the two pieces meet at x_c
        tmp1 = power_law(x_tmp, a1_tmp, k1_tmp)[x_tmp < x_c]
        scale2loc = np.argmin(np.abs(x_tmp - x_c))
        a2_tmp = power_law(x_tmp[scale2loc], a1_tmp, (k1_tmp - k2_tmp))
        tmp2 = power_law(x_tmp, a2_tmp, k2_tmp)[x_tmp >= x_c]
        return np.concatenate((tmp1, tmp2))
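    # Sanity check (a sketch): the two power_law2 segments should meet at x_c,
    # because the second amplitude is rescaled there to a1 * x_c**(k1 - k2).
    def check_break_continuity(x_c=5000.0, k1=-1.7, k2=-2.2):
        x = np.linspace(1000.0, 11500.0, 2001)
        y = power_law2(x, 1.0, x_c, k1, k2)
        i = np.argmin(np.abs(x - x_c))
        print(y[i - 1], y[i], y[i + 1])                  # should vary smoothly across x_c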


    par_guess = [1, -1.7]
    par_guess2 = [1, 5000, -1.7, -1.7]
    wmean[np.isnan(wmean)] = 0
    mask = (wl > 1300) & (wl < 1350) | (wl > 1425) & (wl < 1475) | (wl > 5500) & (wl < 5800) | (wl > 7300) & (wl < 7500) #| (wl > 9700) & (wl < 9900) | (wl > 10200) & (wl < 10600)
    popt, pcov = optimize.curve_fit(power_law, wl[mask], wmean_cont[mask], p0=par_guess, sigma=err_wmean[mask], absolute_sigma=True)
    popt2, pcov2 = optimize.curve_fit(power_law2, wl[mask], wmean_cont[mask], p0=par_guess2, sigma=err_wmean[mask], absolute_sigma=True)

    print(*popt)
    print(*popt2)



    #Plotting
    latexify(columns=2)
    fig, ax = pl.subplots()

    ax.plot(wl, medfilt(wmean_cont, 5),
            lw = 0.5, alpha=1.0, linestyle = 'steps-mid', label='X-shooter mean composite', color=cmap[1])

    ax.plot(wl, power_law(wl, *popt),
            linestyle='dashed', label ='Pure power law fit', color=cmap[2])

    sdss_compo = np.genfromtxt('/Users/jselsing/Work/X-Shooter/CompositeRedQuasar/processed_data/sdss_compo.dat')
    sdss_wl = sdss_compo[:,0]
    sdss_flux = sdss_compo[:, 1]

    norm_reg = 1430

    mask = (wl > norm_reg) & (wl < norm_reg + 20)
    norm1 = np.median(wmean_cont[mask])
    mask = (sdss_wl > norm_reg) & (sdss_wl < norm_reg + 20)
    norm2 = np.median(sdss_flux[mask])


    ax.plot(sdss_wl, sdss_flux * (norm1/norm2),
            linestyle='solid', label ='Full sample SDSS composite', color=cmap[0])


    #Overplot lines
    fit_line_positions = np.genfromtxt('data/plotlinelist.txt', dtype=None)

    linelist = []
    linenames = []
    for n in fit_line_positions:
        linelist.append(n[1])
        linenames.append(n[0])

    pl.semilogy()

    # Formatting axes
    import matplotlib as mpl

    ax.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax.get_xaxis().tick_bottom()
    ax.xaxis.set_minor_locator(mpl.ticker.NullLocator())

    ax.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax.set_yticks([0.3, 1, 3, 10, 30, 100, 300])
    ax.yaxis.set_minor_locator(mpl.ticker.NullLocator())

    # pl.legend(loc=3)
    ax.set_xlim((1000, 11500))
    ax.set_ylim((0.2, 500))
    format_axes(ax)





    ax.set_xlabel(r'Wavelength [$\AA$]')
    ax.set_ylabel(r'Rescaled flux density F$_\lambda$')
    pl.tight_layout()




    val = []
    for p in range(len(linelist)):
        xcoord = linelist[p]
        mask = (wl > xcoord - 1) & (wl < xcoord + 1)
        y_val = np.mean(wmean_cont[mask])
        val.append(2 * y_val)
    print(val)
    arrow_tips = val
    lineid_plot.plot_line_ids(wl, wmean_cont, linelist, linenames, arrow_tip=arrow_tips, ax=ax)
    for i in ax.lines:
        if '$' in i.get_label():
            i.set_alpha(0.3)
    a = ax.findobj(mpl.text.Annotation)
    for i in a:
        if '$' in i.get_label():
            i.set_size(10)


    pl.savefig('../documents/figs/compo_full_sample.pdf')
    pl.show()
Example #3
def main():
    import numpy as np
    import matplotlib.pyplot as pl
    import lineid_plot
    from scipy.signal import medfilt                     # stand-in for the project's gen_methods.medfilt
    from gen_methods import smooth                       # project-local helper (as in Example #1)
    from methods import latexify, format_axes            # project-local plotting helpers (assumed)

    root_dir = '/Users/jselsing/Work/X-Shooter/CompositeRedQuasar/processed_data/'


    data_file = np.genfromtxt(root_dir+'Composite.dat')

    wl = data_file[:,0]
    mean = data_file[:,1]
    err_mean = data_file[:,2]
    wmean = data_file[:,3]
    err_wmean = data_file[:,4]
    geo_mean = data_file[:,5]
    median = data_file[:,6]
    n_spec = data_file[:,7]
    std = data_file[:,8]
    std_norm = data_file[:,9]
    wmean_cont = data_file[:,10]




    #Fitting power laws
    from scipy import optimize

    def power_law(x_tmp, a_tmp, k_tmp):
        return a_tmp * x_tmp ** k_tmp

    def power_law2(x_tmp, a1_tmp, x_c, k1_tmp, k2_tmp):
        # Broken power law: rescale the second segment so the two pieces meet at x_c
        tmp1 = power_law(x_tmp, a1_tmp, k1_tmp)[x_tmp < x_c]
        scale2loc = np.argmin(np.abs(x_tmp - x_c))
        a2_tmp = power_law(x_tmp[scale2loc], a1_tmp, (k1_tmp - k2_tmp))
        tmp2 = power_law(x_tmp, a2_tmp, k2_tmp)[x_tmp >= x_c]
        return np.concatenate((tmp1, tmp2))

    def power_law3(x_tmp, a_tmp, k_tmp, b_tmp):
        # Power law with a wavelength-dependent (curved) exponent
        return a_tmp * x_tmp ** (k_tmp + b_tmp * x_tmp)

    par_guess = [1, -1.70]
    par_guess2 = [1, 5000, -1.7, -1.7]
    wmean[np.isnan(wmean)] = 0

    mask = (wl > 1300) & (wl < 1350) | (wl > 1425) & (wl < 1475) | (wl > 5500) & (wl < 5800) | (wl > 7300) & (wl < 7500)
    # std is the inter-spectrum scatter; masking it directly assumes it is nonzero
    # in the fit windows (the original '((std)[std != 0])[mask]' indexing breaks otherwise)
    err = std[mask]
    popt, pcov = optimize.curve_fit(power_law, wl[mask], wmean_cont[mask], p0=par_guess, sigma=np.sqrt(err**2 + err_wmean[mask]**2), absolute_sigma=True, maxfev=5000)
    popt2, pcov2 = optimize.curve_fit(power_law2, wl[mask], wmean_cont[mask], p0=par_guess2, sigma=np.sqrt(err**2 + err_wmean[mask]**2), absolute_sigma=True, maxfev=5000)

    print(*popt)
    print(*popt2)


    par_guess = [1, -1.7]

    wl_new = wl
    wm = []
    m = []
    geo = []
    med = []
    #Fit
    np.random.seed(12345)
    mask = (wl_new > 1300) & (wl_new < 1350) | (wl_new > 1425) & (wl_new < 1475) | (wl_new > 5500) & (wl_new < 5800) | (wl_new > 7300) & (wl_new < 7500)


    for i in np.arange(10):
        print('Iteration: ', i)
        err = std[mask]  # as above: inter-spectrum scatter in the fit windows

        # Weighted mean
        resampled_spec = np.random.normal(wmean_cont[mask], np.sqrt(err**2 + err_wmean[mask]**2))
        popt_wmean, pcov_wmean = optimize.curve_fit(power_law, wl_new[mask], resampled_spec, p0=par_guess,
                                                    sigma=np.sqrt(err**2 + err_wmean[mask]**2), absolute_sigma=True)
        wm.append(popt_wmean)

        # Mean
        resampled_spec = np.random.normal(mean[mask], np.sqrt(err**2 + err_mean[mask]**2))
        popt_mean, pcov_mean = optimize.curve_fit(power_law, wl_new[mask], resampled_spec, p0=par_guess,
                                                  sigma=np.sqrt(err**2 + err_mean[mask]**2), absolute_sigma=True)
        m.append(popt_mean)

        # Median
        resampled_spec = np.random.normal(median[mask], err)
        popt_median, pcov_median = optimize.curve_fit(power_law, wl_new[mask], resampled_spec, p0=par_guess,
                                                      sigma=err, absolute_sigma=True, maxfev=600)
        med.append(popt_median)

        # Geometric mean
        resampled_spec = np.random.normal(geo_mean[mask], err)
        # pl.plot(wl_new[mask], resampled_spec)
        popt_geo, pcov_geo = optimize.curve_fit(power_law, wl_new[mask], resampled_spec, p0=par_guess,
                                                sigma=err, absolute_sigma=True, maxfev=600)
        geo.append(popt_geo)
    # pl.plot(wl, geo_mean)
    # pl.show()


    print("""Composite fit slope wmean...{0} +- {1}""".format(np.mean(wm, axis=0)[1],np.std(wm, axis=0)[1]))
    print("""Composite fit slope mean...{0} +- {1}""".format(np.mean(m, axis=0)[1], np.std(m, axis=0)[1]))
    print("""Composite fit slope median...{0} +- {1}""".format(np.mean(med, axis=0)[1], np.std(med, axis=0)[1]))
    print("""Composite fit slope geo...{0} +- {1}""".format(np.mean(geo, axis=0)[1], np.std(geo, axis=0)[1]))


    # Plotting
    latexify(columns=2, fig_height=7)
    import matplotlib.gridspec as gridspec

    fig = pl.figure()

    gs = gridspec.GridSpec(4, 1, height_ratios=[2,1,1,1])

    ax1 = pl.subplot(gs[0])
    ax2 = pl.subplot(gs[1])
    ax3 = pl.subplot(gs[2])
    ax4 = pl.subplot(gs[3])

    #Plotting
    # ax1.plot(wl, power_law2(wl, *popt2) , 'b--')
    # ax1.plot(wl, power_law(wl, *popt) , 'b--')
    ax1.plot(wl, wmean_cont, lw = 0.5, linestyle = 'steps-mid', label='X-shooter wmean composite')

    nbins = len(wl)
    from methods import hist
    log_binned_wl = np.array(hist(wl,[min(wl),max(wl)], int(2*nbins),'log'))
    from scipy.interpolate import InterpolatedUnivariateSpline
    sps = InterpolatedUnivariateSpline(wl, std_norm)
    std_plot = smooth(medfilt(sps(log_binned_wl) , 9), window='hanning', window_len=15)
    wave_std = log_binned_wl
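    # methods.hist is project-local; a sketch of logarithmically spaced bin centres
    # (an assumption about what its 'log' mode returns), using NumPy directly:
    def log_bins(lo, hi, n):
        return np.logspace(np.log10(lo), np.log10(hi), n)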

    ax2.plot(wave_std, std_plot, lw = 0.5, linestyle = 'steps-mid', label='Normalised variability')

    ax3.plot(wl, wmean_cont/medfilt(err_wmean, 5), lw = 0.5, linestyle = 'steps-mid', label = 'Signal-to-noise')


    ax4.plot(wl,medfilt( n_spec, 1), label='Number of spectra', lw=0.5)





    #Overplot lines
    fit_line_positions = np.genfromtxt('data/plotlinelist.txt', dtype=None)

    linelist = []
    linenames = []
    for n in fit_line_positions:
        linelist.append(n[1])
        linenames.append(n[0])

    pl.xlabel(r'Rest Wavelength  [$\AA$]')
    ax1.set_ylabel(r'Normalised flux density F$_{\lambda}$')
    ax2.set_ylabel(r'Normalised Variability  $\delta$F$_{\lambda}$')
    ax3.set_ylabel(r'S/N Ratio')
    ax4.set_ylabel(r'Number of spectra')

    ax1.semilogy()
    ax1.semilogx()
    ax1.set_xlim((1000, 11500 ))
    ax1.set_ylim((0.1, 750))


    ax2.semilogy()
    ax2.semilogx()
    ax2.set_xlim((1000, 11500 ))
    ax2.set_ylim((0.001, 90))

    ax3.semilogx()
    ax3.set_xlim((1000, 11500 ))

    ax4.semilogx()
    ax4.set_xlim((1000, 11500 ))
    ax4.set_ylim((0, 9))



    # Formatting axes
    import matplotlib as mpl

    ax4.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax4.set_xticks([1000, 2000, 3000, 5000, 9000])
    ax1.xaxis.set_major_locator(mpl.ticker.NullLocator())
    ax2.xaxis.set_major_locator(mpl.ticker.NullLocator())
    ax3.xaxis.set_major_locator(mpl.ticker.NullLocator())


    ax1.yaxis.set_minor_locator(mpl.ticker.NullLocator())
    ax2.yaxis.set_minor_locator(mpl.ticker.NullLocator())

    ax1.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax1.set_yticks([0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500])
    ax2.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax2.set_yticks([0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30])
    ax3.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax3.set_yticks([50, 100, 150, 200, 250, 300])
    ax4.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax4.set_yticks([0, 1, 2, 3, 4, 5, 6, 7, 8])


    pl.tight_layout()
    fig.subplots_adjust(hspace=0)

    format_axes(ax1)
    format_axes(ax2)
    format_axes(ax3)
    format_axes(ax4)



    val = []
    for p in range(len(linelist)):
        xcoord = linelist[p]
        mask = (wl > xcoord - 1) & (wl < xcoord + 1)
        y_val = np.mean(wmean_cont[mask])
        val.append(1.5 * y_val)
    arrow_tips = val
    lineid_plot.plot_line_ids(wl, wmean_cont, linelist, linenames, arrow_tip=arrow_tips, ax=ax1)
    for i in ax1.lines:
        if '$' in i.get_label():
            i.set_alpha(0.3)
            i.set_linewidth(0.75)

    for p in range(len(linelist)):
         xcoord = linelist[p]
         mask = (wl > xcoord - 1) & (wl < xcoord + 1)
         y_val = np.mean(wmean_cont[mask])
         ax1.vlines(xcoord, ax1.get_ylim()[0], y_val, color='black',linestyle='dashed', lw=0.75, alpha=0.3)
         ax2.axvline(x=xcoord, color='black',linestyle='dashed', lw=0.75, alpha=0.3)
         ax3.axvline(x=xcoord, color='black',linestyle='dashed', lw=0.75, alpha=0.3)
         ax4.axvline(x=xcoord, color='black',linestyle='dashed', lw=0.75, alpha=0.3)

    a = ax1.findobj(mpl.text.Annotation)
    for i in a:
        if '$' in i.get_label():
            i.set_size(10)



    fig.savefig("../documents/figs/Combined.pdf", rasterized=True, dpi=600)
    pl.show()
Example #4
if __name__ == '__main__':
    import numpy as np
    from methods import latexify, format_axes           # project-local plotting helpers (assumed)

    dat = np.genfromtxt('data/regularised.dat')
    print(np.shape(dat))
    n_test = dat[5800:5900, :]
    # Checking for normality
    from matplotlib import pyplot as plt
    import seaborn as sns; sns.set_style('ticks')
    cmap = sns.color_palette("cubehelix", 6)
    import scipy.stats as stats
    import statsmodels.api as sm
    p_val = []

    #Plotting
    ratio = (1.0 + np.sqrt(5.0))/2.0
    latexify(columns=2)
    fig, ax = plt.subplots()
    for k in n_test:
        p_val.append(stats.shapiro(k)[1])  # Shapiro-Wilk p-value per row

    mtest = np.mean(n_test, axis = 0)
    print(mtest)

    sm.qqplot(mtest, fit=True, line='45', ax=ax)

    print(np.mean(p_val))

    format_axes(ax)

    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                 ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(16)
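    # For reference, a minimal standalone version of the Shapiro-Wilk check above
    # (p > 0.05 means normality is not rejected at the 5% level):
    def shapiro_demo(seed=0):
        rng = np.random.RandomState(seed)
        stat, p = stats.shapiro(rng.normal(size=100))
        print(p > 0.05)                                  # True for a Gaussian sample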
Example #5
def load_sdss_dr12(path):
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as pl
    import seaborn as sns
    from seaborn import despine
    from astropy.io import fits
    from methods import latexify, format_axes           # project-local plotting helpers (assumed)
    cmap = sns.color_palette("cubehelix", 6)            # palette, as defined in Example #4

    data_file = fits.open(path)
    # print(data_file[1].data.field)
    sdss_nam = data_file[1].data.field('SDSS_NAME')
    # f = 82900
    # print(sdss_nam[f:f+100])
    z_warning = data_file[1].data.field('zWarning')
    z = data_file[1].data.field('z_vi')
    # mask = np.logical_and(np.logical_and((z >= 1.0), (z <= 2.3)), (z_warning == 0))
    # mask =  np.ones(np.shape(z)).astype(bool)#(z_warning == 0)
    mask =  (z_warning == 0)
    z = z[mask]


    mi = data_file[1].data.field('MI')[mask]



    # print(len(mi))
    dgmi = data_file[1].data.field('DGMI')[mask]

    bands = ['u', 'g', 'r', 'i', 'z']
    data = {}
    length_data = []
    for i, k in enumerate(bands):
        data[k] = (data_file[1].data.field('PSFMAG')[:,i])[mask] - (data_file[1].data.field('EXTINCTION_RECAL')[:,i])[mask]
        length_data.append(len(data[k]))

    nam = np.array(['082045.38+130618.9', '115043.86-002354.1', '121940.36-010007.4', '123602.33-033129.9', '135425.24-001357.9', '143148.09+053558.0', '143748.28-014710.7'])
    # for n in nam:
    #     print(str(n))
    #     print([zip(i,k) for i,k in enumerate(sdss_nam) if str(n) == str(k)])
    # [print(k) for i,k in enumerate(sdss_nam)]



    u_obj = np.array([16.340282535250985, 16.868115642182865, 16.868490820682688, 16.829230761566329, 16.443006053710171, 16.86379118669786, 15.633876668255006 ])
    u_obj_sdss = np.array([16.28, 17.10, 17.22, 17.09, 16.98, 16.99, 15.86])
    g_obj = np.array([16.173628740863542, 16.942949312866595, 16.609497093992836, 16.763042601191465, 16.361315268986992, 16.878526911269127, 15.622724562677639])
    g_obj_sdss = np.array([16.13, 17.06, 16.92, 16.99, 16.78, 16.88, 15.76])
    r_obj = np.array([15.983908769640571, 16.910639809893361, 16.53028330223578, 16.606965809044731, 16.259465400302297, 16.763917281092667, 15.381490575686691])
    r_obj_sdss = np.array([15.91, 16.99, 16.82, 16.90, 16.67, 16.75, 15.48])
    i_obj = np.array([15.960828249585155, 16.647157166516017, 16.356895490345813, 16.375764327940693, 16.113968031003473, 16.585061165051222, 15.355882945611519])
    i_obj_sdss = np.array([15.87, 16.78, 16.63, 16.66, 16.51, 16.52, 15.41])
    z_obj = np.array([15.949328467438541, 16.501224893192735, 16.341537724690703, 16.366777188037226, 16.161932733774769, 16.349770828709673, 15.386318026049175])
    z_obj_sdss = np.array([15.83, 16.60, 16.61, 16.71, 16.51, 16.25, 15.41])
    mi_obj = np.array([-28.887216966079912, -29.492018441782825, -29.206175823944648, -29.521104883342353, -29.343074749485346, -29.759800580770957, -29.85016859018959])
    zz_obj = np.array([1.1242971107973012, 1.9798694976693576, 1.5830422701934881, 1.8463767030959246, 1.5123220764878522, 2.0997959346967061, 1.3089485173836708])


    K_corr = np.array([-0.048, -0.248, -0.268, -0.287, -0.258, -0.233, -0.143, -0.0])
    Mz0 = np.array([-28.512270824785411, -29.337462849149809, -29.032421150963174, -29.423493429879706, -29.153275130431958, -29.555455539949492, -29.524955755590376,  -29.523504957708496])
    ric = Mz0 - K_corr
    print(ric)

    print(np.mean(mi), np.std(mi))
    print(np.mean(ric), np.std(ric))


    print(np.mean(1 - u_obj / u_obj_sdss), np.std(1 - u_obj / u_obj_sdss))
    print(np.mean(1 - g_obj / g_obj_sdss), np.std(1 - g_obj / g_obj_sdss))
    print(np.mean(1 - r_obj / r_obj_sdss), np.std(1 - r_obj / r_obj_sdss))
    print(np.mean(1 - i_obj / i_obj_sdss), np.std(1 - i_obj / i_obj_sdss))
    print(np.mean(1 - z_obj / z_obj_sdss), np.std(1 - z_obj / z_obj_sdss))

    print(np.mean( u_obj -  u_obj_sdss), np.std( u_obj - u_obj_sdss))
    print(np.mean( g_obj -  g_obj_sdss), np.std( g_obj - g_obj_sdss))
    print(np.mean( r_obj -  r_obj_sdss), np.std( r_obj - r_obj_sdss))
    print(np.mean( i_obj -  i_obj_sdss), np.std( i_obj - i_obj_sdss))
    print(np.mean( z_obj -  z_obj_sdss), np.std( z_obj - z_obj_sdss))
    # pl.show()
    colors = ['z', 'g - i', 'i']
    gi = data['g'] - data['i']



    # data_color = np.array(zip(mi , gi))
    data_color = np.array(list(zip(z, gi, data['i'])))




    # data_color = data_color[(np.logical_and(np.logical_and(data_color[:,0] > -40.0, data_color[:,1] >= -5), data_color[:,0] < -28.0))]
    data_color = data_color[(np.logical_and(data_color[:,0] > -40.0, data_color[:,1] >= -5))]
    # data = data[(np.logical_and(data_color[:,0] > -40.0, data_color[:,1] >= -5))]
    data_color = data_color[(data_color[:,1] >= -5)]
    # data = data[(np.logical_and(data_color[:,0] > -40.0, data_color[:,1] >= -5))]

    color_data = pd.DataFrame(data_color, columns=colors)


    # latexify()
    # Set up the subplot grid
    ratio = 5
    fig_width = 8

    golden_mean = (np.sqrt(5)-1.0)/2.0    # Aesthetic ratio
    fig_height = 1.0 * fig_width*golden_mean



    latexify(columns=2)
    fig = pl.figure()
    gs = pl.GridSpec(ratio + 1, ratio + 1)

    ax = fig.add_subplot(gs[1:, :-1])
    # ax_marg_x = fig.add_subplot(gs[0, :-1], sharex=ax)
    ax_marg_y = fig.add_subplot(gs[1:, -1], sharey=ax)

    x = np.array(color_data['z'])
    y = np.array(color_data['g - i'])
    imag = np.array(color_data['i'])


    import triangle  # corner-plot package (later renamed 'corner')

    print(np.mean(x), np.std(x))

    # print(np.shape(x))

    mask = (imag < 17.0) & ((1 < x) & (x < 2.3))
    # ax.scatter(mi_obj, g_obj - i_obj, s = 15, c=sns.xkcd_rgb["denim blue"], alpha = 0.7)
    triangle.hist2d(x, y, bins=200, ax=ax, smooth=0.3)
    ax.scatter(x[mask] , y[mask] ,  marker='o', s=10, facecolor=cmap[1], lw = 0, cmap=cmap, alpha= 1.0)
    ax.scatter(zz_obj, g_obj - i_obj, s = 25, c=cmap[2], alpha = 1.0)

    format_axes(ax)
    format_axes(ax_marg_y)
    pl.setp(ax_marg_y.get_yticklabels(), visible=False)
    pl.setp(ax_marg_y.yaxis.get_majorticklines(), visible=False)
    pl.setp(ax_marg_y.yaxis.get_minorticklines(), visible=False)
    pl.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)
    pl.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)
    pl.setp(ax_marg_y.get_xticklabels(), visible=False)
    ax_marg_y.xaxis.grid(False)
    despine(ax=ax_marg_y, bottom=True)
    sns.distplot(y, hist=False, kde=True, ax=ax_marg_y, kde_kws={"shade": True, "color": sns.xkcd_rgb["black"], "gridsize": 200, "alpha": 0.2},
                 vertical=True, axlabel=False)
    sns.distplot(g_obj - i_obj, hist=False, rug=True, kde=False, ax=ax_marg_y, rug_kws={"height": 1.5, "color": cmap[2], "alpha": 1.0},
                 vertical=True, axlabel=False)
    # sns.distplot(y[mask], hist=False, rug=True, kde=False, ax=ax_marg_y, rug_kws={"height": 0.5, "color": cmap[1], "alpha": 0.3},
    #              vertical=True, axlabel=False)
    sns.distplot(y[mask], hist=False, kde=True, ax=ax_marg_y, kde_kws={"shade": True, "color": cmap[1], "gridsize": 200, "alpha": 0.3},
                 vertical=True, axlabel=False)
    # sns.kdeplot(color_data, ax = ax, cmap='Greys', n_levels=50, norm=PowerNorm(gamma=0.3),
    #             shade=True, gridsize=100, linewidths = (0.5,), alpha=0.7)











    # ax.set_xlabel(r"M$_i$(z=2)")
    ax.set_xlabel(r"z")
    ax.set_ylabel(r"$g - i$")



    # ax.set_xlim((np.mean(x) - 5* np.std(x), np.mean(x) + 2* np.std(x)))
    ax.set_xlim((0.0, 3.5))
    ax.set_ylim((-0.5, 1.0))

    format_axes(ax)
    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                 ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(16)

    fig.tight_layout()
    pl.savefig('../documents/figs/color_comparison2.pdf')
    pl.show()
Example #6
from __future__ import division, print_function

__all__ = ["Module Name"]
__version__ = "0.0.0"
__author__ = "Jonatan Selsing ([email protected])"
__copyright__ = "Copyright 2014 Jonatan Selsing"




import numpy as np
import glob
from scipy import interpolate
import matplotlib.pylab as pl
from methods import latexify
latexify()
# use seaborn for nice default plot settings
import seaborn; seaborn.set_style('ticks')

# from unred import ccm_unred,cardelli_reddening
from cardelli_unred import cardelli_reddening
# from gen_methods import smooth,medfilt
from methods import common_wavelength

def main():
    root_dir = '/Users/jselsing/Work/X-Shooter/CompositeRedQuasar/processed_data/'

    sdssobjects = glob.glob(root_dir+'*SDSS*/Telluric_corrected_science.dat')
    object_info_files = glob.glob(root_dir+'*SDSS*/Object_info.dat')