def main():
    # latexify()
    import numpy as np
    import matplotlib.pyplot as pl
    from scipy.signal import medfilt
    import lineid_plot
    # latexify, format_axes and cmap are assumed to be defined at module level
    # (e.g. in a local plotting helper module).

    root_dir = '/Users/jselsing/Work/X-Shooter/CompositeRedQuasar/processed_data/'
    data_file = np.genfromtxt(root_dir+'Composite.dat')

    wl = data_file[:,0]
    mean = data_file[:,1]
    err_mean = data_file[:,2]
    wmean = data_file[:,3]
    err_wmean = data_file[:,4]
    geo_mean = data_file[:,5]
    median = data_file[:,6]
    n_spec = data_file[:,7]
    std = data_file[:,8]
    std_norm = data_file[:,9]
    wmean_cont = data_file[:,10]

    # Interpolate over bins where the weighted-mean error is zero, using a 5th-order spline.
    from scipy.interpolate import InterpolatedUnivariateSpline
    mask = np.where(err_wmean != 0)
    f = InterpolatedUnivariateSpline(wl[mask], wmean_cont[mask], w=err_wmean[mask], k=5)
    wmean_cont = f(wl)
    f = InterpolatedUnivariateSpline(wl[mask], err_wmean[mask], k=5)
    err_wmean = f(wl)

    # Save the interpolated template to a .dat file
    dt = [("wl", np.float64), ("wmean_cont", np.float64), ("err_wmean", np.float64)]
    data = np.array(list(zip(wl, wmean_cont, err_wmean)), dtype=dt)
    file_name = "data/templates/Selsing2015_interpolated.dat"
    np.savetxt(file_name, data, header="wl	weighted mean	error of weighted mean", fmt = ['%5.1f', '%1.4f', '%1.4f' ])
    pl.plot(wl,wmean_cont)
    pl.semilogy()
    pl.show()
    exit()  # NOTE: early exit; the code below is never reached while this is in place

    #Fitting power laws
    from scipy import optimize

    def power_law(x_tmp, a_tmp, k_tmp):
        return a_tmp * x_tmp ** k_tmp

    def power_law2(x_tmp, a1_tmp, x_c, k1_tmp, k2_tmp):
        # Broken power law: slope k1_tmp below the break x_c and k2_tmp above it.
        tmp1 = power_law(x_tmp, a1_tmp, k1_tmp)[x_tmp < x_c]
        # Rescale the upper segment so the two pieces join continuously at x_c.
        scale2loc = np.argmin(np.abs(x_tmp - x_c))
        a2_tmp = power_law(x_tmp[scale2loc], a1_tmp, (k1_tmp - k2_tmp))
        tmp2 = power_law(x_tmp, a2_tmp, k2_tmp)[x_tmp >= x_c]

        return np.concatenate((tmp1, tmp2))


    par_guess = [1, -1.7]
    par_guess2 = [1, 5000, -1.7, -1.7]
    wmean[np.isnan(wmean)] = 0
    # Continuum windows (rest-frame AA) used for the power-law fits
    mask = ((wl > 1300) & (wl < 1350)) | ((wl > 1425) & (wl < 1475)) | ((wl > 5500) & (wl < 5800)) | ((wl > 7300) & (wl < 7500)) #| (wl > 9700) & (wl < 9900) | (wl > 10200) & (wl < 10600)
    popt, pcov = optimize.curve_fit(power_law, wl[mask], wmean_cont[mask], p0=par_guess, sigma=err_wmean[mask], absolute_sigma=True)
    popt2, pcov2 = optimize.curve_fit(power_law2, wl[mask], wmean_cont[mask], p0=par_guess2, sigma=err_wmean[mask], absolute_sigma=True)

    print(*popt)
    print(*popt2)



    #Plotting
    latexify(columns=2)
    fig, ax = pl.subplots()

    ax.plot(wl, medfilt(wmean_cont, 5),
            lw = 0.5, alpha=1.0, linestyle = 'steps-mid', label='X-shooter mean composite', color=cmap[1])

    ax.plot(wl, power_law(wl, *popt),
            linestyle='dashed', label ='Pure power law fit', color=cmap[2])

    sdss_compo = np.genfromtxt('/Users/jselsing/Work/X-Shooter/CompositeRedQuasar/processed_data/sdss_compo.dat')
    sdss_wl = sdss_compo[:,0]
    sdss_flux = sdss_compo[:, 1]

    norm_reg = 1430
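    # Rescale the SDSS composite to the X-shooter composite using the median flux
    # in a 20 AA window starting at norm_reg.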

    mask = (wl > norm_reg) & (wl < norm_reg + 20)
    norm1 = np.median(wmean_cont[mask])
    mask = (sdss_wl > norm_reg) & (sdss_wl < norm_reg + 20)
    norm2 = np.median(sdss_flux[mask])


    ax.plot(sdss_wl, sdss_flux * (norm1/norm2),
            linestyle='solid', label ='Full sample SDSS composite', color=cmap[0])


    #Overplot lines
    fit_line_positions = np.genfromtxt('data/plotlinelist.txt', dtype=None, encoding='utf-8')

    linelist = []
    linenames = []
    for n in fit_line_positions:
        linelist.append(n[1])
        linenames.append(n[0])

    pl.semilogy()

    # Formatting axes
    import matplotlib as mpl

    ax.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax.get_xaxis().tick_bottom()
    ax.xaxis.set_minor_locator(mpl.ticker.NullLocator())

    ax.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax.set_yticks([0.3, 1, 3, 10, 30, 100, 300])
    ax.yaxis.set_minor_locator(mpl.ticker.NullLocator())

    # pl.legend(loc=3)
    ax.set_xlim((1000, 11500))
    ax.set_ylim((0.2, 500))
    format_axes(ax)





    ax.set_xlabel(r'Wavelength [$\AA$]')
    ax.set_ylabel(r'Rescaled flux density F$_\lambda$')
    pl.tight_layout()




    val = []
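    # Put each line-ID arrow tip at twice the local composite flux so the labels
    # sit clear of the spectrum.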
    for p in range(len(linelist)):
        xcoord = linelist[p]
        mask = (wl > xcoord - 1) & (wl < xcoord + 1)
        y_val = np.mean(wmean_cont[mask])
        val.append(2 * y_val)
    print(val)
    arrow_tips = val
    lineid_plot.plot_line_ids(wl, wmean_cont, linelist, linenames, arrow_tip=arrow_tips, ax=ax)
    for i in ax.lines:
        if '$' in i.get_label():
            i.set_alpha(0.3)
    a = ax.findobj(mpl.text.Annotation)
    for i in a:
        if '$' in i.get_label():
            i.set_size(10)


    pl.savefig('../documents/figs/compo_full_sample.pdf')
    pl.show()
    from matplotlib import pyplot as plt
    import seaborn as sns; sns.set_style('ticks')
    cmap = sns.color_palette("cubehelix", 6)
    import scipy.stats as stats
    import statsmodels.api as sm
    p_val = []

    #Plotting
    ratio = (1.0 + np.sqrt(5.0))/2.0
    latexify(columns=2)
    fig, ax = plt.subplots()
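    # NOTE: n_test is not defined in this snippet; it is assumed to be an iterable
    # of samples, one Shapiro-Wilk p-value being computed per entry below.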
    for i, k in enumerate(n_test):

        p_val.append((stats.shapiro(k)[1]))

    mtest = np.mean(n_test, axis = 0)
    print(mtest)

    sm.qqplot(mtest, fit=True, line='45', ax=ax)

    print(np.mean(p_val))

    format_axes(ax)

    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                 ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(16)

    fig.tight_layout()
    plt.savefig("../documents/figs/normality.pdf", dpi= 150)
    plt.show(block=True)
Example #3
def main():
    import numpy as np
    import matplotlib.pyplot as pl
    from scipy.signal import medfilt
    import lineid_plot
    # latexify, format_axes and smooth are assumed to be defined at module level
    # (e.g. in a local plotting/helper module).

    root_dir = '/Users/jselsing/Work/X-Shooter/CompositeRedQuasar/processed_data/'
    data_file = np.genfromtxt(root_dir+'Composite.dat')

    wl = data_file[:,0]
    mean = data_file[:,1]
    err_mean = data_file[:,2]
    wmean = data_file[:,3]
    err_wmean = data_file[:,4]
    geo_mean = data_file[:,5]
    median = data_file[:,6]
    n_spec = data_file[:,7]
    std = data_file[:,8]
    std_norm = data_file[:,9]
    wmean_cont = data_file[:,10]




    #Fitting power laws
    from scipy import optimize

    def power_law(x_tmp, a_tmp, k_tmp):
        return a_tmp * x_tmp ** k_tmp

    def power_law2(x_tmp, a1_tmp, x_c, k1_tmp, k2_tmp):

        tmp1 = power_law(x_tmp, a1_tmp,k1_tmp)[x_tmp<x_c]
        scale2loc = np.argmin(np.abs(x_tmp - x_c))
        a2_tmp = power_law(x_tmp[scale2loc], a1_tmp, (k1_tmp - k2_tmp))

        tmp2 = power_law(x_tmp, a2_tmp,k2_tmp)[x_tmp>= x_c]

        return np.concatenate((tmp1,tmp2))

    def power_law3(x_tmp, a_tmp, k_tmp, b_tmp):
        # Power law with a wavelength-dependent (curved) exponent.
        return a_tmp * x_tmp ** (k_tmp + b_tmp * x_tmp)

    par_guess = [1, -1.70]
    par_guess2 = [1, 5000, -1.7, -1.7]
    wmean[np.isnan(wmean)] = 0

    mask = ((wl > 1300) & (wl < 1350)) | ((wl > 1425) & (wl < 1475)) | ((wl > 5500) & (wl < 5800)) | ((wl > 7300) & (wl < 7500))
    # std[std != 0] only lines up with the boolean mask if std is non-zero in every
    # bin; err combines the spectrum-to-spectrum scatter with the weighted-mean error below.
    err = (std[std != 0])[mask]
    popt, pcov = optimize.curve_fit(power_law, wl[mask], wmean_cont[mask], p0=par_guess, sigma=np.sqrt(err**2 + err_wmean[mask]**2), absolute_sigma=True, maxfev=5000)
    popt2, pcov2 = optimize.curve_fit(power_law2, wl[mask], wmean_cont[mask], p0=par_guess2, sigma=np.sqrt(err**2 + err_wmean[mask]**2), absolute_sigma=True, maxfev=5000)

    print(*popt)
    print(*popt2)


    par_guess = [1, -1.7]

    wl_new = wl
    wm = []
    m = []
    geo = []
    med = []
    #Fit
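    # Monte-Carlo estimate of the slope uncertainties: perturb each composite with
    # Gaussian noise drawn from its error spectrum, refit the power law, and
    # collect the best-fit parameters over the iterations.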
    np.random.seed(12345)
    mask = (wl_new > 1300) & (wl_new < 1350) | (wl_new > 1425) & (wl_new < 1475) | (wl_new > 5500) & (wl_new < 5800) | (wl_new > 7300) & (wl_new < 7500)


    for i in np.arange(10):
        print('Iteration: ', i)
        err = ((std)[std != 0])[mask]
        resampled_spec = np.random.normal((wmean_cont)[mask], np.sqrt(err**2 + err_wmean[mask]**2))
        popt_wmean, pcov_wmean = optimize.curve_fit(power_law, wl_new[mask], resampled_spec, p0=par_guess,
                                                    sigma=np.sqrt(err**2 + err_wmean[mask]**2), absolute_sigma=True)
        wm.append(popt_wmean)

        err = ((std)[std != 0])[mask]
        resampled_spec = np.random.normal((mean)[mask], np.sqrt(err**2 + err_mean[mask]**2))
        popt_mean, pcov_mean = optimize.curve_fit(power_law, wl_new[mask], resampled_spec, p0=par_guess,
                                                    sigma=np.sqrt(err**2 + err_mean[mask]**2), absolute_sigma=True)
        m.append(popt_mean)

        err = ((std)[std != 0])[mask]
        resampled_spec = np.random.normal((median[std != 0])[mask], err)
        popt_median, pcov_median = optimize.curve_fit(power_law, wl_new[mask], resampled_spec, p0=par_guess,
                                                    sigma=err, absolute_sigma=True, maxfev=600)

        med.append(popt_median)


        err = ((std)[std != 0])[mask]
        resampled_spec = np.random.normal((geo_mean[std != 0])[mask], err)
        # pl.plot(wl_new[mask], resampled_spec)
        popt_geo, pcov_geo = optimize.curve_fit(power_law, wl_new[mask], resampled_spec, p0=par_guess,
                                                    sigma=err, absolute_sigma=True, maxfev=600)


        geo.append(popt_geo)
    # pl.plot(wl, geo_mean)
    # pl.show()


    print("""Composite fit slope wmean...{0} +- {1}""".format(np.mean(wm, axis=0)[1],np.std(wm, axis=0)[1]))
    print("""Composite fit slope mean...{0} +- {1}""".format(np.mean(m, axis=0)[1], np.std(m, axis=0)[1]))
    print("""Composite fit slope median...{0} +- {1}""".format(np.mean(med, axis=0)[1], np.std(med, axis=0)[1]))
    print("""Composite fit slope geo...{0} +- {1}""".format(np.mean(geo, axis=0)[1], np.std(geo, axis=0)[1]))


    # Plotting
    latexify(columns=2, fig_height=7)
    import matplotlib.gridspec as gridspec

    fig = pl.figure()

    gs = gridspec.GridSpec(4, 1, height_ratios=[2,1,1,1])

    ax1 = pl.subplot(gs[0])
    ax2 = pl.subplot(gs[1])
    ax3 = pl.subplot(gs[2])
    ax4 = pl.subplot(gs[3])

    #Plotting
    # ax1.plot(wl, power_law2(wl, *popt2) , 'b--')
    # ax1.plot(wl, power_law(wl, *popt) , 'b--')
    ax1.plot(wl, wmean_cont, lw = 0.5, linestyle = 'steps-mid', label='X-shooter wmean composite')

    nbins = len(wl)
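    # Resample the normalised variability onto a log-spaced wavelength grid and
    # smooth it for plotting; hist comes from the local methods module, while
    # smooth is assumed to be a local smoothing helper (Hanning window).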
    from methods import hist
    log_binned_wl = np.array(hist(wl,[min(wl),max(wl)], int(2*nbins),'log'))
    from scipy.interpolate import InterpolatedUnivariateSpline
    sps = InterpolatedUnivariateSpline(wl, std_norm)
    std_plot = smooth(medfilt(sps(log_binned_wl) , 9), window='hanning', window_len=15)
    wave_std = log_binned_wl

    ax2.plot(wave_std, std_plot, lw = 0.5, linestyle = 'steps-mid', label='Normalised variability')

    ax3.plot(wl, wmean_cont/medfilt(err_wmean, 5), lw = 0.5, linestyle = 'steps-mid', label = 'Signal-to-noise')


    ax4.plot(wl,medfilt( n_spec, 1), label='Number of spectra', lw=0.5)





    #Overplot lines
    fit_line_positions = np.genfromtxt('data/plotlinelist.txt', dtype=None, encoding='utf-8')

    linelist = []
    linenames = []
    for n in fit_line_positions:
        linelist.append(n[1])
        linenames.append(n[0])

    ax4.set_xlabel(r'Rest Wavelength [$\AA$]')
    ax1.set_ylabel(r'Normalised flux density F$_{\lambda}$')
    ax2.set_ylabel(r'Normalised Variability  $\delta$F$_{\lambda}$')
    ax3.set_ylabel(r'S/N Ratio')
    ax4.set_ylabel(r'Number of spectra')

    ax1.semilogy()
    ax1.semilogx()
    ax1.set_xlim((1000, 11500 ))
    ax1.set_ylim((0.1, 750))


    ax2.semilogy()
    ax2.semilogx()
    ax2.set_xlim((1000, 11500 ))
    ax2.set_ylim((0.001, 90))

    ax3.semilogx()
    ax3.set_xlim((1000, 11500 ))

    ax4.semilogx()
    ax4.set_xlim((1000, 11500 ))
    ax4.set_ylim((0, 9))



    # Formatting axes
    import matplotlib as mpl

    ax4.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax4.set_xticks([1000, 2000, 3000, 5000, 9000])
    ax1.xaxis.set_major_locator(mpl.ticker.NullLocator())
    ax2.xaxis.set_major_locator(mpl.ticker.NullLocator())
    ax3.xaxis.set_major_locator(mpl.ticker.NullLocator())


    ax1.yaxis.set_minor_locator(mpl.ticker.NullLocator())
    ax2.yaxis.set_minor_locator(mpl.ticker.NullLocator())

    ax1.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax1.set_yticks([0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500])
    ax2.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax2.set_yticks([0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30])
    ax3.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax3.set_yticks([50, 100, 150, 200, 250, 300])
    ax4.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
    ax4.set_yticks([0, 1, 2, 3, 4, 5, 6, 7, 8])


    pl.tight_layout()
    fig.subplots_adjust(hspace=0)

    format_axes(ax1)
    format_axes(ax2)
    format_axes(ax3)
    format_axes(ax4)



    val = []
    for p in range(len(linelist)):
        xcoord = linelist[p]
        mask = (wl > xcoord - 1) & (wl < xcoord + 1)
        y_val = np.mean(wmean_cont[mask])
        val.append(1.5 * y_val)
    arrow_tips = val
    lineid_plot.plot_line_ids(wl, wmean_cont, linelist, linenames, arrow_tip=arrow_tips, ax=ax1)
    for i in ax1.lines:
        if '$' in i.get_label():
            i.set_alpha(0.3)
            i.set_linewidth(0.75)

    for p in range(len(linelist)):
         xcoord = linelist[p]
         mask = (wl > xcoord - 1) & (wl < xcoord + 1)
         y_val = np.mean(wmean_cont[mask])
         ax1.vlines(xcoord, ax1.get_ylim()[0], y_val, color='black',linestyle='dashed', lw=0.75, alpha=0.3)
         ax2.axvline(x=xcoord, color='black',linestyle='dashed', lw=0.75, alpha=0.3)
         ax3.axvline(x=xcoord, color='black',linestyle='dashed', lw=0.75, alpha=0.3)
         ax4.axvline(x=xcoord, color='black',linestyle='dashed', lw=0.75, alpha=0.3)

    a = ax1.findobj(mpl.text.Annotation)
    for i in a:
        if '$' in i.get_label():
            i.set_size(10)



    fig.savefig("../documents/figs/Combined.pdf", rasterized=True, dpi=600)
    pl.show()
def load_sdss_dr12(path):
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as pl
    import seaborn as sns
    from astropy.io import fits
    # latexify, format_axes, despine and cmap are assumed to be defined at module
    # level (despine is most likely seaborn.despine).
    data_file = fits.open(path)
    # print(data_file[1].data.field)
    sdss_nam = data_file[1].data.field('SDSS_NAME')
    # f = 82900
    # print(sdss_nam[f:f+100])
    z_warning = data_file[1].data.field('zWarning')
    z = data_file[1].data.field('z_vi')
    # mask = np.logical_and(np.logical_and((z >= 1.0), (z <= 2.3)), (z_warning == 0))
    # mask =  np.ones(np.shape(z)).astype(bool)#(z_warning == 0)
    mask =  (z_warning == 0)
    z = z[mask]


    mi = data_file[1].data.field('MI')[mask]



    # print(len(mi))
    dgmi = data_file[1].data.field('DGMI')[mask]

    bands = ['u', 'g', 'r', 'i', 'z']
    data = {}
    length_data = []
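    # Extinction-corrected PSF magnitudes per band (PSFMAG minus EXTINCTION_RECAL
    # from the DR12Q catalogue).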
    for i, k in enumerate(bands):
        data[k] = (data_file[1].data.field('PSFMAG')[:,i])[mask] - (data_file[1].data.field('EXTINCTION_RECAL')[:,i])[mask]
        length_data.append(len(data[k]))

    nam = np.array(['082045.38+130618.9', '115043.86-002354.1', '121940.36-010007.4', '123602.33-033129.9', '135425.24-001357.9', '143148.09+053558.0', '143748.28-014710.7'])
    # for n in nam:
    #     print(str(n))
    #     print([zip(i,k) for i,k in enumerate(sdss_nam) if str(n) == str(k)])
    # [print(k) for i,k in enumerate(sdss_nam)]



    u_obj = np.array([16.340282535250985, 16.868115642182865, 16.868490820682688, 16.829230761566329, 16.443006053710171, 16.86379118669786, 15.633876668255006 ])
    u_obj_sdss = np.array([16.28, 17.10, 17.22, 17.09, 16.98, 16.99, 15.86])
    g_obj = np.array([16.173628740863542, 16.942949312866595, 16.609497093992836, 16.763042601191465, 16.361315268986992, 16.878526911269127, 15.622724562677639])
    g_obj_sdss = np.array([16.13, 17.06, 16.92, 16.99, 16.78, 16.88, 15.76])
    r_obj = np.array([15.983908769640571, 16.910639809893361, 16.53028330223578, 16.606965809044731, 16.259465400302297, 16.763917281092667, 15.381490575686691])
    r_obj_sdss = np.array([15.91, 16.99, 16.82, 16.90, 16.67, 16.75, 15.48])
    i_obj = np.array([15.960828249585155, 16.647157166516017, 16.356895490345813, 16.375764327940693, 16.113968031003473, 16.585061165051222, 15.355882945611519])
    i_obj_sdss = np.array([15.87, 16.78, 16.63, 16.66, 16.51, 16.52, 15.41])
    z_obj = np.array([15.949328467438541, 16.501224893192735, 16.341537724690703, 16.366777188037226, 16.161932733774769, 16.349770828709673, 15.386318026049175])
    z_obj_sdss = np.array([15.83, 16.60, 16.61, 16.71, 16.51, 16.25, 15.41])
    mi_obj = np.array([-28.887216966079912, -29.492018441782825, -29.206175823944648, -29.521104883342353, -29.343074749485346, -29.759800580770957, -29.85016859018959])
    zz_obj = np.array([1.1242971107973012, 1.9798694976693576, 1.5830422701934881, 1.8463767030959246, 1.5123220764878522, 2.0997959346967061, 1.3089485173836708])


    K_corr = np.array([-0.048, -0.248, -0.268, -0.287, -0.258, -0.233, -0.143, -0.0])
    Mz0 = np.array([-28.512270824785411, -29.337462849149809, -29.032421150963174, -29.423493429879706, -29.153275130431958, -29.555455539949492, -29.524955755590376,  -29.523504957708496])
    ric = Mz0 - K_corr
    print(ric)

    print(np.mean(mi), np.std(mi))
    print(np.mean(ric), np.std(ric))


    print(np.mean(1 - u_obj / u_obj_sdss), np.std(1 - u_obj / u_obj_sdss))
    print(np.mean(1 - g_obj / g_obj_sdss), np.std(1 - g_obj / g_obj_sdss))
    print(np.mean(1 - r_obj / r_obj_sdss), np.std(1 - r_obj / r_obj_sdss))
    print(np.mean(1 - i_obj / i_obj_sdss), np.std(1 - i_obj / i_obj_sdss))
    print(np.mean(1 - z_obj / z_obj_sdss), np.std(1 - z_obj / z_obj_sdss))

    print(np.mean( u_obj -  u_obj_sdss), np.std( u_obj - u_obj_sdss))
    print(np.mean( g_obj -  g_obj_sdss), np.std( g_obj - g_obj_sdss))
    print(np.mean( r_obj -  r_obj_sdss), np.std( r_obj - r_obj_sdss))
    print(np.mean( i_obj -  i_obj_sdss), np.std( i_obj - i_obj_sdss))
    print(np.mean( z_obj -  z_obj_sdss), np.std( z_obj - z_obj_sdss))
    # pl.show()
    colors = ['z', 'g - i', 'i']
    gi = data['g'] - data['i']



    # data_color = np.array(zip(mi , gi))
    data_color = np.array(list(zip(z, gi, data['i'])))




    # data_color = data_color[(np.logical_and(np.logical_and(data_color[:,0] > -40.0, data_color[:,1] >= -5), data_color[:,0] < -28.0))]
    data_color = data_color[(np.logical_and(data_color[:,0] > -40.0, data_color[:,1] >= -5))]
    # data = data[(np.logical_and(data_color[:,0] > -40.0, data_color[:,1] >= -5))]
    data_color = data_color[(data_color[:,1] >= -5)]
    # data = data[(np.logical_and(data_color[:,0] > -40.0, data_color[:,1] >= -5))]

    color_data = pd.DataFrame(data_color, columns=colors)


    # latexify()
    # Set up the subplot grid
    ratio = 5
    fig_width = 8

    golden_mean = (np.sqrt(5)-1.0)/2.0    # Aesthetic ratio
    fig_height = 1.0 * fig_width*golden_mean



    latexify(columns=2)
    fig = pl.figure()
    gs = pl.GridSpec(ratio + 1, ratio + 1)

    ax = fig.add_subplot(gs[1:, :-1])
    # ax_marg_x = fig.add_subplot(gs[0, :-1], sharex=ax)
    ax_marg_y = fig.add_subplot(gs[1:, -1], sharey=ax)

    x = np.array(color_data['z'])
    y = np.array(color_data['g - i'])
    imag = np.array(color_data['i'])


    # The old `triangle` package (later renamed `corner`) provides hist2d.
    import triangle

    print(np.mean(x), np.std(x))

    # print(np.shape(x))

    mask = (imag < 17.0) & ((1 < x) & (x < 2.3))
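    # Plot the full sample as a 2-D histogram and highlight the bright (i < 17.0),
    # 1 < z < 2.3 objects on top of it.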
    # ax.scatter(mi_obj, g_obj - i_obj, s = 15, c=sns.xkcd_rgb["denim blue"], alpha = 0.7)
    triangle.hist2d(x, y, bins=200, ax=ax, smooth=0.3)
    ax.scatter(x[mask] , y[mask] ,  marker='o', s=10, facecolor=cmap[1], lw = 0, cmap=cmap, alpha= 1.0)
    ax.scatter(zz_obj, g_obj - i_obj, s = 25, c=cmap[2], alpha = 1.0)

    format_axes(ax)
    format_axes(ax_marg_y)
    pl.setp(ax_marg_y.get_yticklabels(), visible=False)
    pl.setp(ax_marg_y.yaxis.get_majorticklines(), visible=False)
    pl.setp(ax_marg_y.yaxis.get_minorticklines(), visible=False)
    pl.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)
    pl.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)
    pl.setp(ax_marg_y.get_xticklabels(), visible=False)
    ax_marg_y.xaxis.grid(False)
    despine(ax=ax_marg_y, bottom=True)
    sns.distplot(y, hist=False, kde=True, ax=ax_marg_y, kde_kws={"shade": True, "color": sns.xkcd_rgb["black"], "gridsize": 200, "alpha": 0.2},
                 vertical=True, axlabel=False)
    sns.distplot(g_obj - i_obj, hist=False, rug=True, kde=False, ax=ax_marg_y, rug_kws={"height": 1.5, "color": cmap[2], "alpha": 1.0},
                 vertical=True, axlabel=False)
    # sns.distplot(y[mask], hist=False, rug=True, kde=False, ax=ax_marg_y, rug_kws={"height": 0.5, "color": cmap[1], "alpha": 0.3},
    #              vertical=True, axlabel=False)
    sns.distplot(y[mask], hist=False, kde=True, ax=ax_marg_y, kde_kws={"shade": True, "color": cmap[1], "gridsize": 200, "alpha": 0.3},
                 vertical=True, axlabel=False)
    # sns.kdeplot(color_data, ax = ax, cmap='Greys', n_levels=50, norm=PowerNorm(gamma=0.3),
    #             shade=True, gridsize=100, linewidths = (0.5,), alpha=0.7)











    # ax.set_xlabel(r"M$_i$(z=2)")
    ax.set_xlabel(r"z")
    ax.set_ylabel(r"$g - i$")



    # ax.set_xlim((np.mean(x) - 5* np.std(x), np.mean(x) + 2* np.std(x)))
    ax.set_xlim((0.0, 3.5))
    ax.set_ylim((-0.5, 1.0))

    format_axes(ax)
    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                 ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(16)

    fig.tight_layout()
    pl.savefig('../documents/figs/color_comparison2.pdf')
    pl.show()