# Example #1
def plot_pk_comp(cat_corrs, n_mock, ell=0, type='ratio', **kwargs): 
    ''' Plot comparison of average power spectrum multipoles (avg(P_ell(k)))
    for a list of catalog and correction specifications. Main use is to 
    compare the effects of fiber collisions correction method. However, it can be
    used to compare any power spectra as long as cat_corr dictionary is specified.

    --------------------------------------------------------------------------
    Parameters
    --------------------------------------------------------------------------
    cat_corrs : list of catalog correction dictionaries, each with 'catalog'
        and 'correction' sub-dictionaries
    n_mock : number of mocks; either a single int (applied to every entry of
        cat_corrs) or a list with one entry per cat_corr
    ell : ell-th component of multipole decomposition (0 = monopole, 2 = quadrupole)
    type : comparison mode. Implemented values:
        'Pk'     - overplot the average P_ell(k) of each cat_corr
        'Pk_err' - average P_ell(k) with sample-variance error bars
        'Pk_all' - every individual mock P_ell(k) in grey plus the average
        'kPk'    - k^1.5 * P_ell(k) (enhances the BAO signature)
        'ratio'  - ratio of each average P_ell(k) to the FIRST cat_corr's
                   average (the first entry is the denominator/reference)
        'l1_norm' - difference of each average P_ell(k) from the first entry's
        NOTE(review): 'residual' is accepted by the axis-setup section below
        but has no plotting branch in the main loop, so it produces an empty
        plot -- confirm whether that branch was lost.

    Optional kwargs: 'Ngrid', 'figsize', 'xrange', 'yrange', 'xscale',
    'yscale', 'ylabel'.
    
    --------------------------------------------------------------------------
    Notes
    --------------------------------------------------------------------------
    * Long ass code with a lot of idiosyncracies.
    * Make sure k values agree with each other. 
    * Python 2 code (print statements, xrange).
    * Saves the figure as a PNG to a hard-coded directory and returns None.
    
    --------------------------------------------------------------------------
    Example
    --------------------------------------------------------------------------
    cat_corrs = [ 
            {'catalog': {'name': 'nseries'}, 'correction': {'name': 'true'}},
            {'catalog': {'name': 'nseries'}, 'correction': {'name': 'upweight'}},
            {'catalog': {'name': 'nseries'}, 'correction': {'name': 'dlospeak', 'fit': 'gauss', 'sigma': 3.9, 'fpeak': 0.68}} 
            ] 

    plot_pk_comp(cat_corrs, 84, ell=0, type='Pk')
    plot_pk_comp(cat_corrs, 84, ell=0, type='ratio')

    '''

    # FFT grid size for the power spectrum measurement (default 360)
    if 'Ngrid' in kwargs.keys():
        Ngrid = kwargs['Ngrid']
    else: 
        Ngrid = 360 

    # Broadcast a scalar n_mock to every cat_corr; otherwise require a
    # one-to-one pairing between n_mock entries and cat_corrs.
    if isinstance(n_mock, int): 
        n_mock_list = [ n_mock for i in xrange(len(cat_corrs)) ] 
    else: 
        if len(n_mock) != len(cat_corrs): 
            raise ValueError()
        else: 
            n_mock_list = n_mock

    # Accumulates a catalog/correction tag per cat_corr; used in the
    # output figure file name.
    corr_str = ''

    prettyplot()                         # set up plot 
    pretty_colors = prettycolors()
    
    if 'figsize' in kwargs.keys(): 
        fig = plt.figure(1, kwargs['figsize'])
    else: 
        fig = plt.figure(1, figsize=(7, 8)) # set up figure 
    sub = fig.add_subplot(111)

    for i_corr, cat_corr in enumerate(cat_corrs):

        catdict = cat_corr['catalog']
        corrdict = cat_corr['correction']
        # Power spectrum measurement specification; P0 (FKP weight) and
        # Lbox are hard-coded for these mock catalogs.
        specdict = {
                'P0': 20000,
                'Lbox': 3600, 
                'Ngrid': Ngrid, 
                'ell': ell 
                }
        cat_corr_i = {
                'catalog': {'name': catdict['name'], 'n_mock': 1}, 
                'correction': corrdict, 
                'spec': specdict
                }

        # Average spectrum over n_mock_list[i_corr] mock realizations
        avg_spec = AvgSpec(n_mock_list[i_corr], 'pk', cat_corr_i)
        avg_spec.read()
    
        # NOTE(review): spec_type is assigned but never used below.
        spec_type = 'pk'
        # Attribute name of the requested multipole, e.g. 'p0k', 'p2k'
        spec_key = ''.join(['p', str(ell), 'k'])

        k_arr = avg_spec.k
        avg_pk = getattr(avg_spec, spec_key)
        
        if type == 'Pk':         # Compare P(k) to each other 

            sub.plot( 
                    k_arr, avg_pk, 
                    color = pretty_colors[i_corr + 1], 
                    label = plot_label(cat_corr),
                    lw = 4
                    ) 

        elif type == 'Pk_err':  # Compare P(k) with sample variance error bar

            pk_err = avg_spec.stddev()

            sub.errorbar( 
                    k_arr, avg_pk, 
                    yerr = [pk_err, pk_err],  # symmetric lower/upper errors
                    color = pretty_colors[i_corr + 1], 
                    label = plot_label(cat_corr),
                    fmt='--o'
                    ) 

        elif type == 'Pk_all':  # Every individual mock P(k) plus the average
            if isinstance(n_mock_list[i_corr], int): 
                n_mock_list_i = range(1, n_mock_list[i_corr]+1)
            else: 
                n_mock_list_i = n_mock_list[i_corr]

            # Individual realizations in grey, thin lines
            for i_mock in n_mock_list_i:
                k_i, spec_i_spec = avg_spec.spec_i(i_mock)

                sub.plot( 
                        k_i, spec_i_spec, 
                        color = '0.25', 
                        lw = 1 
                        ) 
            # Average on top, thick colored line
            sub.plot( 
                    k_arr, avg_pk, 
                    color = pretty_colors[i_corr + 1], 
                    label = plot_label(cat_corr),
                    lw = 4
                    ) 
        
        elif type == 'kPk':         # Compare k^1.5 * P(k) with each other. Enhances the 
            # BAO signature? (Upon Paco's request). 

            kPk = k_arr**1.5 * avg_pk
                
            sub.scatter(
                    k_arr, kPk, 
                    color = pretty_colors[i_corr+1], 
                    label = plot_label(cat_corr)
                    )

        elif type == 'ratio':       # Compare the ratio of the power spectra (P/P_denom)

            # First cat_corr is the denominator/reference spectrum;
            # nothing is plotted for it in this branch.
            if i_corr == 0 :        
                avg_pk_denom = avg_pk
                #denom_cat = catdict['name']
                denom_cat = corrdict['name']

            else: 
                sub.scatter(
                        k_arr, 
                        avg_pk/avg_pk_denom, 
                        color = pretty_colors[i_corr+1], 
                        label = plot_label(cat_corr)
                        )
                
                print plot_label(cat_corr)

                # L1 norm of the deviation from unity, split at k = 0.2 h/Mpc
                largescale = np.where(k_arr < 0.2)
                smallscale = np.where(k_arr > 0.2)
                print np.sum( np.abs((avg_pk/avg_pk_denom) - 1.0 ) )
                print 'Large scale k < 0.2'
                print np.sum( np.abs((avg_pk[largescale]/avg_pk_denom[largescale]) - 1.0 ) )
                print 'Small scale k > 0.2'
                print np.sum( np.abs((avg_pk[smallscale]/avg_pk_denom[smallscale]) - 1.0 ) )

            # Overlay the 1 +/- sample-variance band of the 'true' spectrum
            if corrdict['name'] == 'true': 

                pk_err = avg_spec.stddev()
        
                sub.plot( 
                        k_arr, 1.0 + pk_err/np.abs(avg_pk),  
                        color = 'k', 
                        lw = 2, 
                        ls = '-.', 
                        label = r"$\mathtt{1 + \Delta P^{true} (k) / P^{true}}$"
                        ) 
        
                sub.plot( 
                        k_arr, 1.0 + -1.0 * pk_err/np.abs(avg_pk),  
                        color = 'k',  
                        lw = 2, 
                        ls = '-.'
                        ) 

        elif type == 'l1_norm':     # Difference from the first entry's spectrum

            if i_corr == 0 :        
                avg_pk_denom = avg_pk
                denom_cat = catdict['name']

            else: 
                sub.scatter(
                        k_arr, 
                        avg_pk - avg_pk_denom, 
                        color = pretty_colors[i_corr+1], 
                        label = plot_label(cat_corr)
                        )
                
                print plot_label(cat_corr)
                print (avg_pk-avg_pk_denom)[-10:]
        
        del avg_pk
        
        # Specify corrections for figure file name  
        if 'dlospeak' in corrdict['name']: 
            try:
                # NOTE(review): the adjacent '_' + '_sigma' pieces produce a
                # double underscore (e.g. 'gauss__sigma3.9') -- looks like a
                # typo, but kept as-is since output file names depend on it.
                corr_str += ''.join([
                    catdict['name'], '_', 
                    corrdict['name'], '_', 
                    corrdict['fit'], '_',
                    '_sigma', str(corrdict['sigma']), 
                    'fpeak', str(corrdict['fpeak'])
                    ]) 
            except KeyError: 
                # Correction dict without 'fit'/'fpeak' entries
                corr_str += ''.join([
                    catdict['name'], '_', 
                    corrdict['name'], '_', 
                    '_sigma', str(corrdict['sigma'])
                    ]) 
        elif corrdict['name'] == 'fourier_tophat': 
            corr_str += ''.join([
                catdict['name'], '_', 
                corrdict['name'],  
                '.fs', str(round(corrdict['fs'], 1)), 
                '.rc', str(round(corrdict['rc'], 2)), 
                '.kfit', str(round(corrdict['k_fit'], 2)), 
                '.kfixed', str(round(corrdict['k_fixed'], 2))
                ])
        else: 
            corr_str += ''.join([ 
                catdict['name'], '_', 
                corrdict['name']
                ]) 
    
    # Dictate the x-range and y-range of the plotting
    # based on type of comparison.  Each branch also sets:
    #   ylimit    - y-axis limits
    #   yytext    - y position of the n_mock annotation text
    #   ylabel    - y-axis label
    #   resid_str - suffix for the figure file name
    if (type == 'Pk') or (type == 'Pk_err') or (type == 'Pk_all'):
        if 'yrange' in kwargs.keys(): 
            ylimit = kwargs['yrange'] 
            yytext = 10**.5*min(ylimit) 
        else: 
            ylimit = [10**2,10**5.5]
            yytext = 10**2.5
        
        if 'ylabel' in kwargs.keys(): 
            ylabel = kwargs['ylabel']
        else: 
            ylabel = r'$\mathtt{P_'+str(ell)+'(k)}$'

        if 'xscale' in kwargs.keys(): 
            sub.set_xscale(kwargs['xscale']) 
        else: 
            sub.set_xscale('log')

        if 'yscale' in kwargs.keys(): 
            sub.set_yscale(kwargs['yscale'])
        else: 
            sub.set_yscale('log')
        
        if type == 'Pk': 
            resid_str = ''
        elif type == 'Pk_err': 
            resid_str = '_err'
        elif type == 'Pk_all':
            resid_str = '_all'
    
    elif type == 'kPk':
        if 'yrange' in kwargs.keys(): 
            ylimit = kwargs['yrange'] 
            yytext = 10**.5*min(ylimit) 
        else: 
            ylimit = [10**0,10**2.0]
            yytext = 10**0.1

        if 'ylabel' in kwargs.keys(): 
            ylabel = kwargs['ylabel']
        else: 
            ylabel = r'$\mathtt{k^{1.5} P_0(k)}$'

        if 'xscale' in kwargs.keys(): 
            sub.set_xscale(kwargs['xscale']) 
        else: 
            sub.set_xscale('log')

        if 'yscale' in kwargs.keys(): 
            sub.set_yscale(kwargs['yscale'])
        else: 
            sub.set_yscale('log')

        resid_str = '_kPk'

    elif type == 'ratio': 
        if 'yrange' in kwargs.keys(): 
            ylimit = kwargs['yrange'] 
            yytext = 0.05 + min(ylimit) 
        else: 
            ylimit = [0.5, 1.5] 
            yytext = 0.55

        # denom_cat was set during the i_corr == 0 iteration above
        ylabel = ''.join([
            r"$\mathtt{\overline{P_", str(ell), "(k)}/\overline{P_", str(ell), r"(k)_{\rm{", denom_cat, "}}}}$"
            ])
        
        if 'xscale' in kwargs.keys(): 
            sub.set_xscale(kwargs['xscale']) 
        else: 
            sub.set_xscale('log')

        if 'yscale' in kwargs.keys(): 
            sub.set_yscale(kwargs['yscale'])
        else: 
            pass  # linear y-scale by default for ratios

        resid_str = '_ratio'

    elif type == 'residual':
        # NOTE(review): no plotting branch in the main loop handles
        # 'residual' -- see the docstring note.
        if 'yrange' in kwargs.keys(): 
            ylimit = kwargs['yrange'] 
            yytext = np.mean(ylimit) 
        else: 
            ylimit = [0.0, 5.0]
            yytext = 2.5

        ylabel = ''.join([
            r"$\mathtt{|\overline{P_", str(ell), "(k)} - \overline{P_", str(ell), r"(k)_{\rm{True}}}|/\Delta P_", str(ell), "}$"
            ])

        if 'xscale' in kwargs.keys(): 
            sub.set_xscale(kwargs['xscale']) 
        else: 
            sub.set_xscale('log')

        if 'yscale' in kwargs.keys(): 
            sub.set_yscale(kwargs['yscale'])
            # Log y-scale with no explicit range gets a wider default range
            if (kwargs['yscale'] == 'log') and \
                    ('yrange' not in kwargs.keys()):
                ylimit = [10**-3, 10**1] 
        else: 
            pass

        resid_str = '_residual'

    elif type == 'l1_norm': 

        if 'yrange' in kwargs.keys(): 
            ylimit = kwargs['yrange'] 
            yytext = 0.05 + min(ylimit) 
        else: 
            ylimit = None  # let matplotlib autoscale
            yytext = 0.55

        ylabel = ''.join([
            r"$\mathtt{\overline{P_", str(ell), "(k)} - \overline{P_", str(ell), r"(k)_{\rm{", denom_cat, "}}}}$"
            ])
        
        if 'xscale' in kwargs.keys(): 
            sub.set_xscale(kwargs['xscale']) 
        else: 
            sub.set_xscale('log')

        if 'yscale' in kwargs.keys(): 
            sub.set_yscale(kwargs['yscale'])
        else: 
            pass

        resid_str = '_l1norm'
    else: 
        raise NotImplementedError('asdfasdfasdf') 

    if 'xrange' in kwargs.keys():   # specify x-range 
        sub.set_xlim(kwargs['xrange']) 
        yxtext = 1.5*min(kwargs['xrange'])
    else: 
        sub.set_xlim([10**-3,10**0])
        yxtext = 1.5*10**-3

    # Reference line at unity for ratio comparisons
    if type == 'ratio': 
        sub.axhline(y = 1.0, lw=2, ls='--', c='k')

    sub.set_ylim(ylimit)
    sub.set_xlabel('k (h/Mpc)', fontsize=20)
    sub.set_ylabel(ylabel, fontsize=20)
    
    # Display the number of mocks for given catalog so that
    # I know how many mocks the P(k) is averaged over.
    n_mock_text = '\n'.join([
                ' '.join([
                    str(n_mock_list[ii]), 
                    ((cat_corrs[ii])['catalog'])['name'].upper()
                    ]) 
                for ii in xrange(len(n_mock_list))
                ])
    sub.text(yxtext, yytext, n_mock_text)

    sub.legend(scatterpoints=1, loc='upper left', prop={'size':14})
    
    # Mock-count tag for the file name: '84' for a scalar, '10_20_...' for a list
    try: 
        n_mock_str = '_'.join([str(nm) for nm in n_mock]) 
    except TypeError:
        n_mock_str = str(n_mock) 

    # Figure file name encodes multipole, mock counts, corrections,
    # comparison type, and grid size; spec_key/corr_str come from the loop.
    fig_name = ''.join([
        spec_key, '_', 
        n_mock_str, 
        'mock_fibcoll_', 
        corr_str, 
        resid_str, 
        '_comparison_Ngrid', 
        str(Ngrid), 
        '.png'
        ])     

    fig_dir = '/home/users/hahn/powercode/FiberCollisions/figure/'
    print fig_name
    fig.savefig(
            ''.join([fig_dir, fig_name]), 
            bbox_inches="tight"
            )
    #plt.show()
    plt.close()

    return None