Code example #1
File: COSMOS_KL_map.py  Project: jakevdp/Thesis
def plot_kappa_simple(catalog_file, remove_z_problem=False,
                      Npix=128, kappa_filter=4,
                      kappa_cutoff=0):
    """
    kappa_filter is in pixels
    """
    gamma_plot, Ngal, RArange_g, DECrange_g = get_gamma_vector(
        catalog_file, dtheta=2.0, remove_z_problem=remove_z_problem)
    
    # get a square grid for the kappa measurement
    NRA = NDEC = Npix
    
    RAmin = RArange_g[0]
    RAmax = RArange_g[-1]
    DECmin = DECrange_g[0]
    DECmax = DECrange_g[-1]

    dtheta = (RAmax-RAmin) * 1./NRA * 60

    # the DEC extent is smaller than the RA extent: shift DECmin down so the
    # empty (zero) rows are split evenly between the bottom and top of the grid
    DECmin -= 0.5 * (DECmin + NDEC * dtheta/60. - DECmax)
    DECmax = DECmin + NDEC * dtheta/60.

    # get the shear realization
    gamma, Ngal, RArange, DECrange = get_gamma_vector(
        catalog_file, dtheta, RAmin, DECmin, NRA, NDEC+1,
        remove_z_problem=remove_z_problem)

    kappa = gamma_to_kappa(gamma.T, dtheta)

    if kappa_filter:
        kappa = filters.gaussian_filter(kappa.real, kappa_filter) \
                + 1j * filters.gaussian_filter(kappa.imag, kappa_filter)

    if kappa_cutoff is not None:
        kappa[np.where(kappa < kappa_cutoff)] = kappa_cutoff

    pylab.figure(figsize=(10,8))
    pylab.imshow(kappa.real.T,
                 origin='lower',
                 cmap=BkBuW(1.0),
                 extent=(RAmin, RAmax, DECmin, DECmax))
    pylab.xlim(pylab.xlim()[::-1])
    pylab.title('COSMOS convergence map: Kaiser-Squires')
    pylab.colorbar().set_label(r'$\kappa\ \mathrm{(E-mode)}$')
    

    #conjugate gamma: we're flipping the x-axis
    whiskerplot(gamma_plot.conj(), RArange_g, DECrange_g, color='w')
    pylab.ylim(DECmin, 2.9)
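
The helper gamma_to_kappa is not reproduced on this page. Judging from the plot title ('Kaiser-Squires'), it presumably performs the standard Kaiser-Squires inversion, kappa_hat = [(l1^2 - l2^2) - 2i*l1*l2] / (l1^2 + l2^2) * gamma_hat in Fourier space. The sketch below is a minimal FFT-based version of that inversion; the function name, signature, and normalization are assumptions, not the project's actual implementation.

import numpy as np

def kaiser_squires_sketch(gamma, dtheta=None):
    """Kaiser-Squires inversion of a complex shear grid (gamma1 + 1j*gamma2).

    Returns a complex convergence map: real part = E mode, imaginary part = B mode.
    dtheta is accepted only for interface compatibility; the Fourier kernel is
    dimensionless, so the pixel scale cancels out.
    """
    g = np.asarray(gamma, dtype=complex)
    gamma_hat = np.fft.fft2(g)
    l1 = np.fft.fftfreq(g.shape[0])[:, None]
    l2 = np.fft.fftfreq(g.shape[1])[None, :]
    lsq = l1 ** 2 + l2 ** 2
    lsq[0, 0] = 1.0                       # avoid division by zero at l = 0
    kernel = ((l1 ** 2 - l2 ** 2) - 2j * l1 * l2) / lsq
    kappa_hat = kernel * gamma_hat
    kappa_hat[0, 0] = 0.0                 # the mean of kappa is unconstrained
    return np.fft.ifft2(kappa_hat)
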
Code example #2
File: COSMOS_KL_map.py  Project: jakevdp/Thesis
def plot_shear(catalog_file, dtheta):
    gamma, Ngal, RArange, DECrange = get_gamma_vector(
        catalog_file, dtheta, remove_z_problem=False)
    
    pylab.imshow(Ngal.T, origin='lower',
                 cmap=BkBuW(0.8),
                 extent=(RArange[0], RArange[-1], DECrange[0], DECrange[-1]))
    pylab.xlim(pylab.xlim()[::-1])
    

    whiskerplot(gamma, RArange, DECrange, color='w')
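
The whiskerplot helper is likewise project code that is not shown here. Conventionally a shear whisker plot draws a headless stick at each grid point, with length proportional to |gamma| and position angle equal to half the phase of gamma (shear is a spin-2 quantity). A rough stand-in under that assumption follows; the binning convention (RArange/DECrange as bin edges bracketing the gamma grid) and the scale factor are also assumptions.

import numpy as np
import pylab

def whiskerplot_sketch(gamma, RArange, DECrange, color='k', scale=1.0):
    # assume gamma has shape (len(RArange)-1, len(DECrange)-1) and the ranges
    # are bin edges; draw each whisker as a line segment centred on its pixel
    RA = 0.5 * (np.asarray(RArange)[:-1] + np.asarray(RArange)[1:])
    DEC = 0.5 * (np.asarray(DECrange)[:-1] + np.asarray(DECrange)[1:])
    for i, ra in enumerate(RA):
        for j, dec in enumerate(DEC):
            g = gamma[i, j]
            if g == 0:
                continue
            half = 0.5 * scale * abs(g)
            phi = 0.5 * np.angle(g)       # position angle: half the shear phase
            dx, dy = half * np.cos(phi), half * np.sin(phi)
            pylab.plot([ra - dx, ra + dx], [dec - dy, dec + dy],
                       '-', color=color, linewidth=1)
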
Code example #3
File: COSMOS_KL_map.py  Project: jakevdp/Thesis
def plot_mask(catalog_file, dtheta,
              RAmin=None, DECmin=None,
              NRA=None, NDEC=None,
              remove_z_problem=True,
              N_bootstraps=0):
    gamma, Ngal, RArange, DECrange = get_gamma_vector(
        catalog_file, dtheta, RAmin, DECmin, NRA, NDEC,
        N_bootstraps=N_bootstraps, remove_z_problem=remove_z_problem)

    print RArange[0], RArange[-1]
    print DECrange[0], DECrange[-1]
    print gamma.shape
    print Ngal.shape

    pylab.imshow(Ngal.T,
                 origin='lower',
                 interpolation='nearest')
    pylab.colorbar()
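
A hypothetical call sequence for the two plotting helpers above (the catalog path is a placeholder; dtheta is in arcmin, matching the 2.0-arcmin binning used elsewhere on this page):

plot_shear('COSMOS_catalog.fits', dtheta=2.0)     # placeholder file name
plot_mask('COSMOS_catalog.fits', dtheta=2.0, remove_z_problem=True)
pylab.show()
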
Code example #4
File: compute_likelihoods.py  Project: akr89/Thesis
def compute_likelihoods(catalog_file,
                        out_file,
                        cosmo_dict,
                        nmodes,
                        dtheta,
                        n_z,
                        sigma=0.3,
                        RAmin=None, DECmin=None,
                        NRA=None, NDEC=None,
                        remove_z_problem=True,
                        fiducial_cosmology=None,
                        **kwargs):
    """
    compute the likelihoods for a range of cosmologies

    Parameters
    ----------
    catalog_file : string or list of strings
        location of the COSMOS catalog(s) to use
    out_file : string
        location to save likelihood output
        Output will be a text file, with columns labeled
    cosmo_dict : Dictionary
        keys are arguments for cosmology object
        values are the corresponding range
    nmodes : int or list of int
        number of modes to use.  This should be less than NRA*NDEC
        alternatively, a list of integers can be supplied
    dtheta : float
        size of (square) pixels in arcmin
    sigma : float (default = 0.3)
        intrinsic ellipticity of galaxies
    fiducial_cosmology : Cosmology object
        the fiducial cosmology used for determination of KL vectors
        if unspecified, it will be initialized from remaining kwargs

    Other Parameters
    ----------------
    n_z : `shear_KL_source.zdist.zdist` object
        n_z(z) returns the galaxy distribution
            
    If the following are unspecified, they will be determined from the data
    RAmin/DECmin : float
        minimum of RA/DEC bins (degrees).
    NRA/NDEC : int
        number of RA/DEC bins
    """
    gamma, Ngal, noise, RArange, DECrange = get_gamma_vector(
        catalog_file, dtheta, RAmin, DECmin, NRA, NDEC, N_bootstraps=10,
        remove_z_problem=True)
    
    if fiducial_cosmology is None:
        fiducial_cosmology = Cosmology(**kwargs)

    # use results of bootstrap resampling to compute
    # the noise on observed shear
    gamma = gamma.reshape(gamma.size)
    Ngal = Ngal.reshape(Ngal.size)
    noise = noise.reshape(noise.size)

    i_sigma = np.where(Ngal > 1)
    sigma_estimate = np.sqrt(np.mean(noise[i_sigma] * Ngal[i_sigma]))
    print "average sigma: %.2g" % sigma_estimate

    izero = np.where(Ngal <= 1)
    noise[izero] = sigma_estimate ** 2
    N_m_onehalf = noise ** -0.5

    # noise = sigma^2 / Ngal
    # correlation matrix takes a constant sigma and a variable
    # ngal. So we'll encode the noise as an "effective Ngal"
    # using the user-defined sigma.
    Ngal_eff = sigma**2 / noise
    Ngal_eff[np.where(Ngal == 0)] = 0

    # construct fiducial correlation matrix
    print ">> fiducial correlation matrix"
    R_fid = shear_correlation_matrix(sigma, RArange, DECrange, Ngal_eff,
                                     n_z,
                                     whiten=True,
                                     cosmo=fiducial_cosmology)

    evals, evecs = np.linalg.eigh(R_fid)
    isort = np.argsort(evals)[::-1]
    evals = evals[isort]
    evecs = evecs[:,isort]

    #compute KL transform of data
    a_data = np.dot(evecs.T, N_m_onehalf * gamma)

    #iterate through all nmodes requested
    if not hasattr(nmodes, '__iter__'):
        nmodes = [nmodes]

    cosmo_keys = cosmo_dict.keys()
    cosmo_vals = [cosmo_dict[k] for k in cosmo_keys]
    cosmo_kwargs = fiducial_cosmology.get_dict()
    
    log2pi = np.log(2*np.pi)

    OF = open(out_file, 'w')
    OF.write('# fiducial cosmology: %s\n' % str(cosmo_kwargs))
    OF.write('# ncut ')
    OF.write(' '.join(cosmo_keys))
    OF.write(' chi2 log|det(C)| log(Likelihood)\n')

    for cosmo_tup in iter_product(*cosmo_vals):
        cosmo_kwargs.update(dict(zip(cosmo_keys,cosmo_tup)))

        #flat universe prior
        cosmo_kwargs['Ol'] = 1. - cosmo_kwargs['Om']
        
        print ">>", cosmo_keys, ['%.2g' % v for v in cosmo_tup]
        R = shear_correlation_matrix(sigma, RArange, DECrange, Ngal_eff,
                                     n_z,
                                     whiten=True,
                                     **cosmo_kwargs)
        cosmo_args = (len(cosmo_keys) * " %.6g") % cosmo_tup
        for ncut in nmodes:
            evecs_n = evecs[:,:ncut]
            a_n = a_data[:ncut]
            C_n = np.dot(evecs_n.T, np.dot(R, evecs_n))

            # compute chi2 = (a_n-<a_n>)^T C_n^-1 (a_n-<a_n>)
            # model predicts <a> = 0 so this simplifies:
            chi2_raw = np.dot(a_n.conj(), np.linalg.solve(C_n, a_n))

            #chi2_raw is complex because a_n is complex.  The imaginary
            # part of chi2 should be zero (within machine precision), because
            # C_n is Hermitian.  We'll skip checking that this is the case.
            chi2 = chi2_raw.real
            s, logdetC = np.linalg.slogdet(C_n)
            
            X0 = -0.5 * ncut * log2pi
            X1 = -0.5 * logdetC
            X2 = -0.5 * chi2
            print chi2, logdetC, X0, X1, X2
            OF.write("%i %s %.6g %.6g %.6g\n" % (ncut, cosmo_args, chi2,
                                                 logdetC, X0+X1+X2))
        ###
    ###
    OF.close()
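
For reference, the last column written to out_file is the Gaussian log-likelihood of the first ncut KL coefficients under the zero-mean model used above:

    log(Likelihood) = X0 + X1 + X2
                    = -0.5 * [ ncut * ln(2*pi) + ln det(C_n) + a_n^H C_n^-1 a_n ]

so differences in that column between cosmologies are log-likelihood ratios directly.
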
Code example #5
File: plot_diagnostics.py  Project: akr89/Thesis
def compute_all(
    catalog_file,
    dtheta,
    n_z,
    sigma=0.39,
    RAmin=None,
    DECmin=None,
    NRA=None,
    NDEC=None,
    remove_z_problem=True,
    N_bootstraps=1000,
    cosmo=None,
    **kwargs
):
    """
    parse catalog data and compute eigenvalue decomposition
    
    Parameters
    ----------
    catalog_file : string or list of strings
        location of the COSMOS catalog(s) to use
    dtheta : float
        size of (square) pixels in arcmin
    sigma : float (default = 0.39)
        intrinsic ellipticity of galaxies
    cosmo : Cosmology object
        the fiducial cosmology used for determination of KL vectors
        if unspecified, it will be initialized from remaining kwargs

    Other Parameters
    ----------------
    n_z : `shear_KL_source.zdist.zdist` object
        n_z(z) returns the galaxy distribution
            
    If the following are unspecified, they will be determined from the data
    RAmin/DECmin : float
        minimum of RA/DEC bins (degrees).
    NRA/NDEC : int
        number of RA/DEC bins

    Returns
    -------
    evals, evecs, a, RArange, DECrange, Ngal, noise, N_bootstraps, dtheta
    """
    gamma, Ngal, noise, RArange, DECrange = get_gamma_vector(
        catalog_file, dtheta, RAmin, DECmin, NRA, NDEC, N_bootstraps=N_bootstraps, remove_z_problem=False
    )

    if cosmo is None:
        cosmo = Cosmology(**kwargs)

    NRA, NDEC = gamma.shape

    # use results of bootstrap resampling to compute
    # the noise on observed shear
    gamma = gamma.reshape(gamma.size)
    Ngal = Ngal.reshape(Ngal.size)
    noise = noise.reshape(noise.size)

    i_sigma = np.where(Ngal > 5)
    sigma_estimate = np.sqrt(np.mean(noise[i_sigma] * Ngal[i_sigma]))
    print "average sigma: %.2g" % sigma_estimate

    izero = np.where(Ngal <= 5)
    noise[izero] = sigma_estimate ** 2
    N_m_onehalf = noise ** -0.5

    # noise = sigma^2 / Ngal
    # correlation matrix takes a constant sigma and a variable
    # ngal. So we'll encode the noise as an "effective Ngal"
    # using the user-defined sigma.
    Ngal_eff = sigma ** 2 / noise
    Ngal_eff[np.where(Ngal == 0)] = 0

    # construct fiducial correlation matrix
    print ">> fiducial correlation matrix"
    R_fid = shear_correlation_matrix(sigma, RArange, DECrange, Ngal_eff, n_z, whiten=True, cosmo=cosmo)

    evals, evecs = np.linalg.eigh(R_fid)
    isort = np.argsort(evals)[::-1]
    evals = evals[isort]
    evecs = evecs[:, isort]

    a = np.dot(evecs.T, N_m_onehalf * gamma)

    return (evals, evecs, a, RArange, DECrange, Ngal, noise, N_bootstraps, dtheta)
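
A quick way to read the "effective Ngal" construction used in both routines above: under the shot-noise model noise_i = sigma_i^2 / N_i, the per-pixel bootstrap variance is converted back into a galaxy count,

    Ngal_eff_i = sigma^2 / noise_i = N_i * (sigma / sigma_i)^2,

with the user-supplied sigma standing in for the per-pixel ellipticity dispersion. Pixels whose bootstrap scatter exceeds sigma are therefore down-weighted in the correlation matrix, and empty pixels are forced to Ngal_eff = 0.
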
Code example #6
File: COSMOS_KL_map.py  Project: jakevdp/Thesis
def plot_kappa_SN(catalog_file,
                  N_realizations=1000):
    # compute a 64 x 64 grid which encompasses all the data
    RAmin = 149.4317396
    RAmax = 150.798406267
    DECmin = 1.570097085
    DECmax = 2.90343041833

    NRA = 64
    NDEC = 64

    dtheta = (RAmax-RAmin) * 1./NRA * 60

    DECmin -= 0.5 * (DECmin + NDEC * dtheta/60. - DECmax)
    
    # get the shear realization
    gamma, Ngal, d2gamma, RArange, DECrange = get_gamma_vector(
        catalog_file, dtheta, RAmin, DECmin, NRA, NDEC,
        remove_z_problem=True,
        N_bootstraps=N_realizations)

    kappa = np.zeros(gamma.shape, dtype=complex)
    kappa_2 = np.zeros(gamma.shape, dtype=float)
    
    i_zero = np.where(d2gamma <= 0)

    d2gamma[i_zero] = 1

    dgamma = np.sqrt(d2gamma)

    print "computing kappa %i times" % N_realizations
    for i in range(N_realizations):
        if (i+1)%100 == 0:
            print " >", i+1
        phase = np.exp(2j * np.pi * np.random.random(gamma.shape))
        noise = np.random.normal(0, dgamma)
        noise[i_zero] = 0
        k = gamma_to_kappa(gamma + phase * noise, dtheta)

        kappa += k
        kappa_2 += abs(k) ** 2

    kappa /= N_realizations
    d2kappa = kappa_2 / N_realizations - abs(kappa) ** 2

    pylab.figure()
    pylab.imshow(d2kappa.T, origin='lower', interpolation='nearest',
                 extent=(RAmin, RAmax, DECmin, DECmax))
    pylab.xlim(pylab.xlim()[::-1])
    pylab.colorbar()

    pylab.figure()
    pylab.imshow(Ngal.T, origin='lower', interpolation='nearest',
                 extent=(RAmin, RAmax, DECmin, DECmax))
    pylab.xlim(pylab.xlim()[::-1])

    # compute convergence signal-to-noise
    i_zero = np.where(d2kappa == 0)
    d2kappa[i_zero] = 1
    kappa_SN = kappa / np.sqrt(d2kappa)
    kappa_SN[i_zero] = np.nan
    
    pylab.figure()
    pylab.imshow(kappa_SN[4:-4,4:-4].real.T,
                 origin='lower',
                 extent=(RAmin, RAmax, DECmin, DECmax))
    pylab.xlim(pylab.xlim()[::-1])
    pylab.colorbar()

    pylab.figure()
    pylab.imshow(kappa_SN[4:-4,4:-4].imag.T,
                 origin='lower',
                 extent=(RAmin, RAmax, DECmin, DECmax))
    pylab.xlim(pylab.xlim()[::-1])
    pylab.colorbar()
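
The accumulation loop above is a running mean and variance over the noise realizations: after dividing by N_realizations,

    d2kappa = <|k|^2> - |<k>|^2

is the per-pixel variance of the noisy Kaiser-Squires reconstructions, and kappa_SN = <k> / sqrt(d2kappa) is the per-pixel signal-to-noise shown in the last two figures (real part = E mode, imaginary part = B mode), with a 4-pixel border trimmed, presumably to suppress edge effects from the FFT inversion.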