Example #1
    def make_delta_sim(self):
        r"""divide the simulated map by T_b(z) to produce `self.sim_map_delta`
        in units of overdensity (delta)"""
        print "making sim in units of overdensity"
        # the frequency axis is stored in Hz; convert to MHz, then to redshift
        freq_axis = self.sim_map.get_axis('freq') / 1.e6
        z_axis = units.nu21 / freq_axis - 1.0

        # mean 21cm brightness temperature at each redshift
        # (the 1e-3 presumably converts mK to the map's units)
        simobj = corr21cm.Corr21cm()
        T_b = simobj.T_b(z_axis) * 1e-3

        self.sim_map_delta = copy.deepcopy(self.sim_map)
        self.sim_map_delta /= T_b[:, np.newaxis, np.newaxis]
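A quick numeric check of the frequency-to-redshift conversion used above, assuming the map's frequency axis is stored in Hz and that units.nu21 is the 21cm rest frequency in MHz (consistent with the / 1.e6 division above):

from utils import units

freq_hz = 800.0e6                           # hypothetical channel frequency
print units.nu21 / (freq_hz / 1.e6) - 1.0   # roughly 0.78 if nu21 ~ 1420.4 MHz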
Example #2
def generate_delta_sim(input_file, output_file):
    r"""make the map with the mean temperature divided out (delta)"""
    print "reading %s -> %s (dividing by T_b(z))" % (input_file, output_file)

    simmap = algebra.make_vect(algebra.load(input_file))
    # the frequency axis is stored in Hz; convert to MHz, then to redshift
    freq_axis = simmap.get_axis('freq') / 1.e6
    z_axis = units.nu21 / freq_axis - 1.0

    # mean 21cm brightness temperature at each redshift
    # (the 1e-3 presumably converts mK to the map's units)
    simobj = corr21cm.Corr21cm()
    T_b = simobj.T_b(z_axis) * 1e-3

    simmap /= T_b[:, np.newaxis, np.newaxis]

    print "saving to " + output_file
    algebra.save(output_file, simmap)
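A hedged usage sketch of the function above; the file names are placeholders rather than paths from the source:

# sketch only: convert a simulated brightness-temperature map to a delta map
generate_delta_sim("sim_temperature_map.npy", "sim_delta_map.npy")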
Example #3
import numpy as np

from simulations import corr21cm
from utils import cubicspline as cs

## Modify with the path to the wigglez power spectrum.
## Should be a file with columns of k-values and ps-values.

## By default the first two columns are used as k and ps; if not, try
## the colspec keyword argument.
ps_wigglez = cs.LogInterpolater.fromfile("<wigglez_ps_file>")


## Put an exponential cutoff into the powerspectrum at kstar in order
## to regularise small scales for integrals (eventually I'll fold this
## into the Corr21cm class).
kstar = 10.0  # units: h Mpc^{-1}
ps = lambda k: np.exp(-0.5 * (k / kstar)**2) * ps_wigglez(k)


## Set the redshift that the powerspectrum is normalised about.
z_ps = <wigglez ps effective redshift>


## Create the 21cm correlations object
cr = corr21cm.Corr21cm(ps=ps, redshift=z_ps)
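A quick, hedged sanity check of the cut-off spectrum defined above (pick k values that lie inside the tabulated range of the input file): values at and beyond kstar should be strongly suppressed by the exponential factor.

# sketch only: print the regularised spectrum at a few scales (h Mpc^{-1})
for k in [0.01, 0.1, 1.0, 10.0, 30.0]:
    print k, ps(k)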
Example #4
from simulations import corr21cm, ps_estimation

cr = corr21cm.Corr21cm()
#cr.add_mean = True

# Reduce the number of pixels to make faster
cr.x_num = 64
cr.y_num = 64
cr.nu_num = 64

# Adjust angular and frequency ranges to make close to a cube.
cr.x_width = 10.0  # degrees
cr.y_width = 10.0  # degrees
cr.nu_lower = 500.0  # MHz
cr.nu_upper = 600.0  # MHz

# Generate two realisations
f1 = cr.getfield()
f2 = cr.getfield()

# Calculate 2D statistics (that is, treating kpar and kperp separately).

# 2D powerspectra of each field
ps1 = ps_estimation.ps_azimuth(f1, window=True, kmodes=False)
ps2 = ps_estimation.ps_azimuth(f2, window=True, kmodes=False)
# 2D cross power spectrum
ps12 = ps_estimation.crossps_azimuth(f1, f2, window=True, kmodes=False)

# Normalise by the auto powers: ps12 / sqrt(ps1 * ps2) is the
# cross-correlation coefficient, which makes differences more obvious.
ch1 = ps12 / (ps1 * ps2)**0.5
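A hedged follow-up: for two independent realisations the normalised cross power should scatter about zero, which a quick summary makes easy to check (this assumes ps_azimuth and crossps_azimuth return plain numpy arrays of matching shape):

import numpy as np

# sketch only: summarise the normalised cross power
print "mean coherence:", np.mean(ch1)
print "rms scatter:   ", np.std(ch1)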
Example #5
    'radio_data_file1': 'sec_A_15hr_41-69_cleaned_clean_map_I.npy',
    'radio_noiseinv_file1': 'sec_A_15hr_41-69_cleaned_noise_inv_I.npy',
    'radio_data_file2': 'sec_A_15hr_41-69_cleaned_clean_map_I.npy',
    'radio_noiseinv_file2': 'sec_A_15hr_41-69_cleaned_noise_inv_I.npy',
    'freq': (),
    'lags': (),
    'output_shelve_file': 'test.shelve',
    'convolve': False,
    'subtract_mean': True,
    'speedup': False
}
prefix = 'fs_'

if __name__ == '__main__':
    # find the mean brightness temperature used in the simulations,
    # evaluated at the redshift of the 21cm line observed at 800 MHz
    corrobj = corr21cm.Corr21cm()
    T_b_sim = corrobj.T_b(1420. / 800. - 1)
    print T_b_sim

    #splt.repair_shelve_files(batch15_param, "sim_xloss_correlate_mode",
    #                         params_default, prefix)
    #splt.repair_shelve_files(batch16_param, "sim_auto_correlate_rand",
    #                         params_default, prefix)

    #print splt.compare_corr(batch6_param, batch7_param)

    #splt.process_batch_correlations(batch10_param, cross_power=True)
    #splt.process_batch_correlations(batch15_param,
    #                multiplier=1./T_b_sim*1.e-3, cross_power=True)
    #splt.process_batch_correlations(batch16_param,
    #                                multiplier=1./T_b_sim*1.e-3,
Example #6
import numpy as np
import scipy.linalg as la

from simulations import corr21cm, foregroundsck, lofar
from utils import units

nf = 128
nul = 500.0
nuh = 700.0

freq = np.linspace(nul, nuh, nf)

# redshift of each frequency channel
z = units.nu21 / freq - 1.0
z1, z2 = np.meshgrid(z, z)

cr = corr21cm.Corr21cm()

# signal covariance between channels: 21cm correlation at zero angular separation
cs = cr.angular_correlation(0.0, z1, z2)

noise_power = 1e-7

fsyn = foregroundsck.Synchrotron()

cf = fsyn.angular_correlation(0.0) * fsyn.frequency_covariance(
    freq[:, np.newaxis], freq[np.newaxis, :])

cn = cf + np.identity(nf) * noise_power

# generalised eigenvalue problem (KL transform): cs v = lambda cn v
evals, evecs = la.eigh(cs, cn)

cr2 = corr21cm.Corr21cm()
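A small hedged sketch of what one might do with the decomposition above: scipy.linalg.eigh returns the generalised eigenvalues in ascending order, so the modes with the highest signal-to-(foreground + noise) ratio sit at the end (the threshold of 1 below is only illustrative).

# sketch only: count KL modes whose signal eigenvalue exceeds the contaminants
print "modes with S/(F+N) > 1:", (evals > 1.0).sum()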
Example #7
def make_corr_plots(filename):
    corrobj = corr21cm.Corr21cm()
    # mean brightness temperature at the redshift of the 21cm line at 800 MHz
    T_b_sim = corrobj.T_b(1420. / 800. - 1)
    print T_b_sim
    master = shelve.open(filename, "r")
    nlag = 15
    nmode = 11
    nsim = 9

    # load the autocorr simulations --------------------------------------------
    #splt.process_batch_correlations(autocorr_sim_param, cross_power=False,
    #                                multiplier=1.)

    autocorr_sim_data = splt.batch_correlations_statistics(
        autocorr_sim_param, randtoken="rand", include_signal=False)

    lagl = autocorr_sim_data[0]
    lagc = autocorr_sim_data[1]
    lagr = autocorr_sim_data[2]
    autocorr_sim = autocorr_sim_data[3]
    autocorr_sim_err = autocorr_sim_data[4]
    autocorr_sim_cov = autocorr_sim_data[5]

    # now convert from the 1e-3 Omega_HI in simulations to 0.5e-3
    #autocorr_sim /= 2.
    #autocorr_sim_err /= 2.
    # calibrate to xcorr: 0.56126646473 0.229801269703
    autocorr_sim_low = autocorr_sim * (0.56 - 0.23)
    autocorr_sim_high = autocorr_sim * (0.56 + 0.23)
    autocorr_sim_center = autocorr_sim * 0.56

    for correntry in zip(lagl, lagc, lagr, autocorr_sim_low,
                         autocorr_sim_center, autocorr_sim_high):
        print "%5.3g %5.3g %5.3g %5.3g %5.3g %5.3g" % correntry

    # treat the autocorr and loss simulations----------------------------------
    lags = np.zeros((3, nlag))
    autocorr = np.zeros((nmode, nlag))
    autocorr_err = np.zeros((nmode, nlag))
    autocorr_sim = np.zeros((nsim, nmode, nlag))
    autocorr_sim_avg = np.zeros((nmode, nlag))
    compensated_autocorr = np.zeros((nmode, nlag))
    compensated_autocorr_err = np.zeros((nmode, nlag))
    mode_compensation = np.zeros((nmode, nlag))
    for mode_index in range(0, nmode):
        mode_num = mode_index * 5
        autocorr_name = "autocorr" + repr(mode_num) + "_mode"
        entry = master[autocorr_name]
        autocorr[mode_index, :] = entry["corr1D"]
        autocorr_err[mode_index, :] = entry["corr1D_std"]
        lags[0, :] = entry["x_axis"][0]
        lags[1, :] = entry["x_axis"][1]
        lags[2, :] = entry["x_axis"][2]
        for sim_index in range(0, nsim):
            sim_name = "sim" + repr(sim_index + 1) + "_mode" + repr(mode_num)
            entry = master[sim_name]
            autocorr_sim[sim_index, mode_index, :] = entry["corr1D"]
        autocorr_sim_avg = np.mean(autocorr_sim, axis=0) / 1000.

    zero_modes = autocorr_sim_avg[0, :]
    mode_compensation = autocorr_sim_avg / zero_modes[None, :]
    compensated_autocorr = autocorr / mode_compensation
    compensated_autocorr_err = autocorr_err / mode_compensation

    print "-" * 80
    for lagind in range(0, nlag):
        lag_bounds = splt.fancy_vector(lags[:, lagind], "%5.2g")
        modes = splt.fancy_vector(autocorr[:, lagind], "%5.2g")
        modes_err = splt.fancy_vector(autocorr_err[:, lagind], "%5.2g")
        print lag_bounds + modes + modes_err

    print "-" * 80
    for lagind in range(0, nlag):
        lag_bounds = splt.fancy_vector(lags[:, lagind], "%5.2g")
        modes = splt.fancy_vector(mode_compensation[:, lagind], "%5.2g")
        print lag_bounds + modes

    print "-" * 80
    for lagind in range(0, nlag):
        lag_bounds = splt.fancy_vector(lags[:, lagind], "%5.2g")
        modes = splt.fancy_vector(autocorr_sim_avg[:, lagind], "%5.2g")
        print lag_bounds + modes

    print "-" * 80
    for lagind in range(0, nlag):
        lag_bounds = splt.fancy_vector(lags[:, lagind], "%5.2g")
        modes = splt.fancy_vector(compensated_autocorr[:, lagind], "%5.2g")
        modes_err = splt.fancy_vector(compensated_autocorr_err[:, lagind],
                                      "%5.2g")
        print lag_bounds + modes + modes_err

    master.close()
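The mode compensation above is a transfer-function style correction: the mean autocorrelation of simulations processed with N modes removed is divided by the zero-modes-removed case to estimate the fractional signal loss, and the measured correlation is divided by that loss. A minimal, self-contained sketch of the same idea on made-up numbers (all values below are hypothetical):

import numpy as np

# hypothetical 1D correlation functions versus lag
sim_zero_modes = np.array([1.00, 0.80, 0.50])   # simulations, no modes removed
sim_n_modes = np.array([0.70, 0.50, 0.30])      # simulations, N modes removed
measured = np.array([0.50, 0.35, 0.20])         # data, N modes removed

loss = sim_n_modes / sim_zero_modes   # fraction of the signal surviving
print measured / loss                 # compensated estimate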
Example #8
def make_xcorr_plotdata():
    # find the mean brightness temperature used in the simulations,
    # evaluated at the redshift of the 21cm line observed at 800 MHz
    corrobj = corr21cm.Corr21cm()
    T_b_sim = corrobj.T_b(1420. / 800. - 1)
    print T_b_sim

    #splt.process_batch_correlations(xcorr_real_param,
    #                multiplier=1., cross_power=True)
    ##splt.process_batch_correlations(xcorr_sim_param, cross_power=True,
    ##                                multiplier=1./(T_b_sim/1.e3))
    #splt.process_batch_correlations(xcorr_sim_param, cross_power=True,
    #                                multiplier=1./(T_b_sim))
    #splt.process_batch_correlations(xcorr_loss_sim_param,
    #                multiplier=1./T_b_sim*1.e-3, cross_power=True)
    #splt.process_batch_correlations(xcorr_variants_param,
    #                multiplier=1., cross_power=True)

    #splt.plot_batch_correlations(xcorr_real_param,
    #                        dir_prefix="plots/xcorr_real/",
    #                        color_range=[-0.04, 0.04], cross_power=True)
    #splt.plot_batch_correlations(xcorr_sim_param,
    #                        dir_prefix="plots/xcorr_sim/",
    #                        color_range=[-0.04, 0.04], cross_power=True)
    #splt.plot_batch_correlations(xcorr_loss_param,
    #                        dir_prefix="plots/xcorr_loss/",
    #                        color_range=[-0.04, 0.04], cross_power=True)
    #splt.plot_batch_correlations(xcorr_loss_sim_param,
    #                        dir_prefix="plots/xcorr_loss_sim/",
    #                        color_range=[-0.04, 0.04], cross_power=True)
    #splt.plot_batch_correlations(xcorr_variants_param,
    #                        dir_prefix="plots/xcorr_variants/",
    #                        color_range=[-0.04, 0.04], cross_power=True)

    # find the real xcorr signal with errors
    xcorr_data = splt.batch_correlations_statistics(xcorr_real_param,
                                                    randtoken="rand",
                                                    include_signal=True)
    # find the simulated xcorr signal with errors
    xcorr_sim_data = splt.batch_correlations_statistics(xcorr_sim_param,
                                                        randtoken="rand",
                                                        include_signal=False)

    # find the compensation function from simulations
    compmode = splt.batch_compensation_function(xcorr_loss_sim_param)

    lagl = xcorr_data[0]
    lagc = xcorr_data[1]
    lagr = xcorr_data[2]
    xcorr_null = xcorr_data[3]
    xcorr_signal = xcorr_data[6]
    xcorr_cov = xcorr_data[5]
    xcorr_err = xcorr_data[4]
    xcorr_sim = xcorr_sim_data[3]
    xcorr_sim_err = xcorr_sim_data[4]
    xcorr_sim_cov = xcorr_sim_data[5]
    # modes removed -> compensation row: 0->0, 5->1, 10->2, 15->3, etc.
    compensation = compmode[3, :]

    (amp, amp_err) = utils.ampfit(xcorr_signal, xcorr_cov + xcorr_sim_cov,
                                  xcorr_sim)
    print amp, amp_err

    # rescale the simulation to the fitted amplitude; the fixed rescaling from
    # Omega_HI = 1e-3 to 0.5e-3 is left commented out below
    #xcorr_sim /= 2.
    #xcorr_sim_err /= 2.
    xcorr_sim *= amp
    xcorr_sim_err *= amp

    for correntry in zip(lagl, lagc, lagr, xcorr_signal, xcorr_null, xcorr_err,
                         xcorr_sim, xcorr_sim_err, compensation):
        print "%5.3g %5.3g %5.3g %5.3g %5.3g %5.3g %5.3g %5.3g %5.3g" % correntry