import numpy as np
import sys, os
import re
import pickle
from cobaya.run import run
from getdist.mcsamples import loadMCSamples
import matplotlib.pyplot as plt
# imports needed by the code below (so_dict, pspy_utils and so_mpi are pspy
# modules, powspec comes from pixell, maps_to_params_utils is the local
# PSpipe helper module)
from pspy import so_dict, pspy_utils, so_mpi
from pixell import powspec
import maps_to_params_utils

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

spec_dir = "spectra_actxplanck_newbin/"
cov_dir = "covariances_actxplanck_newbin/"

output_dir = "output_calib/"
pspy_utils.create_directory(output_dir)

spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]
modes = ["TT", "TE", "ET", "EE"]

# Multipole range to use
lmin = {
    "dr6_pa4_f150": 900,
    "dr6_pa4_f220": 1500,
    "dr6_pa5_f090": 600,
    "dr6_pa5_f150": 900,
    "dr6_pa6_f090": 600,
    "dr6_pa6_f150": 900
}
lmax = 2000
include_fg = d["include_fg"]
fg_dir = d["fg_dir"]

fg_components = d["fg_components"]
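# swap the combined "tSZ_and_CIB" template for its separate components:
# tSZ, clustered CIB ("cibc"), and their cross term ("tSZxCIB")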
fg_components["tt"].remove("tSZ_and_CIB")
for comp in ["tSZ", "cibc", "tSZxCIB"]:
    fg_components["tt"].append(comp)

window_dir = "windows"
mcm_dir = "mcms"
noise_data_dir = "sim_data/noise_ps"
specDir = "spectra"

lmax_simu = lmax

pspy_utils.create_directory(specDir)

spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]
spin_pairs = ["spin0xspin0", "spin0xspin2", "spin2xspin0", "spin2xspin2"]

experiments = d["experiments"]
all_freqs = [freq for exp in experiments for freq in d["freqs_%s" % exp]]
ncomp = 3
ps_cmb = powspec.read_spectrum(d["clfile"])[:ncomp, :ncomp]

if include_fg:
    l, ps_fg = maps_to_params_utils.get_foreground_matrix(
        fg_dir, fg_components, all_freqs, lmax_simu + 1)

so_mpi.init(True)
subtasks = so_mpi.taskrange(imin=d["iStart"], imax=d["iStop"])
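
# A minimal sketch of how the task range is typically consumed: each MPI rank
# loops over its own slice of simulation indices. The loop body here is a
# hypothetical placeholder, not the original script's simulation code.
for iii in subtasks:
    iii = int(iii)
    print("rank working on sim %05d" % iii)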

Example #3

"""
This script computes the mode coupling matrices and the binning matrices Bbl
for the different surveys and arrays.
"""

from pspy import so_map, so_mcm, pspy_utils, so_dict, so_mpi
import sys

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

mcm_dir = "mcms"
pspy_utils.create_directory(mcm_dir)

surveys = d["surveys"]
lmax = d["lmax"]

if d["use_toeplitz"] == True:
    print("we will use the toeplitz approximation")
    l_exact, l_band, l_toep = 800, 2000, 2750
else:
    l_exact, l_band, l_toep = None, None, None

sv1_list, ar1_list, sv2_list, ar2_list = [], [], [], []
n_mcms = 0
for id_sv1, sv1 in enumerate(surveys):
    arrays_1 = d["arrays_%s" % sv1]
    for id_ar1, ar1 in enumerate(arrays_1):
        for id_sv2, sv2 in enumerate(surveys):
            arrays_2 = d["arrays_%s" % sv2]
            for id_ar2, ar2 in enumerate(arrays_2):
                # same dedup conditions as the other pair loops in this
                # collection: skip redundant (survey, array) orderings
                if (id_sv1 == id_sv2) & (id_ar1 > id_ar2): continue
                if (id_sv1 > id_sv2): continue
                sv1_list += [sv1]
                ar1_list += [ar1]
                sv2_list += [sv2]
                ar2_list += [ar2]
                n_mcms += 1
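
# For reference, a hedged sketch of the downstream mode-coupling computation
# that the Toeplitz parameters feed into. The keyword names follow pspy's
# so_mcm.mcm_and_bbl_spin0and2, but the window and beam inputs (win_T,
# win_pol) are hypothetical placeholders, not variables of this script:
#
# mbb_inv, Bbl = so_mcm.mcm_and_bbl_spin0and2(win1=(win_T, win_pol),
#                                             binning_file=d["binning_file"],
#                                             lmax=lmax,
#                                             type=d["type"],
#                                             l_exact=l_exact,
#                                             l_band=l_band,
#                                             l_toep=l_toep,
#                                             save_file="%s/..." % mcm_dir)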

Example #4

import sys

import numpy as np
import pylab as plt
from pspy import pspy_utils, so_dict

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

surveys = d["surveys"]
fg_array = np.loadtxt(d["fgfile"])

bestfit_dir = "best_fits"
plot_dir = "plots/best_fits/"

pspy_utils.create_directory(bestfit_dir)
pspy_utils.create_directory(plot_dir)

spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]

clth = {}
lth, clth["TT"], clth["EE"], clth["BB"], clth["TE"] = np.loadtxt(
    d["theoryfile"], unpack=True)

# fmt: off
combin = [
    "TT_90x90", "TT_90x150", "TT_150x150", "TE_90x90", "TE_90x150",
    "TE_150x150", "EE_90x90", "EE_90x150", "EE_150x150"
]

fg_dict = {}

Example #5

from pixell import enmap
import time
import scipy.interpolate
import planck_utils
import numpy as np
import sys
from pspy import so_dict, pspy_utils

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

auxMapDir='window'
mcmDir='mcm'
spectraDir='spectra'

ps_model_dir='model'
plot_dir='plot'

pspy_utils.create_directory(ps_model_dir)
pspy_utils.create_directory(plot_dir)

spectra=['TT','TE','TB','ET','BT','EE','EB','BE','BB']
type=d['type']
freqs=d['freqs']
binning_file=d['binning_file']
lmax=d['lmax']
splits=['hm1','hm2']
size=d['noise_binning_size']
experiment='Planck'

lth = np.arange(2, lmax+2)

bl={}
for freq in freqs:
type = d["type"]
binning_file = d["binning_file"]
sim_alm_dtype = d["sim_alm_dtype"]

if sim_alm_dtype == "complex64":
    sim_alm_dtype = np.complex64
elif sim_alm_dtype == "complex128":
    sim_alm_dtype = np.complex128

window_dir = "windows"
mcm_dir = "mcms"
tf_dir = "sim_spectra_for_tf"
bestfit_dir = "best_fits"
ps_model_dir = "noise_model"

pspy_utils.create_directory(tf_dir)

spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]
spin_pairs = ["spin0xspin0", "spin0xspin2", "spin2xspin0", "spin2xspin2"]

# let's list the different frequencies used in the code
freq_list = []
for sv in surveys:
    arrays = d["arrays_%s" % sv]
    for ar in arrays:
        freq_list += [d["nu_eff_%s_%s" % (sv, ar)]]
freq_list = list(dict.fromkeys(freq_list))  # remove duplicates
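# note: dict.fromkeys deduplicates while preserving insertion order,
# unlike set(); e.g. list(dict.fromkeys([150, 90, 150])) == [150, 90]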

id_freq = {}
# assign an integer index to each frequency (used later in the code to generate fg simulations)
for count, freq in enumerate(freq_list):
    id_freq[freq] = count
"""
This script compute all alms squared windows, it's a necessary step of covariance computation.
"""
from pspy import so_dict, so_map, sph_tools, so_spectra, pspy_utils, so_mpi
import numpy as np
import sys

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

surveys = d["surveys"]
lmax = d["lmax"]
niter = d["niter"]
sq_win_alms_dir = "sq_win_alms"

pspy_utils.create_directory(sq_win_alms_dir)

sv1_list, ar1_list, sv2_list, ar2_list = [], [], [], []
n_alms = 0
for id_sv1, sv1 in enumerate(surveys):
    arrays_1 = d["arrays_%s" % sv1]
    for id_ar1, ar1 in enumerate(arrays_1):
        for id_sv2, sv2 in enumerate(surveys):
            arrays_2 = d["arrays_%s" % sv2]
            for id_ar2, ar2 in enumerate(arrays_2):
                # This ensures that we do not repeat redundant computations
                if (id_sv1 == id_sv2) & (id_ar1 > id_ar2): continue
                if (id_sv1 > id_sv2): continue
                sv1_list += [sv1]
                ar1_list += [ar1]
                sv2_list += [sv2]
                ar2_list += [ar2]
                n_alms += 1
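
# the pair list built above is then typically distributed over MPI ranks
# (so_mpi is already imported); a minimal sketch, mirroring the so_mpi usage
# in the first example of this collection:
so_mpi.init(True)
subtasks = so_mpi.taskrange(imin=0, imax=n_alms - 1)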

Example #8

import pylab as plt, numpy as np
from pspy import pspy_utils, so_dict, so_map
import ps_tools
import sys
import pickle
import time
import spectra_plot_utils

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])
run_name = d["run_name"]

spectra_dir = "spectra_%s" % run_name
plot_dir = "plot_%s" % run_name

pspy_utils.create_directory(spectra_dir)
pspy_utils.create_directory(plot_dir)

lmax = d["lmax"]
type = d["type"]
clfile = d["clfile"]
l_exact_array = d["l_exact_array"]
l_band_array = d["l_band_array"]
l_toep_array = d["l_toep_array"]
compute_T_only = d["compute_T_only"]
l_lo, l_hi, lb, delta_l = pspy_utils.read_binning_file("data/binning.dat",
                                                       lmax)

id_sim = d["id_sim"]
result_ps = {}
result_cov = {}

Example #9

import numpy as np, healpy as hp, pylab as plt
from pspy import pspy_utils, so_dict
import os, sys
import wget
import tarfile
import astropy.io.fits as fits

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

# You have to specify the data directory in which the products will be downloaded
data_dir = d['data_dir']
freqs = d['freqs']

pspy_utils.create_directory(data_dir)

# Choose what you want to download; if this is your first try, all of these should be set to True
download_maps = True
download_likelihood_mask = True
download_beams = True

if download_maps:
    print('Download Planck data maps')
    maps_dir = data_dir + '/maps'
    pspy_utils.create_directory(maps_dir)
    # Planck keeps inconsistent notation for the half-missions: sometimes 'halfmission-1', sometimes 'hm1'
    splits = ['halfmission-1', 'halfmission-2']
    for hm in splits:
        for f in freqs:
            url = 'http://pla.esac.esa.int/pla/aio/product-action?MAP.MAP_ID=HFI_SkyMap_%s_2048_R3.01_%s.fits' % (f, hm)
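            # a hedged sketch of the download step, mirroring the wget usage
            # in the EB-mask example later in this collection; the local
            # filename is an assumption
            print(url)
            wget.download(url, '%s/HFI_SkyMap_%s_2048_R3.01_%s.fits' % (maps_dir, f, hm))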

Example #10

'''
This script computes the Planck 2018 mode coupling matrices.
The inputs for the script are the Planck beam and likelihood masks.
To run it:
python get_planck_mcm_Bbl.py global.dict
'''
import numpy as np,healpy as hp,pylab as plt
from pspy import so_dict, so_map,so_mcm,sph_tools,so_spectra,pspy_utils, so_map_preprocessing
import os,sys
from pixell import enmap
import time

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

auxMapDir='window'
mcmDir='mcm'

pspy_utils.create_directory(auxMapDir)
pspy_utils.create_directory(mcmDir)

freqs=d['freqs']
niter=d['niter']
lmax=d['lmax']
type=d['type']
binning_file=d['binning_file']
pixWin=d['pixWin']
splits=d['splits']
experiment='Planck'

print("Compute Planck 2018 mode coupling matrices")

for c1,freq1 in enumerate(freqs):
    

Example #11

from pspy import pspy_utils, so_dict
import numpy as np
import sys

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

experiments = d["experiments"]
binning_file = d["binning_file"]
lmax = d["lmax"]
type = d["type"]

isims = np.arange(100)

bin_lo, bin_hi, bin_c, bin_size = pspy_utils.read_binning_file(
    binning_file, lmax)
n_bins = len(bin_hi)

like_dir = "like_products"
mcm_dir = "mcms"
cov_dir = "covariances"

pspy_utils.create_directory(like_dir)

spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]

spin_pairs = ["spin0xspin0", "spin0xspin2", "spin2xspin0", "spin2xspin2"]

g = open("%s/spectra_list.txt" % like_dir, mode="w")

for id_exp1, exp1 in enumerate(experiments):
    freqs1 = d["freqs_%s" % exp1]
    for id_f1, f1 in enumerate(freqs1):
        for id_exp2, exp2 in enumerate(experiments):
            freqs2 = d["freqs_%s" % exp2]
            for id_f2, f2 in enumerate(freqs2):

                if (id_exp1 == id_exp2) & (id_f1 > id_f2): continue

Example #12

"""
This script computes the window functions and the mode coupling matrices
for different frequency channels of different CMB experiments and writes them to disk.
"""
from pspy import so_map, so_window, so_mcm, pspy_utils, so_dict
import healpy as hp
import numpy as np
import pylab as plt
import sys

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

window_dir = "windows"
mcm_dir = "mcms"
plot_dir = "plots/windows/"

pspy_utils.create_directory(window_dir)
pspy_utils.create_directory(mcm_dir)
pspy_utils.create_directory(plot_dir)

experiments = d["experiments"]
lmax = d["lmax"]
lmax_mcm = d["lmax_mcm"]

# The first step of this code is to generate the window functions for the different frequency channels
# of the different experiments.

print("Geneating window functions")

for exp in experiments:
    freqs = d["freqs_%s" % exp]

Example #13

from pspy import so_dict, pspy_utils
import sys

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

surveys = d["surveys"]


# Here enter the folder where the actual run is
run_dir = ""
bestfit_dir = "%s/best_fits" % run_dir
ps_model_dir = "%s/noise_model" % run_dir

dummy_data_dir = "dummy_data"

lmax = 2000
ra0, ra1, dec0, dec1 = -10, 10, -10, 10
res = 3

pspy_utils.create_directory(dummy_data_dir)

# let's list the different frequencies used in the code
freq_list = []
for sv in surveys:
    arrays = d["arrays_%s" % sv]
    for ar in arrays:
        freq_list += [d["nu_eff_%s_%s" % (sv, ar)]]
freq_list = list(dict.fromkeys(freq_list))  # remove duplicates

id_freq = {}
# assign an integer index to each frequency (used later in the code to generate fg simulations)
for count, freq in enumerate(freq_list):
    id_freq[freq] = count

# we read the cmb and fg best-fit power spectra

Example #14

from pspy import so_dict, so_map, pspy_utils
from pixell import powspec
import numpy as np
import planck_utils
import sys

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

type = d['type']
binning_file = d['binning_file']
remove_mono_dipo_T = d['remove_mono_dipo_T']
remove_mono_dipo_pol = d['remove_mono_dipo_pol']
experiment = 'Planck'
splits = d['splits']
include_sys = d['include_systematics']
include_foregrounds = d['include_foregrounds']
use_noise_th = d['use_noise_th']

if include_sys:
    simSpectraDir = 'sim_spectra_syst'
else:
    simSpectraDir = 'sim_spectra'

pspy_utils.create_directory(simSpectraDir)

nside = 2048
ncomp = 3

template = so_map.healpix_template(ncomp, nside)

if include_foregrounds:
    theoryFgDir = 'theory_and_fg'  # same folder name as in the theory/fg example below
    ps_th = np.load('%s/signal_fg_matrix.npy' % theoryFgDir)
else:
    ps_th = powspec.read_spectrum(d['theoryfile'])[:ncomp, :ncomp]

nSplits = len(splits)

l, Nl_T, Nl_P = planck_utils.get_noise_matrix_spin0and2(
    ps_model_dir,

Example #15

from itertools import combinations
from itertools import combinations_with_replacement as cwr
from pspy import pspy_utils, so_dict
import numpy as np
import os, sys

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

surveys = d["surveys"]
n_surveys = len(surveys)
arrays = [d["arrays_%s" % survey] for survey in surveys]

spec_dir = "../../spectra/"
cov_dir = "../../covariances/"

output_dir = "../outputs/"
output_plot_dir = os.path.join(output_dir, "plots/")
output_data_dir = os.path.join(output_dir, "data/")
pspy_utils.create_directory(output_plot_dir)
pspy_utils.create_directory(output_data_dir)

spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]
modes = ["TT", "TE", "ET", "EE"]

#### SEASON NULL TESTS ####
if n_surveys < 2:
    sys.exit("Cannot make season null tests with only 1 season.")

spectra_names = [[m for m in cwr(arrays[i], 2)] for i in range(n_surveys)]
survey_differences = [m for m in combinations(surveys, 2)]
index_differences = [m for m in combinations(np.arange(n_surveys), 2)]

chi2_array = {}
chi2_dict = {}

Example #16

import sys

import numpy as np
from pspy import so_dict, so_map, pspy_utils

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

n_sims = d["n_sims"]
beamed_clfile = d["beamed_clfile"]
res_arcmin = d["res_arcmin"]
name_split = d["name_split"]
n_splits = d["n_splits"]
rms_uKarcmin_T = d["rms_uKarcmin_T"] * np.sqrt(n_splits)
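# note: each split is generated sqrt(n_splits) times noisier than the target
# coadd sensitivity, so that averaging the n_splits splits recovers
# d["rms_uKarcmin_T"]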
ra0, ra1, dec0, dec1 = d["ra0"], d["ra1"], d["dec0"], d["dec1"]
run_name = d["run_name"]

sim_dir = "sims_%s" % run_name
pspy_utils.create_directory(sim_dir)

eps = 1
template_car = so_map.car_template(3, ra0 - eps, ra1 + eps, dec0 - eps,
                                   dec1 + eps, res_arcmin)

for iii in range(n_sims):
    print("generate sim %03d" % iii)
    cmb = template_car.synfast(beamed_clfile)
    for i in range(n_splits):
        name = "sim_%03d_%s%d" % (iii, name_split, i)
        split = cmb.copy()
        noise = so_map.white_noise(split, rms_uKarcmin_T=rms_uKarcmin_T)
        split.data += noise.data
        split.write_map("%s/%s.fits" % (sim_dir, name))
        split.plot(file_name="%s/%s" % (sim_dir, name))

Example #17

import numpy as np
import pylab as plt
from pspy import so_dict, so_spectra, pspy_utils
import os, sys
import planck_utils
import matplotlib as mpl
label_size = 14
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

figure_dir = 'figures'
pspy_utils.create_directory(figure_dir)

spectraDir = 'spectra'

if d['use_ffp10']:
    mc_dir = 'monteCarlo_ffp10'
    plot_name = 'all_cross_ffp10'
else:
    mc_dir = 'monteCarlo'
    plot_name = 'all_cross'

spectra = ['TT', 'TE', 'TB', 'ET', 'BT', 'EE', 'EB', 'BE', 'BB']
binning_file = d['binning_file']
freqs = d['freqs']
lthmax = 1500

Example #18

import numpy as np
from pspy import pspy_utils, so_dict
import sys
import wget
import tarfile
import astropy.io.fits as fits

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

# You have to specify the data directory in which the products will be downloaded
data_dir = d["data_dir"]
freqs = d["freqs"]

pspy_utils.create_directory(data_dir)

EB_mask_dir = data_dir + "/EB_masks"
pspy_utils.create_directory(EB_mask_dir)
# Planck keeps inconsistent notation for the half-missions: sometimes 'halfmission-1', sometimes 'hm1'
for f in freqs:
    if f == "143": continue
    url = "http://pla.esac.esa.int/pla/aio/product-action?MAP.MAP_ID=HFI_BiasMap_%s-CO-noiseRatio_2048_R3.00_full.fits" % (
        f)
    print(url)
    wget.download(
        url, "%s/HFI_BiasMap_%s-CO-noiseRatio_2048_R3.00_full.fits" %
        (EB_mask_dir, f))

url = "http://pla.esac.esa.int/pla/aio/product-action?MAP.MAP_ID=HFI_Mask_PointSrc_2048_R2.00.fits"
print(url)

Example #19

from pspy import so_dict, so_map, pspy_utils
import sys

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

# the apodisation length of the point source mask in degrees
apod_pts_source_degree = d["apod_pts_source_degree"]
# the apodisation length of the survey x galactic x cross-linking mask
apod_survey_degree = d["apod_survey_degree"]
# we skip the edges of the survey, where the noise is very difficult to model
skip_from_edges_degree = d["skip_from_edges_degree"]
# the cross-linking threshold above which the data are kept
cross_link_threshold = d["cross_link_threshold"]

window_dir = "windows"
surveys = d["surveys"]

pspy_utils.create_directory(window_dir)
ps_mask = so_map.read_map(d["ps_mask"])
gal_mask = so_map.read_map(d["gal_mask"])

patch = None
if "patch" in d:
    patch = so_map.read_map(d["patch"])


# here we list the different windows that need to be computed; we will then run an MPI loop over this list
sv_list, ar_list = [], []
n_wins = 0
for sv in surveys:
    arrays = d["arrays_%s" % sv]
    for ar in arrays:
        sv_list += [sv]

Example #20

from pspy import so_dict, pspy_utils, so_spectra, so_cov
from itertools import combinations_with_replacement as cwr
from itertools import product
import data_analysis_utils
import numpy as np
import pylab as plt
import sys, os

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

cov_dir = "covariances"
spec_dir = "spectra"
like_product_dir = "like_product"

pspy_utils.create_directory(like_product_dir)

surveys = d["surveys"]
type = d["type"]
lmax = d["lmax"]
binning_file = d["binning_file"]

bin_lo, bin_hi, lb, bin_size = pspy_utils.read_binning_file(binning_file, lmax)
n_bins = len(bin_hi)

# we will need the covariance matrix and the projection matrix
cov_mat = np.load("%s/truncated_analytic_cov.npy" % cov_dir)
P_mat = np.load("%s/projection_matrix.npy" % cov_dir)
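
# a typical use of such a projection matrix, as a sketch: project the
# multi-frequency covariance onto the combined-spectra basis (whether this
# script applies P_mat exactly this way downstream is an assumption)
proj_cov = np.dot(P_mat, np.dot(cov_mat, P_mat.T))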

# let's get a list of all frequencies we plan to study
freq_list = []

Example #21


from pspy import so_dict, pspy_utils
import sys

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

experiments = d["experiments"]
type = d["type"]
iStart = d["iStart"]
iStop = d["iStop"] + 1

mcm_dir = "mcms"
cov_dir = "covariances"
spec_dir = "spectra"
sacc_dir = "like_products_sacc"

pspy_utils.create_directory(sacc_dir)

pols = ["T", "E", "B"]
map_types = {"T": "0", "E": "e", "B": "b"}


def get_x_iterator():
    for id_ea, ea in enumerate(experiments):
        freqs_a = d["freqs_%s" % ea]
        for id_fa, fa in enumerate(freqs_a):
            for id_eb, eb in enumerate(experiments):
                freqs_b = d["freqs_%s" % eb]
                for id_fb, fb in enumerate(freqs_b):
                    if (id_ea == id_eb) & (id_fa > id_fb): continue
                    if (id_ea > id_eb): continue
                    for ipa, pa in enumerate(pols):

Example #22

import matplotlib
matplotlib.use('Agg')
import numpy as np, healpy as hp, pylab as plt
from pspy import so_dict, so_map, so_mcm, sph_tools, so_spectra, pspy_utils, so_map_preprocessing
import os, sys
from pixell import enmap, powspec
import time
import planck_utils

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

theoryFgDir = 'theory_and_fg'

pspy_utils.create_directory(theoryFgDir)
freqs = d['freqs']
spectra = ['TT', 'TE', 'TB', 'ET', 'BT', 'EE', 'EB', 'BE', 'BB']

freq_pairs = []
for c1, freq1 in enumerate(freqs):
    for c2, freq2 in enumerate(freqs):
        if c1 > c2: continue
        freq_pairs += [[freq1, freq2]]
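
# equivalent, more compact form of the pair loop above, using the cwr alias
# imported in other examples of this collection:
# freq_pairs = [list(pair) for pair in cwr(freqs, 2)]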

clth = {}
fg = {}

lth, cl_TT, cl_EE, cl_BB, cl_TE = np.loadtxt(
    'theory_file/cosmo2017_10K_acc3_lensedCls.dat', unpack=True)
clth['TT'] = cl_TT
clth['TE'] = cl_TE

Example #23

import time
from pspy import so_dict, so_map, pspy_utils
import sys

# We start by reading the info in the dictionary
d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

experiment=d['experiment']
content=d['content']
name=d['combinaison_name']

plot_dir='maps_plot'
combined_map_dir='combined_maps'
survey_mask_dir='survey_masks'

# Create three folders, one for the plot of the simulations, one for storing the combined maps and one for the survey masks
pspy_utils.create_directory(plot_dir)
pspy_utils.create_directory(combined_map_dir)
pspy_utils.create_directory(survey_mask_dir)


# We loop over all the experiments we want to consider
for exp in experiment:
    # Each experiment has its associated nside and frequency list
    nside=d['nside_%s'%exp]
    freqs=d['freq_%s'%exp]
    for count,freq in enumerate(freqs):
        # We create a template for each frequency and add all components present in 'content'
        map_all=so_map.healpix_template(ncomp=3,nside=nside)
        for cont in content:
            maps_list= d['%s_maps'%cont]
            map=maps_list[count]

Example #24

'''
This script computes the Planck 2018 mode coupling matrices.
The inputs for the script are the Planck beam and likelihood masks.
To run it:
python get_planck_mcm_Bbl.py global.dict
'''
import numpy as np
import healpy as hp
from pspy import so_dict, so_map, so_mcm, pspy_utils, so_mpi
import sys

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

windows_dir = "windows"
mcm_dir = "mcms"

pspy_utils.create_directory(windows_dir)
pspy_utils.create_directory(mcm_dir)

freqs = d["freqs"]
niter = d["niter"]
lmax = d["lmax"]
type = d["type"]
binning_file = d["binning_file"]
pixwin = d["pixwin"]
splits = d["splits"]
experiment = "Planck"

print("Compute Planck 2018 mode coupling matrices")

freq1_list, hm1_list, freq2_list, hm2_list = [], [], [], []
n_mcms = 0

Example #25

from pspy import pspy_utils, so_dict, so_spectra
import numpy as np
import sys

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

type = d["type"]
surveys = d["surveys"]
iStart = d["iStart"]
iStop = d["iStop"]

spec_dir = "sim_spectra"
cov_dir = "covariances"

pspy_utils.create_directory(cov_dir)

spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]

spec_list = []
for id_sv1, sv1 in enumerate(surveys):
    arrays_1 = d["arrays_%s" % sv1]
    for id_ar1, ar1 in enumerate(arrays_1):
        for id_sv2, sv2 in enumerate(surveys):
            arrays_2 = d["arrays_%s" % sv2]
            for id_ar2, ar2 in enumerate(arrays_2):
                if (id_sv1 == id_sv2) & (id_ar1 > id_ar2): continue
                if (id_sv1 > id_sv2): continue
                spec_list += ["%s_%sx%s_%s" % (sv1, ar1, sv2, ar2)]

for sid1, spec1 in enumerate(spec_list):

Example #26

"""
This script generates fake beams and transfer function.
to run it:
python systematic_model.py
"""

import numpy as np, pylab as plt, matplotlib as mpl
from pspy import pspy_utils

syst_dir = 'systematics'
pspy_utils.create_directory(syst_dir)

FWHM = 7.30
FWHM_syst = 0.95 * FWHM

beam_FWHM_rad = np.deg2rad(FWHM_syst) / 60
beam = beam_FWHM_rad / np.sqrt(8 * np.log(2))
l = np.arange(2, 5000)
bl = np.exp(-l * (l + 1) * beam**2 / 2.)
np.savetxt('%s/beam.dat' % (syst_dir), np.transpose([l, bl]))
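
# this is the Gaussian beam harmonic transform b_l = exp(-l(l+1) sigma^2 / 2),
# with sigma = FWHM_rad / sqrt(8 ln 2); FWHM is quoted in arcminutes, hence
# the extra factor of 60 in the conversion above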

lmax_array = [400, 200]
min_TF_array = [0.7, 0.8]

cal = 1.
pol_eff = 1.

cal_array = [cal, cal * pol_eff]
name_array = ['T', 'pol']

for name, lmax, min_TF, cal in zip(name_array, lmax_array, min_TF_array,
                                   cal_array):
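    # a hedged sketch of the fake transfer function itself: a linear rise from
    # min_TF at l = 2 up to 1 at l = lmax, flat above. Both the functional form
    # and the output filename are assumptions, not the original script's code.
    tf = np.minimum(1.0, min_TF + (1 - min_TF) * (l - 2) / (lmax - 2))
    np.savetxt('%s/tf_%s.dat' % (syst_dir, name), np.transpose([l, tf * cal]))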

Example #27

freqs = d["freqs"]
lmax = d["lmax"]
lrange = d["lrange"]
bestfit_dir = "best_fits"


plot_dir = "plots/sim_analysis/"

if d["use_ffp10"] == True:
    sim_spectra_dir = "sim_spectra_ffp10"
    mc_dir = "montecarlo_ffp10"
else:
    sim_spectra_dir = "sim_spectra"
    mc_dir = "montecarlo"

pspy_utils.create_directory(mc_dir)
pspy_utils.create_directory(plot_dir)

freq_pairs = []
for cross in cwr(freqs, 2):
    freq_pairs += [[cross[0], cross[1]]]

vec_list = []
bin_range = {}

for iii in range(iStart, iStop):
    vec = []
    bin_start = 0
    bin_stop = 0
    
    for spec in ["TT", "EE", "TE"]:

Example #28

from pspy import pspy_utils, so_dict
import planck_utils
import numpy as np
import sys

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

clfile = d["clfile"]
binning_file = d["binning_file"]
lmax = d["lmax"]

lth, Clth = pspy_utils.ps_lensed_theory_to_dict(clfile,
                                                output_type="Cl",
                                                lmax=lmax,
                                                start_at_zero=False)

Cb_th = {}
lb, Cb_th["EE"] = planck_utils.binning(lth, Clth["EE"], lmax, binning_file)
lb, Cb_th["BB"] = planck_utils.binning(lth, Clth["BB"], lmax, binning_file)

if d["use_ffp10"]:
    mc_dir = "montecarlo_ffp10_larger_bin"
else:
    mc_dir = "montecarlo"

chain_dir = "chains"
pspy_utils.create_directory(chain_dir)

lmin, lmax = 100, 1500
idx = np.where((lb >= lmin) & (lb <= lmax))
lb, Cb_th["EE"], Cb_th["BB"] = lb[idx], Cb_th["EE"][idx], Cb_th["BB"][idx]

nbins = len(lb)

Cb_th_array = np.zeros((2, nbins))
Cb_th_array[0, :] = Cb_th["EE"]
Cb_th_array[1, :] = Cb_th["BB"]

# first we read the data
cov = np.load("%s/mc_covariance_EB.npy" % mc_dir)
size_cov = cov.shape[0]
cov_EB = cov[int(2 * size_cov / 3):, int(2 * size_cov / 3):]
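# the Monte-Carlo covariance presumably stacks three equal blocks (EE, BB, EB);
# the slice above keeps the last third, i.e. the EB block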
"""
#import matplotlib
#matplotlib.use("Agg")
from pspy import pspy_utils, so_dict
import numpy as np
import pylab as plt
from itertools import combinations_with_replacement as cwr
import os
import sys
import so_noise_calculator_public_20180822 as noise_calc
from copy import deepcopy

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

pspy_utils.create_directory("sim_data")

plot_dir = "plots/model/"
pspy_utils.create_directory(plot_dir)

linestyle = {}
linestyle["LAT"] = "solid"
linestyle["Planck"] = "dashed"

# We start with SO; we have to specify a sensitivity mode (2: goal, 1: baseline) and f_sky,
# both parameters are specified in the dictionary

sensitivity_mode = d["sensitivity_mode"]
f_sky_LAT = d["f_sky_LAT"]
freqs = {}
freqs["LAT"] = ["27", "39", "93", "145", "225", "280"]

Example #30

#import matplotlib
#matplotlib.use('Agg')
import numpy as np
import pylab as plt
from pspy import so_dict, so_map, so_mcm, sph_tools, so_spectra, pspy_utils, so_map_preprocessing, so_mpi
import os, sys
from pixell import enmap
import time

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])

spectraDir = 'spectra'
mcmDir = 'mcm'

pspy_utils.create_directory(spectraDir)
pspy_utils.create_directory(mcmDir)

spectra = ['TT', 'TE', 'TB', 'ET', 'BT', 'EE', 'EB', 'BE', 'BB']

arrays = d['arrays']
niter = d['niter']
lmax = 3000
type = d['type']
binning_file = d['binning_file']
theoryfile = d['theoryfile']
fsky = {}
fsky['pa1'] = 'fsky0.01081284'
fsky['pa2'] = 'fsky0.01071187'

apo = so_map.read_map(d['apo_path'])