def _get_sky(tag):
    np.random.seed(0)
    stokes, nside, nsidepar, components, mask, instrument = tag.split('__')
    n_stokes = _get_n_stokes(stokes)
    nside = _get_nside(nside)
    assert len(nside) == 1
    nside = nside[0]
    nsidepar = _get_nside(nsidepar)
    components = _get_component(components)
    mask = _get_mask(mask, nside)
    instrument = _get_instrument(instrument, nside)
    try:
        freqs = instrument.Frequencies
    except AttributeError:
        freqs = instrument['Frequencies']

    x0 = [x for c in components for x in c.defaults]
    if max(nsidepar) and len(x0):
        if len(nsidepar) == 1:
            nsidepar = nsidepar * len(x0)
        for i in range(len(x0)):
            # NOTE: spectral parameters are the default +-15%.
            # This bound was tweaked to pass the tests: the problematic ones
            # are those in which we fit for both a powerlaw and a
            # curved-powerlaw.
            factor = np.linspace(0.85, 1.15, _my_nside2npix(nsidepar[i]))
            np.random.shuffle(factor)
            x0[i] = x0[i] * factor
        ux0 = [_my_ud_grade(x0_i, nside) for x0_i in x0]
        A = MixingMatrix(*components).eval(freqs, *ux0)
        if stokes in 'IP':
            A = A[:, np.newaxis]
    else:
        A = MixingMatrix(*components).eval(freqs, *x0)
        x0 = np.array(x0)

    n_pix = hp.nside2npix(nside)
    n_comp = len(components)
    shape = (n_pix, n_comp) if stokes == 'N' else (n_pix, n_stokes, n_comp)
    s = np.linspace(10., 20., n_pix * n_stokes * n_comp)
    np.random.shuffle(s)
    s = s.reshape(shape)
    data = _mv(A, s)

    data[mask] = hp.UNSEEN
    s[mask] = hp.UNSEEN
    if max(nsidepar) and len(x0):
        for i in range(len(x0)):
            x_mask = _my_ud_grade(mask.astype(float), nsidepar[i]) == 1.
            x0[i][..., x_mask] = hp.UNSEEN
    return data.T, s.T, x0
def _get_sky(tag):
    np.random.seed(0)
    stokes, nside, nsidepar, components, mask, instrument = tag.split('__')
    n_stokes = _get_n_stokes(stokes)
    nside = _get_nside(nside)
    nsidepar = _get_nside(nsidepar)
    components = _get_component(components)
    mask = _get_mask(mask, nside)
    instrument = _get_instrument(instrument, nside)
    try:
        freqs = instrument.Frequencies
    except AttributeError:
        freqs = instrument['Frequencies']

    x0 = [x for c in components for x in c.defaults]
    if nsidepar and len(x0):
        for i in range(len(x0)):
            factor = np.linspace(0.8, 1.2, hp.nside2npix(nsidepar))
            np.random.shuffle(factor)
            x0[i] = x0[i] * factor
        ux0 = [hp.ud_grade(x0_i, nside) for x0_i in x0]
        A = MixingMatrix(*components).eval(freqs, *ux0)
        if stokes in 'IP':
            A = A[:, np.newaxis]
    else:
        A = MixingMatrix(*components).eval(freqs, *x0)
    x0 = np.array(x0)

    n_pix = hp.nside2npix(nside)
    n_comp = len(components)
    shape = (n_pix, n_comp) if stokes == 'N' else (n_pix, n_stokes, n_comp)
    s = np.linspace(10., 20., n_pix * n_stokes * n_comp)
    np.random.shuffle(s)
    s = s.reshape(shape)
    data = _mv(A, s)

    data[mask] = hp.UNSEEN
    s[mask] = hp.UNSEEN
    if nsidepar and len(x0):
        x_mask = hp.ud_grade(mask.astype(float), nsidepar) == 1.
        x0[..., x_mask] = hp.UNSEEN
    return data.T, s.T, x0
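# --- Illustration (not from the original source) ---------------------------
# Minimal sketch of how a test tag decomposes into its six fields. The
# double-underscore layout comes from _get_sky above; the particular tokens
# in the example tag are assumptions, since the vocabulary accepted by
# _get_n_stokes, _get_nside, etc. is defined elsewhere.
example_tag = "P__32__8__cmb_dust_sync__nomask__litebird"
stokes_, nside_, nsidepar_, components_, mask_, instrument_ = example_tag.split('__')
print(stokes_, nside_, nsidepar_, components_, mask_, instrument_)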
class TestAlgebraPhysical(unittest.TestCase):

    def setUp(self):
        self.DX = 5e-4  # NOTE: this is a bit fine-tuned
        np.random.seed(0)
        self.n_freq = 6
        self.nu = np.logspace(1, 2.5, self.n_freq)
        self.n_stokes = 3
        self.n_pixels = 2
        self.components = [cm.CMB(), cm.Dust(200.), cm.Synchrotron(70.)]
        self.mm = MixingMatrix(*(self.components))
        self.params = [1.54, 20, -3]
        self.A = self.mm.eval(self.nu, *(self.params))
        self.A_dB = self.mm.diff(self.nu, *(self.params))
        self.A_dBdB = self.mm.diff_diff(self.nu, *(self.params))
        self.invN = uniform(
            size=(self.n_pixels, self.n_stokes, self.n_freq, self.n_freq))
        self.invN += _T(self.invN)
        self.invN += 10 * np.eye(self.n_freq)
        self.invN *= 10

    def test_W_dB_invN(self):
        W_dB_analytic = W_dB(self.A, self.A_dB, self.mm.comp_of_dB, self.invN)
        W_params = W(self.A, self.invN)
        for i in range(len(self.params)):
            diff_params = [p for p in self.params]
            diff_params[i] = self.DX + diff_params[i]
            diff_A = self.mm.eval(self.nu, *diff_params)
            diff_W = W(diff_A, self.invN)
            W_dB_numerical = (diff_W - W_params) / self.DX
            aac(W_dB_numerical, W_dB_analytic[i], rtol=1e-3)

    def test_W_dB(self):
        W_dB_analytic = W_dB(self.A, self.A_dB, self.mm.comp_of_dB)
        W_params = W(self.A)
        for i in range(len(self.params)):
            diff_params = [p for p in self.params]
            diff_params[i] = self.DX + diff_params[i]
            diff_A = self.mm.eval(self.nu, *diff_params)
            diff_W = W(diff_A)
            W_dB_numerical = (diff_W - W_params) / self.DX
            aac(W_dB_numerical, W_dB_analytic[i], rtol=1e-3)

    def test_P_dBdB(self):
        P_dBdB_analytic = P_dBdB(self.A, self.A_dB, self.A_dBdB,
                                 self.mm.comp_of_dB)

        def get_P_displaced(i, j):
            def P_displaced(i_step, j_step):
                diff_params = [p for p in self.params]
                diff_params[i] = i_step * self.DX + diff_params[i]
                diff_params[j] = j_step * self.DX + diff_params[j]
                diff_A = self.mm.eval(self.nu, *diff_params)
                return P(diff_A)
            return P_displaced

        for i in range(len(self.params)):
            for j in range(len(self.params)):
                Pdx = get_P_displaced(i, j)
                if i == j:
                    P_dBdB_numerical = (
                        (-2 * Pdx(0, 0) + Pdx(+1, 0) + Pdx(-1, 0))
                        / self.DX**2)
                else:
                    P_dBdB_numerical = (
                        (Pdx(1, 1) - Pdx(+1, -1) - Pdx(-1, 1) + Pdx(-1, -1))
                        / (4 * self.DX**2))
                aac(P_dBdB_numerical, P_dBdB_analytic[i][j], rtol=1.5e-1)

    def test_P_dBdB_invN(self):
        invN = self.invN[0, 0]
        P_dBdB_analytic = P_dBdB(self.A, self.A_dB, self.A_dBdB,
                                 self.mm.comp_of_dB, invN)

        def get_P_displaced(i, j):
            def P_displaced(i_step, j_step):
                diff_params = [p for p in self.params]
                diff_params[i] = i_step * self.DX + diff_params[i]
                diff_params[j] = j_step * self.DX + diff_params[j]
                diff_A = self.mm.eval(self.nu, *diff_params)
                return P(diff_A, invN)
            return P_displaced

        for i in range(len(self.params)):
            for j in range(len(self.params)):
                Pdx = get_P_displaced(i, j)
                if i == j:
                    P_dBdB_numerical = (
                        (-2 * Pdx(0, 0) + Pdx(+1, 0) + Pdx(-1, 0))
                        / self.DX**2)
                else:
                    P_dBdB_numerical = (
                        (Pdx(1, 1) - Pdx(+1, -1) - Pdx(-1, 1) + Pdx(-1, -1))
                        / (4 * self.DX**2))
                aac(P_dBdB_numerical, P_dBdB_analytic[i][j], rtol=3.0e-1)

    def test_W_dBdB(self):
        W_dBdB_analytic = W_dBdB(self.A, self.A_dB, self.A_dBdB,
                                 self.mm.comp_of_dB)

        def get_W_displaced(i, j):
            def W_displaced(i_step, j_step):
                diff_params = [p for p in self.params]
                diff_params[i] = i_step * self.DX + diff_params[i]
                diff_params[j] = j_step * self.DX + diff_params[j]
                diff_A = self.mm.eval(self.nu, *diff_params)
                return W(diff_A)
            return W_displaced

        for i in range(len(self.params)):
            for j in range(len(self.params)):
                Wdx = get_W_displaced(i, j)
                if i == j:
                    W_dBdB_numerical = (
                        (-2 * Wdx(0, 0) + Wdx(+1, 0) + Wdx(-1, 0))
                        / self.DX**2)
                else:
                    W_dBdB_numerical = (
                        (Wdx(1, 1) - Wdx(+1, -1) - Wdx(-1, 1) + Wdx(-1, -1))
                        / (4 * self.DX**2))
                aac(W_dBdB_numerical, W_dBdB_analytic[i][j], rtol=1e-1)

    def test_W_dBdB_invN(self):
        W_dBdB_analytic = W_dBdB(self.A, self.A_dB, self.A_dBdB,
                                 self.mm.comp_of_dB, self.invN)

        def get_W_displaced(i, j):
            def W_displaced(i_step, j_step):
                diff_params = [p for p in self.params]
                diff_params[i] = i_step * self.DX + diff_params[i]
                diff_params[j] = j_step * self.DX + diff_params[j]
                diff_A = self.mm.eval(self.nu, *diff_params)
                return W(diff_A, self.invN)
            return W_displaced

        for i in range(len(self.params)):
            for j in range(len(self.params)):
                Wdx = get_W_displaced(i, j)
                if i == j:
                    W_dBdB_numerical = (
                        (-2 * Wdx(0, 0) + Wdx(+1, 0) + Wdx(-1, 0))
                        / self.DX**2)
                else:
                    W_dBdB_numerical = (
                        (Wdx(1, 1) - Wdx(+1, -1) - Wdx(-1, 1) + Wdx(-1, -1))
                        / (4 * self.DX**2))
                aac(W_dBdB_numerical, W_dBdB_analytic[i][j], rtol=2.5e-1)
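# --- Illustration (not from the original source) ---------------------------
# The second-derivative checks above use standard central-difference
# stencils. Self-contained toy example on an analytic function (the function
# _toy_f and the step _h below are illustrative only):
import numpy as np

def _toy_f(x, y):
    return np.sin(x) * np.exp(y)

_h, _x0, _y0 = 5e-4, 0.3, -0.2
# diagonal term: d2f/dx2 ~ (f(x+h) - 2 f(x) + f(x-h)) / h^2
_d2_xx = (_toy_f(_x0 + _h, _y0) - 2 * _toy_f(_x0, _y0)
          + _toy_f(_x0 - _h, _y0)) / _h**2
# mixed term: d2f/dxdy ~ (f(++) - f(+-) - f(-+) + f(--)) / (4 h^2)
_d2_xy = (_toy_f(_x0 + _h, _y0 + _h) - _toy_f(_x0 + _h, _y0 - _h)
          - _toy_f(_x0 - _h, _y0 + _h) + _toy_f(_x0 - _h, _y0 - _h)) / (4 * _h**2)
print(_d2_xx, -np.sin(_x0) * np.exp(_y0))  # analytic d2f/dx2
print(_d2_xy, np.cos(_x0) * np.exp(_y0))   # analytic d2f/dxdy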
COSMO_PARAMS_NAMES = [
    "n_s", "omega_b", "omega_cdm", "100*theta_s", "ln10^{10}A_s", "tau_reio"
]
COSMO_PARAMS_MEANS = [0.9665, 0.02242, 0.11933, 1.04101, 3.047, 0.0561]
COSMO_PARAMS_SIGMA = [0.0038, 0.00014, 0.00091, 0.00029, 0.014, 0.0071]
LiteBIRD_sensitivities = np.array([
    36.1, 19.6, 20.2, 11.3, 10.3, 8.4, 7.0, 5.8, 4.7, 7.0, 5.8, 8.0, 9.1,
    11.4, 19.6
])

Qs, Us, sigma_Qs, sigma_Us = aggregate_by_pixels_params(
    get_pixels_params(NSIDE))
instrument = pysm.Instrument(get_instrument('litebird', NSIDE))
components = [CMB(), Dust(150.), Synchrotron(150.)]
mixing_matrix = MixingMatrix(*components)
mixing_matrix_evaluator = mixing_matrix.evaluator(instrument.Frequencies)


def noise_covariance_in_freq(nside):
    cov = LiteBIRD_sensitivities**2 / hp.nside2resol(nside, arcmin=True)**2
    return cov


noise_covar_one_pix = noise_covariance_in_freq(NSIDE)


def sample_mixing_matrix_parallel(betas):
    return mixing_matrix_evaluator(betas)[:, 1:]
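# --- Illustration (not from the original source) ---------------------------
# Hedged usage sketch of noise_covariance_in_freq: per-frequency white-noise
# variance per pixel, assuming the LiteBIRD sensitivities above are
# polarisation depths in uK.arcmin. The NSIDE value is illustrative only.
import healpy as hp
import numpy as np

_sens = np.array([36.1, 19.6, 20.2, 11.3, 10.3, 8.4, 7.0, 5.8, 4.7, 7.0,
                  5.8, 8.0, 9.1, 11.4, 19.6])
_nside = 16
_pix_res_arcmin = hp.nside2resol(_nside, arcmin=True)
_noise_var = _sens**2 / _pix_res_arcmin**2  # uK^2 per pixel, one value per band
print(_noise_var)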
class Forecast(object):

    def __init__(self, instrument, components, d_fgs, lmin, lmax):
        self.instrument = standardize_instrument(instrument)
        self.nside = hp.npix2nside(d_fgs.shape[-1])
        self.n_stokes = d_fgs.shape[1]
        self.n_freqs = d_fgs.shape[0]
        self.invN = np.diag(
            hp.nside2resol(self.nside, arcmin=True) / (instrument.depth_p))**2
        self.mask = d_fgs[0, 0, :] != 0.
        self.fsky = self.mask.astype(float).sum() / self.mask.size
        self.ell = np.arange(lmin, lmax + 1)
        self.lmin = lmin
        self.lmax = lmax
        self.d_fgs = d_fgs
        #print('fsky = ', self.fsky)

        print('======= ESTIMATION OF SPECTRAL PARAMETERS =======')
        self.A = MixingMatrix(*components)
        self.A_ev = self.A.evaluator(instrument.frequency)
        self.A_dB_ev = self.A.diff_evaluator(instrument.frequency)
        x0 = np.array([x for c in components for x in c.defaults])
        if self.n_stokes == 3:  # if T and P were provided, extract P
            d_comp_sep = d_fgs[:, 1:, :]
        else:
            d_comp_sep = d_fgs
        self.res = comp_sep(self.A_ev, d_comp_sep.T, self.invN, self.A_dB_ev,
                            self.A.comp_of_dB, x0)
        self.res.params = self.A.params
        #res.s = res.s.T
        self.A_maxL = self.A_ev(self.res.x)
        self.A_dB_maxL = self.A_dB_ev(self.res.x)
        self.A_dBdB_maxL = self.A.diff_diff_evaluator(
            self.instrument.frequency)(self.res.x)
        print('res.x = ', self.res.x)

    def _get_Cl_noise(self):
        i_cmb = self.A.components.index('CMB')
        try:
            bl = np.array([
                hp.gauss_beam(np.radians(b / 60.), lmax=self.lmax)
                for b in self.instrument.fwhm
            ])
        except AttributeError:
            bl = np.ones((len(self.instrument.frequency), self.lmax + 1))
        nl = (bl / np.radians(self.instrument.depth_p / 60.)[:, np.newaxis])**2
        AtNA = np.einsum('fi, fl, fj -> lij', self.A_maxL, nl, self.A_maxL)
        inv_AtNA = np.linalg.inv(AtNA)
        return inv_AtNA.swapaxes(-3, -1)[i_cmb, i_cmb, self.lmin:]

    def _get_cls_fg(self):
        print('======= COMPUTATION OF CL_FGS =======')
        if self.n_stokes == 3:
            d_spectra = self.d_fgs
        else:
            # Only P is provided, add T for map2alm
            d_spectra = np.zeros((self.n_freqs, 3, self.d_fgs.shape[2]),
                                 dtype=self.d_fgs.dtype)
            d_spectra[:, 1:] = self.d_fgs
        # Compute cross-spectra
        almBs = [
            hp.map2alm(freq_map, lmax=self.lmax, iter=10)[2]
            for freq_map in d_spectra
        ]
        Cl_fgs = np.zeros((self.n_freqs, self.n_freqs, self.lmax + 1),
                          dtype=self.d_fgs.dtype)
        for f1 in range(self.n_freqs):
            for f2 in range(self.n_freqs):
                if f1 > f2:
                    Cl_fgs[f1, f2] = Cl_fgs[f2, f1]
                else:
                    Cl_fgs[f1, f2] = hp.alm2cl(almBs[f1], almBs[f2],
                                               lmax=self.lmax)
        Cl_fgs = Cl_fgs[..., self.lmin:] / self.fsky
        return Cl_fgs

    def _get_sys_stat_residuals(self):
        Cl_fgs = self._get_cls_fg()
        i_cmb = self.A.components.index('CMB')
        print('======= ESTIMATION OF STAT AND SYS RESIDUALS =======')
        W_maxL = W(self.A_maxL, invN=self.invN)[i_cmb, :]
        W_dB_maxL = W_dB(self.A_maxL, self.A_dB_maxL, self.A.comp_of_dB,
                         invN=self.invN)[:, i_cmb]
        W_dBdB_maxL = W_dBdB(self.A_maxL, self.A_dB_maxL, self.A_dBdB_maxL,
                             self.A.comp_of_dB, invN=self.invN)[:, :, i_cmb]
        V_maxL = np.einsum('ij,ij...->...', self.res.Sigma, W_dBdB_maxL)

        # Check dimensions
        assert ((self.n_freqs,) == W_maxL.shape == W_dB_maxL.shape[1:] ==
                W_dBdB_maxL.shape[2:] == V_maxL.shape)
        assert (len(self.res.params) == W_dB_maxL.shape[0] ==
                W_dBdB_maxL.shape[0] == W_dBdB_maxL.shape[1])

        # Elementary quantities defined in Stompor, Errard, Poletti (2016)
        Cl_xF = {}
        Cl_xF['yy'] = _utmv(W_maxL, Cl_fgs.T, W_maxL)         # (ell,)
        Cl_xF['YY'] = _mmm(W_dB_maxL, Cl_fgs.T, W_dB_maxL.T)  # (ell, param, param)
        Cl_xF['yz'] = _utmv(W_maxL, Cl_fgs.T, V_maxL)         # (ell,)
        Cl_xF['Yy'] = _mmv(W_dB_maxL, Cl_fgs.T, W_maxL)       # (ell, param)
        Cl_xF['Yz'] = _mmv(W_dB_maxL, Cl_fgs.T, V_maxL)       # (ell, param)

        # Bias and statistical foreground residuals
        #self.res.noise = Cl_noise
        self.res.bias = Cl_xF['yy'] + 2 * Cl_xF['yz']  # S16, Eq. 23
        self.res.stat = np.einsum('ij, lij -> l', self.res.Sigma,
                                  Cl_xF['YY'])  # E11, Eq. 12
        self.res.var = self.res.stat**2 + 2 * np.einsum(
            'li, ij, lj -> l',  # S16, Eq. 28
            Cl_xF['Yy'], self.res.Sigma, Cl_xF['Yy'])
        return self.res.bias, self.res.stat, self.res.var
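# --- Illustration (not from the original source) ---------------------------
# Shape-only sketch of the contractions used for res.stat and res.var above:
# Sigma is (param, param), Cl_xF['YY'] is (ell, param, param) and
# Cl_xF['Yy'] is (ell, param). Array contents and sizes are illustrative.
import numpy as np

_n_ell, _n_param = 10, 3
_Sigma = np.random.rand(_n_param, _n_param)
_Cl_YY = np.random.rand(_n_ell, _n_param, _n_param)
_Cl_Yy = np.random.rand(_n_ell, _n_param)
_stat = np.einsum('ij, lij -> l', _Sigma, _Cl_YY)              # (ell,)
_cross = np.einsum('li, ij, lj -> l', _Cl_Yy, _Sigma, _Cl_Yy)  # (ell,)
print(_stat.shape, _cross.shape)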
class Sampler:
    def __init__(self, NSIDE, As):
        self.NSIDE = NSIDE
        self.Npix = 12 * NSIDE**2
        self.As = As
        print("Initialising sampler")
        self.cosmo = Class()
        #print("Maps")
        # To re-enable:
        #self.Qs, self.Us, self.sigma_Qs, self.sigma_Us = aggregate_by_pixels_params(get_pixels_params(self.NSIDE))
        #print("betas")
        self.matrix_mean, self.matrix_var = aggregate_mixing_params(
            get_mixing_matrix_params(self.NSIDE))
        print("Cosmo params")
        self.cosmo_means = np.array(COSMO_PARAMS_MEANS)
        self.cosmo_stdd = np.diag(COSMO_PARAMS_SIGMA)
        self.instrument = pysm.Instrument(
            get_instrument('litebird', self.NSIDE))
        self.components = [CMB(), Dust(150.), Synchrotron(150.)]
        self.mixing_matrix = MixingMatrix(*self.components)
        self.mixing_matrix_evaluator = self.mixing_matrix.evaluator(
            self.instrument.Frequencies)
        self.noise_covar_one_pix = self.noise_covariance_in_freq(self.NSIDE)
        # To re-enable:
        #self.noise_stdd_all = np.concatenate([np.sqrt(self.noise_covar_one_pix) for _ in range(2*self.Npix)])
        print("End of initialisation")

    def __getstate__(self):
        state_dict = self.__dict__.copy()
        del state_dict["mixing_matrix_evaluator"]
        del state_dict["cosmo"]
        del state_dict["mixing_matrix"]
        del state_dict["components"]
        return state_dict

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.cosmo = Class()
        self.components = [CMB(), Dust(150.), Synchrotron(150.)]
        self.mixing_matrix = MixingMatrix(*self.components)
        self.mixing_matrix_evaluator = self.mixing_matrix.evaluator(
            self.instrument.Frequencies)

    def prepare_sigma(self, input):
        sampled_beta, i = input
        mixing_mat = list(self.sample_mixing_matrix_parallel(sampled_beta))
        mean = np.dot(mixing_mat, (self.Qs + self.Us)[i])
        sigma = np.diag(self.noise_covar_one_pix) + np.einsum(
            "ij,jk,lk", mixing_mat,
            (np.diag((self.sigma_Qs + self.sigma_Us)[i])**2), mixing_mat)
        sigma_symm = (sigma + sigma.T) / 2
        log_det = np.log(scipy.linalg.det(2 * np.pi * sigma_symm))
        return mean, sigma_symm, log_det

    def sample_mixing_matrix_parallel(self, betas):
        return self.mixing_matrix_evaluator(betas)[:, 1:]

    def sample_normal(self, mu, stdd, diag=False):
        standard_normal = np.random.normal(0, 1, size=mu.shape[0])
        if diag:
            normal = np.multiply(stdd, standard_normal)
        else:
            normal = np.dot(stdd, standard_normal)
        normal += mu
        return normal

    def noise_covariance_in_freq(self, nside):
        cov = LiteBIRD_sensitivities**2 / hp.nside2resol(nside,
                                                         arcmin=True)**2
        return cov

    def sample_model_parameters(self):
        #sampled_cosmo = self.sample_normal(self.cosmo_means, self.cosmo_stdd)
        sampled_cosmo = np.array(
            [0.9665, 0.02242, 0.11933, 1.04101, self.As, 0.0561])
        #sampled_beta = self.sample_normal(self.matrix_mean, self.matrix_var, diag=True).reshape((self.Npix, -1), order="F")
        sampled_beta = self.matrix_mean.reshape((self.Npix, -1), order="F")
        return sampled_cosmo, sampled_beta

    def sample_CMB_QU(self, cosmo_params):
        params = {
            'output': OUTPUT_CLASS,
            'l_max_scalars': L_MAX_SCALARS,
            'lensing': LENSING
        }
        params.update(cosmo_params)
        print(params)
        self.cosmo.set(params)
        self.cosmo.compute()
        cls = self.cosmo.lensed_cl(L_MAX_SCALARS)
        eb_tb = np.zeros(shape=cls["tt"].shape)
        _, Q, U = hp.synfast(
            (cls['tt'], cls['ee'], cls['bb'], cls['te'], eb_tb, eb_tb),
            nside=self.NSIDE, new=True)
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        return Q, U

    def sample_mixing_matrix(self, betas):
        #mat_pixels = []
        #for i in range(self.Npix):
        #    m = self.mixing_matrix_evaluator(betas[i, :])[:, 1:]
        #    mat_pixels.append(m)
        mat_pixels = (self.mixing_matrix_evaluator(beta)[:, 1:]
                      for beta in betas)
        return mat_pixels

    def sample_mixing_matrix_full(self, betas):
        #mat_pixels = []
        #for i in range(self.Npix):
        #    m = self.mixing_matrix_evaluator(betas[i, :])
        #    mat_pixels.append(m)
        mat_pixels = (self.mixing_matrix_evaluator(beta) for beta in betas)
        return mat_pixels

    def sample_model(self, input_params):
        random_seed = input_params
        np.random.seed(random_seed)
        cosmo_params, _ = self.sample_model_parameters()
        cosmo_dict = {
            l[0]: l[1]
            for l in zip(COSMO_PARAMS_NAMES, cosmo_params.tolist())
        }
        tuple_QU = self.sample_CMB_QU(cosmo_dict)
        map_CMB = np.concatenate(tuple_QU)
        result = {"map_CMB": map_CMB, "cosmo_params": cosmo_params}
        with open("B3DCMB/data/temp" + str(random_seed), "wb") as f:
            pickle.dump(result, f)
        return cosmo_params

    def compute_weight(self, input):
        observed_data = config.sky_map
        noise_level, random_seed = input
        np.random.seed(random_seed)
        with open("B3DCMB/data/temp" + str(random_seed), "rb") as f:
            data = pickle.load(f)
        map_CMB = data["map_CMB"]
        print("Duplicating CMB")
        duplicate_CMB = (l for l in map_CMB for _ in range(15))
        print("Splitting for computation")
        # The problem is most likely that each row of X must be in Fortran
        # order, which in this case is also C order!
        x = np.ascontiguousarray(
            (observed_data - np.array(list(duplicate_CMB)) -
             np.array(config.means)).reshape(self.Npix * 2, -1))
        print("Computing log weights")
        #r = np.sum((np.dot(l[1], scipy.linalg.solve(l[0], l[1].T)) for l in zip(config.sigmas_symm, x)))
        r = compute_exponent(config.sigmas_symm, x, 2 * self.Npix)
        lw = (-1 / 2) * r + config.denom
        return lw

    def sample_data(self):
        print("Sampling parameters")
        cosmo_params, sampled_beta = self.sample_model_parameters()
        print("Computing mean and cov of map")
        mean_map = np.array([i for l in self.Qs + self.Us for i in l])
        stdd_map = [i for l in self.sigma_Qs + self.sigma_Us for i in l]
        print("Sampling maps Dust and Sync")
        maps = self.sample_normal(mean_map, stdd_map, diag=True)
        print("Computing cosmo params")
        cosmo_dict = {
            l[0]: l[1]
            for l in zip(COSMO_PARAMS_NAMES, cosmo_params.tolist())
        }
        print("Sampling CMB signal")
        tuple_QU = self.sample_CMB_QU(cosmo_dict)
        map_CMB = np.concatenate(tuple_QU)
        print("Creating mixing matrix")
        mixing_matrix = self.sample_mixing_matrix(sampled_beta)
        print("Scaling to frequency maps")
        #freq_maps = np.dot(scipy.linalg.block_diag(*2*mixing_matrix), maps.T)
        freq_pixels = []
        mix1, mix2 = tee(mixing_matrix)
        for i, mat in enumerate(chain(mix1, mix2)):
            freq_pix = np.dot(mat, maps[2 * i:(2 * i + 2)].T)
            freq_pixels.append(freq_pix)
        freq_maps = np.concatenate(freq_pixels)
        print("Adding CMB to frequency maps")
        duplicated_cmb = np.repeat(map_CMB, 15)
        print("Creating noise")
        noise = self.sample_normal(np.zeros(2 * 15 * self.Npix),
                                   self.noise_stdd_all, diag=True)
        print("Adding noise to the maps")
        sky_map = np.add(np.add(freq_maps, duplicated_cmb), noise)
        #sky_map = np.add(freq_maps, duplicated_cmb)
        return {
            "sky_map": sky_map,
            "cosmo_params": cosmo_params,
            "betas": sampled_beta
        }
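# --- Illustration (not from the original source) ---------------------------
# Self-contained sketch of the per-pixel quadratic form that compute_weight
# accumulates (the role played by compute_exponent above, mirroring the
# commented-out scipy.linalg.solve line): for each pixel,
# r_i = x_i^T Sigma_i^{-1} x_i. Sizes and arrays below are illustrative only.
import numpy as np
import scipy.linalg

_n_pix, _n_freq = 4, 15
_rng = np.random.default_rng(0)
_x = _rng.normal(size=(_n_pix, _n_freq))
_sigmas = []
for _ in range(_n_pix):
    _m = _rng.normal(size=(_n_freq, _n_freq))
    _sigmas.append(_m @ _m.T + _n_freq * np.eye(_n_freq))  # SPD covariance
_r = sum(_xi @ scipy.linalg.solve(_si, _xi) for _si, _xi in zip(_sigmas, _x))
_log_weight = -0.5 * _r  # plus the normalisation term held in config.denom
print(_log_weight)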
    logprob = -0.5 * np.sum(templates_map**2 / np.diag(covariance_templates)**2)
    return logprob

# build a random vector of templates of size 4 x Npix, for example:
template_test = np.random.normal(0, 1, (4, Npix))
print(p_template_computation(template_test))

############################################
#### CONSTRUCTING THE MIXING MATRIX A
############################################
# define the instrumental specifications
instrument = pysm.Instrument(get_instrument('litebird', NSIDE))
# define the components in the sky and their scaling laws
components = [CMB(), Dust(150.), Synchrotron(150.)]
# instantiate the function that evaluates the scaling laws at the LiteBIRD frequencies
A = MixingMatrix(*components)
A_ev = A.evaluator(instrument.Frequencies)
print(A_ev([1.56, 20, -3.1]))

####################################################
#### CONSTRUCTING TEMPLATES OF BETA AND SIGMA BETA
#### -> p(\beta) \propto exp[-1/2 (\beta - \bar{\beta})^T \sigma_\beta^{-2} (\beta - \bar{\beta})]
####################################################
dust_spectral_indices_ = hp.read_map(
    'B3DCMB/COM_CompMap_dust-commander_0256_R2.00.fits', field=(3, 5, 6, 8))
dust_spectral_indices_ = hp.ud_grade(dust_spectral_indices_, nside_out=NSIDE)
beta_dust = dust_spectral_indices_[2]
sigma_beta_dust = dust_spectral_indices_[3]
temp_dust = dust_spectral_indices_[0]
sigma_temp_dust = dust_spectral_indices_[1]
beta_sync = hp.read_map('B3DCMB/sync_beta.fits', field=(0))
beta_sync = hp.ud_grade(beta_sync, nside_out=NSIDE)
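# --- Illustration (not from the original source) ---------------------------
# Minimal sketch of the log of the Gaussian prior quoted in the banner above,
# assuming a diagonal sigma_beta (one standard deviation per entry). The
# values below are illustrative only.
import numpy as np

def _log_p_beta(beta, beta_bar, sigma_beta):
    return -0.5 * np.sum(((beta - beta_bar) / sigma_beta)**2)

_beta_bar = np.array([1.54, 20.0, -3.1])  # e.g. beta_dust, temp_dust, beta_sync
_sigma_beta = np.array([0.05, 2.0, 0.1])
print(_log_p_beta(np.array([1.6, 19.0, -3.0]), _beta_bar, _sigma_beta))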
class Sampler:
    def __init__(self, NSIDE):
        self.NSIDE = NSIDE
        self.Npix = 12 * NSIDE**2
        print("Initialising sampler")
        self.cosmo = Class()
        print("Maps")
        self.templates_map, self.templates_var = aggregate_pixels_params(
            get_pixels_params(self.NSIDE))
        print("betas")
        self.matrix_mean, self.matrix_var = aggregate_mixing_params(
            get_mixing_matrix_params(self.NSIDE))
        print("Cosmo params")
        self.cosmo_means = np.array(COSMO_PARAMS_MEANS)
        self.cosmo_var = (np.diag(COSMO_PARAMS_SIGMA) / 2)**2

        plt.hist(self.templates_map)
        plt.savefig("mean_values.png")
        plt.close()
        plt.hist(self.templates_var)
        plt.savefig("std_values.png")
        plt.close()

        self.instrument = pysm.Instrument(
            get_instrument('litebird', self.NSIDE))
        self.components = [CMB(), Dust(150.), Synchrotron(150.)]
        self.mixing_matrix = MixingMatrix(*self.components)
        self.mixing_matrix_evaluator = self.mixing_matrix.evaluator(
            self.instrument.Frequencies)
        print("End of initialisation")

    def __getstate__(self):
        state_dict = self.__dict__.copy()
        del state_dict["mixing_matrix_evaluator"]
        del state_dict["cosmo"]
        del state_dict["mixing_matrix"]
        del state_dict["components"]
        return state_dict

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.cosmo = Class()
        self.components = [CMB(), Dust(150.), Synchrotron(150.)]
        self.mixing_matrix = MixingMatrix(*self.components)
        self.mixing_matrix_evaluator = self.mixing_matrix.evaluator(
            self.instrument.Frequencies)

    def sample_normal(self, mu, sigma, s=None):
        return np.random.multivariate_normal(mu, sigma, s)

    def sample_model_parameters(self):
        #sampled_cosmo = self.sample_normal(self.cosmo_means, self.cosmo_var)
        sampled_cosmo = np.array([
            0.9665, 0.02242, 0.11933, 1.04101, 3.047, 0.0561
        ]) - 2 * np.array(COSMO_PARAMS_SIGMA)
        #sampled_beta = self.sample_normal(self.matrix_mean, self.matrix_var).reshape((self.Npix, -1), order="F")
        sampled_beta = self.matrix_mean.reshape((self.Npix, -1), order="F")
        return sampled_cosmo, sampled_beta

    def sample_CMB_QU(self, cosmo_params):
        params = {
            'output': OUTPUT_CLASS,
            'l_max_scalars': L_MAX_SCALARS,
            'lensing': LENSING
        }
        params.update(cosmo_params)
        self.cosmo.set(params)
        self.cosmo.compute()
        cls = self.cosmo.lensed_cl(L_MAX_SCALARS)
        eb_tb = np.zeros(shape=cls["tt"].shape)
        _, Q, U = hp.synfast(
            (cls['tt'], cls['ee'], cls['bb'], cls['te'], eb_tb, eb_tb),
            nside=self.NSIDE, new=True)
        self.cosmo.struct_cleanup()
        self.cosmo.empty()
        return Q, U

    def sample_mixing_matrix(self, betas):
        mat_pixels = []
        for i in range(self.Npix):
            m = self.mixing_matrix_evaluator(betas[i, :])
            mat_pixels.append(m)
        mixing_matrix = np.stack(mat_pixels, axis=0)
        return mixing_matrix

    def sample_model(self):
        cosmo_params, sampled_beta = self.sample_model_parameters()
        #maps = self.sample_normal(self.templates_map, self.templates_var)
        cosmo_dict = {
            l[0]: l[1]
            for l in zip(COSMO_PARAMS_NAMES, cosmo_params.tolist())
        }
        tuple_QU = self.sample_CMB_QU(cosmo_dict)
        map_CMB = np.stack(tuple_QU, axis=1)
        '''
        mixing_matrix = self.sample_mixing_matrix(sampled_beta)
        map_Sync = np.stack([maps[0:self.Npix], maps[self.Npix:2*self.Npix]], axis=1)
        map_Dust = np.stack([maps[2*self.Npix:3*self.Npix], maps[3*self.Npix:]], axis=1)
        entire_map = np.stack([map_CMB, map_Dust, map_Sync], axis=1)
        dot_prod = []
        for j in range(self.Npix):
            m = np.dot(mixing_matrix[j, :, :], entire_map[j, :, :])
            dot_prod.append(m)
        sky_map = np.stack(dot_prod, axis=0)
        '''
        sky_map = map_CMB
        return {
            "sky_map": sky_map,
            "cosmo_params": cosmo_params,
            "betas": sampled_beta
        }


#sampler = Sampler(NSIDE)
#r = sampler.sample_model(1)
#['beta_d' 'temp' 'beta_pl']
#['beta_d' 'temp']
#['beta_pl']
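# --- Illustration (not from the original source) ---------------------------
# Sketch of where the commented parameter lists above likely come from: the
# MixingMatrix built from [CMB, Dust, Synchrotron] exposes the names of its
# spectral parameters, and each component exposes its own. The import paths
# are assumptions about the fgbuster version in use, so the lines are left
# commented out.
# from fgbuster.component_model import CMB, Dust, Synchrotron
# from fgbuster.mixingmatrix import MixingMatrix
# A = MixingMatrix(CMB(), Dust(150.), Synchrotron(150.))
# print(A.params)                  # expected: ['beta_d', 'temp', 'beta_pl']
# print(Dust(150.).params)         # expected: ['beta_d', 'temp']
# print(Synchrotron(150.).params)  # expected: ['beta_pl']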