def kappa_predicted(self):
    self.comoving_d()
    c_light = 3e5  # km/s
    # Prefactor of Eq. 9 of Amara et al.
    constant = ((100. * self.cosmo['h'])**2 * self.cosmo['omega_M_0']) * \
               (3 / 2.) * (1 / c_light**2)

    if type(self.zs) is np.ndarray:  # This only works if zs is already binned!
        if self.pdf_zs is None:
            # Default: flat source redshift distribution
            self.pdf_zs = np.ones((len(self.d_c), len(self.d_s)))
        else:
            self.pdf_zs = np.resize(self.pdf_zs,
                                    (len(self.d_c), len(self.d_s)))
        # Normalize the probabilities used in the integral
        self.pdf_zs /= np.linalg.norm(self.pdf_zs[0, :], ord=1)
        self.pdf_zs = np.transpose(self.pdf_zs)
        twod_d_s = np.transpose(np.resize(self.d_s,
                                          (len(self.d_c), len(self.d_s))))
        twod_d_c = np.resize(self.d_c, (len(self.d_s), len(self.d_c)))
        # Lensing efficiency weighted by the source redshift distribution
        integral_2 = self.pdf_zs * (twod_d_s - twod_d_c) / twod_d_s
        # Sum over the source planes for each lens plane
        integral_2_summed = [integral_2[:, x].sum()
                             for x in range(len(self.d_c))]
        integral_1 = ((self.d_c * integral_2_summed) *
                      (self.delta_d / self.a))[:, np.newaxis, np.newaxis]
    else:
        # Single source plane; add two axes so the weights broadcast over
        # the 3D density grid
        integral_1 = ((self.d_c * (self.d_s - self.d_c) / self.d_s) *
                      (self.delta_d / self.a))[:, np.newaxis, np.newaxis]

    # Smooth the 3D density field and find kappa from that
    self.mask_3d = np.ones(self.delta3d.shape) * self.mask
    xxx, self.delta3d_sm, yyy = convolve_mask_fft(self.delta3d,
                                                  self.mask_3d, self.g_3d,
                                                  ignore=0.0)
    self.kappa_pred_3d = constant * np.sum(integral_1 * self.delta3d_sm,
                                           axis=0)

    # Use the unsmoothed density field to generate kappa, then smooth the
    # resulting 2D kappa field
    self.kappa_pred = constant * np.sum(integral_1 * self.delta3d, axis=0)
    xxx, self.kappa_pred, yyy = convolve_mask_fft(self.kappa_pred, self.mask,
                                                  self.g_2d, ignore=0.0)
    self.gamma_p = ku.kappa_to_gamma(self.kappa_pred, self.pixel_scale,
                                     dt2=None)

    #if self.pix_source_z:
    #    print(len(integral_pix), self.delta3d.shape, self.kappa_pred.shape)
    #else:
    print(integral_1.shape, self.delta3d.shape, self.kappa_pred.shape)

    np.savez('kappa_predicted.npz', kappa=self.kappa_pred)
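# --- Illustrative sketch (not part of the original pipeline) ---
# kappa_predicted() above discretizes the Born-approximation convergence
# integral (Eq. 9 of Amara et al.):
#   kappa(theta) ~ (3/2) (H0/c)^2 Omega_m
#                  * sum_i delta_i(theta) * D_i (D_s - D_i) / (D_s * a_i) * dD
# The helper below evaluates that sum for a single source plane on an assumed
# (n_lens, ny, nx) density grid; the function name, argument names and default
# cosmology are illustrative assumptions, not attributes of the class above.
def _kappa_born_sketch(delta3d, d_c, d_s, a, delta_d, h=0.7, omega_m=0.3):
    """Single-source-plane Born convergence from a gridded density contrast."""
    import numpy as np
    c_light = 3e5  # km/s
    constant = (100. * h)**2 * omega_m * (3 / 2.) / c_light**2
    # Lensing efficiency per lens plane; add two axes so it broadcasts over
    # the two sky dimensions of delta3d.
    weight = d_c * (d_s - d_c) / d_s * (delta_d / a)
    return constant * np.sum(weight[:, np.newaxis, np.newaxis] * delta3d,
                             axis=0)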
def true_values(self, g_to_k=False, e_sign=[-1, -1],
                col_names=['RA', 'DEC', 'z', 'E1', 'E2', 'W', 'SN', 'Re']):
    """If g_to_k=True, kappa is reconstructed from the shear (gamma);
    otherwise kappa is read directly from the FITS table. e_sign gives the
    correct signs for e1 and e2. col_names lists the column names in the
    FITS file and is only used when g_to_k=True; otherwise the default
    column names from the simulation are used."""
    if g_to_k:
        sourcefile1 = os.path.split(self.sourcefile)[1].split('.')[0]
        ofile = 'pixelized_%s.npz' % sourcefile1
        ofile, mask = ku.pixelize_shear_CFHT('.', self.sourcefile,
                                             self.pixel_scale, ofile=ofile,
                                             bin_ra=self.raedges,
                                             bin_dec=self.decedges,
                                             zmin=self.zmin_s,
                                             zmax=self.zmax_s,
                                             col_names=col_names)
        f = np.load('pixelized_%s.npz' % sourcefile1)
        epsilon = f['epsilon']
        Nm = f['number']
        dt2 = self.pixel_scale
        dt1 = self.pixel_scale
        self.mask = f['mask']
        xxx, e1, yyy = convolve_mask_fft(epsilon.real, self.mask, self.g_2d,
                                         ignore=0.50)
        xxx, e2, yyy = convolve_mask_fft(epsilon.imag, self.mask, self.g_2d,
                                         ignore=0.50)
        xxx, Nm, yyy = convolve_mask_fft(Nm, self.mask, self.g_2d,
                                         ignore=0.50)
        Nm[Nm == 0] = 1
        epsilon = e_sign[0] * e1 + e_sign[1] * 1j * e2
        epsilon /= Nm
        self.kappa_true = ku.gamma_to_kappa(epsilon, dt1, dt2=dt2).real
        self.gamma1_true = epsilon.real
        self.gamma2_true = epsilon.imag
    else:
        # Read the source catalog to get the shear field
        f = pyfits.open(self.sourcefile)
        d = f[1].data
        header = f[1].header
        f.close()
        z_source = d.field('Z')
        con = (z_source >= self.zmin_s) & (z_source <= self.zmax_s)
        ra_sh = d.field('RA')[con]
        dec_sh = d.field('DEC')[con]
        gamma1_true = d.field('GAMMA1')[con]
        gamma2_true = d.field('GAMMA2')[con]
        z_source = z_source[con]
        N, E = np.histogramdd(np.array([dec_sh, ra_sh]).T,
                              bins=(self.decedges, self.raedges))
        self.mask = N.copy() + 1
        self.mask_lens = N.copy() + 1
        # Using the shears as histogram weights gives the per-pixel sum of
        # gamma; dividing by the unweighted counts N below gives the
        # per-pixel mean shear.
        Ng1, E = np.histogramdd(np.array([dec_sh, ra_sh]).T,
                                bins=(self.decedges, self.raedges),
                                weights=gamma1_true)
        Ng2, E = np.histogramdd(np.array([dec_sh, ra_sh]).T,
                                bins=(self.decedges, self.raedges),
                                weights=gamma2_true)
        N[N == 0] = 1
        self.mask[self.mask > 0] = 1
        self.mask_lens[self.mask_lens > 0] = 1
        self.gamma1_true = Ng1 / (1. * N)
        self.gamma2_true = Ng2 / (1. * N)
        if 'KAPPA' in header.values():
            kappa_true = d.field('KAPPA')[con]
            Nk, E = np.histogramdd(np.array([dec_sh, ra_sh]).T,
                                   bins=(self.decedges, self.raedges),
                                   weights=kappa_true)
            self.kappa_true = Nk / (1. * N)
        else:
            dt2 = self.pixel_scale
            dt1 = self.pixel_scale
            epsilon = e_sign[0] * self.gamma1_true + \
                      e_sign[1] * 1j * self.gamma2_true
            self.kappa_true = ku.gamma_to_kappa(epsilon, dt1, dt2=dt2).real
        # Masked convolution
        xxx, self.kappa_true, yyy = convolve_mask_fft(self.kappa_true,
                                                      self.mask_lens,
                                                      self.g_2d, ignore=0.0)
        xxx, self.gamma1_true, yyy = convolve_mask_fft(self.gamma1_true,
                                                       self.mask,
                                                       self.g_2d, ignore=0.0)
        self.gamma1_true *= e_sign[0]
        xxx, self.gamma2_true, yyy = convolve_mask_fft(self.gamma2_true,
                                                       self.mask,
                                                       self.g_2d, ignore=0.0)
        self.gamma2_true *= e_sign[1]
        self.gamma_true = self.gamma1_true + 1j * self.gamma2_true
    self.gamma_tp = ku.kappa_to_gamma(self.kappa_true, self.pixel_scale,
                                      dt2=None)
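# --- Illustrative sketch (not part of the original pipeline) ---
# true_values() above bins the catalogue shears with np.histogramdd: passing
# the shear values as weights yields the per-pixel *sum* of gamma, and
# dividing by the unweighted counts yields the per-pixel *mean*.  The helper
# below shows that pattern in isolation; the function and argument names are
# illustrative assumptions.
def _bin_mean_shear_sketch(ra, dec, gamma1, ra_edges, dec_edges):
    """Mean gamma1 per (dec, ra) pixel from a galaxy catalogue."""
    import numpy as np
    pts = np.array([dec, ra]).T
    counts, _ = np.histogramdd(pts, bins=(dec_edges, ra_edges))
    g1_sum, _ = np.histogramdd(pts, bins=(dec_edges, ra_edges),
                               weights=gamma1)
    counts[counts == 0] = 1   # avoid dividing by zero in empty pixels
    return g1_sum / counts    # per-pixel mean shear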