def getReconstructionsAligned(idShell, Ilm_na, Lgrid, Lmax=7, vmin=1e-4, nside=128):
    Ir = np.zeros((hp.nside2npix(nside), np.size(idShell)))
    for id, Ilm in enumerate(Ilm_na):
        Ilm = np.ascontiguousarray(Ilm)
        if id == 0:
            '''align the first shell with theoretically calculated intensity'''
            Ith = np.load('first_shell_to_start_alignment.npy')
            almth = hp.map2alm(Ith, Lmax - 1)
            almr = alignShells(almth, Ilm, Lmax - 1, Lgrid)
            rI = hp.alm2map(almr, nside)
            neg = np.where(rI < 0)
            rI[neg] = vmin
            Ir[:, 0] = rI
        else:
            '''Align the successive shells with respect to each preceding shell'''
            almr = alignShells(almr, Ilm, Lmax - 1, Lgrid)
            rI = hp.alm2map(almr, nside)
            neg = np.where(rI < 0)
            rI[neg] = vmin
            Ir[:, id] = rI
    return Ir
def sphtrans_inv_real_pol(alm, nside):
    """Inverse spherical harmonic transform onto a real polarised field.

    Parameters
    ----------
    alm : np.ndarray[npol, lmax+1, lmax+1]
        The array of alms. Only expects the real half of the array. The first
        index is over polarisation T, Q, U (and optionally V).
    nside : integer
        The Healpix resolution of the final map.

    Returns
    -------
    hpmaps : np.ndarray[npol, 12*nside**2]
        The T, Q, U and optionally V maps of the sky.
    """
    npol = alm.shape[0]
    if alm.shape[1] != alm.shape[2] or not (npol == 3 or npol == 4):
        raise Exception("a_lm array wrong shape.")

    almp = [pack_alm(alm[0]), pack_alm(alm[1]), pack_alm(alm[2])]

    maps = np.zeros((npol, 12*nside**2), dtype=np.float64)
    maps[:3] = np.array(healpy.alm2map(almp, nside, verbose=False))

    if npol == 4:
        maps[3] = healpy.alm2map(pack_alm(alm[3]), nside, verbose=False)

    return np.array(maps)
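# Illustrative aside (not part of the original module): the wrapper above
# ultimately performs healpy's standard polarised inverse transform. A
# minimal, self-contained sketch of that underlying call, starting from
# already-packed T/E/B alm vectors, looks like this.
import numpy as np
import healpy as hp

nside, lmax = 64, 128
cl = 1e-2 * np.ones(lmax + 1)
alm_t, alm_e, alm_b = (hp.synalm(cl, lmax=lmax) for _ in range(3))
t, q, u = hp.alm2map([alm_t, alm_e, alm_b], nside, pol=True)
print(t.shape, q.shape, u.shape)  # three maps of length 12*nside**2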
def generate_gaus_map(self, readGmap=-1):
    if (readGmap < 0):
        self.gausalm0 = hp.synalm(self.inputCls)
    else:
        self.gausalm0 = hp.read_alm(self.mapsdir + "gmap_" + str(readGmap) + ".fits")

    self.gausalm1 = np.copy(self.gausalm0)
    self.gausalm1[0] = 0.0
    if (self.nodipole):
        ndxfl = np.ones(len(self.inputCls))
        ndxfl[1] = 0.0
        hp.almxfl(self.gausalm1, ndxfl, inplace=True)
        hp.almxfl(self.gausalm0, ndxfl, inplace=True)

    self.gausmap0 = hp.alm2map(self.gausalm0, nside=self.NSIDE)  # includes the monopole bit
    self.gausmap1 = hp.alm2map(self.gausalm1, nside=self.NSIDE)  # does not include the monopole
def show_CMB_T_map(self, Tmap=None, max=100,
                   title="CMB gravitational potential fluctuations as seen from inside the LSS",
                   from_perspective_of="observer", cmap=None):
    if Tmap is None:
        self.NSIDE = 256
        self.Tmap = hp.alm2map(self.alm, self.NSIDE)
    else:
        self.Tmap = Tmap

    if from_perspective_of == "observer":
        dpi = 300
        figsize_inch = 60, 40
        fig = plt.figure(figsize=figsize_inch, dpi=dpi)
        # Sky map:
        hp.mollview(self.Tmap, rot=(-90, 0, 0), min=-max, max=max,
                    title=title + ", $\ell_{max}=$%d " % self.truncated_lmax,
                    cmap=cmap, unit="$\mu$K")
        plt.savefig(title + ".png", dpi=dpi, bbox_inches="tight")
    else:
        # Interactive "external" view
        # ([like this](http://zonca.github.io/2013/03/interactive-3d-plot-of-sky-map.html))
        pass
        # beatbox.zoncaview(self.Tmap)
        # This did not work, sadly. Maybe we can find a 3D
        # spherical surface plot routine using matplotlib? For
        # now, just use the healpix vis.
        R = (0.0, 0.0, 0.0)  # (lon, lat, psi) to specify center of map and rotation to apply
        hp.orthview(self.Tmap, rot=R, flip='geo', half_sky=True,
                    title="CMB gravitational potential fluctuations as seen from outside the LSS, $\ell_{max}$=%d" % self.truncated_lmax)
        print("Ahem - we can't visualize maps on the surface of the sphere yet, sorry.")
    return
def simulate_cmb(nside=2048, lmax=3000, frequency=100,smear=False, nomap = False, beam=None, beamP=None, save=False, filename='testcmb.fits', cl_file='bf_base_cmbonly_plikHMv18_TT_lowTEB_lmax4000.minimum.theory_cl'): ls, cltt, clte, clee, clbb = get_theory_cls(lmax=lmax, cl_file=cl_file) Tlm, Elm, Blm = hp.synalm( (cltt, clee, clbb, clte), new=True, lmax=lmax) if smear: if (beam is None) or (beamP is None) : hdulist = fits.open(data_path + 'HFI_RIMO_Beams-100pc_R2.00.fits') beam = hdulist[beam_index['{}'.format(frequency)]].data.NOMINAL[0][:lmax+1] beamP = hdulist[beam_index['{}P'.format(frequency)]].data.NOMINAL[0][:lmax+1] hp.sphtfunc.almxfl(Tlm, beam, inplace=True) hp.sphtfunc.almxfl(Elm, beamP, inplace=True) hp.sphtfunc.almxfl(Blm, beamP, inplace=True) if nomap: return Tlm,Elm,Blm Tmap = hp.alm2map( Tlm, nside ) Qmap, Umap = hp.alm2map_spin( (Elm, Blm), nside, 2, lmax=lmax) if save: hp.write_map([Tmap,Qmap,Umap],data_path + filename) return Tmap, Qmap, Umap
def fisher_single(par,v) : # v -> seen pixels nb=len(par.bins)-1 npix=hp.nside2npix(par.nside) npix_seen=len(par.ip_seen) lmax=3*par.nside-1 larr=np.arange(lmax+1) fisher=np.zeros([nb,nb]) pixsize=4*np.pi/hp.nside2npix(par.nside) v_map=np.zeros(npix); v_map[par.ip_seen]=v vcm1=invert_covar(par,v_map) v_lm=hp.map2alm(v_map,iter=0) vcm1_lm=hp.map2alm(vcm1,iter=0) for iba in np.arange(nb) : # print " Row %d"%iba transfer=np.zeros(lmax+1); transfer[par.bins[iba]:par.bins[iba+1]]=1. v_map2=hp.alm2map(hp.almxfl(v_lm,transfer),par.nside,verbose=False)/pixsize #Q_a * v v_map2cm1=invert_covar(par,v_map2) #C^-1 * Q_a * v va_lm=hp.map2alm(v_map2cm1,iter=0) cl_vcm1_va=(2*larr+1)*hp.alm2cl(vcm1_lm,alms2=va_lm) for ibb in np.arange(nb-iba)+iba : fisher[iba,ibb]=np.sum(cl_vcm1_va[par.bins[ibb]:par.bins[ibb+1]])/pixsize**2 if iba!=ibb : fisher[ibb,iba]=fisher[iba,ibb] return fisher
def test_map2alm_pol_gal_cut(self): tmp = [np.empty(o.size * 2) for o in self.mapiqu] for t, o in zip(tmp, self.mapiqu): t[::2] = o maps = [ self.mapiqu, [o.astype(np.float32) for o in self.mapiqu], [t[::2] for t in tmp], ] for use_weights in [False, True]: for input in maps: gal_cut = 30 nside = hp.get_nside(input) npix = hp.nside2npix(nside) gal_mask = ( np.abs(hp.pix2ang(nside, np.arange(npix), lonlat=True)[1]) < gal_cut ) alm = hp.map2alm( input, iter=10, use_weights=use_weights, gal_cut=gal_cut ) output = hp.alm2map(alm, 32) for i, o in zip(input, output): # Testing requires low tolerances because of the # mask boundary i[gal_mask] = 0 np.testing.assert_allclose(i, o, atol=1e-2)
def main(nsim=1, fnl=0.0):
    nl = 1024
    nside_fnl = 512

    # Load map, mll
    print("")
    print("Loading alm_g, alm_ng and creating map...")
    fn_almg = ('data/fnl_sims/alm_l_%04d_v3.fits' % (nsim,))
    #fn_almg = ('data/fnl_sims/alm_l_%i.fits' % (nsim,))
    almg = hp.read_alm(fn_almg)
    #almg = almg[:hp.Alm.getsize(nl)]
    fn_almng = ('data/fnl_sims/alm_nl_%04d_v3.fits' % (nsim,))
    #fn_almng = ('data/fnl_sims/alm_nl_%i.fits' % (nsim,))
    almng = hp.read_alm(fn_almng)
    #almng = almng[:hp.Alm.getsize(nl)]

    alm = almg * (2.7e6) + fnl * almng * (2.7e6)  # convert to units of uK to be consistent with other maps
    map_sim_fnl = hp.alm2map(alm, nside=nside_fnl)

    #print("Normalizing map...")
    #map_sim_fnl *= (1e6 * 2.7)  # convert to units of uK to be consistent with other maps

    fn_map = 'data/fnl_sims/map_fnl_%i_sim_%i.fits' % (int(fnl), nsim)
    print("Writing map: %s" % fn_map)
    hp.write_map(fn_map, map_sim_fnl)
def to_map(self, nside, pixwin=False, fwhm=0.0, sigma=None, pol=True, verbose=False): '''Return data for the Healpix map in 'RING' mode with the specified nside (power of 2) generated by these Alm coefficients. Parameters ---------- nside : int, scalar The nside of the output map. pixwin : bool, optional Smooth the alm using the pixel window functions. Default: False. fwhm : float, scalar, optional The fwhm of the Gaussian used to smooth the map (applied on alm) [in radians] sigma : float, scalar, optional The sigma of the Gaussian used to smooth the map (applied on alm) [in radians] pol : bool, optional If True, assumes input alms are TEB. Output will be TQU maps. (input must be 1 or 3 alms) If False, apply spin 0 harmonic transform to each alm. (input can be any number of alms) If there is only one input alm, it has no effect. Default: True. Returns ------- maps : array or list of arrays A Healpix map in RING scheme at nside or a list of T,Q,U maps (if polarized input)''' return healpy.alm2map(self.get_data(), nside, lmax=self._lmax, mmax=self._mmax, pixwin=pixwin, fwhm=fwhm, sigma=sigma, pol=pol, verbose=verbose)
def setUp(self):
    self.nside = 64
    self.lmax = 96
    alm_size = 4753
    np.random.seed(123)
    self.input_alm = np.ones(alm_size, dtype=complex)
    self.m = hp.alm2map(self.input_alm, nside=self.nside, lmax=self.lmax)
def rotate_map_to_axis(m, ra, dec, nest=False, method="direct"): """Rotate a sky map to place a given line of sight on the +z axis. Parameters ---------- m : np.ndarray The input HEALPix array. ra : float Right ascension of axis in radians. To specify the axis in geocentric coordinates, supply ra=(lon + gmst), where lon is the geocentric longitude and gmst is the Greenwich mean sidereal time in radians. dec : float Declination of axis in radians. To specify the axis in geocentric coordinates, supply dec=lat, where lat is the geocentric latitude in radians. nest : bool, default=False Indicates whether the input sky map is in nested rather than ring-indexed HEALPix coordinates (default: ring). method : 'direct' or 'fft' Select whether to use spherical harmonic transformation ('fft') or direct coordinate transformation ('direct') Returns ------- m_rotated : np.ndarray The rotated HEALPix array. """ npix = len(m) nside = hp.npix2nside(npix) theta = 0.5 * np.pi - dec phi = ra if method == "fft": if nest: m = hp.reorder(m, n2r=True) alm = hp.map2alm(m) hp.rotate_alm(alm, -phi, -theta, 0.0) ret = hp.alm2map(alm, nside, verbose=False) if nest: ret = hp.reorder(ret, r2n=True) elif method == "direct": R = hp.Rotator(rot=np.asarray([0, theta, -phi]), deg=False, inv=False, eulertype="Y") theta, phi = hp.pix2ang(nside, np.arange(npix), nest=nest) ipix = hp.ang2pix(nside, *R(theta, phi), nest=nest) ret = m[ipix] else: raise ValueError("Unrecognized method: {0}".format(method)) return ret
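# Usage sketch for rotate_map_to_axis (illustrative, not from the original
# module): build a smooth map with a known hot spot, rotate that line of
# sight onto +z, and check that the maximum lands at the pole.
import numpy as np
import healpy as hp

nside = 64
npix = hp.nside2npix(nside)
ra0, dec0 = np.radians(40.0), np.radians(-30.0)
vec0 = hp.ang2vec(0.5 * np.pi - dec0, ra0)
vecs = np.array(hp.pix2vec(nside, np.arange(npix)))
m = 1.0 + np.dot(vec0, vecs)  # dipole pattern peaked at (ra0, dec0)

m_rot = rotate_map_to_axis(m, ra0, dec0, method="direct")
theta_max, _ = hp.pix2ang(nside, int(np.argmax(m_rot)))
print(np.degrees(theta_max))  # close to 0, i.e. the chosen direction is now at the pole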
def mkfullsky(corr, nside, alms=False): """Construct a set of correlated Healpix maps. Make a set of full sky gaussian random fields, given the correlation structure. Useful for constructing a set of different redshift slices. Parameters ---------- corr : np.ndarray (lmax+1, numz, numz) The correlation matrix :math:`C_l(z, z')`. nside : integer The resolution of the Healpix maps. alms : boolean, optional If True return the alms instead of the sky maps. Returns ------- hpmaps : np.ndarray (numz, npix) The Healpix maps. hpmaps[i] is the i'th map. """ numz = corr.shape[1] maxl = corr.shape[0]-1 if corr.shape[2] != numz: raise Exception("Correlation matrix is incorrect shape.") trans = np.zeros_like(corr) for i in range(maxl+1): trans[i] = nputil.matrix_root_manynull(corr[i], truncate=False) la, ma = healpy.Alm.getlm(maxl) matshape = la.shape + (numz,) # Construct complex gaussian random variables of unit variance gaussvars = (np.random.standard_normal(matshape) + 1.0J * np.random.standard_normal(matshape)) / 2.0**0.5 # Transform variables to have correct correlation structure for i, l in enumerate(la): gaussvars[i] = np.dot(trans[l], gaussvars[i]) if alms: alm_freq = np.zeros((numz, maxl+1, maxl+1), dtype=np.complex128) for i in range(numz): alm_freq[i] = hputil.unpack_alm(gaussvars[:, i], maxl) return alm_freq hpmaps = np.empty((numz, healpy.nside2npix(nside))) # Perform the spherical harmonic transform for each z for i in range(numz): hpmaps[i] = healpy.alm2map(gaussvars[:,i].copy(), nside) return hpmaps
def direct(self, input, output):
    if input.ndim == 1:
        input = input[:, None]
        output = output[:, None]
    for i, o in zip(input.T, output.T):
        ialm = hp.map2alm(i)
        alm_smoothed = hp.almxfl(ialm, self.fl)
        o[...] = hp.alm2map(alm_smoothed, hp.npix2nside(len(i)))
def harmonic_ud_grade(m, nside_in, nside_out):
    """
    Decompose a map at a resolution nside_in into spherical harmonic
    components and then resynthesize the map at nside_out.
    """
    lmax = 3 * nside_in - 1
    alm = hp.map2alm(m, lmax=lmax)
    return hp.alm2map(alm, nside_out, lmax=lmax, verbose=False)
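# Quick usage sketch (illustrative, not from the original source): down-grade
# a random band-limited map with harmonic_ud_grade above and compare with the
# pixel-space hp.ud_grade. Note that the verbose keyword used in the function
# above has been removed in recent healpy releases and may need to be dropped.
import numpy as np
import healpy as hp

nside_in, nside_out = 64, 32
cl = 1.0 / (np.arange(3 * nside_in) + 10.0) ** 2
m = hp.synfast(cl, nside_in)

m_harm = harmonic_ud_grade(m, nside_in, nside_out)
m_pix = hp.ud_grade(m, nside_out)
print(np.std(m_harm - m_pix))  # residual differences reflect pixel-window effects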
def A_matrix_func(data):
    """ cf eq 25 of eriksen 2004 """
    Chalf = np.sqrt(data.cl_th[: data.lmax + 1]) * data.beam[: data.lmax + 1]
    map2 = hp.alm2map(hp.almxfl(real2complex_alm(data.alm), Chalf), data.nside) * data.invvar
    alm2 = hp.almxfl(hp.map2alm(map2, data.lmax, use_weights=False) * hp.nside2npix(data.nside) / 4.0 / np.pi, Chalf)
    return data.alm + complex2real_alm(alm2)
def return_map(map_class):
    """ We solve for C^{-1/2}x, here is to recover x """
    Shalf = np.sqrt(map_class.cl_th[: map_class.lmax + 1])
    alm_out = hp.almxfl(real2complex_alm(map_class.alm), Shalf)
    cl_out = hp.alm2cl(alm_out)
    map_out = hp.alm2map(alm_out, map_class.nside)
    return cl_out, map_out
def hires_healpixellize(map_data, theta, phi, nside_interp, nside_synth):
    map = healpixellize(map_data, theta, phi, nside_interp)
    lmax = 3 * nside_interp - 1
    # l, m = hp.Alm.getlm(lmax)
    alm = hp.map2alm(map, lmax=lmax)
    return hp.alm2map(alm, nside_synth, lmax=lmax)
def TQU_TO_TEB_maps(sky_TQU, lmax):
    nside = hp.get_nside(sky_TQU)
    alm_in = hp.map2alm(sky_TQU, lmax, pol=True)
    sky_TEB = np.empty((3, 12*nside**2))
    for i in range(3):
        sky_TEB[i] = hp.alm2map(alm_in[i], nside, pol=False)
    return sky_TEB
def remove_low_l(map1, mask1):
    map1m = hp.ma(map1)
    map1m.mask = np.logical_not(mask1)
    map2 = np.ma.masked_values(map1, UNSEEN)
    alm2 = hp.map2alm(map2, lmax=LMAX)
    fac2 = np.array([1.] * (LMAX + 1))
    fac2[0:lREM] = 0.
    alm2n = hp.sphtfunc.almxfl(alm2, fac2, inplace=False)
    map2new = hp.ma(hp.alm2map(alm2n, 2048))
    map2new.mask = np.logical_not(mask1)
    return map2new
def harmonic_ud_grade(hmap, nside):
    """
    Decompose a map at a resolution nside_in into spherical harmonic
    components and then resynthesize the map at nside_out.
    """
    npix_in = len(hmap)
    nside_in = hp.npix2nside(npix_in)
    lmax_in = 3 * nside_in - 1
    hmap_out = hp.alm2map(hp.map2alm(hmap, lmax=lmax_in), nside, verbose=False)
    return hmap_out
def TEB_to_TQU_maps(sky_TEB, lmax):
    nside = hp.get_nside(sky_TEB)
    # healpy alm vectors are complex with hp.Alm.getsize(lmax) entries
    alm = np.empty((3, hp.Alm.getsize(lmax)), dtype=complex)
    for i in range(3):
        alm[i] = hp.map2alm(sky_TEB[i], lmax, pol=False)
    sky_TQU = hp.alm2map(alm, nside, lmax, pol=True)
    return sky_TQU
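# Round-trip sanity check (illustrative, not from the original source):
# convert a band-limited T/Q/U sky into T/E/B maps with TQU_TO_TEB_maps above
# and back with TEB_to_TQU_maps; the input should be recovered up to
# harmonic-transform / pixelisation accuracy.
import numpy as np
import healpy as hp

nside, lmax = 64, 128
cl = 1e-2 * np.ones(lmax + 1)
cl[:2] = 0.0
cls = (cl, 0.1 * cl, 0.01 * cl, np.zeros(lmax + 1))  # TT, EE, BB, TE
sky_TQU = np.asarray(hp.synfast(cls, nside, lmax=lmax, new=True, pol=True))

sky_TEB = TQU_TO_TEB_maps(sky_TQU, lmax)
sky_back = TEB_to_TQU_maps(sky_TEB, lmax)
print(np.max(np.abs(sky_back - sky_TQU)))  # small compared to the map amplitude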
def simulate_tebp_correlated(cl_tebp_arr, nside, lmax):
    alms = healpy.synalm(cl_tebp_arr, lmax=lmax, new=True)
    aphi = alms[-1]
    acmb = alms[0:-1]
    # Set to zero above map resolution to avoid aliasing
    beam_cut = np.ones(3*nside)
    for ac in acmb:
        healpy.almxfl(ac, beam_cut, inplace=True)
    cmb = np.array(healpy.alm2map(acmb, nside, pol=True))
    return cmb, aphi
def test_map2alm_pol(self):
    tmp = [np.empty(o.size*2) for o in self.mapiqu]
    for t, o in zip(tmp, self.mapiqu):
        t[::2] = o
    maps = [self.mapiqu,
            [o.astype(np.float32) for o in self.mapiqu],
            [t[::2] for t in tmp]]
    for input in maps:
        alm = hp.map2alm(input, iter=10)
        output = hp.alm2map(alm, 32)
        for i, o in zip(input, output):
            np.testing.assert_allclose(i, o, atol=1e-4)
def simulate_tebp_correlated(cl_tebp_arr, nside, lmax, seed):
    np.random.seed(seed)
    alms = hp.synalm(cl_tebp_arr, lmax=lmax, new=True)
    aphi = alms[-1]
    acmb = alms[0:-1]
    # Set to zero above map resolution to avoid aliasing
    beam_cut = np.ones(3*nside)
    for ac in acmb:
        hp.almxfl(ac, beam_cut, inplace=True)
    cmb = np.array(hp.alm2map(acmb, nside, pol=True, verbose=False))
    return cmb, aphi
def best_daughter_sp(mother, true, error, differences, loops, nside): #takes inputs of mother (guessalm), true(real map of shear), error on true, differences in alm space of values between true and mother, iterations to output the best guess mother2=np.asanyarray(mother) malm=mother2.copy() #mother input as pixel space map of three arrays - a scalar (ie galaxy density), gamma1, gamma2 true2=np.asanyarray(true) # [galdens, E_lm_m, B_lm_m]=hp.map2alm(mother2.copy(), pol=True) # malm=E_lm.copy() [galdensr, E_lm, B_lm]=hp.map2alm(true2.copy(), pol=True) fudge=error.copy() correct=true[1]+1j*true[2] daughter1=make_daughter_sp(malm.copy(), differences, nside=nside) bestdaughter=daughter1.copy() d1in=[galdensr, daughter1, B_lm] [num, d1shear1, d1shear2]=hp.alm2map(d1in, nside=nside, pol=True) chilist=[0]*loops chilist[0]=chi_squared(d1shear1+1j*d1shear2, correct, fudge) # print(chilist.shape) print(chilist[0].shape) bestchi=chilist[0] for g in range(0, loops): daughter2=make_daughter_sp(malm.copy(), differences, nside=nside) d2in=[galdensr, daughter2, B_lm] [num, d2shear1, d2shear2]=hp.alm2map(d2in, nside=nside, pol=True) chiagain=chi_squared(d2shear1+1j*d2shear2, correct, fudge) chilist[g]=chiagain print(bestchi.shape) if chilist[g] < bestchi: bestchi=chiagain bestdaughter=daughter2.copy() print('woo') print(g) dout=[galdensr, bestdaughter, B_lm] hp.visufunc.mollview(map_mass_lm(bestdaughter.copy(), (3*nside)-1, nside)) mpl.savefig('fittedmass.png') mpl.close() [gals, bestmapg1, g2]=hp.alm2map(dout, nside=nside, pol=True) return bestmapg1+(1j*g2), bestchi
def generate_gaus_map(self, readGmap=-1):
    if (readGmap < 0):
        self.gausalm = hp.synalm(self.inputCls)
    else:
        # code assumes that self.mapsdir="maps50/"
        self.gausalm = hp.read_alm(self.mapsdir + "gmap_" + str(readGmap) + ".fits")

    ns = 0.965
    C0N = (1.0 - np.exp(-(ns - 1) * self.efolds)) / (ns - 1)
    C050 = (1.0 - np.exp(-(ns - 1) * 50)) / (ns - 1)
    alm0r = np.sqrt(C0N / C050)
    self.gausalm[0] = self.gausalm[0] * alm0r
    self.gausmap = hp.alm2map(self.gausalm, nside=self.NSIDE)  # includes the monopole bit
def simulate(self, det, idx):
    assert det in (dmc.wmap_das + dmc.wmap_bands)

    tlm = self.sim_tlm_cmb.get_sim_tlm(idx)
    hp.almxfl(tlm, self.get_beam(det), inplace=True)
    tmap = hp.alm2map(tlm, self.nside)

    tmap_nse = self.simulate_nse(det)
    tmap += tmap_nse
    del tmap_nse

    return tmap
def show_one_spherical_harmonic_of_CMB_T_map(self, l=1, m=1, max=20):
    """
    To do this we need to make a healpy-format alm array, with just one
    non-zero complex value in it, which we extract from the parent alm array.
    Since healpy only returns positive m coefficients, we just ask to see
    component with that |m|.
    """
    projected_alm = self.alm * 0.0
    i = hp.Alm.getidx(self.lmax, l, np.abs(m))  # Note |m| here
    projected_alm[i] = self.alm[i]
    projected_map = hp.alm2map(projected_alm, self.NSIDE)
    hp.mollview(projected_map)
    return
def map_mass_lm(E_lm, L, nside):
    # courtesy of Boris Leistedt's code
    lfac = np.zeros(E_lm.shape)
    for el in range(L):
        for em in range(L):
            lfac[lm_indices(el, em, L)] = el
    phiE_lm = -2 * E_lm / np.sqrt((lfac + 2) * (lfac + 1) * (lfac) * (lfac - 1))
    phiE_lm[np.isnan(phiE_lm)] = 0
    kappa_lm = -lfac * (lfac + 1) * (phiE_lm / 2)
    kappa_lm[np.isnan(kappa_lm)] = 0
    kappa_map = hp.alm2map(kappa_lm, nside=nside, lmax=3*nside-1, pol=False)
    return kappa_map
def apply_alm(self, alm):
    # applies Y^T N^{-1} Y
    npix = len(self.n_inv)

    hp.almxfl(alm, self.b_transf, inplace=True)
    tmap = hp.alm2map(alm, hp.npix2nside(npix))
    self.apply_map(tmap)
    alm[:] = hp.map2alm(tmap, lmax=util_alm.nlm2lmax(len(alm)), iter=0, regression=False)
    alm[:] *= (npix / (4.*np.pi))
    hp.almxfl(alm, self.b_transf, inplace=True)
    return maxi, maxang, mini, minang


iterations = []

# Initialize data arrays
D = np.zeros((1 + len(iterations), nDipoles))
raAvg = np.zeros((len(lats), len(fovs), nDipoles))
covs = np.zeros((len(lats), len(fovs), nDipoles))

# Loop over dipoles orientations
for dipole in range(0, nDipoles):
    almtemp = h.map2alm(np.zeros(12 * nside * nside), lmax=lmax)
    almtemp[h.sphtfunc.Alm.getidx(lmax=lmax, l=1, m=0)] = dipole
    almtemp[h.sphtfunc.Alm.getidx(lmax=lmax, l=1, m=1)] = 10 - dipole
    truemap = h.alm2map(almtemp, nside, lmax=lmax)
    truemap *= strength / max(truemap)

    print("=========")
    print("Dipole %d" % dipole)

    maxi, maxang, mini, minang = GetMaxMin(truemap)
    D[0, dipole] = 90. - maxang[0] / degree

    # Strength for RA average subtracted
    # Loop over possible detector latitudes
    for ilat, lat in enumerate(lats):
        for ifov, fov in enumerate(fovs):
            raAvgmap = truemap.copy()
            raAvgFOVmap = truemap.copy()
            decHi = lat + fov * .5
def test_satellite_scan(lmax=700, mmax=2, fwhm=43, ra0=-10, dec0=-57.5, az_throw=50, scan_speed=2.8, hwp_mode=None, alpha=45., beta=45., alpha_period=5400., beta_period=600., delta_az=0., delta_el=0., delta_psi=0., jitter_amp=1.0): ''' Simulates a satellite scan strategy using a random LCDM realisation and a 3 x 3 grid of Gaussian beams pairs. Bins tods into maps and compares to smoothed input maps (no pair- differencing). MPI-enabled. Keyword arguments --------- lmax : int, bandlimit (default : 700) mmax : int, assumed azimuthal bandlimit beams (symmetric in this example so 2 would suffice) (default : 2) fwhm : float, The beam FWHM in arcmin (default : 40) ra0 : float, Ra coord of centre region (default : -10) dec0 : float, (default : -57.5) Ra coord of centre region az_throw : float, Scan width in azimuth (in degrees) (default : 50) scan_speed : float, Scan speed in deg/s (default : 1) hwp_mode : str, None HWP modulation mode, either "continuous", "stepped" or None. Use freq of 1 or 1/10800 Hz respectively (default : None) ''' print('Simulating a satellite...') mlen = 3 * 24 * 60 * 60 # hardcoded mission length # Create LCDM realization ell, cls = get_cls() np.random.seed(25) # make sure all MPI ranks use the same seed alm = hp.synalm(cls, lmax=lmax, new=True, verbose=True) # uK sat = ScanStrategy( mlen, # mission duration in sec. external_pointing=True, # Telling code to use non-standard scanning sample_rate=12.01, # sample rate in Hz location='space') # Instrument at south pole # Create a 3 x 3 square grid of Gaussian beams sat.create_focal_plane(nrow=7, ncol=7, fov=15, lmax=lmax, fwhm=fwhm) # calculate tods in two chunks sat.partition_mission(0.5 * sat.nsamp) # Allocate and assign parameters for mapmaking sat.allocate_maps(nside=256) scan_opts = dict(q_bore_func=sat.satellite_scan, ctime_func=sat.satellite_ctime, q_bore_kwargs=dict(), ctime_kwargs=dict()) # Generate timestreams, bin them and store as attributes sat.scan_instrument_mpi(alm, verbose=1, ra0=ra0, dec0=dec0, az_throw=az_throw, nside_spin=256, max_spin=mmax, **scan_opts) # Solve for the maps maps, cond, proj = sat.solve_for_map(fill=np.nan, return_proj=True) # Plotting if sat.mpi_rank == 0: print('plotting results') cart_opts = dict(unit=r'[$\mu K_{\mathrm{CMB}}$]') # plot rescanned maps plot_iqu(maps, '../scratch/img/', 'rescan_satellite', sym_limits=[250, 5, 5], plot_func=hp.mollview, **cart_opts) # plot smoothed input maps nside = hp.get_nside(maps[0]) hp.smoothalm(alm, fwhm=np.radians(fwhm / 60.), verbose=False) maps_raw = hp.alm2map(alm, nside, verbose=False) plot_iqu(maps_raw, '../scratch/img/', 'raw_satellite', sym_limits=[250, 5, 5], plot_func=hp.mollview, **cart_opts) # plot difference maps for arr in maps_raw: # replace stupid UNSEEN crap arr[arr == hp.UNSEEN] = np.nan diff = maps_raw - maps plot_iqu(diff, '../scratch/img/', 'diff_satellite', sym_limits=[1e-6, 1e-6, 1e-6], plot_func=hp.mollview, **cart_opts) # plot condition number map cart_opts.pop('unit', None) plot_map(cond, '../scratch/img/', 'cond_satellite', min=2, max=5, unit='condition number', plot_func=hp.mollview, **cart_opts) plot_map(proj[0], '../scratch/img/', 'hits_satellite', unit='Hits', plot_func=hp.mollview, **cart_opts)
def test_ghosts(lmax=700, mmax=5, fwhm=43, ra0=-10, dec0=-57.5, az_throw=50, scan_speed=2.8, rot_period=4.5 * 60 * 60, hwp_mode=None): ''' Similar test to `scan_bicep`, but includes reflected ghosts Simulates a 24h BICEP2-like scan strategy using a random LCDM realisation and a 3 x 3 grid of Gaussian beams pairs. Bins tods into maps and compares to smoothed input maps (no pair- differencing). MPI-enabled. Keyword arguments --------- lmax : int, bandlimit (default : 700) mmax : int, assumed azimuthal bandlimit beams (symmetric in this example so 2 would suffice) (default : 5) fwhm : float, The beam FWHM in arcmin (default : 40) ra0 : float, Ra coord of centre region (default : -10) dec0 : float, (default : -57.5) Ra coord of centre region az_throw : float, Scan width in azimuth (in degrees) (default : 50) scan_speed : float, Scan speed in deg/s (default : 1) rot_period : float, The instrument rotation period in sec (default : 600) hwp_mode : str, None HWP modulation mode, either "continuous", "stepped" or None. Use freq of 1 or 1/10800 Hz respectively (default : None) ''' mlen = 24 * 60 * 60 # hardcoded mission length # Create LCDM realization ell, cls = get_cls() np.random.seed(25) # make sure all MPI ranks use the same seed alm = hp.synalm(cls, lmax=lmax, new=True, verbose=True) # uK b2 = ScanStrategy( mlen, # mission duration in sec. sample_rate=12.01, # sample rate in Hz location='spole') # Instrument at south pole # Create a 3 x 3 square grid of Gaussian beams b2.create_focal_plane(nrow=3, ncol=3, fov=5, lmax=lmax, fwhm=fwhm) # Create reflected ghosts for every detector # We create two ghosts per detector. They overlap # but have different fwhm. First ghost is just a # scaled down version of the main beam, the second # has a much wider Gaussian shape. # After this initialization, the code takes # the ghosts into account without modifications b2.create_reflected_ghosts(b2.beams, amplitude=0.01, ghost_tag='ghost_1', dead=False) b2.create_reflected_ghosts(b2.beams, amplitude=0.01, fwhm=100, ghost_tag='ghost_2', dead=False) # calculate tods in two chunks b2.partition_mission(0.5 * b2.nsamp) # Allocate and assign parameters for mapmaking b2.allocate_maps(nside=256) # set instrument rotation b2.set_instr_rot(period=rot_period, angles=[68, 113, 248, 293]) # Set HWP rotation if hwp_mode == 'continuous': b2.set_hwp_mod(mode='continuous', freq=1.) 
elif hwp_mode == 'stepped': b2.set_hwp_mod(mode='stepped', freq=1 / (3 * 60 * 60.)) # Generate timestreams, bin them and store as attributes b2.scan_instrument_mpi(alm, verbose=1, ra0=ra0, dec0=dec0, az_throw=az_throw, nside_spin=256, max_spin=mmax) # Solve for the maps maps, cond = b2.solve_for_map(fill=np.nan) # Plotting if b2.mpi_rank == 0: print('plotting results') cart_opts = dict( rot=[ra0, dec0, 0], lonra=[-min(0.5 * az_throw, 90), min(0.5 * az_throw, 90)], latra=[-min(0.375 * az_throw, 45), min(0.375 * az_throw, 45)], unit=r'[$\mu K_{\mathrm{CMB}}$]') # plot rescanned maps plot_iqu(maps, '../scratch/img/', 'rescan_ghost', sym_limits=[250, 5, 5], plot_func=hp.cartview, **cart_opts) # plot smoothed input maps nside = hp.get_nside(maps[0]) hp.smoothalm(alm, fwhm=np.radians(fwhm / 60.), verbose=False) maps_raw = hp.alm2map(alm, nside, verbose=False) plot_iqu(maps_raw, '../scratch/img/', 'raw_ghost', sym_limits=[250, 5, 5], plot_func=hp.cartview, **cart_opts) # plot difference maps for arr in maps_raw: # replace stupid UNSEEN crap arr[arr == hp.UNSEEN] = np.nan diff = maps_raw - maps plot_iqu(diff, '../scratch/img/', 'diff_ghost', sym_limits=[1e+1, 1e-1, 1e-1], plot_func=hp.cartview, **cart_opts) # plot condition number map cart_opts.pop('unit', None) plot_map(cond, '../scratch/img/', 'cond_ghost', min=2, max=5, unit='condition number', plot_func=hp.cartview, **cart_opts) # plot input spectrum cls[3][cls[3] <= 0.] *= -1. dell = ell * (ell + 1) / 2. / np.pi plt.figure() for i, label in enumerate(['TT', 'EE', 'BB', 'TE']): plt.semilogy(ell, dell * cls[i], label=label) plt.legend() plt.ylabel(r'$D_{\ell}$ [$\mu K^2_{\mathrm{CMB}}$]') plt.xlabel(r'Multipole [$\ell$]') plt.savefig('../scratch/img/cls_ghost.png') plt.close()
fname_alm = "Planck_data/COM_Lensing_2048_R2.00/dat_klm.fits" # Mask fname_msk = "Planck_data/COM_Lensing_2048_R2.00/mask.fits.gz" # Output prefix predir_out = "/mnt/extraspace/damonge/S8z_data/derived_products/planck_lensing/" os.system("mkdir -p " + predir_out) # Read original alms print("Reading kappa alms") alm_g = hp.read_alm(predir_in + fname_alm) # Rotate alms to Equatorial coordinates print("Rotating to Equatorial") alm_c = rotate_alm_g_c(alm_g) # Compute map print("Transforming to pixels") mpk = hp.alm2map(alm_c, nside) print("Writing output map") hp.write_map(predir_out + "map_kappa_ns%d.fits" % nside, mpk, overwrite=True) # Same thing with mask print("Reading original mask") msk_g = hp.read_map(predir_in + fname_msk, verbose=False) print("Rotating to Equatorial") msk_c = rotate_map_g_c(msk_g) # Binarize msk_c[msk_c < 0.5] = 0 msk_c[msk_c >= 0.5] = 1. # Up/down-grade to chosen pixelization msk_c = hp.ud_grade(msk_c, nside_out=nside) print("Writing output mask")
region, "tsz", 'cmb', "joint", beam=True) dbfile = tutils.get_generic_fname(tdir, region, "tsz", 'cib', "joint", beam=True) imask = enmap.read_map( tutils.get_generic_fname(tdir, region, "tsz", None, "joint", mask=True)) dmask = hp.alm2map( cs.map2alm(imask, lmax=lmax).astype(np.complex128), nside) dmask[dmask < 0.5] = 0 dmask[dmask > 0.5] = 1 jmask = dmask * mask io.mollview(jmask, os.environ['WORK'] + '/jmask_%s.png' % region) fsky = jmask.sum() / jmask.size print(fsky * 41252.) delta = hdelta * jmask galm = hp.map2alm(delta, lmax=lmax) for bfile, imap in zip([ybfile, cbfile, dbfile], [ymap, cmap, dmap]): yalm = hp.map2alm( hp.alm2map(cs.map2alm(imap, lmax=lmax).astype(np.complex128), nside=nside) * jmask,
def mapmaker(params, post): # size of the blm array blm_size = Alm.getsize(params['lmax']) ## we will plot with a larger nside than the analysis for finer plots nside = 2 * params['nside'] npix = hp.nside2npix(nside) # Initialize power skymap omega_map = np.zeros(npix) blmax = params['lmax'] for ii in range(post.shape[0]): sample = post[ii, :] # Omega at 1 mHz Omega_1mHz = (10**(sample[3])) * (1e-3 / 25)**(sample[2]) ## blms. blms = np.append([1], sample[4:]) ## Complex array of blm values for both +ve m values blm_vals = np.zeros(blm_size, dtype='complex') ## this is b00, alsways set to 1 blm_vals[0] = 1 norm, cnt = 1, 1 for lval in range(1, blmax + 1): for mval in range(lval + 1): idx = Alm.getidx(blmax, lval, mval) if mval == 0: blm_vals[idx] = blms[cnt] cnt = cnt + 1 else: ## prior on amplitude, phase blm_vals[idx] = blms[cnt] * np.exp(1j * blms[cnt + 1]) cnt = cnt + 2 norm = np.sum(blm_vals[0:(blmax + 1)]**2) + np.sum( 2 * np.abs(blm_vals[(blmax + 1):])**2) prob_map = (1.0 / norm) * (hp.alm2map(blm_vals, nside, verbose=False))**2 ## add to the omega map omega_map = omega_map + Omega_1mHz * prob_map omega_map = omega_map / post.shape[0] hp.mollview(omega_map, title='Posterior predictive skymap of $\\Omega(f= 1mHz)$') hp.graticule() plt.savefig(params['out_dir'] + '/post_skymap.png', dpi=150) print('saving injected skymap at ' + params['out_dir'] + '/post_skymap.png') plt.close() #### ------------ Now plot median value # median values of the posteriors med_vals = np.median(post, axis=0) ## blms. blms_median = np.append([1], med_vals[4:]) # Omega at 1 mHz Omega_1mHz_median = (10**(med_vals[3])) * (1e-3 / 25)**(med_vals[2]) ## Complex array of blm values for both +ve m values blm_median_vals = np.zeros(blm_size, dtype='complex') ## this is b00, alsways set to 1 blm_median_vals[0] = 1 cnt = 1 for lval in range(1, blmax + 1): for mval in range(lval + 1): idx = Alm.getidx(blmax, lval, mval) if mval == 0: blm_median_vals[idx] = blms_median[cnt] cnt = cnt + 1 else: ## prior on amplitude, phase blm_median_vals[idx] = blms_median[cnt] * np.exp( 1j * blms_median[cnt + 1]) cnt = cnt + 2 norm = np.sum(blm_median_vals[0:(blmax + 1)]**2) + np.sum( 2 * np.abs(blm_median_vals[(blmax + 1):])**2) Omega_median_map = Omega_1mHz_median * (1.0 / norm) * (hp.alm2map( blm_median_vals, nside, verbose=False))**2 hp.mollview(omega_map, title='median skymap of $\\Omega(f= 1mHz)$') hp.graticule() plt.savefig(params['out_dir'] + '/post_median_skymap.png', dpi=150) print('saving injected skymap at ' + params['out_dir'] + '/post_median_skymap.png') plt.close() return
    for ipix in range(0, num_pix):
        product = arr1[ipix] * arr2[ipix] * arr3[ipix]
        bi_sum += product
    bi_sum /= (4.0 * np.pi * num_pix)  # If masked then num_pix is total number of unmasked pixels
    return bi_sum

# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

filename = '/dataspace/sandeep/Bispectrum_data/fnl_test/Elsner_alm/alm_l_0001_v3.fits'
filename1 = '/dataspace/sandeep/Bispectrum_data/fnl_test/Elsner_alm/alm_nl_0001_v3.fits'

alm_1 = hp.read_alm(filename)
alm_nl_1 = hp.read_alm(filename1)
map_20 = hp.alm2map((alm_1 + 20 * alm_nl_1) * 2.7255, 1024)

lmax = 2500
"""
cl = hp.anafast(map_20, lmax=lmax)
ell = np.arange(len(cl))
# map_save = hp.mollview(map_20*1e6*2.7255, min=-500, max=500, unit="$\mu K$")
# gives me the attached figure.
plt.figure(1)
plt.plot(ell, ell * (ell+1) * cl, color='crimson')
plt.xlabel('ell'); plt.ylabel('ell(ell+1)cl'); plt.grid()
plt.savefig("/home/sandeep/Benjamin_Test_cl.eps", dpi=100)
plt.show()
"""

nside_f_est = 1024
def apply_smoothing_and_coord_transform(input_map, fwhm=None, rot=None, lmax=None, map_dist=None): """Apply smoothing and coordinate rotation to an input map it applies the `healpy.smoothing` Gaussian smoothing kernel if `map_dist` is None, otherwise applies distributed smoothing with `libsharp`. In the distributed case, no rotation is supported. Parameters ---------- input_map : ndarray Input map, of shape `(3, npix)` This is assumed to have no beam at this point, as the simulated small scale tempatle on which the simulations are based have no beam. fwhm : astropy.units.Quantity Full width at half-maximum, defining the Gaussian kernels to be applied. rot: hp.Rotator Apply a coordinate rotation give a healpy `Rotator`, e.g. if the inputs are in Galactic, `hp.Rotator(coord=("G", "C"))` rotates to Equatorial Returns ------- smoothed_map : np.ndarray Array containing the smoothed sky """ if map_dist is None: nside = hp.get_nside(input_map) alm = hp.map2alm( input_map, lmax=lmax, use_pixel_weights=True if nside > 16 else False, verbose=False, ) if fwhm is not None: hp.smoothalm(alm, fwhm=fwhm.to_value(u.rad), verbose=False, inplace=True, pol=True) if rot is not None: rot.rotate_alm(alm, inplace=True) smoothed_map = hp.alm2map(alm, nside=nside, verbose=False, pixwin=False) else: assert (rot is None) or ( rot.coordin == rot.coordout), "No rotation supported in distributed smoothing" smoothed_map = mpi.mpi_smoothing(input_map, fwhm, map_dist) if hasattr(input_map, "unit"): smoothed_map <<= input_map.unit return smoothed_map
import matplotlib.pyplot as plt nside = 128 L = 128 J_min = 1 B = 3 J = pys2let_j_max(B, L, J_min) # The filename of some random healpix map fname = '/Users/bl/Dropbox/Wavelets/s2let/data/somecmbsimu_hpx_128.fits' # Read healpix map and compute alms. # f_lm has size L*(L+1)/2 f_ini = hp.read_map(fname) # Initial map f_lm = hp.map2alm(f_ini, lmax=L - 1) # Its alms f = hp.alm2map(f_lm, nside=nside, lmax=L - 1) # Band limited version hp.mollview(f) # Call pys2let and compute wavelet transform. Returns the harmonic coefficients of the wavelets. # f_scal_lm has size L*(L+1)/2 # f_wav_lm has size L*(L+1)/2 by J-J_min+1 f_wav_lm, f_scal_lm = analysis_axisym_lm_wav(f_lm, B, L, J_min) # Reconstruct healpix maps on the sphere and plot them f_scal = hp.alm2map(f_scal_lm, nside=nside, lmax=L - 1) hp.mollview(f_scal) f_wav = np.empty([12 * nside * nside, J - J_min + 1]) for j in range(J - J_min + 1): flm = f_wav_lm[:, j].ravel() f_wav[:, j] = hp.alm2map(flm, nside=nside, lmax=L - 1)
def residual(nside, alms, lmax, blms, Ideal_comp=True, Conv_comp=False, Smooth_comp=False, View_diffMap=False, Plot=False): ''' Compute the APS of the residual maps which are the differnce maps between the output map and the input map convolved with the beam. --------- nside: int the nside of the map alms : array-like array of alm arrays that share lmax and mmax. For each frequency we have three healpy alm array lmax: int The bandlmit. blms : array-like array of alm arrays that share lmax and mmax. Realtive to optical beam. For each frequency we have three healpy blm array Keyword arguments ----------------- View_diffMap : bool if True: Display the residual maps (default : False) Plot : bool if True: shows the residual APS and store them in the output folder (default : False) ''' Freq = [90, 95, 100, 105, 110] for freq, alm, blm in zip(Freq, alms, blms): O_maps = hp.read_map(opj(dir_out, 'Output_maps/Bconv_' + str(freq) + 'GHz.fits'), field=(0, 1, 2), verbose=False) In_maps = hp.read_map(opj(dir_inp, 'pysm_maps/CMB_' + str(freq) + 'GHz.fits'), field=(0, 1, 2), verbose=False) O_maps[np.isnan(O_maps)] = 0. In_maps[np.isnan(In_maps)] = 0. # ## COMPARISON wrt IDEAL CASE if Ideal_comp: Ideal_maps = hp.read_map(opj( dir_ideal, 'Output_maps/Bconv_' + str(freq) + 'GHz.fits'), field=(0, 1, 2), verbose=False) Ideal_maps[np.isnan(Ideal_maps)] = 0. cl_in = hp.anafast(Ideal_maps, lmax=lmax - 1, mmax=4) res_maps_ring = O_maps - Ideal_maps res_maps_ring[np.isnan(res_maps_ring)] = 0. # ## COMPARISON wrt PYSM MAP CONVOLVED WITH OPTICAL BEAM elif Conv_comp: blmax = hp.Alm.getlmax(alm[0].size) blm = trunc_alm(blm, blmax) alm = alm * blm In_maps_sm = hp.alm2map(alm, nside=hp.get_nside(O_maps), pol=True, verbose=False) cl_in = hp.anafast(In_maps_sm, lmax=lmax - 1, mmax=4) hp.write_map(opj(dir_out, 'Smoot_IM/In_map_smoot_' + str(freq) + 'GHz.fits'), In_maps_sm, overwrite=True) res_maps_ring = O_maps - In_maps_sm res_maps_ring[np.isnan(res_maps_ring)] = 0. # ## COMPARISON wrt PYSM MAP SMOOTHED WITH GAUSSIAN BEAM elif Smooth_comp: In_maps_gauss = hp.smoothing(In_maps, fwhm=np.radians(32.2 / 60), verbose=False) cl_in = hp.anafast(In_maps_gauss, lmax=lmax - 1, mmax=4) hp.write_map(opj(dir_out, 'Gauss_IM/In_map_gauss_' + str(freq) + 'GHz.fits'), In_maps_gauss, overwrite=True) res_maps_ring = O_maps - In_maps_gauss res_maps_ring[np.isnan(res_maps_ring)] = 0. hp.write_map(opj(dir_out, 'Res_Maps/res_maps_' + str(freq) + 'GHz.fits'), res_maps_ring, overwrite=True) cl_res = hp.anafast(res_maps_ring, lmax=lmax - 1, mmax=4) ## TT,EE,BB,TE,EB,TB np.save( os.path.join(dir_out + 'residual_spectra/Cell_' + str(freq) + 'GHz.npy'), cl_res) ratio_cl_res = np.zeros((6, lmax)) ratio_cl_res[:, 2:] = ( cl_res[:, 2:lmax] / cl_in[:, 2:lmax] ) * 100 ### the first two multipoles in cl_in are zeros, so we will have a numerical problem.. np.save( os.path.join(dir_out + 'residual_spectra/perc_Cell_' + str(freq) + 'GHz.npy'), ratio_cl_res) if View_diffMap: cart_opts = dict(unit=r'[$\mu K_{\mathrm{CMB}}$]') hp.cartview(res_maps_ring[0], min=-250, max=250, **cart_opts) hp.cartview(res_maps_ring[1], min=-5, max=5, **cart_opts) hp.cartview(res_maps_ring[2], min=-5, max=5, **cart_opts) plt.show() if Plot: plot_APS_residual( ratio_cl_res[0], ratio_cl_res[1], ratio_cl_res[2], dir_out + 'residual_spectra/plots/Cell' + str(freq) + 'GHz.pdf', r'$\nu = $' + str(freq) + 'GHz')
mask_planck = hp.read_map('data/planck/data/mask_ns2048.fits', dtype=None, verbose=False).astype(float) map_planck = hp.read_map('data/planck/data/map_kappa_ns2048.fits', dtype=None, verbose=False).astype(float) alm_planck = hp.map2alm(map_planck) ll, nll, cll = np.loadtxt('data/nlkk.dat', unpack=True) ll = ll.astype(int) cl = np.zeros(ll[-1] + 1) cl[ll[0]:] = cll nl = np.zeros(ll[-1] + 1) nl[ll[0]:] = nll wl = (cl - nl) / np.maximum(cl, np.ones_like(cl) * 1E-10) alm_planck = hp.almxfl(alm_planck, wl) map_planck = hp.alm2map(alm_planck, nside, verbose=False) map_delta = f.mp #alm_delta = hp.map2alm(map_delta) #alm_delta = hp.almxfl(alm_delta, wl) #map_delta = hp.alm2map(alm_delta, nside, verbose=False) ut.plot_lotss_map(map_planck * mask_lofar * mask_planck, mask=mask_lofar * mask_planck, title=r'$\kappa$', fname='plots/kappa.pdf') ut.plot_lotss_map(p_map0p1, mask=mask_lofar, title=r'Depth, $I_{\rm cut}=0.1\,{\rm MJy}$', fname='plots/depth_0p1.pdf') ut.plot_lotss_map(p_map2, mask=mask_lofar,
def g2eb(g1, g2):
    nside = gnside(g1)
    (ae, ab) = hp.map2alm_spin((g1, g2), 2)
    ke = hp.alm2map(ae, nside, pol=False)
    kb = hp.alm2map(ab, nside, pol=False)
    return ke, kb
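# Usage sketch (illustrative; assumes the g2eb above together with its gnside
# helper, which presumably returns the nside of the input map): split toy
# spin-2 shear maps into E-mode and B-mode scalar maps.
import numpy as np
import healpy as hp

nside = 64
npix = hp.nside2npix(nside)
rng = np.random.default_rng(0)
g1 = rng.normal(size=npix)
g2 = rng.normal(size=npix)

ke, kb = g2eb(g1, g2)
print(ke.shape, kb.shape)  # two scalar maps of length 12*nside**2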
def get_sim_tmap(self, idx):
    T = hp.alm2map(hp.almxfl(cmb_len_ffp10.get_sim_tlm(idx), self.transf), self.nside)
    nlevt_pix = self.nlevt / np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) / 60.
    T += self.pix_libphas.get_sim(idx, idf=0) * nlevt_pix
    return T
    assert (lmax_in > lmax_out), "New lmax must be less than lmax of input alms"
    alm_out = np.zeros(hp.Alm.getsize(lmax_out), dtype=complex)
    for il in range(0, lmax_out + 1):
        alm_out[hp.Alm.getidx(lmax_out, il, np.arange(0, il + 1))] = alm_in[hp.Alm.getidx(
            lmax_in, il, np.arange(0, il + 1))]
    return alm_out

# Usage:
new_lmax = 8000
alm = lower_lmax(alm, new_lmax)

# Make map from alms
tmap = hp.alm2map(alm, nside)

# Look at map interactively
hp.mollzoom(tmap)

# SPT-only products:
# Read filter transfer functions
tlm = hp.read_alm('tlm_150GHz_lmax10000.fits')

# Read beam file
bl = np.fromfile('bl_gauss_fwhm1p75am_lmax10000.bin')

# Create new beam:
fwhm_arcmin_new = 1.85  # FWHM of new beam
bl_new = hp.gauss_beam(fwhm_arcmin_new / 60. * np.pi / 180., lmax=10000)
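# The snippet above stops after building the new beam. A common next step,
# shown here only as a hedged sketch (an assumption, not something the
# original script states), is to reconvolve the alm from the old beam to the
# new one and resynthesize a map. This assumes bl and bl_new share the same
# lmax and reuses tlm and nside from the snippet above.
ratio = np.zeros_like(bl_new)
nonzero = bl != 0
ratio[nonzero] = bl_new[nonzero] / bl[nonzero]  # bl_new / bl, guarding against zeros
tlm_new = hp.almxfl(tlm, ratio)
tmap_new = hp.alm2map(tlm_new, nside)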
# Initialize data arrays
D = np.zeros((1 + len(iterations), nDipoles))
#raAvg = np.zeros((len(lats),nDipoles))
#covs = np.zeros((len(lats),nDipoles))
raAvg = np.zeros((len(lats), len(fovs), nDipoles))
covs = np.zeros((len(lats), len(fovs), nDipoles))
noM = np.zeros(nDipoles)

# Loop over dipoles orientations
for dipole in range(0, nDipoles):
    almtemp = h.map2alm(np.zeros(12 * nside * nside), lmax=lmax)
    almtemp[h.sphtfunc.Alm.getidx(lmax=lmax, l=1, m=0)] = dipole
    almtemp[h.sphtfunc.Alm.getidx(lmax=lmax, l=1, m=1)] = 10 - dipole
    truemap = h.alm2map(almtemp, nside, lmax=lmax)
    truemap *= strength / max(truemap)

    print("=========")
    print("Dipole %d" % dipole)

    maxi, maxang, mini, minang = GetMaxMin(truemap)
    D[0, dipole] = 90. - maxang[0] / degree

    # Strength observed with method after i iterations
    # Loop over Niterations
    for j, iteration in enumerate(iterations):
        print(" Observed")
        #r = h.read_map("dipoles/CR_dipole%02d_64_360_iteration%d.fits.gz" % (dipole, iteration))
        #r = h.read_map("/data/scratch/fiorino/hawc/local/dipole%02d-wholesky/CR_HAWC_64_360_iteration%d.fits" % (dipole, iteration))
        r = h.read_map(
            "/data/scratch/fiorino/hawc/local/dipole%02d-boost10-hawcsky/CR_HAWC_64_360_iteration%d.fits"
def almPlots(path, outDir, bundle, nside=128, lmax=500, filterband='i', raRange=[-50, 50], decRange=[-65, 5], subsetsToConsider=[[130, 165], [240, 300]], showPlots=True): """ Plot the skymaps/cartview plots corresponding to alms with specified l-ranges. Automatically creates the output directories and saves the plots. Required Parameters ------------------- * path: str: path to the main directory where output directory is saved * outDir: str: name of the main output directory * bundle: metricBundle object. Optional Parameters ------------------- * nside: int: HEALpix resolution parameter. Default: 128 * lmax: int: upper limit on the multipole. Default: 500 * filterBand: str: any one of 'u', 'g', 'r', 'i', 'z', 'y'. Default: 'i' * raRange: float array: range of right ascention (in degrees) to consider in cartview plot; only useful when cartview= True. Default: [-50,50] * decRange: float array: range of declination (in degrees) to consider in cartview plot; only useful when cartview= True. Default: [-65,5] * subsetsToConsider: array of int arrays: l-ranges to consider, e.g. use [[50, 100]] to consider 50<l<100. Currently built to handle five subsets (= number of colors built in). Default: [[130,165], [240, 300]] * showPlots: boolean: set to True if want to show figures. Default: True """ # set up the output directory outDir2 = 'almAnalysisPlots_%s<RA<%s_%s<Dec<%s' % ( raRange[0], raRange[1], decRange[0], decRange[1]) if not os.path.exists('%s%s/%s' % (path, outDir, outDir2)): os.makedirs('%s%s/%s' % (path, outDir, outDir2)) outDir3 = 'almSkymaps' if not os.path.exists('%s%s/%s/%s' % (path, outDir, outDir2, outDir3)): os.makedirs('%s%s/%s/%s' % (path, outDir, outDir2, outDir3)) outDir4 = 'almCartviewMaps' if not os.path.exists('%s%s/%s/%s' % (path, outDir, outDir2, outDir4)): os.makedirs('%s%s/%s/%s' % (path, outDir, outDir2, outDir4)) # ------------------------------------------------------------------------ # In order to consider the out-of-survey area as with data=0, assign the masked region of the # skymaps the median of the in-survey data, and then subtract the median off the entire survey. # Add the median back later. This gets rid of the massive fake monopole and allows reconstructing # the full skymap from components. surveyMedianDict = {} surveyStdDict = {} for dither in bundle: inSurvey = np.where(bundle[dither].metricValues.mask == False)[0] outSurvey = np.where(bundle[dither].metricValues.mask == True)[0] bundle[dither].metricValues.mask[outSurvey] = False # data pixels surveyMedian = np.median(bundle[dither].metricValues.data[inSurvey]) surveyStd = np.std(bundle[dither].metricValues.data[inSurvey]) # assign data[outOfSurvey]= medianData[inSurvey] bundle[dither].metricValues.data[outSurvey] = surveyMedian # subtract median off bundle[dither].metricValues.data[:] = bundle[ dither].metricValues.data[:] - surveyMedian # save median for later use surveyMedianDict[dither] = surveyMedian surveyStdDict[dither] = surveyStd # ------------------------------------------------------------------------ # now find the alms correponding to the map. 
for dither in bundle: array = hp.anafast(bundle[dither].metricValues.filled( bundle[dither].slicer.badval), alm=True, lmax=500) cl = array[0] alm = array[1] l = np.arange(len(cl)) lsubsets = {} colorArray = ['y', 'r', 'g', 'm', 'c'] color = {} for case in range(len(subsetsToConsider)): lsubsets[case] = ((l > subsetsToConsider[case][0]) & (l < subsetsToConsider[case][1])) color[case] = colorArray[case] # ------------------------------------------------------------------------ # plot things out plt.clf() plt.plot(l, (cl * l * (l + 1)) / (2.0 * np.pi), color='b') for key in list(lsubsets.keys()): plt.plot(l[lsubsets[key]], (cl[lsubsets[key]] * l[lsubsets[key]] * (l[lsubsets[key]] + 1)) / (2.0 * np.pi), color=color[key]) plt.title(dither) plt.xlabel('$\ell$') plt.ylabel(r'$\ell(\ell+1)C_\ell/(2\pi)$') filename = 'cls_%s.png' % (dither) plt.savefig('%s%s/%s/%s' % (path, outDir, outDir2, filename), format='png', bbox_inches='tight') if showPlots: plt.show() else: plt.close() surveyMedian = surveyMedianDict[dither] surveyStd = surveyStdDict[dither] # ------------------------------------------------------------------------ # plot full-sky-alm plots first nTicks = 5 colorMin = surveyMedian - 1.5 * surveyStd colorMax = surveyMedian + 1.5 * surveyStd increment = (colorMax - colorMin) / float(nTicks) ticks = np.arange(colorMin + increment, colorMax, increment) # full skymap hp.mollview(hp.alm2map(alm, nside=nside, lmax=lmax) + surveyMedian, flip='astro', rot=(0, 0, 0), min=colorMin, max=colorMax, title='', cbar=False) hp.graticule(dpar=20, dmer=20, verbose=False) plt.title('Full Map') ax = plt.gca() im = ax.get_images()[0] fig = plt.gcf() cbaxes = fig.add_axes([0.1, 0.015, 0.8, 0.04]) # [left, bottom, width, height] cb = plt.colorbar(im, orientation='horizontal', format='%.2f', ticks=ticks, cax=cbaxes) cb.set_label('$%s$-band Coadded Depth' % filterband) filename = 'alm_FullMap_%s.png' % (dither) plt.savefig('%s%s/%s/%s/%s' % (path, outDir, outDir2, outDir3, filename), format='png', bbox_inches='tight') # full cartview hp.cartview(hp.alm2map(alm, nside=nside, lmax=lmax) + surveyMedian, lonra=raRange, latra=decRange, flip='astro', min=colorMin, max=colorMax, title='', cbar=False) hp.graticule(dpar=20, dmer=20, verbose=False) plt.title('Full Map') ax = plt.gca() im = ax.get_images()[0] fig = plt.gcf() cbaxes = fig.add_axes([0.1, -0.05, 0.8, 0.04]) # [left, bottom, width, height] cb = plt.colorbar(im, orientation='horizontal', format='%.2f', ticks=ticks, cax=cbaxes) cb.set_label('$%s$-band Coadded Depth' % filterband) filename = 'alm_Cartview_FullMap_%s.png' % (dither) plt.savefig('%s%s/%s/%s/%s' % (path, outDir, outDir2, outDir4, filename), format='png', bbox_inches='tight') # prepare for the skymaps for l-range subsets colorMin = surveyMedian - 0.1 * surveyStd colorMax = surveyMedian + 0.1 * surveyStd increment = (colorMax - colorMin) / float(nTicks) increment = 1.15 * increment ticks = np.arange(colorMin + increment, colorMax, increment) # ------------------------------------------------------------------------ # consider each l-range for case in list(lsubsets.keys()): index = [] lowLim = subsetsToConsider[case][0] upLim = subsetsToConsider[case][1] for ll in np.arange(lowLim, upLim + 1): for mm in np.arange(0, ll + 1): index.append(hp.Alm.getidx(lmax=lmax, l=ll, m=mm)) alms1 = alm.copy() alms1.fill(0) alms1[index] = alm[index] # an unmasked array # plot the skymap hp.mollview(hp.alm2map(alms1, nside=nside, lmax=lmax) + surveyMedian, flip='astro', rot=(0, 0, 0), min=colorMin, max=colorMax, title='', 
cbar=False) hp.graticule(dpar=20, dmer=20, verbose=False) plt.title('%s<$\ell$<%s' % (lowLim, upLim)) ax = plt.gca() im = ax.get_images()[0] fig = plt.gcf() cbaxes = fig.add_axes([0.1, 0.015, 0.8, 0.04]) # [left, bottom, width, height] cb = plt.colorbar(im, orientation='horizontal', format='%.3f', ticks=ticks, cax=cbaxes) cb.set_label('$%s$-band Coadded Depth' % filterband) filename = 'almSkymap_%s<l<%s_%s.png' % (lowLim, upLim, dither) plt.savefig('%s%s/%s/%s/%s' % (path, outDir, outDir2, outDir3, filename), format='png', bbox_inches='tight') # plot cartview hp.cartview(hp.alm2map(alms1, nside=nside, lmax=lmax) + surveyMedian, lonra=raRange, latra=decRange, flip='astro', min=colorMin, max=colorMax, title='', cbar=False) hp.graticule(dpar=20, dmer=20, verbose=False) plt.title('%s<$\ell$<%s' % (lowLim, upLim)) ax = plt.gca() im = ax.get_images()[0] fig = plt.gcf() cbaxes = fig.add_axes([0.1, -0.05, 0.8, 0.04]) # [left, bottom, width, height] cb = plt.colorbar(im, orientation='horizontal', format='%.3f', ticks=ticks, cax=cbaxes) cb.set_label('$%s$-band Coadded Depth' % filterband) filename = 'almCartview_%s<l<%s_%s.png' % (lowLim, upLim, dither) plt.savefig('%s%s/%s/%s/%s' % (path, outDir, outDir2, outDir4, filename), format='png', bbox_inches='tight') if showPlots: plt.show() else: plt.close('all')
def rotate_map_to_axis(m, ra, dec, nest=False, method='direct'): """Rotate a sky map to place a given line of sight on the +z axis. Parameters ---------- m : np.ndarray The input HEALPix array. ra : float Right ascension of axis in radians. To specify the axis in geocentric coordinates, supply ra=(lon + gmst), where lon is the geocentric longitude and gmst is the Greenwich mean sidereal time in radians. dec : float Declination of axis in radians. To specify the axis in geocentric coordinates, supply dec=lat, where lat is the geocentric latitude in radians. nest : bool, default=False Indicates whether the input sky map is in nested rather than ring-indexed HEALPix coordinates (default: ring). method : 'direct' or 'fft' Select whether to use spherical harmonic transformation ('fft') or direct coordinate transformation ('direct') Returns ------- m_rotated : np.ndarray The rotated HEALPix array. """ npix = len(m) nside = hp.npix2nside(npix) theta = 0.5 * np.pi - dec phi = ra if method == 'fft': if nest: m = hp.reorder(m, n2r=True) alm = hp.map2alm(m) hp.rotate_alm(alm, -phi, -theta, 0.0) ret = hp.alm2map(alm, nside, verbose=False) if nest: ret = hp.reorder(ret, r2n=True) elif method == 'direct': R = hp.Rotator( rot=np.asarray([0, theta, -phi]), deg=False, inv=False, eulertype='Y') theta, phi = hp.pix2ang(nside, np.arange(npix), nest=nest) ipix = hp.ang2pix(nside, *R(theta, phi), nest=nest) ret = m[ipix] else: raise ValueError('Unrecognized method: {0}'.format(method)) return ret
def get_tmap(self, idx):
    """Real-space Wiener filtered tmap.

    \sum_{lm} MAP_talm _0 Ylm(n).
    """
    return hp.alm2map(self.ivfs.get_sim_tmliklm(idx), self.nside)
def degrade_maps(qml_nside, maps_in, pol, qml_mask=None):
    """Cut high multipoles of the input maps and degrade them to low resolution.

    Parameters
    ----------
    qml_nside : integer
        nside of the low-multipole (degraded) maps
    maps_in : numpy.ndarray
        input maps; either a single set of maps or an array of nsimu sets
    pol : bool
        passed to hp.map2alm; set True for polarised (T, Q, U) input
    qml_mask : numpy.ndarray, optional
        mask at the degraded resolution (default: all pixels kept)

    Returns
    -------
    maps : numpy.ndarray
        degraded maps with Nside = qml_nside
    """
    print()
    print("down grade maps to Nside = %d" % qml_nside)
    print(maps_in.shape)
    if len(maps_in.shape) == 2:
        nsimu = 1
        maps_in = np.array([maps_in])
    else:
        nsimu = maps_in.shape[0]
    nside = hp.npix2nside(maps_in.shape[-1])
    Slmax_old = 3 * nside
    Slmax_new = 3 * qml_nside
    print("nsimu = %d" % nsimu)
    print("nside = %d" % nside)

    npix = hp.nside2npix(qml_nside)
    if qml_mask is None:
        qml_mask = np.ones(npix, bool)
    else:
        if npix != len(qml_mask):
            print("The nside of qml_mask inconsistent with qml_nside.")

    ALM = hp.Alm
    maps = []
    for n in np.arange(nsimu):
        progress_bar(n, nsimu)
        n1 = n + 1
        #map_out = hp.read_map(maps_dir+"%d.fits"%n1,field=(0,1,2),dtype=float,verbose=0)
        #map_in = hp.read_map(maps_dir+"%d.fits"%n1,field=(0,1,2),dtype=np.float64,verbose=0)
        map_in = maps_in[n]
        alm_old = hp.map2alm(map_in, lmax=Slmax_old, pol=pol, use_weights=True, iter=3)
        alm_new = np.zeros_like(np.array(alm_old)[:, :ALM.getsize(Slmax_new)])
        for l in np.arange(Slmax_new + 1):
            for m in np.arange(l + 1):
                idx_new = ALM.getidx(Slmax_new, l, m)
                idx_old = ALM.getidx(Slmax_old, l, m)
                if l <= qml_nside:
                    alm_new[:, idx_new] = alm_old[:, idx_old]
                elif l <= Slmax_new:
                    alm_new[:, idx_new] = alm_old[:, idx_old] * 0.5 * (
                        1 + np.sin(l * np.pi / 2 / qml_nside))
        map_out = hp.alm2map(alm_new, nside=qml_nside, pixwin=False)
        map_out = map_out * qml_mask
        # hp.write_map(output_dir+"/map_out_%d.fits"%n1,map_out,dtype=np.float64)
        # hp.mollview(map_out[1], cmap=plt.cm.jet, title='Q map output')
        # plt.savefig(output_dir+'/map_out_%d.eps'%n1,bbox_inches='tight',pad_inches=0.1)
        maps.append(map_out)
    maps = np.array(maps)
    return maps
if temp2 > 0.0: diffCRmap[i] = temp1 / temp2 - CRmap[i] # remove m=0 multipole moments : #LMAX=180 LMAX = 8 out = H.anafast(diffCRmap, alm=True, lmax=LMAX) for i in range(0, LMAX + 1): index = H.sphtfunc.Alm.getidx(LMAX, i, 0) out[1][index] = 0.0 diffCRmap = H.alm2map(out[1], nside, lmax=LMAX) # calculate new norm for timeidx in range(minstep, maxstep): temp1 = 0.0 temp2 = 0.0 beta = timeidx / (1. * Ntimestep) * np.pi * 2 + HAWClon1 cb = np.cos(beta) sb = np.sin(beta) clat = np.cos(HAWClat1) slat = np.sin(HAWClat1)
def test_scan_spole(self): ''' Perform a (low resolution) scan and see if TOD make sense. ''' mlen = 10 * 60 rot_period = 120 mmax = 2 ra0=-10 dec0=-57.5 fwhm = 200 nside = 256 az_throw = 10 polang = 20. ces_opts = dict(ra0=ra0, dec0=dec0, az_throw=az_throw, scan_speed=2.) scs = ScanStrategy(duration=mlen, sample_rate=10, location='spole') # Create a 1 x 1 square grid of Gaussian beams. scs.create_focal_plane(nrow=1, ncol=1, fov=4, lmax=self.lmax, fwhm=fwhm, polang=polang) beam = scs.beams[0][0] scs.init_detpair(self.alm, beam, nside_spin=nside, max_spin=mmax) scs.partition_mission() chunk = scs.chunks[0] ces_opts.update(chunk) # Populate boresight. scs.constant_el_scan(**ces_opts) # Test without returning anything (default behaviour). scs.scan(beam, **chunk) tod = scs.scan(beam, return_tod=True, **chunk) self.assertEqual(tod.size, chunk['end'] - chunk['start']) pix, nside_out, pa, hwp_ang = scs.scan(beam, return_point=True, **chunk) self.assertEqual(pix.size, tod.size) self.assertEqual(nside, nside_out) self.assertEqual(pa.size, tod.size) self.assertEqual(hwp_ang, 0) # Turn on HWP scs.set_hwp_mod(mode='continuous', freq=1., start_ang=0) scs.rotate_hwp(**chunk) tod2, pix2, nside_out2, pa2, hwp_ang2 = scs.scan(beam, return_tod=True, return_point=True, **chunk) np.testing.assert_almost_equal(pix, pix2) np.testing.assert_almost_equal(pix, pix2) np.testing.assert_almost_equal(pa, pa2) self.assertTrue(np.any(np.not_equal(tod, tod2)), True) self.assertEqual(nside_out, nside_out2) self.assertEqual(hwp_ang2.size, tod.size) # Construct TOD manually. polang = beam.polang maps_sm = np.asarray(hp.alm2map(self.alm, nside, verbose=False, fwhm=np.radians(beam.fwhm / 60.))) np.testing.assert_almost_equal(maps_sm[0], scs.spinmaps['main_beam']['maps'][0][0]) q = np.real(scs.spinmaps['main_beam']['maps'][1][mmax + 2]) u = np.imag(scs.spinmaps['main_beam']['maps'][1][mmax + 2]) np.testing.assert_almost_equal(maps_sm[1], q) np.testing.assert_almost_equal(maps_sm[2], u) tod_man = maps_sm[0][pix] tod_man += (maps_sm[1][pix] \ * np.cos(-2 * np.radians(pa + polang + 2 * hwp_ang2))) tod_man += (maps_sm[2][pix] \ * np.sin(-2 * np.radians(pa + polang + 2 * hwp_ang2))) np.testing.assert_almost_equal(tod2, tod_man)
def main(): d, cls_, s_obs = utils.generate_sky_map() _, cls_others, _ = utils.generate_sky_map() config.observations = d group_mat = utils.compute_grouping_matrix() alm = np.random.normal(size=config.N) + 1j * np.random.normal(size=config.N) alm_origin = alm.copy() alm.imag[:config.L_MAX_SCALARS+1] = 0 alm[[0, 1, config.L_MAX_SCALARS+1]] = 0 print(alm) print(alm_origin) A = utils.get_Ylm() A_transp = utils.get_Ylm_transp() map1 = utils.sph_transform_by_hand(alm, A) map2 = hp.alm2map(alm, nside=config.NSIDE) print(map1 - map2) map2_bis = hp.alm2map(alm_origin, nside=config.NSIDE) print(map2 - map2_bis) conjgrad3 = conjugateGradient.CG3() conjgrad4 = conjugateGradient.CG4(group_mat, A, A_transp) #testeur = np.random.normal(size=(config.L_MAX_SCALARS+1)**2) + 1j*np.random.normal(size=(config.L_MAX_SCALARS+1)**2) #testeur.imag[:config.L_MAX_SCALARS + 1] = 0 #testeur[[0, 1, config.L_MAX_SCALARS + 1]] = 0 #m = np.dot(group_mat.T, testeur) #m2 = conjugateGradient.flatten_map4(testeur[config.N - (config.L_MAX_SCALARS + 1):]) #print(m) #print(m2) u = np.concatenate((2*np.ones(config.N - (config.L_MAX_SCALARS + 2)), np.ones(config.L_MAX_SCALARS-1), 3*np.ones(config.N - (config.L_MAX_SCALARS + 2)))) u = np.concatenate((np.random.normal(size=config.N - (config.L_MAX_SCALARS + 2)), np.random.normal(size=config.L_MAX_SCALARS-1), np.random.normal(size=config.N - (config.L_MAX_SCALARS + 2)))) #print(conjugateGradient.unflat_map_to_pix4(u) - np.dot(group_mat, u)[config.N - (config.L_MAX_SCALARS +1):]) m1 = np.dot(A_transp, np.dot(A, np.dot(group_mat, u))) m2 = hp.map2alm(hp.alm2map(conjugateGradient.unflat_map_to_pix4(u), nside=config.NSIDE), lmax=config.L_MAX_SCALARS) print(m2-m1[config.N-(config.L_MAX_SCALARS+2)+1:]) print(m1) #print(A_transp.shape) #e1 = np.dot(group_mat.T, np.dot(A_transp,d)) #e2 = hp.map2alm(d, lmax=config.L_MAX_SCALARS) #print(e1) #print(e2) #print(e1[config.N - (config.L_MAX_SCALARS + 1):] - e2) #print(e1 - e2) u = np.concatenate((2*np.ones(config.N - (config.L_MAX_SCALARS + 2)), np.ones(config.L_MAX_SCALARS-1), 3*np.ones(config.N - (config.L_MAX_SCALARS + 2)))) #u = np.concatenate((np.random.normal(size=config.N - (config.L_MAX_SCALARS + 2)), np.random.normal(size=config.L_MAX_SCALARS-1), # np.random.normal(size=config.N - (config.L_MAX_SCALARS + 2)))) #u[0] = 0 #u[config.N - config.L_MAX_SCALARS] = 0 #u[config.N - config.L_MAX_SCALARS +1] = 0 #u[config.N] = 0 degroup_mat = utils.compute_grouping_inverse() e = np.dot(group_mat, u) print(e[74+4]) print(e[74 - (config.N - (config.L_MAX_SCALARS +1) + 2) + 1 - 4]) print(74 - (config.N - (config.L_MAX_SCALARS +1) + 2) + 1 - 4) v = np.dot(degroup_mat, np.dot(group_mat, u)) print(v) print((config.N - (config.L_MAX_SCALARS +1) + 2) + 1 - 4) print(np.dot(group_mat, u)) #e = np.dot(degroup_mat, np.dot(A_transp, np.dot(A, np.dot(group_mat, u)))) #print(e - u) #print(u) #print("\n") #print(np.vstack((u, e)).T) """ extended_cls = [cl for l in range(config.L_MAX_SCALARS + 1) for cl in cls_[l:]] #extended_cls_real = (extended_cls[:(config.L_MAX_SCALARS + 1)] + extended_cls[(config.L_MAX_SCALARS + 2):])[2:] #extended_cls_imag = extended_cls[(config.L_MAX_SCALARS + 2):] #extended_cls = np.array(extended_cls_real + extended_cls_imag) diag = extended_cls denom_second = 1/np.array(extended_cls) denom_first = 1/np.array(extended_cls[config.L_MAX_SCALARS+1:]) denom_second[[0, 1, config.L_MAX_SCALARS + 1]] = 0 denom_first[0] = 0 inverse_C = np.concatenate((denom_first, denom_second)) cg_Matrix = np.diag(inverse_C) + M print(cg_Matrix) inv_Matrix = 
np.linalg.solve(cg_Matrix, np.ones(81)) print(inv_Matrix[Int, Int]) """ conjgrad2 = conjugateGradient.CG2() conjgrad1 = conjugateGradient.CG() conjgrad3 = conjugateGradient.CG3() conjgrad4 = conjugateGradient.CG4(group_mat, A, A_transp) true_mean2, err = conjgrad2.compute_mean(d, cls_others) if err != 0: print("Conjugate gradient did not converge") return None true_mean3, err = conjgrad3.compute_mean(d, cls_others) if err != 0: print("Conjugate gradient did not converge") return None true_mean4, err = conjgrad4.compute_mean(d, cls_others) if err != 0: print("Conjugate gradient did not converge") return None variance_int1, err = conjgrad1.get_var(Int) variance_int, err = conjgrad2.get_var(Int) variance_int2 = variance_int.real if err != 0: print("Conjugate gradient did not converge for variance") return None variance_int3, err = conjgrad3.get_var(Int) variance_int3 = variance_int.real if err != 0: print("Conjugate gradient did not converge for variance") return None """ variance_true, precision_true, mat, A = utils.compute_variance_matrix(cls_others) chol_var = np.linalg.cholesky(variance_true) chol_precision = np.linalg.cholesky(precision_true) u = hp.map2alm((1/config.noise_covar)*d, lmax=config.L_MAX_SCALARS) u = conjugateGradient.flatten_map3(u) mean = np.dot(variance_true, u) true_mean2 = conjugateGradient.flatten_map3(unflat_map_to_pix(true_mean2)) print("\n") print(mean[Int]) print(true_mean2[Int]) print(true_mean4[Int]) print(true_mean3[Int]) print("\n") print(variance_int1) print(variance_int2) print(variance_int3) print(variance_true[Int, Int]) print(conjgrad3.get_var(Int)) print(conjgrad4.get_var(Int)) h_cg2 = [] for i in range(1000): print(i) sol, err = conjgrad2.run(d, cls_others) h_cg2.append(sol) h_cg1 = [] for i in range(1000): print(i) sol, err = conjgrad1.run(d, cls_others) h_cg1.append(mala.flatten_map(sol)) h_cg3 = [] for i in range(1000): print(i) sol, err = conjgrad3.run(d, cls_others) h_cg3.append(sol) h_cg4 = [] for i in range(1000): print(i) sol, err = conjgrad4.run(d, cls_others) h_cg4.append(sol) h_chol = [] true_mean = np.dot(variance_true, conjugateGradient.flatten_map3(hp.sphtfunc.map2alm((1 / config.noise_covar) * d, lmax=config.L_MAX_SCALARS))) for i in range(1000): print(i) #v = np.dot(chol_var, np.random.normal(size = 77)) + true_mean #omega0 = np.sqrt(1/conjgrad3.cls)*np.random.normal(size=len(conjgrad3.cls)) #omega1 = hp.sphtfunc.map2alm((1/np.sqrt(config.noise_covar))*np.random.normal(size=config.Npix) # ,lmax=config.L_MAX_SCALARS) #omega1 = np.dot(degroup_mat, np.dot(A_transp, (1 / np.sqrt(config.noise_covar))*np.random.normal(size=config.Npix))) #u = hp.sphtfunc.map2alm(((1/config.noise_covar) * d), lmax=config.L_MAX_SCALARS) u = np.dot(degroup_mat,np.dot(A_transp, (1 / config.noise_covar) * d)) #b = omega0 + conjugateGradient.flatten_map3(u+omega1) #b = omega0 + u + omega1 b = np.dot(chol_precision, np.random.normal(size = precision_true.shape[1])) + u v = np.dot(variance_true, b) h_chol.append(v) #h, estim_mean = gradient_ascent(d, cls_) #h1 = np.array(h_cg1) #h2 = np.array(h_cg2) h4 = np.array(h_cg4) hchol = np.array(h_chol) stdd2 = np.std(hchol[:, Int]) #emp_var = np.var(h[:, Int]) #print(h[:, Int]) #plt.hist(h1[:, Int], bins=25, alpha=0.2, density=True, label="Old") #plt.hist(h2[:, Int], bins=25, alpha=0.2, density=True, label="New") plt.hist(h4[:, Int], bins=15, alpha=0.5, density=True, label="CG4") plt.hist(hchol[:, Int], bins=15, alpha=0.5, density=True, label="Cholesky") plt.axvline(x=true_mean4[Int], color="k", linewidth=1) 
plt.axvline(x=true_mean4[Int] + np.sqrt(variance_true[Int, Int]), color="k", linewidth=1) plt.axvline(x=true_mean4[Int] - np.sqrt(variance_true[Int, Int]), color="k", linewidth=1) plt.axvline(x=true_mean4[Int] + stdd2, color="r", linewidth=1) plt.axvline(x=true_mean4[Int] - stdd2, color="r", linewidth=1) plt.legend(loc="upper right") #plt.axvline(x=true_mean2[Int] + np.sqrt(emp_var), color="red", linewidth=1) #plt.axvline(x=true_mean2[Int] - np.sqrt(emp_var), color="red", linewidth=1) plt.show() """ # HERE FOR MALA !!! grad_cst = utils.compute_gradient_log_constant_part(d) #history, s = mala.mala3(cls_others, d, grad_cst) print("\n") print("\n") print("\n") #history, s = mala.mala_good(cls_others, d, grad_cst, unadjusted=False) history, s = crankNicolson_good(cls_, d) h = np.array(history)[:, Int] #h_r = np.array(history_r)[:, Int] plt.plot(h) #plt.plot(h_r) plt.axhline(y=true_mean4[Int], color='green', linewidth=1) plt.axhline(y=true_mean4[Int] + np.sqrt(variance_int), color='green', linewidth=1) plt.axhline(y=true_mean4[Int] - np.sqrt(variance_int), color='green', linewidth=1) plt.show() plt.close() #emp_var = np.var(h) #plt.hist(h, label="ULA", density=True, alpha=0.5) #plt.hist(np.array(h_cg4)[:, Int], bins=25, label="CG", density=True, alpha=0.5) #plt.axvline(x=true_mean4[Int], color='red', linewidth=1) #plt.axvline(x=true_mean4[Int] + np.sqrt(variance_int), color='green', linewidth=1) #plt.axvline(x=true_mean4[Int] - np.sqrt(variance_int), color='green', linewidth=1) #plt.axvline(x=true_mean4[Int] + np.sqrt(emp_var), color='red', linewidth=1) #plt.axvline(x=true_mean4[Int] - np.sqrt(emp_var), color='red', linewidth=1) #plt.legend(loc="upper right") #plt.show() #plt.close() #print(variance_int) """ # Here for gradient !!! history, s = gradient_ascent_good(d, cls_others) h = np.array(history) plt.plot(h[:, Int]) plt.axhline(y=true_mean4[Int], color='red', linewidth=1) plt.axhline(y=true_mean3[Int], color='green', linewidth=1) plt.show() plt.close() print(h[0, Int]) print(history[0][Int]) print(np.array(history).shape) print(history) """
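For reference, the commented-out Cholesky cross-check above compares the conjugate-gradient solvers against the standard Wiener-filter (constrained-realisation) mean and covariance. In the notation assumed here, C is the signal covariance built from the input Cls, N the pixel-noise covariance, A the alm-to-map synthesis operator and d the observed map:

\hat{s} = \left(C^{-1} + A^{\top} N^{-1} A\right)^{-1} A^{\top} N^{-1} d,
\qquad \mathrm{Cov}(s \mid d) = \left(C^{-1} + A^{\top} N^{-1} A\right)^{-1}.

The code's precision_true and variance_true correspond to the bracketed matrix and its inverse, and the vector u = A^T N^{-1} d is what map2alm applied to (1 / noise_covar) * d approximates, up to pixel-area normalisation.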
def scan_atacama(lmax=700, mmax=5, fwhm=40, mlen=48 * 60 * 60, nrow=3, ncol=3, fov=5.0, ra0=[-10, 170], dec0=[-57.5, 0], el_min=45., cut_el_min=False, az_throw=50, scan_speed=1, rot_period=0, hwp_mode='continuous'): ''' Simulates 48 h of an Atacama-based telescope with a 3 x 3 grid of Gaussian beam pairs. Prefers to scan the BICEP patch but will try to scan the ABS_B patch if the first is not visible. Keyword arguments --------- lmax : int bandlimit (default : 700) mmax : int assumed azimuthal bandlimit of the beams (symmetric in this example, so 2 would suffice) (default : 5) fwhm : float The beam FWHM in arcmin (default : 40) mlen : int The mission length [seconds] (default : 48 * 60 * 60) nrow : int Number of detectors along row direction (default : 3) ncol : int Number of detectors along column direction (default : 3) fov : float The field of view in degrees (default : 5.0) ra0 : float, array-like Ra coord of centre region (default : [-10., 170.]) dec0 : float, array-like Dec coord of centre region (default : [-57.5, 0.]) el_min : float Minimum elevation range [deg] (default : 45) cut_el_min: bool If True, excludes timelines where el would be less than el_min az_throw : float Scan width in azimuth (in degrees) (default : 50) scan_speed : float Scan speed in deg/s (default : 1) rot_period : float The instrument rotation period in sec (default : 0) hwp_mode : str, None HWP modulation mode, either "continuous", "stepped" or None. Use freq of 1 or 1/10800 Hz respectively (default : continuous) ''' # Create LCDM realization ell, cls = get_cls() np.random.seed(25) # make sure all MPI ranks use the same seed alm = hp.synalm(cls, lmax=lmax, new=True, verbose=True) # uK ac = ScanStrategy( mlen, # mission duration in sec. sample_rate=12.01, # sample rate in Hz location='atacama') # Atacama-based instrument # Create a 3 x 3 square grid of Gaussian beams ac.create_focal_plane(nrow=nrow, ncol=ncol, fov=fov, lmax=lmax, fwhm=fwhm) # calculate tods in two chunks ac.partition_mission(0.5 * ac.mlen * ac.fsamp) # Allocate and assign parameters for mapmaking ac.allocate_maps(nside=256) # set instrument rotation ac.set_instr_rot(period=rot_period) # Set HWP rotation if hwp_mode == 'continuous': ac.set_hwp_mod(mode='continuous', freq=1.)
elif hwp_mode == 'stepped': ac.set_hwp_mod(mode='stepped', freq=1 / (3 * 60 * 60.)) # Generate timestreams, bin them and store as attributes ac.scan_instrument_mpi(alm, verbose=2, ra0=ra0, dec0=dec0, az_throw=az_throw, nside_spin=256, el_min=el_min, cut_el_min=cut_el_min, create_memmap=True) # Solve for the maps maps, cond = ac.solve_for_map(fill=np.nan) # Plotting if ac.mpi_rank == 0: print('plotting results') img_out_path = '../scratch/img/' moll_opts = dict(unit=r'[$\mu K_{\mathrm{CMB}}$]') # plot rescanned maps plot_iqu(maps, img_out_path, 'rescan_atacama', sym_limits=[250, 5, 5], plot_func=hp.mollview, **moll_opts) # plot smoothed input maps nside = hp.get_nside(maps[0]) hp.smoothalm(alm, fwhm=np.radians(fwhm / 60.), verbose=False) maps_raw = hp.alm2map(alm, nside, verbose=False) plot_iqu(maps_raw, img_out_path, 'raw_atacama', sym_limits=[250, 5, 5], plot_func=hp.mollview, **moll_opts) # plot difference maps for arr in maps_raw: # replace stupid UNSEEN crap arr[arr == hp.UNSEEN] = np.nan diff = maps_raw - maps plot_iqu(diff, img_out_path, 'diff_atacama', sym_limits=[1e-6, 1e-6, 1e-6], plot_func=hp.mollview, **moll_opts) # plot condition number map moll_opts.pop('unit', None) plot_map(cond, img_out_path, 'cond_atacama', min=2, max=5, unit='condition number', plot_func=hp.mollview, **moll_opts) # plot input spectrum cls[3][cls[3] <= 0.] *= -1. dell = ell * (ell + 1) / 2. / np.pi plt.figure() for i, label in enumerate(['TT', 'EE', 'BB', 'TE']): plt.semilogy(ell, dell * cls[i], label=label) plt.legend() plt.ylabel(r'$D_{\ell}$ [$\mu K^2_{\mathrm{CMB}}$]') plt.xlabel(r'Multipole [$\ell$]') plt.savefig('../scratch/img/cls_atacama.png') plt.close() print("Results written to {}".format(os.path.abspath(img_out_path)))
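A minimal invocation sketch for scan_atacama, assuming the script is launched under MPI so that scan_instrument_mpi can distribute chunks over ranks (the launcher command and rank count below are illustrative only):

# e.g. run as: mpirun -n 4 python run_scan_atacama.py
if __name__ == '__main__':
    scan_atacama(lmax=700, mmax=5, fwhm=40, mlen=48 * 60 * 60,
                 hwp_mode='continuous')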
def almrec(tab, nside): alm = tab2alm(tab) map_out = hp.alm2map(alm, nside, verbose=False) return map_out
def idea_jon(): nside_spin = 512 ra0 = 0 dec0 = -90 az_throw = 10 max_spin = 5 fwhm = 32.2 scan_opts = dict(verbose=1, ra0=ra0, dec0=dec0, az_throw=az_throw, nside_spin=nside_spin, max_spin=max_spin, binning=True) lmax = 800 alm = tools.gauss_blm(1e-5, lmax, pol=False) ell = np.arange(lmax + 1) fl = np.sqrt((2 * ell + 1) / 4. / np.pi) hp.almxfl(alm, fl, mmax=None, inplace=True) fm = (-1)**(hp.Alm.getlm(lmax)[1]) alm *= fm alm = tools.get_copol_blm(alm) # create Beam properties and pickle (this is just to test load_focal_plane) import tempfile import shutil import pickle opj = os.path.join blm_dir = os.path.abspath( opj(os.path.dirname(__file__), '../tests/test_data/example_blms')) po_file = opj(blm_dir, 'blm_hp_X1T1R1C8A_800_800.npy') eg_file = opj(blm_dir, 'blm_hp_eg_X1T1R1C8A_800_800.npy') tmp_dir = tempfile.mkdtemp() beam_file = opj(tmp_dir, 'beam_opts.pkl') beam_opts = dict(az=0, el=0, polang=0., btype='Gaussian', name='X1T1R1C8', fwhm=fwhm, lmax=800, mmax=800, amplitude=1., po_file=po_file, eg_file=eg_file) with open(beam_file, 'wb') as handle: pickle.dump(beam_opts, handle, protocol=pickle.HIGHEST_PROTOCOL) # init scan strategy and instrument ss = ScanStrategy( 1., # mission duration in sec. sample_rate=10000, location='spole') ss.allocate_maps(nside=1024) ss.load_focal_plane(tmp_dir, no_pairs=True) # remove tmp dir and contents shutil.rmtree(tmp_dir) ss.set_el_steps(0.01, steps=np.linspace(-10, 10, 100)) # Generate maps with Gaussian beams ss.scan_instrument_mpi(alm, **scan_opts) ss.reset_el_steps() # Solve for the maps maps_g, cond_g = ss.solve_for_map(fill=np.nan) # Generate maps with elliptical Gaussian beams ss.allocate_maps(nside=1024) ss.beams[0][0].btype = 'EG' ss.scan_instrument_mpi(alm, **scan_opts) ss.reset_el_steps() # Solve for the maps maps_eg, cond_eg = ss.solve_for_map(fill=np.nan) # Generate map with Physical Optics beams and plot them ss.allocate_maps(nside=1024) ss.beams[0][0].btype = 'PO' ss.scan_instrument_mpi(alm, **scan_opts) ss.reset_el_steps() # Solve for the maps maps_po, cond_po = ss.solve_for_map(fill=np.nan) # Plotting print('plotting results') cart_opts = dict( #rot=[ra0, dec0, 0], lonra=[-min(0.5 * az_throw, 10), min(0.5 * az_throw, 10)], latra=[-min(0.375 * az_throw, 10), min(0.375 * az_throw, 10)], unit=r'[$\mu K_{\mathrm{CMB}}$]') # plot smoothed input maps nside = hp.get_nside(maps_g[0]) hp.smoothalm(alm, fwhm=np.radians(fwhm / 60.), verbose=False) maps_raw = hp.alm2map(alm, nside, verbose=False) plot_iqu(maps_raw, '../scratch/img/', 'raw_delta', sym_limits=[1, 1, 1], plot_func=hp.cartview, **cart_opts)
theta, phi = hp.pix2ang(nside, pixels) mollProj = hp.projector.MollweideProj() x, y = mollProj.ang2xy(theta, phi) theta = 90 - np.degrees(theta) #lat phi = np.degrees(phi) #lon seismic_cmap = cm.get_cmap('seismic') seismic_cmap.set_under('w') gradphiR, gradphiI = hp.read_map( "/scratch2/r/rbond/phamloui/lenspix_files/cib_v2_phi/8Gpc_n2048_nb18_nt16_phi_sis_2_ns2048_zmin0.0_zmax2.8_hp_grad.fits", field=(0, 1)) # gradphiR = hp.ud_grade(gradphiR, 512) # gradphiI = hp.ud_grade(gradphiI, 512) gravPotential = hp.alm2map( hp.read_alm( "/scratch2/r/rbond/phamloui/lenspix_files/cib_v2_phi/8Gpc_n2048_nb18_nt16_phi_sis_2_ns2048_zmin0.0_zmax2.8_hp.fits" ), 2048) unlensed = hp.read_map( "/scratch2/r/rbond/phamloui/lenspix_files/cib_v2_unlensed/cib_fullsky_ns2048_zmin2.80_zmax3.00_nu217_ns2048_tot.fits" ) lensed = hp.read_map( "/scratch2/r/rbond/phamloui/lenspix_files/cib_v2_lensed/lensed_cib_fullsky_ns2048_zmin2.80_zmax3.00_nu217_ns2048_tot.fits" ) # print lensed[np.isnan(lensed)].shape # gradIndices = np.random.choice(nside**2*12, nside**2*12/40000, replace=False) # gradIndices = pixels[((phi<=90) | (phi>=270)) & (np.absolute(theta)<=45)] # gradIndices = np.random.choice(gradIndices, gradIndices.shape[0]/400, replace=False) # get nan coordinates and overplot there lensedUnseen = np.copy(lensed) lensedNanToZero = np.copy(lensed)
def rotate_map_g_c(map_in, c2g=False): ns = hp.npix2nside(len(map_in)) alm_in = hp.map2alm(map_in) alm_out = rotate_alm_g_c(alm_in, c2g=c2g) return hp.alm2map(alm_out, ns, verbose=False)
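rotate_alm_g_c is defined elsewhere in this module; for readers who do not have it, an equivalent sketch using healpy's built-in Rotator is given below. The coord order is an assumption (['G', 'C'] taken as Galactic-to-Celestial, swapped when c2g is True), and Rotator.rotate_alm requires a reasonably recent healpy:

import healpy as hp

def rotate_map_g_c_builtin(map_in, c2g=False):
    # Rotate a map between Galactic and Celestial frames by rotating its alm.
    ns = hp.npix2nside(len(map_in))
    coord = ['C', 'G'] if c2g else ['G', 'C']
    alm = hp.map2alm(map_in)
    alm_rot = hp.Rotator(coord=coord).rotate_alm(alm)
    return hp.alm2map(alm_rot, ns, verbose=False)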
img, phi, theta = SWHT.swht.make3Dimage(imgCoeffs, dim=[opts.pixels, opts.pixels]) fig, ax = SWHT.display.disp3D(img, phi, theta, dmode='abs', cmap='jet') # save complex image to pickle file print 'Writing image to file %s ...' % outFn, SWHT.fileio.writeSWHTImgPkl(outFn, [img, phi, theta], fDict, mode='3D') print 'done' elif opts.imageMode.startswith( 'heal' ): # plot healpix and save healpix file using the opts.pkl name print 'Generating HEALPix Image with %i NSIDE' % (opts.pixels) # use the healpy.alm2map function as it is much faster, there is a ~1% difference between the 2 functions, this is probably due to the inner workings of healpy #m = SWHT.swht.makeHEALPix(imgCoeffs, nside=opts.pixels) # TODO: a rotation issue m = hp.alm2map(SWHT.util.array2almVec(imgCoeffs), opts.pixels) # TODO: a rotation issue # save complex image to HEALPix file print 'Writing image to file %s ...' % outFn, hp.write_map( outFn, m.real, coord='C' ) # only writing the real component, this should be fine, maybe missing some details, but you know, the sky should be real. print 'done' elif opts.imageMode.startswith('coeff'): # plot the complex coefficients fig, ax = SWHT.display.dispCoeffs(imgCoeffs, zeroDC=True, vis=False) if not (opts.savefig is None): plt.savefig(opts.savefig) if not opts.nodisplay: if opts.imageMode.startswith('heal'): hp.mollview(m.real, coord='CG') plt.show()
def propOpp(cube=None, flo=100., fhi=200., lmax=100, npznamelist=None, save_cubes=True): """ Propagate the Q and U components of an IQUV cube through the Oppermann et al. 2012 RM map. The cube must be 4 or 2 by npix by nfreq. If 4, the middle two arrays will be assumed to be Q & U IN THAT ORDER. Alternatively, provide a list of npz file names via npznamelist (the cube-length assumptions remain). """ ## load the maps if npznamelist is not None: assert cube is None nNpz = len(npznamelist) assert (nNpz == 2 or nNpz == 4) if nNpz == 2: Q = np.load(npznamelist[0])['maps'] U = np.load(npznamelist[1])['maps'] else: Q = np.load(npznamelist[1])['maps'] U = np.load(npznamelist[2])['maps'] elif cube is not None: Q = cube[1] U = cube[2] else: raise ValueError('No map information provided.') ## nbins = Q.shape[0] nu = np.linspace(flo, fhi, num=nbins) lam = 3e8 / (nu * 1e6) lam2 = np.power(lam, 2) d = pyfits.open('opp2012.fits') RM = d[3].data.field(0) """ The RM map is nside=128. Everything else is nside=512. We're smoothing on scales larger than the pixellization this introduces, so no worries. RMmap=hp.ud_grade(RM,nside_out=512) hp.mollview(RMmap,title='Oppermann map') RMmap=hp.smoothing(RMmap,fwhm=np.radians(1.)) hp.mollview(RMmap,title='Oppermann map smoothed') plt.show() """ #Downsample RM variance in alm space #Upsample it in pixellization #Is this kosher? Qn, Un = [np.zeros((nbins, hp.nside2npix(128))) for i in range(2)] for i in range(Q.shape[0]): # print Q[:,i].shape Qn[i] = hp.alm2map(hp.map2alm(Q[i], lmax=3 * 512 - 1), nside=128) Un[i] = hp.alm2map(hp.map2alm(U[i], lmax=3 * 512 - 1), nside=128) RMmap = RM # RMmap = hp.alm2map(hp.map2alm(RM,lmax=lmax),nside=512) Qmaps_rot = np.zeros_like(Qn) Umaps_rot = np.zeros_like(Un) # phi = np.outer(RMmap,lam2) for i in range(nbins): phi = RMmap * lam2[i] fara_rot = (Qn[i] + 1.j * Un[i]) * np.exp( -2.j * phi) #Eq. 9 of Moore et al. 2013 Qmaps_rot[i] = fara_rot.real Umaps_rot[i] = fara_rot.imag QU = [Qmaps_rot, Umaps_rot] if save_cubes: print 'Saving Q U rotated' np.savez('cube_Qrot_%s-%sMHz.npz' % (str(flo), str(fhi)), maps=Qmaps_rot) np.savez('cube_Urot_%s-%sMHz.npz' % (str(flo), str(fhi)), maps=Umaps_rot) return np.array(QU)
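The per-frequency rotation inside propOpp is Eq. 9 of Moore et al. 2013: (Q + iU) is multiplied by exp(-2i RM lambda^2). A compact standalone sketch of that single step, with all inputs taken to be hypothetical numpy arrays of matching shape:

import numpy as np

def faraday_rotate(Q, U, rm_map, freq_mhz):
    # Apply the Faraday rotation angle phi = RM * lambda^2 to Q/U
    # (same expression as the loop in propOpp above).
    lam2 = (3.0e8 / (freq_mhz * 1.0e6)) ** 2
    pol = (Q + 1.0j * U) * np.exp(-2.0j * rm_map * lam2)
    return pol.real, pol.imag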
def accum(self, tmap, coeffs): assert (len(coeffs) == self.nmodes) nside = hp.npix2nside(len(tmap)) tmap += hp.alm2map(xyz_to_alm(coeffs), nside)