def doubleworker(i): #i = (j,n,map_index,scale_lmax,smoothing_lmax)
    print "Doubling l_max of input map", i[2] + 1, "/", nmaps

    #Map loading within sub-process
    if i[0] >= 0: #Wavelet scales
        wav_fits = wav_fits_root[i[2]] + '_' + wavparam_code + str(i[0]) + '_n' + str(i[1] + 1) + '.npy'
    else: #Scaling function
        wav_fits = scal_fits[i[2]]
    map = np.load(wav_fits, mmap_mode='r') #Map still only stored on disk

    alms = ps.map2alm_mw(map, i[3], spin) #alm's to l(j) - come out in MW order
    del map
    alms = zeropad((ps.lm2lm_hp(alms, i[3]), i[3], i[4])) #New alm's larger
    map = ps.alm2map_mw(ps.lm_hp2lm(alms, i[4]), i[4], spin) #Input alm's in MW order
    del alms

    #SAVE doubled map
    if i[0] >= 0: #Wavelet scales
        double_fits = wav_fits_root[i[2]] + '_' + wavparam_code + str(i[0]) + '_n' + str(i[1] + 1) + '_double.npy'
    else: #Scaling function
        double_fits = scal_fits[i[2]][:-4] + '_double.npy'
    np.save(double_fits, map)
    del map

    return 0
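#A minimal sketch of the zeropad helper assumed by doubleworker above (the
#actual helper is defined elsewhere in this script): it takes a single tuple
#(alms, L_in, L_out) of HEALPix-ordered alm's band-limited at L_in and embeds
#them, m-block by m-block, in a larger array band-limited at L_out. The name
#zeropad_sketch is hypothetical; np and hp are the numpy/healpy imports used
#throughout this script.
def zeropad_sketch(args):
    alms_in, L_in, L_out = args
    lmax_in = L_in - 1 #healpy lmax is inclusive; s2let band-limits are exclusive
    alms_out = np.zeros(hp.Alm.getsize(L_out - 1), dtype=np.complex128)
    for em in xrange(lmax_in + 1):
        old_start = hp.Alm.getidx(lmax_in, em, em) #Start of the fixed-m block
        new_start = hp.Alm.getidx(L_out - 1, em, em)
        nels = lmax_in + 1 - em #No. of l values (l = m,...,lmax_in) in this block
        alms_out[new_start:new_start + nels] = alms_in[old_start:old_start + nels]
    return alms_out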
def analworker(i):
    print "This is analysis worker starting for map", i + 1, "/", nmaps
    QU_maps = hp.read_map(fits[i], field=(1, 2)) #(Q,U)
    pixrecip = np.concatenate((np.ones(2), np.reciprocal(hp.pixwin(hp.get_nside(QU_maps[0]), pol=True)[1][2:smoothing_lmax]))) #P pixwin - not defined for l < 2
    pm_alms = hp.map2alm_spin(QU_maps, spin, lmax=smoothing_lmax - 1)
    del QU_maps
    hp.almxfl(pm_alms[0], pixrecip, inplace=True) #Correcting for pixwin
    hp.almxfl(pm_alms[1], pixrecip, inplace=True) #Correcting for pixwin

    #Reorder to S2LET alms
    pm_alms[0] = ps.lm_hp2lm(pm_alms[0], smoothing_lmax)
    pm_alms[1] = ps.lm_hp2lm(pm_alms[1], smoothing_lmax)

    P_alms = -1. * pm_alms[0] - 1.j * pm_alms[1] #CHECK THIS IS CORRECT!
    del pm_alms

    wav_maps, scal_maps = ps.analysis_lm2wav_manualtiling(P_alms, smoothing_lmax, ndir, spin, scal_tiles, wav_tiles.T.ravel(), scal_bandlims, wav_bandlims)
    del P_alms
    np.save(scal_outfits[i], scal_maps)
    del scal_maps

    #Splitting up output wavelet maps
    offset = 0
    for j in xrange(jmax + 1):
        for n in xrange(ndir):
            bandlim = wav_bandlims[j]
            nelem = int(bandlim * (2 * bandlim - 1)) #Integer no. of MW samples at this band-limit (needed for slicing)
            wav_outfits = wav_outfits_root[i] + '_j' + str(j) + '_n' + str(n + 1) + '.npy'
            np.save(wav_outfits, wav_maps[offset:offset + nelem])
            offset += nelem
    del wav_maps

    return 0
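#A minimal sketch (not necessarily the pipeline's actual driver) of how
#analworker could be farmed out over the nmaps input frequency maps, mirroring
#the pool pattern used inside s2let_ilc below; nprocess1 is a hypothetical
#pool-size setting.
'''pool1 = mg.Pool(nprocess1)
anal_output = pool1.map(analworker, xrange(nmaps))
pool1.close()
pool1.join()
del pool1'''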
def smoothworker(i): #(j,n,map_index1,map_index2,smoothing_lmax,scale_fwhm) [map_index2<=map_index1]
    print "Smoothing independent covariance element", i[2], ",", i[3]

    #Map loading within sub-process
    if i[0] >= 0: #Wavelet scales
        wav_fits1 = wav_fits_root[i[2]] + '_' + wavparam_code + str(i[0]) + '_n' + str(i[1] + 1) + '_double.npy'
        wav_fits2 = wav_fits_root[i[3]] + '_' + wavparam_code + str(i[0]) + '_n' + str(i[1] + 1) + '_double.npy'
    else: #Scaling function
        wav_fits1 = scal_fits[i[2]][:-4] + '_double.npy'
        wav_fits2 = scal_fits[i[3]][:-4] + '_double.npy'
    map1 = np.real(np.load(wav_fits1, mmap_mode='r')) #Throw away zero imaginary part
    map2 = np.real(np.load(wav_fits2, mmap_mode='r'))

    R = np.multiply(map1, map2) + 0.j #Add back in zero imaginary part
    del map1, map2
    alms = ps.lm2lm_hp(ps.map2alm_mw(R, i[4], spin), i[4]) #No pixwin correct. with MW - calc alms to smooth - come out in MW order - so converted to HPX order
    del R
    gausssmooth = hp.gauss_beam(i[5], lmax=i[4] - 1)
    hp.almxfl(alms, gausssmooth, inplace=True) #Multiply by gaussian beam

    print "Synthesising smoothed covariance map for element", i[2], ",", i[3]
    Rsmooth = np.real(ps.alm2map_mw(ps.lm_hp2lm(alms, i[4]), i[4], spin)) #Throw away zero imaginary part - input alm's in MW order
    del alms

    #SAVE smoothed covariance
    if i[0] >= 0: #Wavelet scales
        R_fits = wav_outfits_root + '_' + wavparam_code + str(i[0]) + '_n' + str(i[1] + 1) + '_Rsmooth' + str(i[2]) + str(i[3]) + '.npy'
    else: #Scaling function
        R_fits = scal_outfits[:-4] + '_Rsmooth' + str(i[2]) + str(i[3]) + '.npy'
    np.save(R_fits, Rsmooth)
    del Rsmooth

    return 0
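#Note on the step above: the pixel-by-pixel product of two wavelet-coefficient
#maps is the local covariance element R_cd(x) ~ W_c(x) W_d(x), which is then
#convolved with a Gaussian beam of FWHM scale_fwhm (hp.gauss_beam takes the
#FWHM in radians) so that each pixel's covariance estimate averages over its
#neighbourhood rather than a single sample.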
def smoothworker(i): #(j,n,map_index1,map_index2,smoothing_lmax,scale_fwhm) [map_index2<=map_index1]
    print "Smoothing independent covariance element", i[2], ",", i[3]

    #Map loading within sub-process
    if i[0] >= 0: #Wavelet scales
        wav_fits1 = wav_fits_root[i[2]] + '_j' + str(i[0]) + '_n' + str(i[1] + 1) + '.npy'
        wav_fits2 = wav_fits_root[i[3]] + '_j' + str(i[0]) + '_n' + str(i[1] + 1) + '.npy'
    else: #Scaling function
        wav_fits1 = scal_fits[i[2]]
        wav_fits2 = scal_fits[i[3]]
    map1 = np.load(wav_fits1, mmap_mode='r') #Complex spin-wavelet coefficients
    map2 = np.conjugate(np.load(wav_fits2, mmap_mode='r'))

    R = np.multiply(map1, map2) #W_c W_d^*
    del map1, map2
    alms = ps.lm2lm_hp(ps.map2alm_mw(R, i[4], 0), i[4]) #No pixwin correct. with MW - calc alms to smooth - come out in MW order - so converted to HPX order
    del R
    gausssmooth = hp.gauss_beam(i[5], lmax=i[4] - 1)
    hp.almxfl(alms, gausssmooth, inplace=True) #Multiply by gaussian beam

    print "Synthesising smoothed covariance map for element", i[2], ",", i[3]
    Rsmooth = ps.alm2map_mw(ps.lm_hp2lm(alms, i[4]), i[4], 0) #Input alm's in MW order
    del alms

    #Save smoothed covariance
    if i[0] >= 0: #Wavelet scales
        R_fits = wav_outfits_root + '_j' + str(i[0]) + '_n' + str(i[1] + 1) + '_Rsmooth' + str(i[2]) + str(i[3]) + '.npy'
    else: #Scaling function
        R_fits = scal_outfits[:-4] + '_Rsmooth' + str(i[2]) + str(i[3]) + '.npy'
    np.save(R_fits, Rsmooth)
    del Rsmooth

    return 0
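#Worked example of the covariance bookkeeping used in s2let_ilc below: a
#symmetric nmaps x nmaps covariance matrix has nmaps*(nmaps+1)//2 independent
#(map_index1, map_index2) pairs with map_index2 <= map_index1, e.g. 9*10//2 =
#45 smoothworker calls per scale and direction for the 9 input channels
#implied by the "i + 9 - nmaps" indexing below. The helper name is
#hypothetical and shown for illustration only.
def indep_cov_pairs_sketch(nmaps):
    return [(i, j) for i in xrange(nmaps) for j in xrange(i + 1)] #len = nmaps*(nmaps+1)//2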
def s2let_ilc(mapsextra): #mapsextra = (j,n)
    print "Running S2LET ILC on wavelet scale", mapsextra[0], "/", jmax, "direction", mapsextra[1] + 1, "/", ndir, "\n"
    if mapsextra[0] >= 0: #Wavelet scales
        scale_lmax = wav_bandlims[mapsextra[0]]
    else: #Scaling function
        scale_lmax = int(scal_bandlims)
    smoothing_lmax = 2 * (scale_lmax - 1) + 1 #Doubling lmax for input maps with zero-padding

    #Serial version
    '''mapsdouble = np.zeros((nrows,ps.mw_size(smoothing_lmax)),dtype=np.complex128) #Pre-allocate array
    for i in xrange(nrows):
        mapsdouble[i,:] = doubleworker((mapsextra[0][i],mapsextra[1],smoothing_lmax,mapsextra[2]))'''

    #Parallel version
    mapsextra2 = [(mapsextra[0], mapsextra[1], i, scale_lmax, smoothing_lmax) for i in xrange(nmaps)]
    print "Forming pool"
    pool2 = mg.Pool(nprocess2)
    print "Farming out workers to run doubling function"
    double_output = pool2.map(doubleworker, mapsextra2)
    print "Have returned from doubling workers\n"
    pool2.close()
    pool2.join()
    del pool2

    #Calculate scale_fwhm for smoothing kernel
    nsamp = 1200.
    npix = hp.nside2npix(1 << (int(0.5 * scale_lmax) - 1).bit_length()) #Equivalent no. HEALPIX pixels
    scale_fwhm = 4. * mh.sqrt(nsamp / npix)
    #TESTING larger covariance kernel
    scale_fwhm = 15. * scale_fwhm

    #Smooth covariance matrices
    #Serial version
    '''Rsmoothflat = np.zeros_like(Rflat) #Pre-allocate array
    for i in xrange(nindepelems):
        Rsmoothflat[i,:] = smoothworker((Rflat[i],smoothing_lmax,mapsextra[2],gausssmooth,mapsextra[1],mapsextra[3],i,mapsextra[4]))
    del Rflat'''

    #Parallel version
    nindepelems = int(nmaps * (nmaps + 1) * .5) #No. indep. elements in symmetric covariance matrix
    Rextra = [None] * nindepelems
    k = 0
    for i in xrange(nmaps):
        for j in xrange(i + 1):
            Rextra[k] = (mapsextra[0], mapsextra[1], i, j, smoothing_lmax, scale_fwhm)
            k += 1
    print "Forming pool"
    pool3 = mg.Pool(nprocess3)
    print "Farming out workers to run smoothing function"
    R_output = pool3.map(smoothworker, Rextra)
    print "Have returned from smoothing workers\n"
    pool3.close()
    pool3.join()
    del pool3

    #Load R maps and form matrices
    print "Pre-allocating memory for complete covariance tensor\n"
    Rsmooth = np.zeros((ps.mw_size(smoothing_lmax), nmaps, nmaps), dtype=np.float64) #Pre-allocate array
    for i in xrange(nmaps):
        for j in xrange(i + 1):
            if mapsextra[0] >= 0: #Wavelet scales
                R_fits = wav_outfits_root + '_' + wavparam_code + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '_Rsmooth' + str(i + 9 - nmaps) + str(j + 9 - nmaps) + '.npy'
            else: #Scaling function
                R_fits = scal_outfits[:-4] + '_Rsmooth' + str(i + 9 - nmaps) + str(j + 9 - nmaps) + '.npy'
            Rsmooth[:, i, j] = np.load(R_fits)
            if i != j:
                Rsmooth[:, j, i] = Rsmooth[:, i, j]

    #Compute inverse covariance matrices
    print "Calculating inverse covariance matrices\n"
    Rinv = np.linalg.inv(Rsmooth) #Parallel vers. slower!? - LARGEST MEMORY COST: 2*9*9*(8000^2)*complex128 = 0.2TB
    del Rsmooth

    #Compute weights vectors (at each pixel)
    wknumer = np.sum(Rinv, axis=-1)
    del Rinv
    wkdenom = np.sum(wknumer, axis=-1)
    wk = wknumer / wkdenom[:, None]
    del wknumer, wkdenom

    #Map loading within sub-process
    mapsdouble = np.zeros((len(wk), len(wk[0])), dtype=np.float64) #Pre-allocate array
    for i in xrange(nmaps):
        if mapsextra[0] >= 0: #Wavelet scales
            wav_fits = wav_fits_root[i + 9 - nmaps] + '_' + wavparam_code + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '_double.npy'
        else: #Scaling function
            wav_fits = scal_fits[i + 9 - nmaps][:-4] + '_double.npy'
        mapsdouble[:, i] = np.real(np.load(wav_fits, mmap_mode='r')) #Throw away zero imaginary part

    #Dot weights with maps (at each small pixel) - at double l(j)
    finalmap = np.sum(np.multiply(wk, mapsdouble), axis=-1) + 0.j #Add back in zero imaginary part
    del wk, mapsdouble

    #Downgrade resolution of MW maps
    print "Downgrading resolution of CMB wavelet map"
    finalmapalms = ps.lm2lm_hp(ps.map2alm_mw(finalmap, smoothing_lmax, spin), smoothing_lmax) #Come out in MW order - so converted to HPX order
    del finalmap
    if mapsextra[0] >= 0: #Wavelet scales
        alms_fname = wav_outfits_root + '_' + wavparam_code + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '_alms.fits'
    else: #Scaling function
        alms_fname = scal_outfits[:-4] + '_alms.fits'
    hp.write_alm(alms_fname, finalmapalms, lmax=scale_lmax - 1, mmax=scale_lmax - 1)
    del finalmapalms
    finalmapalmstruncate = hp.read_alm(alms_fname)
    finalmaphalf = ps.alm2map_mw(ps.lm_hp2lm(finalmapalmstruncate, scale_lmax), scale_lmax, spin)
    del finalmapalmstruncate

    #Saving output map
    if mapsextra[0] >= 1: #0: #Wavelet scales
        wav_outfits = wav_outfits_root + '_' + wavparam_code + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '.npy'
    elif mapsextra[0] == 0: #FOR NEW SCALING FUNC
        wav_outfits = wav_outfits_0 + '_' + wavparam_code + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '.npy'
    else: #Scaling function
        wav_outfits = scal_outfits[:-4] + '.npy'
    np.save(wav_outfits, finalmaphalf)
    del finalmaphalf

    return 0
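#A minimal sketch (hypothetical, not necessarily the pipeline's actual driver)
#of how s2let_ilc above is iterated over the scaling function (denoted by
#j = -1 inside the function) and the directional wavelet scales j = 0,...,jmax
#with directions n = 0,...,ndir-1.
'''s2let_ilc((-1, 0)) #Scaling function
for j in xrange(jmax + 1):
    for n in xrange(ndir):
        s2let_ilc((j, n)) #Wavelet scale j, direction n'''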
if res == False:
    raise ValueError('\nAn invalid wavelet tiling has been chosen.\n')
else:
    print '\nA valid wavelet tiling has been chosen.\n'
'''

#Plot array
fname = '/Users/keir/Documents/planck2015_2_cmb_realisations/planck2015_2_cmb_map_1.fits'
#fname = '/Users/keir/Documents/s2let_ilc_planck/COM_CompMap_dust-commander_0256_R2.00.fits'
f_ini = hp.read_map(fname) # Initial map
f_lm = hp.map2alm(f_ini, lmax=L-1) # Its alms
f = hp.alm2map(f_lm, nside=nside, lmax=L-1) # Band limited version

# Convert to MW sampling from spherical harmonics
f_mw = ps.alm2map_mw(ps.lm_hp2lm(f_lm, L), L, spin)

print 'Running analysis_lm2wav'
#f_wav, f_scal = ps.analysis_lm2wav_manualtiling(f_lm, L, N, spin, hybrid_scal_l, hybrid_wav_l.T.ravel(), hybrid_scal_bandlimit, hybrid_wav_bandlimits)
f_wav, f_scal = ps.analysis_lm2wav(f_lm, B, L, J_min, N, spin, upsample)

def mollweide_grid(thetas, phis):
    MAX_ITERATIONS = 1000
    TOL = 1e-10
    thetas = np.pi/2 - thetas
    phis = phis - np.pi
    t = thetas
    for it in xrange(MAX_ITERATIONS):
        dt = (t + np.sin(t) - np.pi*np.sin(thetas)) / (1 + np.cos(t))
        t = t - dt
        if np.max(np.abs(dt)) < TOL: