def doubleworker(i): #i = (j,n,map_index,scale_lmax,smoothing_lmax)
    print "Doubling l_max of input map", i[2] + 1, "/", nmaps
    #Map loading within sub-process
    if i[0] >= 0: #Wavelet scales
        wav_fits = wav_fits_root[i[2]] + '_' + wavparam_code + str(i[0]) + '_n' + str(i[1] + 1) + '.npy'
    else: #Scaling function
        wav_fits = scal_fits[i[2]]
    map = np.load(wav_fits, mmap_mode='r') #Map still only stored on disk
    alms = ps.map2alm_mw(map, i[3], spin) #alm's to l(j) - come out in MW order
    del map
    alms = zeropad((ps.lm2lm_hp(alms, i[3]), i[3], i[4])) #New alm's larger
    map = ps.alm2map_mw(ps.lm_hp2lm(alms, i[4]), i[4], spin) #Input alm's in MW order
    del alms
    #SAVE doubled map
    if i[0] >= 0: #Wavelet scales
        double_fits = wav_fits_root[i[2]] + '_' + wavparam_code + str(i[0]) + '_n' + str(i[1] + 1) + '_double.npy'
    else: #Scaling function
        double_fits = scal_fits[i[2]][:-4] + '_double.npy'
    np.save(double_fits, map)
    del map
    return 0
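#zeropad is called above but not defined in this section. A minimal sketch,
#assuming HEALPix-ordered alm's (as in the call above, which converts with
#lm2lm_hp first) and mirroring the inline zero-padding in the doubleworker
#variant further down: each fixed-m block is padded with zeros so the
#band-limit grows from scale_lmax to smoothing_lmax.
def zeropad(args): #args = (alms, scale_lmax, smoothing_lmax)
    alms, scale_lmax, smoothing_lmax = args
    nzeros = smoothing_lmax - scale_lmax #No. zeros to pad per m-block
    new_alms = np.concatenate((alms[:scale_lmax], np.zeros(nzeros)))
    for em in xrange(1, scale_lmax):
        startindex = int(em * scale_lmax - .5 * em * (em - 1)) #Start of m-block
        new_alms = np.concatenate((new_alms, alms[startindex:(startindex + scale_lmax - em)], np.zeros(nzeros)))
    nfinalzeros = hp.Alm.getsize(smoothing_lmax - 1) - len(new_alms)
    return np.concatenate((new_alms, np.zeros(nfinalzeros)))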
def smoothworker(i): #(Rflat[i],smoothing_lmax,spin,gausssmooth,scale_lmax,n,i,j)
    print "Smoothing another independent covariance element"
    alms = ps.map2alm_mw(i[0], i[1], i[2]) #No pixwin correction with MW sampling - calc alm's to smooth
    #del i[0] #Everything gets moved down one index
    hp.almxfl(alms, i[3], inplace=True) #Multiply by Gaussian beam
    '''if i[5] != -1: #If n != -1 (i.e. the maps are directional)
        print "Picking out directional component of covariance map"
        #Testing convolving gaussian-smoothed covariance maps with directional wavelet
        jmin_min = 1
        #Need to truncate alm's to scale_lmax (Is this necessary?)
        alms_fname = 'alms_dirwav_' + str(i[7]) + '_' + str(i[5]) + '_' + str(i[6]) + '.fits'
        hp.write_alm(alms_fname,alms,lmax=i[4]-1,mmax=i[4]-1)
        del alms
        alms_truncate = hp.read_alm(alms_fname)
        print "Analysing covariance map" #Could increase wavparam!?
        wav,scal = ps.analysis_lm2wav(alms_truncate,wavparam,i[4],jmin_min,ndir,spin,upsample)
        del alms_truncate
        #Delete wrong directions by zero-ing them
        print "Deleting wrong directions"
        jmax_min = ps.pys2let_j_max(wavparam,i[4],jmin_min)
        for j in xrange(jmin_min,jmax_min+1):
            for n in xrange(0,ndir):
                if n != i[5]:
                    offset,new_scale_lmax,nelem,nelem_wav = ps.wav_ind(j,n,wavparam,i[4],ndir,jmin_min,upsample)
                    wav[offset:offset+nelem] = 0.
        print "Synthesising directional covariance map"
        alms = ps.synthesis_wav2lm(wav,scal,wavparam,i[4],jmin_min,ndir,spin,upsample)
        del wav,scal
        #Expand alm's with zero-padding
        print "Zero-padding the alm's"
        nzeros = i[1] - i[4] #No. zeros to pad
        new_alms_temp = np.concatenate((alms[:i[4]],np.zeros(nzeros)))
        for em in xrange(1,i[4]):
            startindex = em*i[4] - .5*em*(em-1)
            new_alms_temp = np.concatenate((new_alms_temp,alms[startindex:(startindex+i[4]-em)],np.zeros(nzeros)))
        del alms
        print "Temporary length of alm's =", len(new_alms_temp)
        nfinalzeros = hp.Alm.getsize(i[1]-1) - len(new_alms_temp)
        alms = np.concatenate((new_alms_temp,np.zeros(nfinalzeros)))
        del new_alms_temp
        print "Final length of alm's =", len(alms)'''
    #hp.almxfl(alms,i[3],inplace=True) #Multiply by gaussian beam
    print "Synthesising smoothed covariance map"
    Rsmoothflat = ps.alm2map_mw(alms, i[1], i[2]) #Smooth covariance in MW - calc final map to scale
    del alms
    return Rsmoothflat
def varworker(wav_map_indices): #(j,n)
    wav_fits = wav_fits_root + '_j' + str(wav_map_indices[0]) + '_n' + str(wav_map_indices[1] + 1) + '.npy'
    map = np.load(wav_fits, mmap_mode='r')
    scale_lmax = wavparam**(wav_map_indices[0] + 1) #lambda^(j+1)
    if scale_lmax > ellmax:
        scale_lmax = ellmax
    scale_lmin = wavparam**(wav_map_indices[0] - 1) #lambda^(j-1)
    alms = ps.map2alm_mw(map, scale_lmax, spin)
    del map
    alms = reducealms((alms, scale_lmax, scale_lmin))
    map_reduced = ps.alm2map_mw(alms, scale_lmax, spin)
    del alms
    map_squared = np.square(map_reduced.real)
    del map_reduced
    return map_squared
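#reducealms is not defined in this section. A plausible sketch (an assumption
#about its semantics): zero all multipoles below scale_lmin so that only the
#band this wavelet scale is sensitive to survives. Assumes MW (l-major) alm
#ordering, where multipole el spans indices el**2:(el+1)**2.
def reducealms(args): #args = (alms, scale_lmax, scale_lmin)
    alms, scale_lmax, scale_lmin = args
    alms = np.copy(alms) #Leave the caller's array untouched
    alms[:int(scale_lmin)**2] = 0. #Kill l < scale_lmin
    return alms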
def smoothworker(i): #(j,n,map_index1,map_index2,smoothing_lmax,scale_fwhm) [map_index2<=map_index1]
    print "Smoothing independent covariance element", i[2], ",", i[3]
    #Map loading within sub-process
    if i[0] >= 0: #Wavelet scales
        wav_fits1 = wav_fits_root[i[2]] + '_' + wavparam_code + str(i[0]) + '_n' + str(i[1] + 1) + '_double.npy'
        wav_fits2 = wav_fits_root[i[3]] + '_' + wavparam_code + str(i[0]) + '_n' + str(i[1] + 1) + '_double.npy'
    else: #Scaling function
        wav_fits1 = scal_fits[i[2]][:-4] + '_double.npy'
        wav_fits2 = scal_fits[i[3]][:-4] + '_double.npy'
    map1 = np.real(np.load(wav_fits1, mmap_mode='r')) #Throw away zero imaginary part
    map2 = np.real(np.load(wav_fits2, mmap_mode='r'))
    R = np.multiply(map1, map2) + 0.j #Add back in zero imaginary part
    del map1, map2
    alms = ps.lm2lm_hp(ps.map2alm_mw(R, i[4], spin), i[4]) #No pixwin correction with MW - alm's come out in MW order, so convert to HPX order
    del R
    gausssmooth = hp.gauss_beam(i[5], lmax=i[4] - 1)
    hp.almxfl(alms, gausssmooth, inplace=True) #Multiply by Gaussian beam
    print "Synthesising smoothed covariance map for element", i[2], ",", i[3]
    Rsmooth = np.real(ps.alm2map_mw(ps.lm_hp2lm(alms, i[4]), i[4], spin)) #Throw away zero imaginary part - input alm's in MW order
    del alms
    #SAVE smoothed covariance
    if i[0] >= 0: #Wavelet scales
        R_fits = wav_outfits_root + '_' + wavparam_code + str(i[0]) + '_n' + str(i[1] + 1) + '_Rsmooth' + str(i[2]) + str(i[3]) + '.npy'
    else: #Scaling function
        R_fits = scal_outfits[:-4] + '_Rsmooth' + str(i[2]) + str(i[3]) + '.npy'
    np.save(R_fits, Rsmooth)
    del Rsmooth
    return 0
def smoothworker(i): #(j,n,map_index1,map_index2,smoothing_lmax,scale_fwhm) [map_index2<=map_index1]
    print "Smoothing independent covariance element", i[2], ",", i[3]
    #Map loading within sub-process
    if i[0] >= 0: #Wavelet scales
        wav_fits1 = wav_fits_root[i[2]] + '_j' + str(i[0]) + '_n' + str(i[1] + 1) + '.npy'
        wav_fits2 = wav_fits_root[i[3]] + '_j' + str(i[0]) + '_n' + str(i[1] + 1) + '.npy'
    else: #Scaling function
        wav_fits1 = scal_fits[i[2]]
        wav_fits2 = scal_fits[i[3]]
    map1 = np.load(wav_fits1, mmap_mode='r') #Complex spin-wavelet coefficients
    map2 = np.conjugate(np.load(wav_fits2, mmap_mode='r'))
    R = np.multiply(map1, map2) #W_c W_d^*
    del map1, map2
    alms = ps.lm2lm_hp(ps.map2alm_mw(R, i[4], 0), i[4]) #No pixwin correction with MW - alm's come out in MW order, so convert to HPX order
    del R
    gausssmooth = hp.gauss_beam(i[5], lmax=i[4] - 1)
    hp.almxfl(alms, gausssmooth, inplace=True) #Multiply by Gaussian beam
    print "Synthesising smoothed covariance map for element", i[2], ",", i[3]
    Rsmooth = ps.alm2map_mw(ps.lm_hp2lm(alms, i[4]), i[4], 0) #Input alm's in MW order
    del alms
    #Save smoothed covariance
    if i[0] >= 0: #Wavelet scales
        R_fits = wav_outfits_root + '_j' + str(i[0]) + '_n' + str(i[1] + 1) + '_Rsmooth' + str(i[2]) + str(i[3]) + '.npy'
    else: #Scaling function
        R_fits = scal_outfits[:-4] + '_Rsmooth' + str(i[2]) + str(i[3]) + '.npy'
    np.save(R_fits, Rsmooth)
    del Rsmooth
    return 0
def doubleworker(i):
    print "Doubling l_max of another input map"
    alms = ps.map2alm_mw(i[0], i[1], i[3]) #alm's to l(j)
    #del i[0] #Everything gets moved down one index
    print "Zero-padding the alm's"
    nzeros = i[2] - i[1] #No. zeros to pad per m-block
    new_alms_temp = np.concatenate((alms[:i[1]], np.zeros(nzeros)))
    for em in xrange(1, i[1]):
        startindex = int(em * i[1] - .5 * em * (em - 1)) #Start of m-block; cast to int for valid slicing
        new_alms_temp = np.concatenate((new_alms_temp, alms[startindex:(startindex + i[1] - em)], np.zeros(nzeros)))
    del alms
    print "Temporary length of alm's =", len(new_alms_temp)
    nfinalzeros = hp.Alm.getsize(i[2] - 1) - len(new_alms_temp)
    new_alms_temp = np.concatenate((new_alms_temp, np.zeros(nfinalzeros)))
    print "Final length of alm's =", len(new_alms_temp)
    mapsdouble = ps.alm2map_mw(new_alms_temp, i[2], i[3])
    del new_alms_temp
    return mapsdouble
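#The block-start formula above is the offset of the m-th block in healpy's
#alm ordering (all l for fixed m, blocks in increasing m). Illustrative
#check against healpy's own indexing (hypothetical band-limit, kept in a
#string so it never runs at import time):
'''scale_lmax = 128 #band-limit = lmax + 1 in this code's convention
for em in xrange(1, 5):
    startindex = int(em * scale_lmax - .5 * em * (em - 1))
    assert startindex == hp.Alm.getidx(scale_lmax - 1, em, em)'''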
def doubleworker(i): #i = (j,n,map_index,scale_lmax,smoothing_lmax)
    print "Doubling l_max of input map", i[2] + 1, "/", nmaps
    #TESTING map loading within sub-process
    wav_fits = wav_fits_root[i[2]] + '_j' + str(i[0]) + '_n' + str(i[1] + 1) + '.npy'
    if i[0] == -1: #Scaling function
        wav_fits = scal_fits[i[2]]
    map = np.load(wav_fits, mmap_mode='r') #Map still only stored on disk
    alms = ps.map2alm_mw(map, i[3], spin) #alm's to l(j)
    del map
    alms = zeropad((alms, i[3], i[4])) #New alm's larger
    map = ps.alm2map_mw(alms, i[4], spin)
    del alms
    #SAVE doubled map
    double_fits = wav_fits_root[i[2]] + '_j' + str(i[0]) + '_n' + str(i[1] + 1) + '_double.npy'
    if i[0] == -1: #Scaling function
        double_fits = scal_fits[i[2]][:-4] + '_double.npy'
    np.save(double_fits, map)
    del map
    return 0
def s2let_ilc_dir_para(mapsextra): #mapsextra = (maps,scale_lmax,spin,n,j,i)
    print "\nRunning Directional S2LET ILC on wavelet scale", mapsextra[4], "/", jmax, "direction", mapsextra[3] + 1, "/", ndir, "\n"
    nrows = len(mapsextra[0]) #No. rows in covar. matrix
    smoothing_lmax = 2 * mapsextra[1] #=4*nside(j)

    #Doubling lmax for input maps with zero-padding
    '''pool = mg.Pool(nprocess)
    mapsextra = [(maps[i],scale_lmax,smoothing_lmax,spin) for i in xrange(nrows)]
    del maps
    mapsdouble = np.array(pool.map(doubleworker,mapsextra))
    del mapsextra'''
    #Serial version
    mapsdouble = np.zeros((nrows, ps.mw_size(smoothing_lmax)), dtype=np.complex128) #Pre-allocate array
    for i in xrange(nrows):
        mapsdouble[i, :] = doubleworker((mapsextra[0][i], mapsextra[1], smoothing_lmax, mapsextra[2]))
    #mapsdouble = np.array(mapsdouble)

    #Calculating covariance matrix (at each pixel)
    #R = [None]*len(mapsdouble)
    R = np.zeros((len(mapsdouble), len(mapsdouble), len(mapsdouble[0])), dtype=np.complex128) #Pre-allocate array
    for i in xrange(len(mapsdouble)):
        R[i, :, :] = np.multiply(mapsdouble, np.roll(mapsdouble, -i, axis=0))
    #R = np.array(R)

    #Calculate scale_fwhm & smoothing_lmax
    nsamp = 1200.
    npix = hp.nside2npix(0.5 * mapsextra[1]) #Equivalent number of HEALPix pixels
    scale_fwhm = 4. * mh.sqrt(nsamp / npix)

    #Smooth covariance matrices
    nindepelems = int(nrows * (nrows + 1) * .5) #No. independent elements in symmetric covariance matrix
    Rflat = np.reshape(R, (nrows * nrows, len(R[0, 0]))) #Flatten first two axes
    #del R #NEW!!!
    Rflatlen = len(Rflat)
    gausssmooth = hp.gauss_beam(scale_fwhm, smoothing_lmax - 1)
    #Testing zero-ing gaussian smoothing beam
    gauss_lmax = mapsextra[1]
    gausssmooth[gauss_lmax:] = 0.
    '''alms = [None]*nindepelems
    #alms_hp = [None]*nindepelems
    #alms_smooth = [None]*nindepelems
    Rsmoothflat = [None]*nindepelems #Only really need to pre-allocate this
    for i in xrange(nindepelems): #PARALLELISE
        print "Smoothing independent covariance element", i+1, "/", nindepelems
        alms[i] = ps.map2alm_mw(Rflat[i],scale_lmax,spin) #No pixwin correct. with MW sampling
        #alms_hp[i] = ps.lm2lm_hp(alms[i],smoothing_lmax) #Now in healpy ordering
        hp.almxfl(alms[i],gausssmooth,inplace=True) #Multiply by gaussian beam
        #alms_smooth[i] = ps.lm_hp2lm(alms_hp[i],smoothing_lmax) #Back in MW ordering
        Rsmoothflat[i] = ps.alm2map_mw(alms[i],scale_lmax,spin) #Smooth covariance in MW
    Rsmoothflat = np.array(Rsmoothflat)'''
    #Parallel version
    '''pool = mg.Pool(nprocess)
    Rflatextra = [(Rflat[i],smoothing_lmax,spin,gausssmooth,scale_lmax,en,i) for i in xrange(nindepelems)]
    del Rflat
    Rsmoothflat = np.array(pool.map(smoothworker,Rflatextra))
    del Rflatextra'''
    #Serial version
    Rsmoothflat = np.zeros_like(Rflat) #Pre-allocate array
    for i in xrange(nindepelems):
        Rsmoothflat[i, :] = smoothworker((Rflat[i], smoothing_lmax, mapsextra[2], gausssmooth, mapsextra[1], mapsextra[3], i, mapsextra[4]))
    del Rflat
    #Rsmoothflat = np.array(Rsmoothflat)

    #Rearranging and padding out elements of Rsmooth
    Rsmoothflat[:nrows] = 0.5 * Rsmoothflat[:nrows] #Halve diagonal elements so they are not double-counted
    Rsmoothflat = np.vstack((Rsmoothflat, np.zeros((Rflatlen - len(Rsmoothflat), len(Rsmoothflat[0]))))) #Zero-pad
    Rsmoothfat = np.reshape(Rsmoothflat, (nrows, nrows, len(Rsmoothflat[0]))) #Reshape Rsmooth as matrix
    del Rsmoothflat
    for i in xrange(1, len(Rsmoothfat[0])):
        Rsmoothfat[:, i, :] = np.roll(Rsmoothfat[:, i, :], i, axis=0) #Now in correct order - but with gaps
    Rsmoothfat = Rsmoothfat + np.transpose(Rsmoothfat, axes=(1, 0, 2)) #Gaps filled in

    #Compute inverse covariance matrices
    Rinv = np.linalg.inv(np.transpose(Rsmoothfat, axes=(2, 0, 1))) #Parallel vers. actually slower!?
    #del Rsmoothfat

    #Compute weights vectors (at each pixel)
    wknumer = np.sum(Rinv, axis=-1)
    del Rinv
    wkdenom = np.sum(wknumer, axis=-1)
    wk = wknumer / wkdenom[:, None]
    del wknumer, wkdenom

    #Dot weights with maps (at each small pixel) - at double l(j)
    finalmap = np.sum(np.multiply(wk, mapsdouble.T), axis=-1)
    del wk, mapsdouble

    #Downgrade resolution of MW maps
    print "Downgrading resolution of CMB wavelet map"
    finalmapalms = ps.map2alm_mw(finalmap, smoothing_lmax, mapsextra[2])
    del finalmap
    #hp.write_alm('alms.fits',finalmapalms,lmax=mapsextra[1]-1,mmax=mapsextra[1]-1)
    alms_fname = 'alms_' + str(mapsextra[5]) + '.fits'
    hp.write_alm(alms_fname, finalmapalms, lmax=mapsextra[1] - 1, mmax=mapsextra[1] - 1)
    del finalmapalms
    finalmapalmstruncate = hp.read_alm(alms_fname)
    finalmaphalf = ps.alm2map_mw(finalmapalmstruncate, mapsextra[1], mapsextra[2])
    del finalmapalmstruncate

    #Saving output map
    wav_outfits = wav_outfits_root + '_j' + str(mapsextra[4]) + '_n' + str(mapsextra[3] + 1) + '.npy'
    if mapsextra[4] == -1:
        wav_outfits = scal_outfits
    np.save(wav_outfits, finalmaphalf)
    del finalmaphalf
    return 0
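#The weights block above implements the standard ILC solution
#w = R^-1 1 / (1^T R^-1 1) at each pixel, with a unit mixing vector (maps in
#CMB blackbody units). Illustrative self-contained check of that algebra,
#with hypothetical sizes and random SPD matrices standing in for the smoothed
#covariance (kept in a string so it never runs at import time):
'''npix_demo, nchan = 4, 3
A = np.random.randn(npix_demo, nchan, nchan)
R_demo = np.einsum('pij,pkj->pik', A, A) + nchan * np.eye(nchan) #SPD per pixel
Rinv_demo = np.linalg.inv(R_demo)
wknumer = np.sum(Rinv_demo, axis=-1) #R^-1 . 1
wkdenom = np.sum(wknumer, axis=-1) #1^T . R^-1 . 1
wk_demo = wknumer / wkdenom[:, None]
assert np.allclose(np.sum(wk_demo, axis=-1), 1.) #ILC constraint: weights sum to unity'''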
def s2let_ilc(mapsextra): #mapsextra = (j,n)
    print "Running S2LET ILC on wavelet scale", mapsextra[0], "/", jmax, "direction", mapsextra[1] + 1, "/", ndir, "\n"
    if mapsextra[0] >= 0: #Wavelet scales
        scale_lmax = wav_bandlims[mapsextra[0]]
    else: #Scaling function
        scale_lmax = int(scal_bandlims)
    smoothing_lmax = 2 * (scale_lmax - 1) + 1 #Doubling lmax for input maps with zero-padding

    #Serial version
    '''mapsdouble = np.zeros((nrows,ps.mw_size(smoothing_lmax)),dtype=np.complex128) #Pre-allocate array
    for i in xrange(nrows):
        mapsdouble[i,:] = doubleworker((mapsextra[0][i],mapsextra[1],smoothing_lmax,mapsextra[2]))'''
    #Parallel version
    mapsextra2 = [(mapsextra[0], mapsextra[1], i, scale_lmax, smoothing_lmax) for i in xrange(nmaps)]
    print "Forming pool"
    pool2 = mg.Pool(nprocess2)
    print "Farming out workers to run doubling function"
    double_output = pool2.map(doubleworker, mapsextra2)
    print "Have returned from doubling workers\n"
    pool2.close()
    pool2.join()
    del pool2

    #Calculate scale_fwhm for smoothing kernel
    nsamp = 1200.
    npix = hp.nside2npix(1 << (int(0.5 * scale_lmax) - 1).bit_length()) #Equivalent no. HEALPix pixels
    scale_fwhm = 4. * mh.sqrt(nsamp / npix)
    #TESTING larger covariance kernel
    scale_fwhm = 15. * scale_fwhm

    #Smooth covariance matrices
    #Serial version
    '''Rsmoothflat = np.zeros_like(Rflat) #Pre-allocate array
    for i in xrange(nindepelems):
        Rsmoothflat[i,:] = smoothworker((Rflat[i],smoothing_lmax,mapsextra[2],gausssmooth,mapsextra[1],mapsextra[3],i,mapsextra[4]))
    del Rflat'''
    #Parallel version
    nindepelems = int(nmaps * (nmaps + 1) * .5) #No. indep. elements in symmetric covariance matrix
    Rextra = [None] * nindepelems
    k = 0
    for i in xrange(nmaps):
        for j in xrange(i + 1):
            Rextra[k] = (mapsextra[0], mapsextra[1], i, j, smoothing_lmax, scale_fwhm)
            k += 1
    print "Forming pool"
    pool3 = mg.Pool(nprocess3)
    print "Farming out workers to run smoothing function"
    R_output = pool3.map(smoothworker, Rextra)
    print "Have returned from smoothing workers\n"
    pool3.close()
    pool3.join()
    del pool3

    #Load R maps and form matrices
    print "Pre-allocating memory for complete covariance tensor\n"
    Rsmooth = np.zeros((ps.mw_size(smoothing_lmax), nmaps, nmaps), dtype=np.float64) #Pre-allocate array
    for i in xrange(nmaps):
        for j in xrange(i + 1):
            if mapsextra[0] >= 0: #Wavelet scales
                R_fits = wav_outfits_root + '_' + wavparam_code + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '_Rsmooth' + str(i + 9 - nmaps) + str(j + 9 - nmaps) + '.npy'
            else: #Scaling function
                R_fits = scal_outfits[:-4] + '_Rsmooth' + str(i + 9 - nmaps) + str(j + 9 - nmaps) + '.npy'
            Rsmooth[:, i, j] = np.load(R_fits)
            if i != j:
                Rsmooth[:, j, i] = Rsmooth[:, i, j]

    #Compute inverse covariance matrices
    print "Calculating inverse covariance matrices\n"
    Rinv = np.linalg.inv(Rsmooth) #Parallel version actually slower!? - LARGEST MEMORY COST: 2*9*9*(8000^2)*complex128 = 0.2TB
    del Rsmooth

    #Compute weights vectors (at each pixel)
    wknumer = np.sum(Rinv, axis=-1)
    del Rinv
    wkdenom = np.sum(wknumer, axis=-1)
    wk = wknumer / wkdenom[:, None]
    del wknumer, wkdenom

    #Map loading within sub-process
    mapsdouble = np.zeros((len(wk), len(wk[0])), dtype=np.float64) #Pre-allocate array
    for i in xrange(nmaps):
        if mapsextra[0] >= 0: #Wavelet scales
            wav_fits = wav_fits_root[i + 9 - nmaps] + '_' + wavparam_code + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '_double.npy'
        else: #Scaling function
            wav_fits = scal_fits[i + 9 - nmaps][:-4] + '_double.npy'
        mapsdouble[:, i] = np.real(np.load(wav_fits, mmap_mode='r')) #Throw away zero imaginary part

    #Dot weights with maps (at each small pixel) - at double l(j)
    finalmap = np.sum(np.multiply(wk, mapsdouble), axis=-1) + 0.j #Add back in zero imaginary part
    del wk, mapsdouble

    #Downgrade resolution of MW maps
    print "Downgrading resolution of CMB wavelet map"
    finalmapalms = ps.lm2lm_hp(ps.map2alm_mw(finalmap, smoothing_lmax, spin), smoothing_lmax) #Come out in MW order - so converted to HPX order
    del finalmap
    if mapsextra[0] >= 0: #Wavelet scales
        alms_fname = wav_outfits_root + '_' + wavparam_code + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '_alms.fits'
    else: #Scaling function
        alms_fname = scal_outfits[:-4] + '_alms.fits'
    hp.write_alm(alms_fname, finalmapalms, lmax=scale_lmax - 1, mmax=scale_lmax - 1)
    del finalmapalms
    finalmapalmstruncate = hp.read_alm(alms_fname)
    finalmaphalf = ps.alm2map_mw(ps.lm_hp2lm(finalmapalmstruncate, scale_lmax), scale_lmax, spin)
    del finalmapalmstruncate

    #Saving output map
    if mapsextra[0] >= 1: #Wavelet scales
        wav_outfits = wav_outfits_root + '_' + wavparam_code + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '.npy'
    elif mapsextra[0] == 0: #FOR NEW SCALING FUNC
        wav_outfits = wav_outfits_0 + '_' + wavparam_code + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '.npy'
    else: #Scaling function
        wav_outfits = scal_outfits[:-4] + '.npy'
    np.save(wav_outfits, finalmaphalf)
    del finalmaphalf
    return 0
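#s2let_ilc takes a single (j,n) task tuple. A plausible driver loop (an
#assumption - the actual driver is not shown in this section; jmax and ndir
#are assumed defined at module level, with j = -1 denoting the scaling
#function as in the branches above):
'''for j in [-1] + range(0, jmax + 1):
    for n in xrange(ndir):
        if j == -1 and n > 0:
            continue #Scaling function is axisymmetric: single direction
        s2let_ilc((j, n))'''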
def smoothworker(i): #(j,n,map_index1,map_index2,smoothing_lmax,scale_fwhm) [map_index2<=map_index1]
    print "Smoothing another independent covariance element", i[2], ",", i[3]
    #Map loading within sub-process
    wav_fits1 = wav_fits_root[i[2]] + '_j' + str(i[0]) + '_n' + str(i[1] + 1) + '_double.npy'
    wav_fits2 = wav_fits_root[i[3]] + '_j' + str(i[0]) + '_n' + str(i[1] + 1) + '_double.npy'
    if i[0] == -1: #Scaling function
        wav_fits1 = scal_fits[i[2]][:-4] + '_double.npy'
        wav_fits2 = scal_fits[i[3]][:-4] + '_double.npy'
    map1 = np.real(np.load(wav_fits1, mmap_mode='r')) #Throw away zero imaginary part
    map2 = np.real(np.load(wav_fits2, mmap_mode='r'))
    R = np.multiply(map1, map2) + 0.j #Add back in zero imaginary part
    del map1, map2
    alms = ps.map2alm_mw(R, i[4], spin) #No pixwin correction with MW - calc alm's to smooth
    del R
    gausssmooth = hp.gauss_beam(i[5], lmax=i[4] - 1)
    hp.almxfl(alms, gausssmooth, inplace=True) #Multiply by Gaussian beam
    '''if i[5] != -1: #If n != -1 (i.e. the maps are directional)
        print "Picking out directional component of covariance map"
        #Testing convolving gaussian-smoothed covariance maps with directional wavelet
        jmin_min = 1
        #Need to truncate alm's to scale_lmax (Is this necessary?)
        alms_fname = 'alms_dirwav_' + str(i[7]) + '_' + str(i[5]) + '_' + str(i[6]) + '.fits'
        hp.write_alm(alms_fname,alms,lmax=i[4]-1,mmax=i[4]-1)
        del alms
        alms_truncate = hp.read_alm(alms_fname)
        print "Analysing covariance map" #Could increase wavparam!?
        wav,scal = ps.analysis_lm2wav(alms_truncate,wavparam,i[4],jmin_min,ndir,spin,upsample)
        del alms_truncate
        #Delete wrong directions by zero-ing them
        print "Deleting wrong directions"
        jmax_min = ps.pys2let_j_max(wavparam,i[4],jmin_min)
        for j in xrange(jmin_min,jmax_min+1):
            for n in xrange(0,ndir):
                if n != i[5]:
                    offset,new_scale_lmax,nelem,nelem_wav = ps.wav_ind(j,n,wavparam,i[4],ndir,jmin_min,upsample)
                    wav[offset:offset+nelem] = 0.
        print "Synthesising directional covariance map"
        alms = ps.synthesis_wav2lm(wav,scal,wavparam,i[4],jmin_min,ndir,spin,upsample)
        del wav,scal
        #Expand alm's with zero-padding
        print "Zero-padding the alm's"
        nzeros = i[1] - i[4] #No. zeros to pad
        new_alms_temp = np.concatenate((alms[:i[4]],np.zeros(nzeros)))
        for em in xrange(1,i[4]):
            startindex = em*i[4] - .5*em*(em-1)
            new_alms_temp = np.concatenate((new_alms_temp,alms[startindex:(startindex+i[4]-em)],np.zeros(nzeros)))
        del alms
        print "Temporary length of alm's =", len(new_alms_temp)
        nfinalzeros = hp.Alm.getsize(i[1]-1) - len(new_alms_temp)
        alms = np.concatenate((new_alms_temp,np.zeros(nfinalzeros)))
        del new_alms_temp
        print "Final length of alm's =", len(alms)'''
    print "Synthesising smoothed covariance map for element", i[2], ",", i[3]
    Rsmooth = np.real(ps.alm2map_mw(alms, i[4], spin)) #Throw away zero imaginary part
    del alms
    #SAVE smoothed covariance
    R_fits = wav_outfits_root + '_j' + str(i[0]) + '_n' + str(i[1] + 1) + '_Rsmooth' + str(i[2]) + str(i[3]) + '.npy'
    if i[0] == -1: #Scaling function
        R_fits = scal_outfits[:-4] + '_Rsmooth' + str(i[2]) + str(i[3]) + '.npy'
    np.save(R_fits, Rsmooth)
    del Rsmooth
    return 0
def s2let_ilc_dir_para(mapsextra): #mapsextra = (j,n)
    print "\nRunning Directional S2LET ILC on wavelet scale", mapsextra[0], "/", jmax, "direction", mapsextra[1] + 1, "/", ndir, "\n"
    #For TESTING dropping channels
    '''if mapsextra[0] <= 33:
        nmaps = 9
    elif 34 <= mapsextra[0] <= 35:
        nmaps = 8
    elif 36 <= mapsextra[0] <= 40:
        nmaps = 7
    elif 41 <= mapsextra[0] <= 42:
        nmaps = 6
    elif mapsextra[0] >= 43:
        nmaps = 5'''
    scale_lmax = int(mh.ceil(wavparam**(mapsextra[0] + 1))) #lambda^(j+1) - rounded up and made integer
    if scale_lmax > ellmax:
        scale_lmax = ellmax
    if mapsextra[0] == -1: #Scaling function
        scale_lmax = int(mh.ceil(wavparam**jmin)) #lambda^(jmin) - rounded up and made integer
    smoothing_lmax = 2 * (scale_lmax - 1) + 1 #Doubling lmax for input maps with zero-padding

    #Serial version
    '''mapsdouble = np.zeros((nrows,ps.mw_size(smoothing_lmax)),dtype=np.complex128) #Pre-allocate array
    for i in xrange(nrows):
        mapsdouble[i,:] = doubleworker((mapsextra[0][i],mapsextra[1],smoothing_lmax,mapsextra[2]))'''
    #Parallel version
    mapsextra2 = [(mapsextra[0], mapsextra[1], i, scale_lmax, smoothing_lmax) for i in xrange(nmaps)]
    print "Forming pool"
    pool2 = mg.Pool(nprocess2)
    print "\nFarming out workers to run doubling function"
    double_output = pool2.map(doubleworker, mapsextra2)
    print "Have returned from doubling workers\n"
    pool2.close()
    pool2.join()
    del pool2

    #Calculate scale_fwhm for smoothing kernel
    nsamp = 1200.
    npix = hp.nside2npix(1 << (int(0.5 * scale_lmax) - 1).bit_length()) #Equivalent no. HEALPix pixels
    scale_fwhm = 4. * mh.sqrt(nsamp / npix)

    #Smooth covariance matrices
    #Serial version
    '''Rsmoothflat = np.zeros_like(Rflat) #Pre-allocate array
    for i in xrange(nindepelems):
        Rsmoothflat[i,:] = smoothworker((Rflat[i],smoothing_lmax,mapsextra[2],gausssmooth,mapsextra[1],mapsextra[3],i,mapsextra[4]))
    del Rflat'''
    #Parallel version
    nindepelems = int(nmaps * (nmaps + 1) * .5) #No. indep. elements in symmetric covariance matrix
    Rextra = [None] * nindepelems
    k = 0
    for i in xrange(nmaps):
        for j in xrange(i + 1):
            Rextra[k] = (mapsextra[0], mapsextra[1], i, j, smoothing_lmax, scale_fwhm)
            k += 1
    print "Forming pool"
    pool3 = mg.Pool(nprocess3)
    print "\nFarming out workers to run smoothing function"
    R_output = pool3.map(smoothworker, Rextra)
    print "Have returned from smoothing workers\n"
    pool3.close()
    pool3.join()
    del pool3

    #Load R maps and form matrices
    print "Pre-allocating memory for complete covariance tensor\n"
    Rsmooth = np.zeros((ps.mw_size(smoothing_lmax), nmaps, nmaps), dtype=np.float64) #Pre-allocate array
    for i in xrange(nmaps):
        for j in xrange(i + 1):
            R_fits = wav_outfits_root + '_j' + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '_Rsmooth' + str(i + 9 - nmaps) + str(j + 9 - nmaps) + '.npy'
            if mapsextra[0] == -1: #Scaling function
                R_fits = scal_outfits[:-4] + '_Rsmooth' + str(i + 9 - nmaps) + str(j + 9 - nmaps) + '.npy'
            Rsmooth[:, i, j] = np.load(R_fits)
            if i != j:
                Rsmooth[:, j, i] = Rsmooth[:, i, j]

    #Compute inverse covariance matrices
    print "Calculating inverse covariance matrices\n"
    Rinv = np.linalg.inv(Rsmooth) #Parallel version actually slower!? - LARGEST MEMORY COST: 2*9*9*(8000^2)*complex128 = 0.2TB
    del Rsmooth

    #Compute weights vectors (at each pixel)
    wknumer = np.sum(Rinv, axis=-1)
    del Rinv
    wkdenom = np.sum(wknumer, axis=-1)
    wk = wknumer / wkdenom[:, None]
    del wknumer, wkdenom

    #Saving or loading weights tensor for TESTING
    wkfits = wav_outfits_root + '_j' + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '_weights.npy'
    if mapsextra[0] == -1:
        wkfits = scal_outfits[:-4] + '_weights.npy'
    np.save(wkfits, wk)
    #wk = np.load(wkfits)

    #Dropping some channels for TESTING
    '''if mapsextra[0] > 33:
        wk[:,0] = 0.
    if mapsextra[0] > 35:
        wk[:,1] = 0.
    if mapsextra[0] > 40:
        wk[:,2] = 0.
    if mapsextra[0] > 42:
        wk[:,3] = 0.'''

    #Map loading within sub-process
    mapsdouble = np.zeros((len(wk), len(wk[0])), dtype=np.float64) #Pre-allocate array
    for i in xrange(nmaps):
        wav_fits = wav_fits_root[i + 9 - nmaps] + '_j' + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '_double.npy'
        if mapsextra[0] == -1: #Scaling function
            wav_fits = scal_fits[i + 9 - nmaps][:-4] + '_double.npy'
        mapsdouble[:, i] = np.real(np.load(wav_fits, mmap_mode='r')) #Throw away zero imaginary part

    #Dot weights with maps (at each small pixel) - at double l(j)
    finalmap = np.sum(np.multiply(wk, mapsdouble), axis=-1) + 0.j #Add back in zero imaginary part
    del wk, mapsdouble

    #Downgrade resolution of MW maps
    print "Downgrading resolution of CMB wavelet map"
    finalmapalms = ps.map2alm_mw(finalmap, smoothing_lmax, spin)
    del finalmap
    alms_fname = wav_outfits_root + '_j' + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '_alms.fits'
    if mapsextra[0] == -1: #Scaling function
        alms_fname = scal_outfits[:-4] + '_alms.fits'
    hp.write_alm(alms_fname, finalmapalms, lmax=scale_lmax - 1, mmax=scale_lmax - 1)
    del finalmapalms
    finalmapalmstruncate = hp.read_alm(alms_fname)
    finalmaphalf = ps.alm2map_mw(finalmapalmstruncate, scale_lmax, spin)
    del finalmapalmstruncate

    #Saving output map
    wav_outfits = wav_outfits_root + '_j' + str(mapsextra[0]) + '_n' + str(mapsextra[1] + 1) + '.npy'
    if mapsextra[0] == -1:
        wav_outfits = scal_outfits[:-4] + '.npy'
    np.save(wav_outfits, finalmaphalf)
    del finalmaphalf
    return 0
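#The hp.write_alm / hp.read_alm round-trip above is used purely to truncate
#the alm's from smoothing_lmax back down to scale_lmax. An in-memory
#equivalent (illustrative sketch; truncate_alms is a hypothetical helper,
#not part of the script), using healpy's alm indexing:
'''def truncate_alms(alms, old_lmax, new_lmax): #arguments are band-limits, i.e. lmax + 1
    truncated = np.zeros(hp.Alm.getsize(new_lmax - 1), dtype=alms.dtype)
    for em in xrange(new_lmax):
        old_start = hp.Alm.getidx(old_lmax - 1, em, em) #block of fixed m starts at l=m
        new_start = hp.Alm.getidx(new_lmax - 1, em, em)
        truncated[new_start:new_start + new_lmax - em] = alms[old_start:old_start + new_lmax - em]
    return truncated'''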