def __init__(self, comm=None, signal_map="signal_map", lmax=None,
             grid=None, fwhm_deg=None, beam=None, out="smoothed_signal_map"):
    autotimer = timing.auto_timer(type(self).__name__)

    # We call the parent class constructor, which currently does nothing
    super().__init__()

    self.comm = comm
    self.signal_map = signal_map
    self.lmax = lmax
    self.out = out
    self.grid = grid

    # distribute alms: each rank owns every comm.size-th m, starting at its rank
    local_m_indices = np.arange(self.comm.rank, lmax + 1, self.comm.size,
                                dtype=np.int32)
    self.order = libsharp.packed_real_order(lmax, ms=local_m_indices)

    if (fwhm_deg is not None) and (beam is not None):
        raise Exception("OpSmooth error, specify either fwhm_deg or beam, "
                        "not both")

    if (fwhm_deg is None) and (beam is None):
        raise Exception("OpSmooth error, specify fwhm_deg or beam")

    if fwhm_deg is not None:
        self.beam = hp.gauss_beam(fwhm=np.radians(fwhm_deg), lmax=lmax,
                                  pol=True)
    else:
        self.beam = beam

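# Hedged sketch (not part of the original source): how the `order` and `beam`
# built in the constructor above are typically combined with libsharp
# analysis/synthesis.  The function name smooth_local_iqu is hypothetical;
# local_map is assumed to have shape (3, n_local_pix) on every rank, and beam
# is assumed to carry one window-function column per component, as returned by
# hp.gauss_beam(..., pol=True).
def smooth_local_iqu(grid, order, beam, local_map, comm):
    import numpy as np
    import libsharp

    # map2alm: spin 0 for intensity, spin 2 for polarization
    alm_I = libsharp.analysis(
        grid, order, np.ascontiguousarray(local_map[0].reshape((1, 1, -1))),
        spin=0, comm=comm)
    alm_P = libsharp.analysis(
        grid, order, np.ascontiguousarray(local_map[1:].reshape((1, 2, -1))),
        spin=2, comm=comm)

    # multiply the a_lm by the beam window function (assumed column layout)
    order.almxfl(alm_I, np.ascontiguousarray(beam[:, 0:1]))
    order.almxfl(alm_P, np.ascontiguousarray(beam[:, 1:3]))

    # alm2map back onto the same distributed grid
    out = np.zeros_like(local_map, dtype=np.float64)
    out[0] = libsharp.synthesis(grid, order, alm_I, spin=0, comm=comm)[0][0]
    out[1:] = libsharp.synthesis(grid, order, alm_P, spin=2, comm=comm)[0]
    return out
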
def test_basic():
    lmax = 10
    nside = 8

    rank = MPI.COMM_WORLD.Get_rank()
    ms = np.arange(rank, lmax + 1, MPI.COMM_WORLD.Get_size(), dtype=np.int32)
    order = libsharp.packed_real_order(lmax, ms=ms)
    grid = libsharp.healpix_grid(nside)
    alm = np.zeros(order.local_size())
    if rank == 0:
        alm[0] = 1
    elif rank == 1:
        alm[0] = 1

    map = libsharp.synthesis(grid, order, np.repeat(alm[None, None, :], 3, 0),
                             comm=MPI.COMM_WORLD)
    assert np.all(map[2, :] == map[1, :]) and np.all(map[1, :] == map[0, :])

    map = map[0, 0, :]

    if rank == 0:
        healpy.mollzoom(map)
        from matplotlib.pyplot import show
        show()

def distribute_rings_libsharp(mpi_comm, nside, lmax):
    """Create a libsharp map distribution based on rings

    Build a libsharp grid object that distributes a HEALPix map by rings,
    balancing North and South rings across processes to achieve the best
    performance in the Harmonic Transforms.
    Returns the local pixel indices in RING ordering, the grid object and
    the a_lm distribution object.

    Parameters
    ----------
    mpi_comm : mpi4py.MPI.Comm
        mpi4py communicator
    nside : int
        nside of the map
    lmax : int
        maximum multipole used to distribute the a_lm

    Returns
    -------
    local_pixels : np.ndarray
        integer array of local pixel indices in the current MPI process in
        RING ordering
    libsharp_grid : libsharp.healpix_grid
        libsharp object that includes metadata about HEALPix distributed rings
    libsharp_order : libsharp.packed_real_order
        libsharp object that describes the distribution of the a_lm across
        the communicator
    """
    import libsharp

    nrings = 4 * nside - 1  # a HEALPix map has 4 * nside - 1 iso-latitude rings

    # ring indices are 1-based
    ring_indices_hemisphere = np.arange(2 * nside, dtype=np.int32) + 1

    local_ring_indices = ring_indices_hemisphere[mpi_comm.rank::mpi_comm.size]

    # to improve performance, symmetric rings north/south need to be in the
    # same rank, therefore we use symmetry to create the full ring indexing

    if local_ring_indices[-1] == 2 * nside:
        # has equator ring
        local_ring_indices = np.concatenate(
            [local_ring_indices[:-1],
             nrings - local_ring_indices[::-1] + 1])
    else:
        # does not have equator ring
        local_ring_indices = np.concatenate(
            [local_ring_indices,
             nrings - local_ring_indices[::-1] + 1])

    libsharp_grid = libsharp.healpix_grid(nside, rings=local_ring_indices)

    # returns start index of the ring and number of pixels
    startpix, ringpix, _, _, _ = hp.ringinfo(nside,
                                             local_ring_indices.astype(int))

    local_npix = libsharp_grid.local_size()
    local_pixels = expand_pix(startpix, ringpix, local_npix).astype(int)

    local_m_indices = np.arange(mpi_comm.rank, lmax + 1, mpi_comm.size,
                                dtype=np.int32)
    libsharp_order = libsharp.packed_real_order(lmax, ms=local_m_indices)
    return local_pixels, libsharp_grid, libsharp_order

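# Hedged usage sketch (not in the original source): wrap the helper above to
# pull out the locally owned pixels of a full-sky IQU map.  The name
# distribute_iqu_map and the argument full_map (a (3, 12 * nside ** 2) array
# in RING ordering, available on every rank) are hypothetical.
def distribute_iqu_map(mpi_comm, nside, lmax, full_map):
    local_pixels, libsharp_grid, libsharp_order = distribute_rings_libsharp(
        mpi_comm, nside, lmax)
    # keep only the pixels owned by this rank; this layout is what the
    # libsharp.analysis calls in the snippets below expect as input
    local_map = full_map[:, local_pixels]
    return local_map, libsharp_grid, libsharp_order
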
def smooth_pol(self, fwhm_in, nside_in, maps_in):
    if fwhm_in > .9 * self.fwhm and nside_in == self.nside:
        return maps_in
    if fwhm_in > .9 * self.fwhm and self.nside < nside_in:
        # Simple ud_grade
        if self.global_rank == 0 and self.verbose:
            print('Downgrading Nside {} -> {}'
                  ''.format(nside_in, self.nside), flush=True)
        maps_out = []
        npix_in = hp.nside2npix(nside_in)
        my_pix_in = self.get_my_pix(nside_in)
        for (qmap, umap) in maps_in:
            my_mapout = np.zeros(npix_in, dtype=np.float64)
            qmapout = np.zeros(npix_in, dtype=np.float64)
            umapout = np.zeros(npix_in, dtype=np.float64)
            my_mapout[my_pix_in] = qmap
            self.comm.Allreduce(my_mapout, qmapout)
            my_mapout[my_pix_in] = umap
            self.comm.Allreduce(my_mapout, umapout)
            del my_mapout
            maps_out.append(
                (hp.ud_grade(qmapout, self.nside)[self.my_pix],
                 hp.ud_grade(umapout, self.nside)[self.my_pix]))
    else:
        # Full smoothing
        lmax = self.optimal_lmax(fwhm_in, nside_in)
        total_beam = self.total_beam(fwhm_in, lmax, pol=True)
        if self.global_rank == 0 and self.verbose:
            print('Smoothing {} -> {}. lmax = {}. Nside {} -> {}'
                  ''.format(fwhm_in, self.fwhm, lmax, nside_in, self.nside),
                  flush=True)
        local_m = np.arange(self.rank, lmax + 1, self.ntask, dtype=np.int32)
        alminfo = packed_real_order(lmax, ms=local_m)
        grid_in = self.get_grid(nside_in)
        grid_out = self.get_grid(self.nside)
        maps_out = []
        for (local_map_Q, local_map_U) in maps_in:
            map_P = np.ascontiguousarray(
                np.vstack([local_map_Q, local_map_U]).reshape((1, 2, -1)),
                dtype=np.float64)
            alm_P = analysis(grid_in, alminfo, map_P, spin=2, comm=self.comm)
            if total_beam is not None:
                alminfo.almxfl(alm_P, total_beam)
            map_P = synthesis(grid_out, alminfo, alm_P, spin=2,
                              comm=self.comm)[0]
            maps_out.append(map_P)
    return maps_out

def smooth(self, fwhm_in, nside_in, maps_in):
    """ Smooth a distributed map and change the resolution. """
    if fwhm_in > .9 * self.fwhm and nside_in == self.nside:
        return maps_in
    if fwhm_in > .9 * self.fwhm and self.nside < nside_in:
        # Simple ud_grade
        if self.global_rank == 0 and self.verbose:
            print('Downgrading Nside {} -> {}'
                  ''.format(nside_in, self.nside), flush=True)
        maps_out = []
        npix_in = hp.nside2npix(nside_in)
        my_pix_in = self.get_my_pix(nside_in)
        for m in maps_in:
            my_outmap = np.zeros(npix_in, dtype=np.float64)
            outmap = np.zeros(npix_in, dtype=np.float64)
            my_outmap[my_pix_in] = m
            self.comm.Allreduce(my_outmap, outmap)
            del my_outmap
            maps_out.append(hp.ud_grade(outmap, self.nside)[self.my_pix])
    else:
        # Full smoothing
        lmax = self.optimal_lmax(fwhm_in, nside_in)
        total_beam = self.total_beam(fwhm_in, lmax, pol=False)
        if self.global_rank == 0 and self.verbose:
            print('Smoothing {} -> {}. lmax = {}. Nside {} -> {}'
                  ''.format(fwhm_in, self.fwhm, lmax, nside_in, self.nside),
                  flush=True)
        local_m = np.arange(self.rank, lmax + 1, self.ntask, dtype=np.int32)
        alminfo = packed_real_order(lmax, ms=local_m)
        grid_in = self.get_grid(nside_in)
        grid_out = self.get_grid(self.nside)
        maps_out = []
        for local_map in maps_in:
            map_I = np.ascontiguousarray(local_map.reshape([1, 1, -1]),
                                         dtype=np.float64)
            alm_I = analysis(grid_in, alminfo, map_I, spin=0, comm=self.comm)
            if total_beam is not None:
                alminfo.almxfl(alm_I, total_beam)
            map_I = synthesis(grid_out, alminfo, alm_I, spin=0,
                              comm=self.comm)[0][0]
            maps_out.append(map_I)
    return maps_out

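# Hedged usage sketch (not in the original source): `smoother` stands for an
# instance of the (unnamed here) class that provides smooth() and smooth_pol()
# above, and fwhm_in is in the same units as self.fwhm.  smooth() takes a list
# of local intensity maps, smooth_pol() a list of (Q, U) pairs of local maps.
#
# local_I_out, = smoother.smooth(fwhm_in=fwhm_in, nside_in=nside_in,
#                                maps_in=[local_I_in])
# (local_Q_out, local_U_out), = smoother.smooth_pol(
#     fwhm_in=fwhm_in, nside_in=nside_in, maps_in=[(local_Q_in, local_U_in)])
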
def _distribute_alm(self):
    """ Distribute the a_lm across the communicator.

    This includes translating the complex a_lm into real coefficients
    """
    self._local_m_indices = np.arange(self._rank, self._mmax + 1,
                                      self._ntask, dtype=np.int32)
    self._alminfo = packed_real_order(self._lmax, ms=self._local_m_indices)

    my_nalm = 0
    for m in self._local_m_indices:
        # All but the m=0 mode create two entries in the
        # real a_lm array
        if m == 0:
            my_nalm += self._lmax + 1 - m
        else:
            my_nalm += 2 * (self._lmax + 1 - m)

    my_alm = np.zeros([self._nnz, my_nalm])

    sqrt2 = np.sqrt(2)
    for comp in range(self._nnz):
        i = 0
        ii = 0
        for m in range(self._mmax + 1):
            if m % self._ntask != self._rank:
                # not a local m-mode
                i += self._lmax + 1 - m
                continue
            for _ in range(self._lmax + 1 - m):
                if m == 0:
                    my_alm[comp, ii] = self._alm[comp, i].real
                else:
                    my_alm[comp, ii] = self._alm[comp, i].real * sqrt2
                    ii += 1
                    my_alm[comp, ii] = self._alm[comp, i].imag * sqrt2
                ii += 1
                i += 1

    self._alm = my_alm
    return

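# Hedged consistency check (not in the original source): the counting in
# _distribute_alm matches libsharp's packed real ordering, so the number of
# local real coefficients can be cross-checked against order.local_size().
# The helper name _expected_local_nalm is hypothetical.
def _expected_local_nalm(lmax, local_m_indices):
    # m = 0 stores one real number per l; every other local m stores a real
    # and an imaginary part per l (hence the factor of 2 and the sqrt(2)
    # normalization above)
    return sum((lmax + 1 - m) if m == 0 else 2 * (lmax + 1 - m)
               for m in local_m_indices)

# e.g. lmax = 10 with local m = {0, 3, 6, 9}:
# _expected_local_nalm(10, [0, 3, 6, 9]) == 11 + 16 + 10 + 4 == 41,
# which should equal
# libsharp.packed_real_order(10, ms=[0, 3, 6, 9]).local_size()
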
def test_basic():
    lmax = 10
    nside = 8

    rank = MPI.COMM_WORLD.Get_rank()
    ms = np.arange(rank, lmax + 1, MPI.COMM_WORLD.Get_size(), dtype=np.int32)
    order = libsharp.packed_real_order(lmax, ms=ms)
    grid = libsharp.healpix_grid(nside)
    alm = np.zeros(order.local_size())
    if rank == 0:
        alm[0] = 1
    elif rank == 1:
        alm[0] = 1

    map = libsharp.synthesis(grid, order, np.repeat(alm[None, None, :], 3, 0),
                             comm=MPI.COMM_WORLD)
    assert np.all(map[2, :] == map[1, :]) and np.all(map[1, :] == map[0, :])

    map = map[0, 0, :]
    print(rank, "shape", map.shape)
    print(rank, "mean", map.mean())

local_pix = expand_pix(startpix, ringpix, local_npix)

local_map = input_map[:, local_pix]
local_hitmap = np.zeros(npix)
local_hitmap[local_pix] = 1
hp.write_map("hitmap_{}.fits".format(rank), local_hitmap, overwrite=True)

print("rank", rank, "npix", npix, "local_npix", local_npix,
      "local_map len", len(local_map), "unique pix", len(np.unique(local_pix)))

local_m_indices = np.arange(rank, lmax + 1, MPI.COMM_WORLD.Get_size(),
                            dtype=np.int32)
if not mpi:
    local_m_indices = None

order = libsharp.packed_real_order(lmax, ms=local_m_indices)
local_nl = order.local_size()
print("rank", rank, "local_nl", local_nl, "mval", order.mval())

mpi_comm = MPI.COMM_WORLD if mpi else None

# map2alm
# maps in libsharp are 3D: the 2nd dimension is IQU, the 3rd is pixel

alm_sharp_I = libsharp.analysis(
    grid, order,
    np.ascontiguousarray(local_map[0].reshape((1, 1, -1))),
    spin=0, comm=mpi_comm)
alm_sharp_P = libsharp.analysis(
    grid, order,
    np.ascontiguousarray(local_map[1:].reshape((1, 2, -1))),
    spin=2, comm=mpi_comm)

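# Hedged continuation (not in the original source): go back to pixel space and
# reassemble a full-sky map with an Allreduce over the locally owned pixels,
# mirroring the pattern used in smooth()/smooth_pol() above.  The output file
# name "map_I.fits" is hypothetical; all other names come from this snippet.
#
# new_local_I = libsharp.synthesis(grid, order, alm_sharp_I, spin=0,
#                                  comm=mpi_comm)[0][0]
# my_full_I = np.zeros(npix)
# full_I = np.zeros(npix)
# my_full_I[local_pix] = new_local_I
# MPI.COMM_WORLD.Allreduce(my_full_I, full_I)
# if rank == 0:
#     hp.write_map("map_I.fits", full_I, overwrite=True)
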