def test_construction(self):

    arr = mpiarray.MPIArray((10, 11), axis=1)

    l, s, e = mpiutil.split_local(11)

    # Check that global shape is set correctly
    assert arr.global_shape == (10, 11)

    assert arr.shape == (10, l)
    assert arr.local_offset == (0, s)
    assert arr.local_shape == (10, l)
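# --- Illustrative sketch (not part of the test suite) ----------------------
# The tests here lean on mpiutil.split_local(N), which partitions N items
# across the MPI ranks and returns (local_size, start, end) for the calling
# rank. A minimal pure-Python equivalent, assuming the remainder goes to the
# lowest ranks (an assumption about caput's convention, for intuition only):
def _split_local_sketch(n, rank, size):
    """Hypothetical stand-in for mpiutil.split_local."""
    base, rem = divmod(n, size)
    l = base + (1 if rank < rem else 0)  # local size on this rank
    s = rank * base + min(rank, rem)     # global start index
    return l, s, s + l                   # (local, start, end)

# e.g. n=11 over 3 ranks -> (4, 0, 4), (4, 4, 8), (3, 8, 11)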
def stokes2lin(self):
    """Convert the Stokes polarized data to linear polarization."""
    try:
        pol = self.pol
    except KeyError:
        raise RuntimeError('Polarization of the data is unknown, can not convert')

    if pol.attrs['pol_type'] == 'linear' and pol.shape[0] == 4:
        warnings.warn('Data is already linear polarization, no need to convert')
        return

    if pol.attrs['pol_type'] == 'stokes' and pol.shape[0] == 4:
        # redistribute to 0 axis if polarization is the distributed axis
        original_dist_axis = self.main_data_dist_axis
        if 'polarization' == self.main_data_axes[self.main_data_dist_axis]:
            self.redistribute(0)

        pol = pol[:].tolist()
        p = self.pol_dict

        # create a new MPIArray to hold the new data
        md = mpiarray.MPIArray(self.main_data.shape,
                               axis=self.main_data_dist_axis,
                               comm=self.comm,
                               dtype=self.main_data.dtype)

        # convert to linear xx, yy, xy, yx
        md.local_array[:, :, 0] = self.main_data.local_data[:, :, pol.index(p['I'])] + self.main_data.local_data[:, :, pol.index(p['Q'])]        # xx
        md.local_array[:, :, 1] = self.main_data.local_data[:, :, pol.index(p['I'])] - self.main_data.local_data[:, :, pol.index(p['Q'])]        # yy
        md.local_array[:, :, 2] = self.main_data.local_data[:, :, pol.index(p['U'])] + 1.0J * self.main_data.local_data[:, :, pol.index(p['V'])] # xy
        md.local_array[:, :, 3] = self.main_data.local_data[:, :, pol.index(p['U'])] - 1.0J * self.main_data.local_data[:, :, pol.index(p['V'])] # yx

        attr_dict = {}  # temporarily save attrs of this dataset
        memh5.copyattrs(self.main_data.attrs, attr_dict)
        del self[self.main_data_name]
        # create main data
        self.create_dataset(self.main_data_name,
                            shape=md.shape, dtype=md.dtype, data=md,
                            distributed=True,
                            distributed_axis=self.main_data_dist_axis)
        memh5.copyattrs(attr_dict, self.main_data.attrs)

        del self['pol']
        self.create_dataset('pol', data=np.array([p['xx'], p['yy'], p['xy'], p['yx']]), dtype='i4')
        self['pol'].attrs['pol_type'] = 'linear'

        # redistribute self to original axis
        self.redistribute(original_dist_axis)
    else:
        raise RuntimeError('Can not convert to linear polarization')
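# --- Illustrative sketch (not part of tlpipe) -------------------------------
# The four assignments above implement the standard Stokes -> linear mapping
# XX = I + Q, YY = I - Q, XY = U + iV, YX = U - iV. A minimal NumPy-only
# version, assuming an array with a trailing pol axis ordered [I, Q, U, V]
# (the axis layout here is an assumption, not tlpipe's actual one):
import numpy as np

def stokes_to_linear(data):
    """Hypothetical helper: convert a (..., 4) Stokes array to xx, yy, xy, yx."""
    I, Q, U, V = (data[..., i] for i in range(4))
    out = np.empty(data.shape, dtype=np.complex128)
    out[..., 0] = I + Q           # xx
    out[..., 1] = I - Q           # yy
    out[..., 2] = U + 1.0j * V    # xy
    out[..., 3] = U - 1.0j * V    # yx
    return out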
def test_redistribution(self):

    gshape = (1, 11, 2, 14, 3, 4)
    nelem = np.prod(gshape)
    garr = np.arange(nelem).reshape(gshape)

    l0, s0, e0 = mpiutil.split_local(11)
    l1, s1, e1 = mpiutil.split_local(14)
    l2, s2, e2 = mpiutil.split_local(4)

    arr = mpiarray.MPIArray(gshape, axis=1, dtype=np.int64)
    arr[:] = garr[:, s0:e0]

    arr2 = arr.redistribute(axis=3)
    assert (arr2 == garr[:, :, :, s1:e1]).view(np.ndarray).all()

    arr3 = arr.redistribute(axis=5)
    assert (arr3 == garr[:, :, :, :, :, s2:e2]).view(np.ndarray).all()
def test_global_setslice(self):

    rank = mpiutil.rank
    size = mpiutil.size

    darr = mpiarray.MPIArray((size * 5, 20), axis=0)

    # Initialise the distributed array
    for li, gi in darr.enumerate(axis=0):
        darr[li] = 10 * (10 * rank + li) + np.arange(20)

    # Construct numpy array which should be equivalent to the global array
    # (the hard-coded 4 assumes the test is run on 4 ranks)
    whole_array = (
        10 * (10 * np.arange(4.0)[:, np.newaxis]
              + np.arange(5.0)[np.newaxis, :]).flatten()[:, np.newaxis]
        + np.arange(20)[np.newaxis, :]
    )

    # Extract the section for each rank distributed along axis=0.
    # Note this is a view, so assignments to whole_array below show up in it.
    local_array = whole_array[(rank * 5):((rank + 1) * 5)]

    # Check a simple assignment to a slice along the non-parallel axis
    darr.global_slice[:, 6] = -2.0
    local_array[:, 6] = -2.0
    assert (darr == local_array).all()

    # Check a partial assignment along the parallel axis
    darr.global_slice[7:, 7:9] = -3.0
    whole_array[7:, 7:9] = -3.0
    assert (darr == local_array).all()

    # Check assignment of a single index on the parallel axis
    darr.global_slice[6] = np.arange(20.0)
    whole_array[6] = np.arange(20.0)
    assert (darr == local_array).all()

    # Check copy of one column into the other
    darr.global_slice[:, 8] = darr.global_slice[:, 9]
    whole_array[:, 8] = whole_array[:, 9]
    assert (darr == local_array).all()
def test_io(self):

    import h5py

    # Cleanup directories
    fname = 'testdset.hdf5'

    if mpiutil.rank0 and os.path.exists(fname):
        os.remove(fname)

    mpiutil.barrier()

    gshape = (19, 17)

    ds = mpiarray.MPIArray(gshape, dtype=np.int64)

    ga = np.arange(np.prod(gshape)).reshape(gshape)
    l0, s0, e0 = mpiutil.split_local(gshape[0])
    ds[:] = ga[s0:e0]

    ds.redistribute(axis=1).to_hdf5(fname, 'testds', create=True)

    if mpiutil.rank0:
        with h5py.File(fname, 'r') as f:
            h5ds = f['testds'][:]
            assert (h5ds == ga).all()

    ds2 = mpiarray.MPIArray.from_hdf5(fname, 'testds')
    assert (ds2 == ds).all()

    mpiutil.barrier()
    if mpiutil.rank0 and os.path.exists(fname):
        os.remove(fname)
def test_gather(self):

    rank = mpiutil.rank
    size = mpiutil.size

    block = 2

    global_shape = (2, 3, size * block)
    global_array = np.zeros(global_shape, dtype=np.float64)
    global_array[..., :] = np.arange(size * block)

    arr = mpiarray.MPIArray(global_shape, dtype=np.float64, axis=2)
    arr[:] = global_array[..., (rank * block):((rank + 1) * block)]

    assert (arr.allgather() == global_array).all()

    gather_rank = 1 if size > 1 else 0
    ga = arr.gather(rank=gather_rank)

    if rank == gather_rank:
        assert (ga == global_array).all()
    else:
        assert ga is None
def test_wrap(self):

    ds = mpiarray.MPIArray((10, 17))

    df = np.fft.rfft(ds, axis=1)

    assert type(df) == np.ndarray

    da = mpiarray.MPIArray.wrap(df, axis=0)

    assert type(da) == mpiarray.MPIArray
    assert da.global_shape == (10, 9)

    l0, s0, e0 = mpiutil.split_local(10)

    assert da.local_shape == (l0, 9)

    if mpiutil.rank0:
        df = df[:-1]

    if mpiutil.size > 1:
        with self.assertRaises(Exception):
            mpiarray.MPIArray.wrap(df, axis=0)
def test_reshape(self):

    gshape = (1, 11, 2, 14)

    l0, s0, e0 = mpiutil.split_local(11)

    arr = mpiarray.MPIArray(gshape, axis=1, dtype=np.int64)

    arr2 = arr.reshape((None, 28))

    # Check type
    assert isinstance(arr2, mpiarray.MPIArray)

    # Check global shape
    assert arr2.global_shape == (11, 28)

    # Check local shape
    assert arr2.local_shape == (l0, 28)

    # Check local offset
    assert arr2.local_offset == (s0, 0)

    # Check axis
    assert arr2.axis == 0
def process(self, sensitivity):
    """Derive an RFI mask from sensitivity data.

    Parameters
    ----------
    sensitivity : containers.SystemSensitivity
        Sensitivity data to derive the RFI mask from.

    Returns
    -------
    rfimask : containers.RFIMask
        RFI mask derived from sensitivity.
    """

    ## Constants
    # Convert MAD to RMS
    MAD_TO_RMS = 1.4826

    # The difference between the exponents in the usual scaling of the
    # RMS (n**0.5) and the scaling used in the sumthreshold algorithm
    # (n**log2(1.5))
    RMS_SCALING_DIFF = np.log2(1.5) - 0.5

    # Distribute over polarisation as we need all times and frequencies
    # available simultaneously
    sensitivity.redistribute("pol")

    # Divide sensitivity to get a radiometer test
    radiometer = sensitivity.measured[:] * tools.invert_no_zero(
        sensitivity.radiometer[:])
    radiometer = mpiarray.MPIArray.wrap(radiometer, axis=1)

    freq = sensitivity.freq
    npol = len(sensitivity.pol)
    nfreq = len(freq)

    static_flag = ~self._static_rfi_mask_hook(freq)

    # Note: use the builtin bool dtype here; the np.bool alias was removed
    # from NumPy.
    madmask = mpiarray.MPIArray((npol, nfreq, len(sensitivity.time)),
                                axis=0, dtype=bool)
    madmask[:] = False
    stmask = mpiarray.MPIArray((npol, nfreq, len(sensitivity.time)),
                               axis=0, dtype=bool)
    stmask[:] = False

    for li, ii in madmask.enumerate(axis=0):

        # Only process this polarisation if we should be including it,
        # otherwise skip and let it be implicitly set to False (i.e. not
        # masked)
        if self.include_pol and sensitivity.pol[ii] not in self.include_pol:
            continue

        # Initial flag on weights equal to zero.
        origflag = sensitivity.weight[:, ii] == 0.0

        # Remove median at each frequency, if asked.
        if self.remove_median:
            for ff in range(nfreq):
                radiometer[ff, li] -= np.median(
                    radiometer[ff, li][~origflag[ff]].view(np.ndarray))

        # Combine weights with static flag
        start_flag = origflag | static_flag[:, None]

        # Obtain MAD and TV masks
        this_madmask, tvmask = self._mad_tv_mask(radiometer[:, li],
                                                 start_flag, freq)

        # Combine MAD and TV masks
        madmask[li] = this_madmask | tvmask

        # Add TV channels to ST start flag.
        start_flag = start_flag | tvmask

        # Determine initial threshold
        med = np.median(radiometer[:, li][~start_flag].view(np.ndarray))
        mad = np.median(
            abs(radiometer[:, li][~start_flag].view(np.ndarray) - med))
        threshold1 = (mad * MAD_TO_RMS * self.start_threshold_sigma *
                      self.max_m**RMS_SCALING_DIFF)

        # SumThreshold mask
        stmask[li] = rfi.sumthreshold(
            radiometer[:, li],
            self.max_m,
            start_flag=start_flag,
            threshold1=threshold1,
            correct_for_missing=True,
        )

    # Perform an OR (.any) along the pol axis and reform into an MPIArray
    # along the freq axis
    madmask = mpiarray.MPIArray.wrap(madmask.redistribute(1).any(0), 0)
    stmask = mpiarray.MPIArray.wrap(stmask.redistribute(1).any(0), 0)

    # Pick which of the MAD or SumThreshold mask to use (or blend them)
    if self.mask_type == "mad":
        finalmask = madmask

    elif self.mask_type == "sumthreshold":
        finalmask = stmask

    else:
        # Combine ST and MAD masks
        madtimes = self._combine_st_mad_hook(sensitivity.time)
        finalmask = stmask
        finalmask[:, madtimes] = madmask[:, madtimes]

    # Gather all parts of the mask onto every rank
    finalmask = mpiarray.MPIArray.wrap(finalmask, 0).allgather()

    # Apply scale invariant rank (SIR) operator, if asked for.
    if self.sir:
        finalmask = self._apply_sir(finalmask, static_flag)

    # Create container to hold mask
    rfimask = containers.RFIMask(axes_from=sensitivity)
    rfimask.mask[:] = finalmask

    return rfimask
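# --- Illustrative sketch (not part of draco) --------------------------------
# The initial SumThreshold level above combines two facts: for Gaussian data
# the RMS is roughly 1.4826 times the median absolute deviation (MAD), and
# the SumThreshold windows scale as n**log2(1.5) rather than the usual
# n**0.5, hence the max_m**(log2(1.5) - 0.5) factor. A standalone version of
# that computation (names are illustrative):
import numpy as np

def initial_threshold(data, flag, nsigma, max_m):
    """Hypothetical helper mirroring the threshold1 computation above."""
    good = data[~flag]
    med = np.median(good)
    mad = np.median(np.abs(good - med))
    return mad * 1.4826 * nsigma * max_m ** (np.log2(1.5) - 0.5)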
def process(self, mmodes):
    """Make a map from the given m-modes.

    Parameters
    ----------
    mmodes : containers.MModes

    Returns
    -------
    map : containers.Map
    """
    from cora.util import hputil

    # Fetch various properties
    bt = self.beamtransfer
    lmax = bt.telescope.lmax
    mmax = min(bt.telescope.mmax, len(mmodes.index_map['m']) - 1)
    nfreq = len(mmodes.index_map['freq'])  # bt.telescope.nfreq

    def find_key(key_list, key):
        try:
            # Wrap in list() first: in Python 3, map returns an iterator,
            # which has no .index method
            return list(map(tuple, key_list)).index(tuple(key))
        except TypeError:
            return list(key_list).index(key)
        except ValueError:
            return None

    # Figure out mapping between the frequencies
    bt_freq = self.beamtransfer.telescope.frequencies
    mm_freq = mmodes.index_map['freq']['centre']

    freq_ind = [find_key(bt_freq, mf) for mf in mm_freq]

    # Trim off excess m-modes
    mmodes.redistribute('freq')

    m_array = mmodes.vis[:(mmax + 1)]
    m_array = m_array.redistribute(axis=0)

    m_weight = mmodes.weight[:(mmax + 1)]
    m_weight = m_weight.redistribute(axis=0)

    # Create array to store alms in.
    alm = mpiarray.MPIArray((nfreq, 4, lmax + 1, mmax + 1), axis=3,
                            dtype=np.complex128, comm=mmodes.comm)
    alm[:] = 0.0

    # Loop over all m's and solve from m-mode visibilities to alms.
    for mi, m in m_array.enumerate(axis=0):
        for fi in range(nfreq):
            v = m_array[mi, :, fi].view(np.ndarray)
            a = alm[fi, ..., mi].view(np.ndarray)
            Ni = m_weight[mi, :, fi].view(np.ndarray)

            # Solve using the mapped beam-transfer frequency index
            # (freq_ind was otherwise computed but never used)
            a[:] = self._solve_m(m, freq_ind[fi], v, Ni)

    # Redistribute back over frequency
    alm = alm.redistribute(axis=0)

    # Copy into square alm array for transform
    almt = mpiarray.MPIArray((nfreq, 4, lmax + 1, lmax + 1),
                             dtype=np.complex128, axis=0, comm=mmodes.comm)
    almt[..., :(mmax + 1)] = alm
    alm = almt

    # Perform spherical harmonic transform to map space
    maps = hputil.sphtrans_inv_sky(alm, self.nside)
    maps = mpiarray.MPIArray.wrap(maps, axis=0)

    m = containers.Map(nside=self.nside, axes_from=mmodes, comm=mmodes.comm)
    m.map[:] = maps

    return m
def test_global_getslice(self):

    rank = mpiutil.rank
    size = mpiutil.size

    darr = mpiarray.MPIArray((size * 5, 20), axis=0)

    # Initialise the distributed array
    for li, gi in darr.enumerate(axis=0):
        darr[li] = 10 * (10 * rank + li) + np.arange(20)

    # Construct numpy array which should be equivalent to the global array
    whole_array = 10 * (
        10 * np.arange(4.0)[:, np.newaxis] + np.arange(5.0)[np.newaxis, :]
    ).flatten()[:, np.newaxis] + np.arange(20)[np.newaxis, :]

    # Extract the section for each rank distributed along axis=0
    local_array = whole_array[(rank * 5):((rank + 1) * 5)]
    # Extract the correct section for each rank distributed along axis=1
    local_array_T = whole_array[:, (rank * 5):((rank + 1) * 5)]

    # Check that these are the same
    assert (local_array == darr).all()

    # Check a simple slice on the non-parallel axis
    arr = darr.global_slice[:, 3:5]
    res = local_array[:, 3:5]

    assert isinstance(arr, mpiarray.MPIArray)
    assert (arr == res).all()

    # Check a single element extracted from the non-parallel axis
    arr = darr.global_slice[:, 3]
    res = local_array[:, 3]
    assert (arr == res).all()

    # These tests depend on the size being at least 2 (the expected results
    # are hard-coded for up to 4 ranks).
    if size > 1:

        # Check a slice on the parallel axis
        arr = darr.global_slice[:7, 3:5]

        res = {
            0: local_array[:, 3:5],
            1: local_array[:2, 3:5],
            2: None,
            3: None,
        }

        assert arr == res[rank] if arr is None else (arr == res[rank]).all()

        # Check a single element from the parallel axis
        arr = darr.global_slice[7, 3:5]
        res = {0: None, 1: local_array[2, 3:5], 2: None, 3: None}
        assert arr == res[rank] if arr is None else (arr == res[rank]).all()

        # Check a slice on the redistributed parallel axis
        darr_T = darr.redistribute(axis=1)
        arr = darr_T.global_slice[3:5, :7]
        res = {
            0: local_array_T[3:5, :],
            1: local_array_T[3:5, :2],
            2: None,
            3: None,
        }
        assert arr == res[rank] if arr is None else (arr == res[rank]).all()

    # Check a slice that removes an axis
    darr = mpiarray.MPIArray((10, 20, size * 5), axis=2)
    dslice = darr.global_slice[:, 0, :]

    assert dslice.global_shape == (10, size * 5)
    assert dslice.local_shape == (10, 5)

    # Check ellipsis and slice at the end
    darr = mpiarray.MPIArray((size * 5, 20, 10), axis=0)
    dslice = darr.global_slice[..., 4:9]

    assert dslice.global_shape == (size * 5, 20, 5)
    assert dslice.local_shape == (5, 20, 5)

    # Check slice that goes off the end of the axis
    darr = mpiarray.MPIArray((size, 136, 2048), axis=0)
    dslice = darr.global_slice[..., 2007:2087]

    assert dslice.global_shape == (size, 136, 41)
    assert dslice.local_shape == (1, 136, 41)
def _generate_phase(self, time):
    ntime = len(time)
    freq = self.freq
    nfreq = len(freq)

    # Generate the correlation function
    cf_delay = self._corr_func(self.corr_length_delay, self.sigma_delay)

    # Check if we are simulating relative delays or common mode delays
    if self.sim_type == "relative":
        n_realisations = self.ninput_local

        # Generate delay fluctuations
        self.delay_error = gain.generate_fluctuations(
            time, cf_delay, n_realisations, self._prev_time, self._prev_delay)

        gain_phase = (2.0 * np.pi * freq[:, np.newaxis, np.newaxis] * 1e6 *
                      self.delay_error[np.newaxis, :, :] / np.sqrt(self.ndays))

    if self.sim_type == "common_mode_cyl":
        n_realisations = 1
        ninput = self.ninput_global

        # Generates as many random delay errors as there are cylinders
        if self.comm.rank == 0:
            if self.common_mode_type == "sinusoidal":
                P1 = self.sinusoidal_period[0]
                P2 = self.sinusoidal_period[1]
                omega1 = 2 * np.pi / P1
                omega2 = 2 * np.pi / P2

                delay_error = (self.sigma_delay *
                               (np.sin(omega1 * time) -
                                np.sin(omega2 * time))[np.newaxis, :])

            elif self.common_mode_type == "random":
                delay_error = gain.generate_fluctuations(
                    time,
                    cf_delay,
                    n_realisations,
                    self._prev_time,
                    self._prev_delay,
                )
        else:
            delay_error = None

        # Broadcast to other ranks
        self.delay_error = self.comm.bcast(delay_error, root=0)

        # Split frequencies over processes.
        lfreq, sfreq, efreq = mpiutil.split_local(nfreq)

        # Create an array to hold all inputs, which are common-mode within
        # a cylinder
        gain_phase = np.zeros((lfreq, ninput, ntime), dtype=complex)
        # Since we have 2 cylinders, populate half of them with a delay.
        # TODO: generalize this for 3 or even 4 cylinders in the future.
        gain_phase[:, ninput // self.ncyl:, :] = (
            2.0 * np.pi * freq[sfreq:efreq, np.newaxis, np.newaxis] * 1e6 *
            self.delay_error[np.newaxis, :, :] / np.sqrt(self.ndays))

        gain_phase = mpiarray.MPIArray.wrap(gain_phase, axis=0, comm=self.comm)
        # Redistribute over input to match rest of the code
        gain_phase = gain_phase.redistribute(axis=1)
        gain_phase = gain_phase.view(np.ndarray)

    if self.sim_type == "common_mode_iceboard":
        nchannel = self.nchannel
        ninput = self.ninput_global
        # Number of boards
        nboards = ninput // nchannel

        # Generates as many random delay errors as there are iceboards
        if self.comm.rank == 0:
            delay_error = gain.generate_fluctuations(
                time, cf_delay, nboards, self._prev_time, self._prev_delay)
        else:
            delay_error = None

        # Broadcast to other ranks
        self.delay_error = self.comm.bcast(delay_error, root=0)

        # Calculate the corresponding phase by multiplying with frequencies
        phase = (2.0 * np.pi * freq[:, np.newaxis, np.newaxis] * 1e6 *
                 self.delay_error[np.newaxis, :] / np.sqrt(self.ndays))

        # Create an array to hold all inputs, which are common-mode within
        # one iceboard
        gain_phase = mpiarray.MPIArray((nfreq, ninput, ntime), axis=1,
                                       dtype=np.complex128, comm=self.comm)
        gain_phase[:] = 0.0

        # Loop over inputs and group common-mode phases on every board
        for il, ig in gain_phase.enumerate(axis=1):
            # Get the board number bi
            bi = int(ig / nchannel)
            gain_phase[:, il] = phase[:, bi]

        gain_phase = gain_phase.view(np.ndarray)

    self._prev_delay = self.delay_error
    self._prev_time = time

    return gain_phase
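# --- Illustrative sketch (not part of draco) --------------------------------
# All three branches above turn a delay error tau into a phase via
# phi = 2*pi*f*tau / sqrt(ndays), with the 1e6 factor suggesting frequencies
# in MHz converted to Hz against a delay in seconds (the units are an
# assumption inferred from that factor). A standalone version of the
# relation:
import numpy as np

def delay_to_phase(freq_mhz, delay_s, ndays=1):
    """Hypothetical helper: phase of shape (nfreq, ninput, ntime)
    from delays of shape (ninput, ntime)."""
    return (2.0 * np.pi * freq_mhz[:, np.newaxis, np.newaxis] * 1e6
            * delay_s[np.newaxis, :, :] / np.sqrt(ndays))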
def process(self, map_):
    """Simulate a SiderealStream.

    Parameters
    ----------
    map_ : :class:`containers.Map`
        The sky map to process into a sidereal stream. Frequencies in the
        map must match the Beam Transfer matrices.

    Returns
    -------
    ss : SiderealStream
        Stacked sidereal day.
    feeds : list of CorrInput
        Description of the feeds simulated.
    """
    if self.done:
        raise pipeline.PipelineStopIteration

    # Read in telescope system
    bt = self.beamtransfer
    tel = self.telescope

    lmax = tel.lmax
    mmax = tel.mmax
    nfreq = tel.nfreq
    npol = tel.num_pol_sky

    lfreq, sfreq, efreq = mpiutil.split_local(nfreq)
    lm, sm, em = mpiutil.split_local(mmax + 1)

    # Set the minimum resolution required for the sky.
    ntime = 2 * mmax + 1

    freqmap = map_.index_map["freq"][:]
    row_map = map_.map[:]

    if (tel.frequencies != freqmap["centre"]).any():
        raise ValueError(
            "Frequencies in map do not match those in Beam Transfers.")

    # Calculate the alm's for the local sections
    row_alm = hputil.sphtrans_sky(row_map, lmax=lmax).reshape(
        (lfreq, npol * (lmax + 1), lmax + 1))

    # Trim off excess m's and wrap into MPIArray
    row_alm = row_alm[..., :(mmax + 1)]
    row_alm = mpiarray.MPIArray.wrap(row_alm, axis=0)

    # Perform the transposition to distribute different m's across processes.
    # Neat tip: passing a shorter value for the number of columns trims the
    # array at the same time.
    col_alm = row_alm.redistribute(axis=2)

    # Transpose and reshape to shift m index first.
    col_alm = col_alm.transpose((2, 0, 1)).reshape((None, nfreq, npol, lmax + 1))

    # Create storage for visibility data
    vis_data = mpiarray.MPIArray((mmax + 1, nfreq, bt.ntel), axis=0,
                                 dtype=np.complex128)
    vis_data[:] = 0.0

    # Iterate over m's local to this process and generate the corresponding
    # visibilities
    for mp, mi in vis_data.enumerate(axis=0):
        vis_data[mp] = bt.project_vector_sky_to_telescope(
            mi, col_alm[mp].view(np.ndarray))

    # Rearrange axes such that frequency is last (as we want to divide
    # frequencies across processors)
    row_vis = vis_data.transpose((0, 2, 1))

    # Parallel transpose to get all m's back onto the same processor
    col_vis_tmp = row_vis.redistribute(axis=2)
    col_vis_tmp = col_vis_tmp.reshape((mmax + 1, 2, tel.npairs, None))

    # Transpose the local section to make the m's the last axis and unwrap the
    # positive and negative m at the same time.
    col_vis = mpiarray.MPIArray((tel.npairs, nfreq, ntime), axis=1,
                                dtype=np.complex128)
    col_vis[:] = 0.0
    col_vis[..., 0] = col_vis_tmp[0, 0]

    for mi in range(1, mmax + 1):
        col_vis[..., mi] = col_vis_tmp[mi, 0]
        col_vis[..., -mi] = col_vis_tmp[mi, 1].conj()  # Conjugate only (not (-1)**m - see paper)

    del col_vis_tmp

    # Fourier transform m-modes back to get final timestream.
    vis_stream = np.fft.ifft(col_vis, axis=-1) * ntime
    vis_stream = vis_stream.reshape((tel.npairs, lfreq, ntime))
    vis_stream = vis_stream.transpose((1, 0, 2)).copy()

    # Try and fetch out the feed index and info from the telescope object.
    try:
        feed_index = tel.input_index
    except AttributeError:
        feed_index = tel.nfeed

    # Construct a product map
    prod_map = np.zeros(tel.uniquepairs.shape[0],
                        dtype=[("input_a", int), ("input_b", int)])
    prod_map["input_a"] = tel.uniquepairs[:, 0]
    prod_map["input_b"] = tel.uniquepairs[:, 1]

    # Construct container and set visibility data
    sstream = containers.SiderealStream(
        freq=freqmap,
        ra=ntime,
        input=feed_index,
        prod=prod_map,
        distributed=True,
        comm=map_.comm,
    )
    sstream.vis[:] = mpiarray.MPIArray.wrap(vis_stream, axis=0)
    sstream.weight[:] = 1.0

    self.done = True

    return sstream
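# --- Illustrative sketch (not part of draco) --------------------------------
# The m-mode unwrapping loop above packs positive m's into the low FFT bins
# and conjugated negative m's into the mirrored bins, so that
# np.fft.ifft(...) * ntime recovers the periodic sidereal time stream. A
# single-baseline, single-frequency version of that step:
import numpy as np

def mmodes_to_timestream(pos_m, neg_m, ntime):
    """Hypothetical helper: invert packed m-modes to a time series."""
    mmax = len(pos_m) - 1
    spec = np.zeros(ntime, dtype=np.complex128)
    spec[0] = pos_m[0]
    for mi in range(1, mmax + 1):
        spec[mi] = pos_m[mi]
        spec[-mi] = neg_m[mi].conj()  # conjugate only (no (-1)**m), as above
    return np.fft.ifft(spec) * ntime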
def test_io(self):

    import h5py

    # Cleanup directories
    fname = "testdset.hdf5"

    if mpiutil.rank0 and os.path.exists(fname):
        os.remove(fname)

    mpiutil.barrier()

    gshape = (19, 17)

    ds = mpiarray.MPIArray(gshape, dtype=np.int64)

    ga = np.arange(np.prod(gshape)).reshape(gshape)
    l0, s0, e0 = mpiutil.split_local(gshape[0])
    ds[:] = ga[s0:e0]

    ds.redistribute(axis=1).to_hdf5(fname, "testds", create=True)

    if mpiutil.rank0:
        with h5py.File(fname, "r") as f:
            h5ds = f["testds"][:]
            assert (h5ds == ga).all()

    ds2 = mpiarray.MPIArray.from_hdf5(fname, "testds")
    assert (ds2 == ds).all()

    mpiutil.barrier()

    # Check that reading over another distributed axis works
    ds3 = mpiarray.MPIArray.from_hdf5(fname, "testds", axis=1)
    assert ds3.shape[0] == gshape[0]
    assert ds3.shape[1] == mpiutil.split_local(gshape[1])[0]
    ds3 = ds3.redistribute(axis=0)
    assert (ds3 == ds).all()
    mpiutil.barrier()

    # Check a read with an arbitrary slice in there. This only checks the
    # shape is correct.
    ds4 = mpiarray.MPIArray.from_hdf5(fname, "testds", axis=1,
                                      sel=(np.s_[3:10:2], np.s_[1:16:3]))
    assert ds4.shape[0] == 4
    assert ds4.shape[1] == mpiutil.split_local(5)[0]
    mpiutil.barrier()

    # Check the read with a slice along the axis being read
    ds5 = mpiarray.MPIArray.from_hdf5(fname, "testds", axis=1,
                                      sel=(np.s_[:], np.s_[3:15:2]))
    assert ds5.shape[0] == gshape[0]
    assert ds5.shape[1] == mpiutil.split_local(6)[0]
    ds5 = ds5.redistribute(axis=0)
    assert (ds5 == ds[:, 3:15:2]).all()
    mpiutil.barrier()

    # Check a read distributed along axis=0 with a slice along the other axis
    ds6 = mpiarray.MPIArray.from_hdf5(fname, "testds", axis=0,
                                      sel=(np.s_[:], np.s_[3:15:2]))
    ds6 = ds6.redistribute(axis=0)
    assert (ds6 == ds[:, 3:15:2]).all()
    mpiutil.barrier()

    if mpiutil.rank0 and os.path.exists(fname):
        os.remove(fname)
def process(self, mmodes):
    """Make a map from the given m-modes.

    Parameters
    ----------
    mmodes : containers.MModes

    Returns
    -------
    map : containers.Map
    """
    from cora.util import hputil

    # Fetch various properties
    bt = self.beamtransfer
    lmax = bt.telescope.lmax
    mmax = min(bt.telescope.mmax, len(mmodes.index_map["m"]) - 1)
    nfreq = len(mmodes.index_map["freq"])  # bt.telescope.nfreq

    # Figure out mapping between the frequencies
    bt_freq = self.beamtransfer.telescope.frequencies
    mm_freq = mmodes.index_map["freq"]["centre"]

    freq_ind = tools.find_keys(bt_freq, mm_freq, require_match=True)

    # Trim off excess m-modes
    mmodes.redistribute("freq")

    m_array = mmodes.vis[:(mmax + 1)]
    m_array = m_array.redistribute(axis=0)

    m_weight = mmodes.weight[:(mmax + 1)]
    m_weight = m_weight.redistribute(axis=0)

    # Create array to store alms in.
    alm = mpiarray.MPIArray(
        (nfreq, 4, lmax + 1, mmax + 1),
        axis=3,
        dtype=np.complex128,
        comm=mmodes.comm,
    )
    alm[:] = 0.0

    # Loop over all m's and solve from m-mode visibilities to alms.
    for mi, m in m_array.enumerate(axis=0):

        self.log.debug("Processing m=%i (local %i/%i)", m, mi + 1,
                       m_array.local_shape[0])

        # Get and cache the beam transfer matrix, but trim off any l < m.
        # if self.bt_cache is None:
        #     self.bt_cache = (m, bt.beam_m(m))
        #     self.log.debug("Cached beamtransfer for m=%i", m)

        for fi in range(nfreq):
            v = m_array[mi, :, fi].view(np.ndarray)
            a = alm[fi, ..., mi].view(np.ndarray)
            Ni = m_weight[mi, :, fi].view(np.ndarray)

            a[:] = self._solve_m(m, freq_ind[fi], v, Ni)

    self.bt_cache = None

    # Redistribute back over frequency
    alm = alm.redistribute(axis=0)

    # Copy into square alm array for transform
    almt = mpiarray.MPIArray(
        (nfreq, 4, lmax + 1, lmax + 1),
        dtype=np.complex128,
        axis=0,
        comm=mmodes.comm,
    )
    almt[..., :(mmax + 1)] = alm
    alm = almt

    # Perform spherical harmonic transform to map space
    maps = hputil.sphtrans_inv_sky(alm, self.nside)
    maps = mpiarray.MPIArray.wrap(maps, axis=0)

    m = containers.Map(nside=self.nside, axes_from=mmodes, comm=mmodes.comm)
    m.map[:] = maps

    return m