def subtract_signal(self, tod, cworld, rank, masksampler, mapsampler,
                    local_intervals):
    """Subtract a signal estimate from the TOD and update the flags
    for noise estimation.
    """
    start_signal_subtract = MPI.Wtime()
    for det in tod.local_dets:
        if rank == 0:
            print('Subtracting signal for {}'.format(det), flush=True)
            tod.cache.report()
        fsample = self._rimo[det].fsample
        epsilon = self._rimo[det].epsilon
        eta = (1 - epsilon) / (1 + epsilon)
        signal = tod.local_signal(det, name=self._signal)
        flags = tod.local_flags(det, name=self._flags)
        flags &= self._detmask
        for ival in local_intervals:
            ind = slice(ival.first, ival.last + 1)
            sig = signal[ind]
            flg = flags[ind]
            quat = tod.local_pointing(det)[ind]
            if self._pol:
                theta, phi, psi = qa.to_angles(quat)
                iw = np.ones_like(theta)
                qw = eta * np.cos(2 * psi)
                uw = eta * np.sin(2 * psi)
                iquw = np.column_stack([iw, qw, uw])
            else:
                theta, phi = qa.to_position(quat)
            if masksampler is not None:
                maskflg = masksampler.at(theta, phi) < 0.5
                flg[maskflg] |= 255
            if mapsampler is not None:
                if self._pol:
                    bg = mapsampler.atpol(theta, phi, iquw)
                else:
                    bg = mapsampler.at(theta, phi)
                if self._calibrate_signal_estimate:
                    good = flg == 0
                    ngood = np.sum(good)
                    if ngood > 1:
                        # Fit an offset and gain of the signal estimate
                        # against the unflagged samples.
                        templates = np.vstack([np.ones(ngood), bg[good]])
                        invcov = np.dot(templates, templates.T)
                        cov = np.linalg.inv(invcov)
                        proj = np.dot(templates, sig[good])
                        coeff = np.dot(cov, proj)
                        bg = coeff[0] + coeff[1] * bg
                sig -= bg
    cworld.barrier()
    stop_signal_subtract = MPI.Wtime()
    if rank == 0:
        print('TOD signal-subtracted in {:.2f} s'.format(
            stop_signal_subtract - start_signal_subtract), flush=True)
    return fsample
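
# The calibration step above solves a tiny least-squares problem: fit an
# offset and a gain of the map-based estimate against the unflagged
# samples.  A minimal, self-contained sketch of the same template
# regression on synthetic data (not part of the pipeline):

import numpy as np

def fit_offset_and_gain(sig, bg):
    """Return (offset, gain) minimizing |sig - offset - gain * bg|^2."""
    templates = np.vstack([np.ones(sig.size), bg])
    invcov = np.dot(templates, templates.T)   # 2x2 normal matrix
    proj = np.dot(templates, sig)             # right-hand-side projection
    return np.linalg.solve(invcov, proj)

rng = np.random.default_rng(0)
bg = rng.normal(size=1000)
sig = 0.5 + 2.0 * bg + 0.01 * rng.normal(size=1000)
offset, gain = fit_offset_and_gain(sig, bg)
# offset ~ 0.5, gain ~ 2.0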
def _get_h_n(self, data, n, detector):
    """Compute and store the next order of h_n.
    """
    for obs in data.obs:
        tod = obs["tod"]
        focalplane = obs["focalplane"]
        # HWP angle is not yet used but will be needed soon
        try:
            hwpang = tod.local_hwp_angle()
        except Exception:
            hwpang = None
        if n == 1:
            nsample = tod.local_samples[1]
            hweight = np.ones(nsample, dtype=np.float64)
            tod.cache.put(self.hweight_name, hweight, replace=True)
        for det in tod.local_dets:
            if det != detector:
                continue
            cos_1_name = "{}_{}".format(self.cos_1_prefix, det)
            sin_1_name = "{}_{}".format(self.sin_1_prefix, det)
            cos_n_name = "{}_{}".format(self.cos_n_prefix, det)
            sin_n_name = "{}_{}".format(self.sin_n_prefix, det)
            if n == 1:
                quats = tod.local_pointing(det)
                theta, phi, psi = qa.to_angles(quats)
                cos_n_new = np.cos(psi)
                sin_n_new = np.sin(psi)
                tod.cache.put(cos_1_name, cos_n_new, replace=True)
                tod.cache.put(sin_1_name, sin_n_new, replace=True)
                # Seed the recurrence: for n == 1, cos(n * psi) and
                # sin(n * psi) are just cos(psi) and sin(psi).  Without
                # this the n == 2 pass would reference a cache object
                # that does not exist yet.
                tod.cache.put(cos_n_name, cos_n_new.copy(), replace=True)
                tod.cache.put(sin_n_name, sin_n_new.copy(), replace=True)
                weight_name = "{}_{}".format(self.hweight_name, det)
                tod.cache.add_alias(weight_name, self.hweight_name)
            else:
                # Use the angle sum identities to evaluate the
                # next cos(n * psi) and sin(n * psi)
                cos_1 = tod.cache.reference(cos_1_name)
                sin_1 = tod.cache.reference(sin_1_name)
                cos_n_old = tod.cache.reference(cos_n_name).copy()
                sin_n_old = tod.cache.reference(sin_n_name).copy()
                cos_n_new = cos_n_old * cos_1 - sin_n_old * sin_1
                sin_n_new = sin_n_old * cos_1 + cos_n_old * sin_1
                tod.cache.put(cos_n_name, cos_n_new, replace=True)
                tod.cache.put(sin_n_name, sin_n_new, replace=True)
    return
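
# The recurrence above is the standard angle-sum identity
#     cos(n x) = cos((n-1) x) cos(x) - sin((n-1) x) sin(x)
#     sin(n x) = sin((n-1) x) cos(x) + cos((n-1) x) sin(x)
# A short standalone self-check of the recurrence against direct
# evaluation (not part of the operator):

import numpy as np

psi = np.linspace(0, 2 * np.pi, 1000)
cos_1, sin_1 = np.cos(psi), np.sin(psi)
cos_n, sin_n = cos_1.copy(), sin_1.copy()
for n in range(2, 5):
    # Tuple assignment evaluates both right-hand sides before updating,
    # which is why the operator above copies the old arrays first.
    cos_n, sin_n = (cos_n * cos_1 - sin_n * sin_1,
                    sin_n * cos_1 + cos_n * sin_1)
    assert np.allclose(cos_n, np.cos(n * psi))
    assert np.allclose(sin_n, np.sin(n * psi))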
def exec(self, data):
    """Generate atmosphere timestreams.

    This iterates over all observations and detectors and generates
    the atmosphere timestreams.

    Args:
        data (toast.Data): The distributed data.

    """
    autotimer = timing.auto_timer(type(self).__name__)
    group = data.comm.group
    for obs in data.obs:
        try:
            obsname = obs['name']
        except Exception:
            obsname = 'observation'
        prefix = '{} : {} : '.format(group, obsname)
        tod = self._get_from_obs('tod', obs)
        comm = tod.mpicomm
        obsindx = self._get_from_obs('id', obs)
        telescope = self._get_from_obs('telescope_id', obs)
        site = self._get_from_obs('site_id', obs)
        altitude = self._get_from_obs('altitude', obs)
        weather = self._get_from_obs('weather', obs)
        fp_radius = np.radians(self._get_from_obs('fpradius', obs))

        # Get the observation time span and initialize the weather
        # object if one is provided.
        times = tod.local_times()
        tmin = times[0]
        tmax = times[-1]
        tmin_tot = comm.allreduce(tmin, op=MPI.MIN)
        tmax_tot = comm.allreduce(tmax, op=MPI.MAX)
        weather.set(site, self._realization, tmin_tot)

        # The random number generator accepts a key and a counter,
        # each made of two 64bit integers.  Following tod_math.py we set
        #     key1 = realization * 2^32 + telescope * 2^16 + component
        #     key2 = site * 2^16 + obsindx
        #     counter1 = currently unused (0)
        #     counter2 = sample in stream (incremented internally in the
        #                atm code)
        key1 = self._realization * 2 ** 32 + telescope * 2 ** 16 \
            + self._component
        key2 = site * 2 ** 16 + obsindx
        counter1 = 0
        counter2 = 0

        if self._freq is not None:
            absorption = atm_get_absorption_coefficient(
                altitude, weather.air_temperature,
                weather.surface_pressure, weather.pwv, self._freq)
            loading = atm_get_atmospheric_loading(
                altitude, weather.air_temperature,
                weather.surface_pressure, weather.pwv, self._freq)
            tod.meta['loading'] = loading
        else:
            absorption = None

        if self._cachedir is None:
            cachedir = None
        else:
            # The number of atmospheric realizations can be large.  Use
            # sub-directories under cachedir.
            subdir = str(int((obsindx % 1000) // 100))
            subsubdir = str(int((obsindx % 100) // 10))
            subsubsubdir = str(obsindx % 10)
            cachedir = os.path.join(self._cachedir, subdir, subsubdir,
                                    subsubsubdir)
            if comm.rank == 0:
                try:
                    os.makedirs(cachedir)
                except FileExistsError:
                    pass
            comm.Barrier()

        if comm.rank == 0:
            print(prefix + 'Setting up atmosphere simulation',
                  flush=self._flush)
        comm.Barrier()

        # Cache the output common flags
        common_ref = tod.local_common_flags(self._common_flag_name)

        # Read the extent of the AZ/EL boresight pointing, and use that
        # to compute the range of angles needed for simulating the slab.
        (min_az_bore, max_az_bore, min_el_bore, max_el_bore) = tod.scan_range
        # print("boresight scan range = {}, {}, {}, {}".format(
        #     min_az_bore, max_az_bore, min_el_bore, max_el_bore))

        # Use a fixed focal plane radius so that changing the actual
        # set of detectors will not affect the simulated atmosphere.
        elfac = 1 / np.cos(max_el_bore + fp_radius)
        azmin = min_az_bore - fp_radius * elfac
        azmax = max_az_bore + fp_radius * elfac
        if azmin < -2 * np.pi:
            azmin += 2 * np.pi
            azmax += 2 * np.pi
        elif azmax > 2 * np.pi:
            azmin -= 2 * np.pi
            azmax -= 2 * np.pi
        elmin = min_el_bore - fp_radius
        elmax = max_el_bore + fp_radius

        azmin = comm.allreduce(azmin, op=MPI.MIN)
        azmax = comm.allreduce(azmax, op=MPI.MAX)
        elmin = comm.allreduce(elmin, op=MPI.MIN)
        elmax = comm.allreduce(elmax, op=MPI.MAX)

        if elmin < 0 or elmax > np.pi / 2:
            raise RuntimeError(
                'Error in CES elevation: elmin = {:.2f}, elmax = {:.2f}'
                ''.format(elmin, elmax))

        comm.Barrier()

        # Loop over the time span in "wind_time"-sized chunks.
        # wind_time is intended to reflect the correlation length
        # in the atmospheric noise.
        tmin = tmin_tot
        istart = 0
        while tmin < tmax_tot:
            while times[istart] < tmin:
                istart += 1
            tmax = tmin + self._wind_time
            if tmax < tmax_tot:
                # Extend the scan to the next turnaround
                istop = istart
                while istop < times.size and times[istop] < tmax:
                    istop += 1
                # Advance until the turnaround bit is set.  (The original
                # tested `common_ref[istop] | tod.TURNAROUND == 0`, which
                # can never be true for a nonzero flag bit.)
                while (istop < times.size and
                       (common_ref[istop] & tod.TURNAROUND) == 0):
                    istop += 1
                if istop < times.size:
                    tmax = times[istop]
                else:
                    tmax = tmax_tot
            else:
                tmax = tmax_tot
                istop = times.size
            ind = slice(istart, istop)
            nind = istop - istart

            if self._report_timing:
                comm.Barrier()
                tstart = MPI.Wtime()

            comm.Barrier()
            if comm.rank == 0:
                print(prefix + 'Instantiating the atmosphere for t = {}'
                      ''.format(tmin - tmin_tot), flush=self._flush)
            comm.Barrier()

            T0_center = weather.air_temperature
            wx = weather.west_wind
            wy = weather.south_wind
            w_center = np.sqrt(wx ** 2 + wy ** 2)
            wdir_center = np.arctan2(wy, wx)

            sim = atm_sim_alloc(
                azmin, azmax, elmin, elmax, tmin, tmax,
                self._lmin_center, self._lmin_sigma,
                self._lmax_center, self._lmax_sigma,
                w_center, 0, wdir_center, 0,
                self._z0_center, self._z0_sigma, T0_center, 0,
                self._zatm, self._zmax,
                self._xstep, self._ystep, self._zstep,
                self._nelem_sim_max, self._verbosity,
                comm, self._gangsize,
                key1, key2, counter1, counter2, cachedir)
            if sim == 0:
                raise RuntimeError(prefix + 'Failed to allocate simulation')

            if self._report_timing:
                comm.Barrier()
                tstop = MPI.Wtime()
                if comm.rank == 0 and tstop - tstart > 1:
                    print(prefix + 'OpSimAtmosphere: Initialized '
                          'atmosphere in {:.2f} s'.format(tstop - tstart),
                          flush=self._flush)
                tstart = tstop

            comm.Barrier()
            use_cache = cachedir is not None
            if comm.rank == 0:
                fname = os.path.join(
                    cachedir, '{}_{}_{}_{}_metadata.txt'.format(
                        key1, key2, counter1, counter2))
                if use_cache and os.path.isfile(fname):
                    print(prefix + 'Loading the atmosphere for t = {} '
                          'from {}'.format(tmin - tmin_tot, fname),
                          flush=self._flush)
                    cached = True
                else:
                    print(prefix + 'Simulating the atmosphere for t = {}'
                          ''.format(tmin - tmin_tot), flush=self._flush)
                    cached = False

            err = atm_sim_simulate(sim, use_cache)
            if err != 0:
                raise RuntimeError(prefix + 'Simulation failed.')

            # Advance the sample counter in case wind_time broke the
            # observation in parts
            counter2 += 100000000

            if self._report_timing:
                comm.Barrier()
                tstop = MPI.Wtime()
                if comm.rank == 0 and tstop - tstart > 1:
                    if cached:
                        op = 'Loaded'
                    else:
                        op = 'Simulated'
                    print(prefix + 'OpSimAtmosphere: {} atmosphere in '
                          '{:.2f} s'.format(op, tstop - tstart),
                          flush=self._flush)
                tstart = tstop

            if self._verbosity > 0:
                self._plot_snapshots(sim, prefix, obsname, azmin, azmax,
                                     elmin, elmax, tmin, tmax, comm)

            nsamp = tod.local_samples[1]

            if self._report_timing:
                comm.Barrier()
                tstart = MPI.Wtime()

            if comm.rank == 0:
                print(prefix + 'Observing the atmosphere',
                      flush=self._flush)

            for det in tod.local_dets:
                # Cache the output signal
                cachename = '{}_{}'.format(self._out, det)
                if tod.cache.exists(cachename):
                    ref = tod.cache.reference(cachename)
                else:
                    ref = tod.cache.create(cachename, np.float64, (nsamp,))

                # Cache the output flags
                flag_ref = tod.local_flags(det, self._flag_name)

                if self._apply_flags:
                    good = np.logical_and(
                        (common_ref[ind] & self._common_flag_mask) == 0,
                        (flag_ref[ind] & self._flag_mask) == 0)
                    ngood = np.sum(good)
                    if ngood == 0:
                        continue
                    azelquat = tod.read_pntg(
                        detector=det, local_start=istart, n=nind,
                        azel=True)[good]
                    atmdata = np.zeros(ngood, dtype=np.float64)
                else:
                    ngood = nind
                    azelquat = tod.read_pntg(
                        detector=det, local_start=istart, n=nind, azel=True)
                    atmdata = np.zeros(nind, dtype=np.float64)

                # Convert Az/El quaternion of the detector back into
                # angles for the simulation.
                theta, phi, _ = qa.to_angles(azelquat)
                # Azimuth is measured in the opposite direction
                # than longitude
                az = 2 * np.pi - phi
                el = np.pi / 2 - theta

                if np.ptp(az) < np.pi:
                    azmin_det = np.amin(az)
                    azmax_det = np.amax(az)
                else:
                    # Scanning across the zero azimuth.
                    azmin_det = np.amin(az[az > np.pi]) - 2 * np.pi
                    azmax_det = np.amax(az[az < np.pi])
                elmin_det = np.amin(el)
                elmax_det = np.amax(el)
                if ((not (azmin <= azmin_det and azmax_det <= azmax) and
                     not (azmin <= azmin_det - 2 * np.pi and
                          azmax_det - 2 * np.pi <= azmax)) or
                        not (elmin <= elmin_det and elmin_det <= elmax)):
                    raise RuntimeError(
                        prefix + 'Detector Az/El: [{:.5f}, {:.5f}], '
                        '[{:.5f}, {:.5f}] is not contained in '
                        '[{:.5f}, {:.5f}], [{:.5f}, {:.5f}]'
                        ''.format(azmin_det, azmax_det, elmin_det,
                                  elmax_det, azmin, azmax, elmin, elmax))

                # Integrate detector signal
                err = atm_sim_observe(sim, times[ind], az, el, atmdata,
                                      ngood, 0)
                if err != 0:
                    # Observing failed
                    print(prefix + 'OpSimAtmosphere: Observing FAILED. '
                          'det = {}, rank = {}'.format(det, comm.rank),
                          flush=self._flush)
                    atmdata[:] = 0
                    flag_ref[ind] = 255

                if self._gain:
                    atmdata *= self._gain

                if absorption is not None:
                    # Apply the frequency-dependent absorption coefficient
                    atmdata *= absorption

                if self._apply_flags:
                    ref[ind][good] += atmdata
                else:
                    ref[ind] += atmdata

                del ref

            err = atm_sim_free(sim)
            if err != 0:
                raise RuntimeError(prefix + 'Failed to free simulation.')

            if self._report_timing:
                comm.Barrier()
                tstop = MPI.Wtime()
                if comm.rank == 0 and tstop - tstart > 1:
                    print(prefix + 'OpSimAtmosphere: Observed atmosphere '
                          'in {:.2f} s'.format(tstop - tstart),
                          flush=self._flush)

            tmin = tmax
    return
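
# The (key1, key2, counter1, counter2) quadruple above packs several
# independent indices into the 2x64-bit key of a counter-based RNG, so
# that every (realization, telescope, component, site, observation)
# combination draws from an independent stream.  A standalone sketch of
# the same packing and its inverse; the helper names are illustrative,
# and the round trip only holds while each field fits in its slot
# (component and obsindx below 2^16):

def pack_keys(realization, telescope, component, site, obsindx):
    key1 = realization * 2 ** 32 + telescope * 2 ** 16 + component
    key2 = site * 2 ** 16 + obsindx
    return key1, key2

def unpack_keys(key1, key2):
    realization, rem = divmod(key1, 2 ** 32)
    telescope, component = divmod(rem, 2 ** 16)
    site, obsindx = divmod(key2, 2 ** 16)
    return realization, telescope, component, site, obsindx

assert unpack_keys(*pack_keys(12, 3, 2, 1, 54321)) == (12, 3, 2, 1, 54321)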
def tod_to_frames(
        tod,
        start_frame,
        n_frames,
        frame_offsets,
        frame_sizes,
        cache_signal=None,
        cache_flags=None,
        cache_common_flags=None,
        copy_common=None,
        copy_detector=None,
        mask_flag_common=255,
        mask_flag=255,
        units=None,
        dets=None,
        compress=False,
):
    """Gather all data from the distributed TOD cache for a set of frames.

    Args:
        tod (toast.TOD): instance of a TOD class.
        start_frame (int): the first frame index.
        n_frames (int): the number of frames.
        frame_offsets (array_like): list of the first samples of all frames.
        frame_sizes (list): list of the number of samples in each frame.
        cache_signal (str): if None, read signal from TOD.  Otherwise use
            this cache prefix for the detector signal timestreams.
        cache_flags (str): if None, read det flags from TOD.  Otherwise use
            this cache prefix for the detector flag timestreams.
        cache_common_flags (str): if None, read common flags from TOD.
            Otherwise use this cache prefix.
        copy_common (tuple): (cache name, G3 type, frame name) of each
            extra common field to copy from cache.
        copy_detector (tuple): (cache name prefix, G3 type, G3 map type,
            frame name) of each distributed detector field (excluding the
            "signal") to copy from cache.
        mask_flag_common (int): Bitmask to apply to common flags.
        mask_flag (int): Bitmask to apply to per-detector flags.
        units: G3 units of the detector data.
        dets (list): List of detectors to include in the frame.  If None,
            use all of the detectors in the TOD object.
        compress (bool or dict): If True or a dictionary of compression
            parameters, store the timestreams as FLAC-compressed, 24-bit
            integers instead of uncompressed doubles.

    Returns:
        (list): List of frames on rank zero.  Other processes have a list
            of None values.

    """
    comm = tod.mpicomm
    rank = 0
    if comm is not None:
        rank = comm.rank
    comm_row = tod.grid_comm_row

    # Detector names
    if dets is None:
        detnames = tod.detectors
    else:
        detnames = []
        use_dets = set(dets)
        for det in tod.detectors:
            if det in use_dets:
                detnames.append(det)

    # Local sample range
    local_first = tod.local_samples[0]
    nlocal = tod.local_samples[1]

    # The process grid
    detranks, sampranks = tod.grid_size
    rankdet, ranksamp = tod.grid_ranks

    def get_local_cache(prow, field, cacheoff, ncache):
        """Read a local slice of a cache field.
        """
        mtype = None
        pdata = None
        nnz = 0
        if rankdet == prow:
            ref = tod.cache.reference(field)
            nnz = 1
            if (len(ref.shape) > 1) and (ref.shape[1] > 0):
                nnz = ref.shape[1]
            if comm is not None:
                if ref.dtype == np.dtype(np.float64):
                    mtype = MPI.DOUBLE
                elif ref.dtype == np.dtype(np.int64):
                    mtype = MPI.INT64_T
                elif ref.dtype == np.dtype(np.int32):
                    mtype = MPI.INT32_T
                elif ref.dtype == np.dtype(np.uint8):
                    mtype = MPI.UINT8_T
                else:
                    msg = "Cannot use cache field {} of type {}"\
                        .format(field, ref.dtype)
                    raise RuntimeError(msg)
            if cacheoff is not None:
                pdata = ref.flatten()[nnz * cacheoff:
                                      nnz * (cacheoff + ncache)]
            else:
                pdata = np.zeros(0, dtype=ref.dtype)
        return (pdata, nnz, mtype)

    def gather_field(prow, pdata, nnz, mpitype, cacheoff, ncache, tag):
        """Gather a single timestream buffer to the root process.
        """
        is_none = pdata is None
        all_none = comm.allreduce(is_none, MPI.LAND)
        if all_none:
            # This situation arises at least when gathering HWP angle
            # from the LAT
            return None
        gdata = None
        # We are going to allreduce this later, so that every process
        # knows the dimensions of the field.
        gproc = 0
        allnnz = 0

        # Size of the local buffer
        pz = 0
        if pdata is not None:
            pz = len(pdata)

        if rankdet == prow:
            psizes = None
            if comm_row is None:
                psizes = [pz]
            else:
                psizes = comm_row.gather(pz, root=0)
            disp = None
            totsize = None
            if ranksamp == 0:
                # We are the process collecting the gathered data.
                allnnz = nnz
                gproc = rank
                # Compute the displacements into the receive buffer.
                disp = [0]
                for ps in psizes[:-1]:
                    last = disp[-1]
                    disp.append(last + ps)
                totsize = np.sum(psizes)
                # allocate receive buffer
                gdata = np.zeros(totsize, dtype=pdata.dtype)
            if comm_row is None:
                # Single process in this row; just copy into the
                # receive buffer.  (The original copied in the wrong
                # direction, overwriting pdata with zeros.)
                gdata[:] = pdata
            else:
                comm_row.Gatherv(pdata, [gdata, psizes, disp, mpitype],
                                 root=0)
            del disp
            del psizes

        # Now send this data to the root process of the whole
        # communicator.  Only one process (the first one in process row
        # "prow") has data to send.

        if comm is not None:
            # All processes find out which one did the gather
            gproc = comm.allreduce(gproc, MPI.SUM)
            # All processes find out the field dimensions
            allnnz = comm.allreduce(allnnz, MPI.SUM)

        mtag = 10 * tag
        rdata = None
        if gproc == 0:
            if gdata is not None:
                if allnnz == 1:
                    rdata = gdata
                else:
                    rdata = gdata.reshape((-1, allnnz))
        else:
            # Data not yet on rank 0
            if rank == 0:
                # Receive data from the first process in this row
                rtype = comm.recv(source=gproc, tag=(mtag + 1))
                rsize = comm.recv(source=gproc, tag=(mtag + 2))
                rdata = np.zeros(rsize, dtype=np.dtype(rtype))
                comm.Recv(rdata, source=gproc, tag=mtag)
                # Reshape if needed
                if allnnz > 1:
                    rdata = rdata.reshape((-1, allnnz))
            elif (rank == gproc):
                # Send our data
                comm.send(gdata.dtype.char, dest=0, tag=(mtag + 1))
                comm.send(len(gdata), dest=0, tag=(mtag + 2))
                comm.Send(gdata, 0, tag=mtag)
        return rdata

    # For efficiency, we are going to gather the data for all frames at
    # once.  Then we will split those up when doing the write.

    # Frame offsets relative to the memory buffers we are gathering
    fdataoff = [0]
    for f in frame_sizes[:-1]:
        last = fdataoff[-1]
        fdataoff.append(last + f)

    # The list of frames - only on the root process.
    fdata = None
    if rank == 0:
        fdata = [
            core3g.G3Frame(core3g.G3FrameType.Scan)
            for f in range(n_frames)
        ]
    else:
        fdata = [None for f in range(n_frames)]

    def split_field(data, g3t, framefield, mapfield=None, g3units=units,
                    times=None):
        """Split a gathered data buffer into frames - only on root process.
        """
        if data is None:
            return
        if g3t == core3g.G3VectorTime:
            # Special case for time values stored as int64_t, but
            # wrapped in a class.
            for f in range(n_frames):
                dataoff = fdataoff[f]
                ndata = frame_sizes[f]
                g3times = list()
                for t in range(ndata):
                    g3times.append(core3g.G3Time(data[dataoff + t]))
                if mapfield is None:
                    fdata[f][framefield] = core3g.G3VectorTime(g3times)
                else:
                    fdata[f][framefield][mapfield] = \
                        core3g.G3VectorTime(g3times)
                del g3times
        elif g3t == so3g.IntervalsInt:
            # Flag vector is written as a simple boolean.
            for f in range(n_frames):
                dataoff = fdataoff[f]
                ndata = frame_sizes[f]
                # Extract flag vector (0 or 1) for this frame
                frame_flags = (data[dataoff:dataoff + ndata] != 0).astype(int)
                # Convert bit 0 to an IntervalsInt.
                ival = so3g.IntervalsInt.from_mask(frame_flags, 1)[0]
                if mapfield is None:
                    fdata[f][framefield] = ival
                else:
                    fdata[f][framefield][mapfield] = ival
        elif g3t == core3g.G3Timestream:
            if times is None:
                raise RuntimeError(
                    "You must provide the time stamp vector with a "
                    "Timestream object")
            for f in range(n_frames):
                dataoff = fdataoff[f]
                ndata = frame_sizes[f]
                timeslice = times[cacheoff + dataoff:
                                  cacheoff + dataoff + ndata]
                tstart = timeslice[0] * 1e8
                tstop = timeslice[-1] * 1e8
                if mapfield is None:
                    if g3units is None:
                        fdata[f][framefield] = \
                            g3t(data[dataoff:dataoff + ndata])
                    else:
                        fdata[f][framefield] = \
                            g3t(data[dataoff:dataoff + ndata], g3units)
                    fdata[f][framefield].start = core3g.G3Time(tstart)
                    fdata[f][framefield].stop = core3g.G3Time(tstop)
                else:
                    # Individual detector data.  The only fields that
                    # we (optionally) compress.
                    if g3units is None:
                        tstream = g3t(data[dataoff:dataoff + ndata])
                    else:
                        tstream = g3t(data[dataoff:dataoff + ndata],
                                      g3units)
                    if compress and \
                            "compressor_gain_" + framefield in fdata[f]:
                        (tstream, gain, offset) = recode_timestream(
                            tstream, compress)
                        fdata[f]["compressor_gain_" +
                                 framefield][mapfield] = gain
                        fdata[f]["compressor_offset_" +
                                 framefield][mapfield] = offset
                    fdata[f][framefield][mapfield] = tstream
                    fdata[f][framefield][mapfield].start = \
                        core3g.G3Time(tstart)
                    fdata[f][framefield][mapfield].stop = \
                        core3g.G3Time(tstop)
        else:
            # The bindings of G3Vector seem to only work with
            # lists.  This is probably horribly inefficient.
            for f in range(n_frames):
                dataoff = fdataoff[f]
                ndata = frame_sizes[f]
                if len(data.shape) == 1:
                    fdata[f][framefield] = \
                        g3t(data[dataoff:dataoff + ndata].tolist())
                else:
                    # We have a 2D quantity
                    fdata[f][framefield] = \
                        g3t(data[dataoff:dataoff + ndata, :].flatten()
                            .tolist())
        return

    # Compute the overlap of all frames with the local process.  We want
    # to find the full sample range that this process overlaps the total
    # set of frames.
    cacheoff = None
    ncache = 0
    for f in range(n_frames):
        # Compute overlap of the frame with the local samples.
        fcacheoff, froff, nfr = s3utils.local_frame_indices(
            local_first, nlocal, frame_offsets[f], frame_sizes[f])
        if fcacheoff is not None:
            if cacheoff is None:
                cacheoff = fcacheoff
                ncache = nfr
            else:
                ncache += nfr

    # Now gather the full sample data one field at a time.  The root
    # process splits up the results into frames.

    # First collect boresight data.  In addition to quaternions for the
    # Az/El pointing, we convert this back into angles that follow the
    # specs for telescope pointing.

    times = None
    if rankdet == 0:
        times = tod.local_times()
    if comm is not None:
        times = gather_field(0, times, 1, MPI.DOUBLE, cacheoff, ncache, 0)

    bore = None
    if rankdet == 0:
        bore = tod.read_boresight(local_start=cacheoff, n=ncache).flatten()
    if comm is not None:
        bore = gather_field(0, bore, 4, MPI.DOUBLE, cacheoff, ncache, 0)
    if rank == 0:
        split_field(bore.reshape(-1, 4), core3g.G3VectorDouble,
                    "boresight_radec")

    bore = None
    if rankdet == 0:
        bore = tod.read_boresight_azel(local_start=cacheoff,
                                       n=ncache).flatten()
    if comm is not None:
        bore = gather_field(0, bore, 4, MPI.DOUBLE, cacheoff, ncache, 1)
    if rank == 0:
        split_field(bore.reshape(-1, 4), core3g.G3VectorDouble,
                    "boresight_azel")

    corotator_angle = None
    if rankdet == 0:
        cache_name = "corotator_angle_deg"
        if tod.cache.exists(cache_name):
            corotator_angle = np.radians(tod.cache.reference(cache_name))
    if comm is not None:
        corotator_angle = gather_field(0, corotator_angle, 1, MPI.DOUBLE,
                                       cacheoff, ncache, 0)
    if rank == 0:
        split_field(corotator_angle, core3g.G3VectorDouble,
                    "corotator_angle", times=times)

    if rank == 0:
        for f in range(n_frames):
            fdata[f]["boresight"] = core3g.G3TimestreamMap()
        ang_theta, ang_phi, ang_psi = qa.to_angles(bore)
        # Astronomical convention for azimuth is opposite to spherical
        # coordinate phi.
        ang_az = -ang_phi
        ang_el = (np.pi / 2.0) - ang_theta
        ang_roll = ang_psi
        split_field(ang_az, core3g.G3Timestream, "boresight", "az", None,
                    times=times)
        split_field(ang_el, core3g.G3Timestream, "boresight", "el", None,
                    times=times)
        split_field(ang_roll, core3g.G3Timestream, "boresight", "roll",
                    None, times=times)

    hwp_angle = None
    if rankdet == 0:
        hwp_angle = tod.local_hwp_angle()
    if comm is not None:
        hwp_angle = gather_field(0, hwp_angle, 1, MPI.DOUBLE, cacheoff,
                                 ncache, 0)
    if rank == 0:
        split_field(hwp_angle, core3g.G3VectorDouble, "hwp_angle",
                    times=times)

    # Now the position and velocity information

    pos = None
    if rankdet == 0:
        pos = tod.read_position(local_start=cacheoff, n=ncache).flatten()
    if comm is not None:
        pos = gather_field(0, pos, 3, MPI.DOUBLE, cacheoff, ncache, 2)
    if rank == 0:
        split_field(pos.reshape(-1, 3), core3g.G3VectorDouble,
                    "site_position")

    vel = None
    if rankdet == 0:
        vel = tod.read_velocity(local_start=cacheoff, n=ncache).flatten()
    if comm is not None:
        vel = gather_field(0, vel, 3, MPI.DOUBLE, cacheoff, ncache, 3)
    if rank == 0:
        split_field(vel.reshape(-1, 3), core3g.G3VectorDouble,
                    "site_velocity")

    # Now handle the common flags - either from a cache object or from
    # the TOD methods

    cflags = None
    nnz = 1
    if cache_common_flags is None:
        if rankdet == 0:
            cflags = np.array(
                tod.read_common_flags(local_start=cacheoff, n=ncache))
            cflags &= mask_flag_common
    else:
        cflags, nnz, mtype = get_local_cache(0, cache_common_flags,
                                             cacheoff, ncache)
        if cflags is not None:
            cflags &= mask_flag_common
    if comm is not None:
        mtype = MPI.UINT8_T
        cflags = gather_field(0, cflags, nnz, mtype, cacheoff, ncache, 4)
    if rank == 0:
        split_field(cflags, so3g.IntervalsInt, "flags_common")

    # Any extra common fields

    if comm is not None:
        comm.barrier()

    if copy_common is not None:
        for cindx, (cname, g3typ, fname) in enumerate(copy_common):
            cdata, nnz, mtype = get_local_cache(0, cname, cacheoff, ncache)
            cdata = gather_field(0, cdata, nnz, mtype, cacheoff, ncache,
                                 cindx)
            if rank == 0:
                split_field(cdata, g3typ, fname)

    # Now read all per-detector quantities.

    # For each detector field, processes which have the detector in
    # their local_dets should be in the same process row.

    if rank == 0:
        for f in range(n_frames):
            fdata[f]["signal"] = core3g.G3TimestreamMap()
            if compress:
                fdata[f]["compressor_gain_signal"] = core3g.G3MapDouble()
                fdata[f]["compressor_offset_signal"] = core3g.G3MapDouble()
            fdata[f]["flags"] = so3g.MapIntervalsInt()
            if copy_detector is not None:
                for cname, g3typ, g3maptyp, fnm in copy_detector:
                    fdata[f][fnm] = g3maptyp()
                    if compress:
                        fdata[f]["compressor_gain_" + fnm] = \
                            core3g.G3MapDouble()
                        fdata[f]["compressor_offset_" + fnm] = \
                            core3g.G3MapDouble()

    for dindx, dname in enumerate(detnames):
        drow = -1
        # Demodulation may have synthesized new detector names
        dnames = []
        for x in tod.local_dets:
            if x.endswith(dname):
                dnames.append(x)
        if dnames:
            drow = rankdet

        # As a sanity check, verify that every process which has this
        # detector is in the same process row.
        rowcheck = None
        if comm is None:
            rowcheck = [drow]
        else:
            rowcheck = comm.gather(drow, root=0)

        prow = 0
        if rank == 0:
            rc = np.array([x for x in rowcheck if (x >= 0)],
                          dtype=np.int32)
            prow = np.max(rc)
            if np.min(rc) != prow:
                msg = "Processes with detector {} are not in the "\
                    "same row of the process grid\n".format(dname)
                sys.stderr.write(msg)
                if comm is not None:
                    # mpi4py exposes Abort(), not abort()
                    comm.Abort()

        # Every process finds out which process row is participating.
        if comm is not None:
            prow = comm.bcast(prow, root=0)
            all_dnames = comm.allgather(dnames)
            dnames = list(set(np.concatenate(all_dnames).flat))

        # "signal"

        for dname in dnames:
            detdata = None
            nnz = 1
            if cache_signal is None:
                if rankdet == prow:
                    detdata = tod.local_signal(dname)[cacheoff:
                                                      cacheoff + ncache]
            else:
                cache_det = "{}_{}".format(cache_signal, dname)
                detdata, nnz, mtype = get_local_cache(prow, cache_det,
                                                      cacheoff, ncache)
            if comm is not None:
                mtype = MPI.DOUBLE
                detdata = gather_field(prow, detdata, nnz, mtype,
                                       cacheoff, ncache, dindx)
            if rank == 0:
                split_field(detdata, core3g.G3Timestream, "signal",
                            mapfield=dname, times=times)

        # "flags"

        for dname in dnames:
            detdata = None
            nnz = 1
            if cache_flags is None:
                if rankdet == prow:
                    detdata = tod.local_flags(dname)[cacheoff:
                                                     cacheoff + ncache]
                    detdata &= mask_flag
            else:
                cache_det = "{}_{}".format(cache_flags, dname)
                detdata, nnz, mtype = get_local_cache(prow, cache_det,
                                                      cacheoff, ncache)
                if detdata is not None:
                    detdata &= mask_flag
            if comm is not None:
                mtype = MPI.UINT8_T
                detdata = gather_field(prow, detdata, nnz, mtype,
                                       cacheoff, ncache, dindx)
            if rank == 0:
                split_field(detdata, so3g.IntervalsInt, "flags",
                            mapfield=dname)

        # Now copy any additional fields.

        for dname in dnames:
            if copy_detector is not None:
                for cname, g3typ, g3maptyp, fnm in copy_detector:
                    cache_det = "{}_{}".format(cname, dname)
                    detdata, nnz, mtype = get_local_cache(
                        prow, cache_det, cacheoff, ncache)
                    if comm is not None:
                        detdata = gather_field(prow, detdata, nnz, mtype,
                                               cacheoff, ncache, dindx)
                    if rank == 0:
                        split_field(detdata, g3typ, fnm, mapfield=dname,
                                    times=times)

    return fdata
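
# recode_timestream() is referenced above but not defined in this
# section.  What follows is a minimal sketch of the gain/offset recoding
# such a helper presumably performs before FLAC compression: rescale
# doubles into the signed 24-bit integer range and return the gain and
# offset needed to invert the mapping on read.  The function name and
# rounding policy here are illustrative assumptions, not the so3g
# implementation.

import numpy as np

def recode_to_int24(data):
    """Return (ints, gain, offset) with data ~= ints / gain + offset."""
    offset = 0.5 * (np.amax(data) + np.amin(data))
    amp = np.amax(np.abs(data - offset))
    maxint = 2 ** 23 - 1          # signed 24-bit range
    gain = maxint / amp if amp > 0 else 1.0
    ints = np.round((data - offset) * gain).astype(np.int32)
    return ints, gain, offset

x = np.random.default_rng(1).normal(size=100)
ints, gain, offset = recode_to_int24(x)
assert np.allclose(ints / gain + offset, x, atol=1.0 / gain)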
def exec(self, data):
    for obs in data.obs:
        tod = obs["tod"]
        focalplane = obs["focalplane"]
        # Get HWP angle
        chi = tod.local_hwp_angle()
        for det in tod.local_dets:
            signal = tod.local_signal(det, self._name)
            band = focalplane[det]["band"]
            freq = {
                "SAT_f030": "027",
                "SAT_f040": "039",
                "SAT_f090": "093",
                "SAT_f150": "145",
                "SAT_f230": "225",
                "SAT_f290": "278",
            }[band]

            # Get incident angle
            det_quat = focalplane[det]["quat"]
            det_theta, det_phi, det_psi = qa.to_angles(det_quat)

            # Get observing elevation
            azelquat = tod.read_pntg(detector=det, azel=True)
            el = np.pi / 2 - qa.to_position(azelquat)[0]

            # Get polarization weights
            iweights = np.ones(signal.size)
            qweights = np.cos(2 * det_psi)
            uweights = np.sin(2 * det_psi)

            # Interpolate HWPSS to incident angle
            theta_deg = np.degrees(det_theta)
            itheta_high = np.searchsorted(self.thetas, theta_deg)
            itheta_low = itheta_high - 1
            theta_low = self.thetas[itheta_low]
            theta_high = self.thetas[itheta_high]
            r = (theta_deg - theta_low) / (theta_high - theta_low)

            transmission = (
                (1 - r) * self.all_stokes[freq]["transmission"][itheta_low]
                + r * self.all_stokes[freq]["transmission"][itheta_high])
            reflection = (
                (1 - r) * self.all_stokes[freq]["reflection"][itheta_low]
                + r * self.all_stokes[freq]["reflection"][itheta_high])
            emission = (
                (1 - r) * self.all_stokes[freq]["emission"][itheta_low]
                + r * self.all_stokes[freq]["emission"][itheta_high])

            # Scale HWPSS for observing elevation
            el_ref = np.radians(50)
            scale = np.sin(el_ref) / np.sin(el)

            # Observe HWPSS with the detector
            iquv = (transmission + reflection).T
            iquss = (
                iweights * np.interp(chi, self.chis, iquv[0]) +
                qweights * np.interp(chi, self.chis, iquv[1]) +
                uweights * np.interp(chi, self.chis, iquv[2])) * scale
            iquv = emission.T
            iquss += (
                iweights * np.interp(chi, self.chis, iquv[0]) +
                qweights * np.interp(chi, self.chis, iquv[1]) +
                uweights * np.interp(chi, self.chis, iquv[2]))
            iquss -= np.median(iquss)

            # Co-add with the cached signal
            signal += iquss
    return
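
# The incident-angle interpolation above is a plain linear blend between
# the two tabulated angles that bracket the detector's theta.  A
# standalone sketch of the same searchsorted-based lookup, using a
# synthetic one-column table (names and values are illustrative):

import numpy as np

thetas = np.array([0.0, 5.0, 10.0, 15.0])   # tabulated angles in degrees
table = thetas ** 2                          # toy tabulated quantity

def interp_theta(theta_deg):
    ihigh = np.searchsorted(thetas, theta_deg)
    ilow = ihigh - 1
    r = (theta_deg - thetas[ilow]) / (thetas[ihigh] - thetas[ilow])
    return (1 - r) * table[ilow] + r * table[ihigh]

assert np.isclose(interp_theta(7.5), 0.5 * (25.0 + 100.0))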
def exec(self, data):
    xaxis, _, zaxis = np.eye(3, dtype=np.float64)
    nullquat = np.array([0, 0, 0, 1], dtype=np.float64)

    # Read velocity

    for obs in data.obs:
        tod = obs["tod"]
        vel = tod.local_velocity(margin=self._margin)
        if self._single_precision:
            vel = vel.astype(np.float32)
        vel = tod.cache.put(tod.VELOCITY_NAME, vel, replace=True)
        del vel
    for obs in data.obs:
        tod = obs["tod"]
        tod.purge_eff_cache()

    # Read position

    if self._keep_pos:
        for obs in data.obs:
            tod = obs["tod"]
            try:
                # LFI pointing files are missing the position
                pos = tod.local_position(margin=self._margin)
            except Exception:
                pos = None
            if pos is not None and self._single_precision:
                pos = pos.astype(np.float32)
            tod.cache.put(tod.POSITION_NAME, pos, replace=True)
            del pos
        for obs in data.obs:
            tod = obs["tod"]
            tod.purge_eff_cache()

    # Read phase

    if self._keep_phase:
        for obs in data.obs:
            tod = obs["tod"]
            phase = tod.local_phase(margin=self._margin)
            if self._single_precision:
                phase = phase.astype(np.float32)
            tod.cache.put(tod.PHASE_NAME, phase, replace=True)
            del phase
        for obs in data.obs:
            tod = obs["tod"]
            tod.purge_eff_cache()

    # Generate attitude

    for obs in data.obs:
        tod = obs["tod"]
        nsamp = tod.local_samples[1]
        commonflags = tod.local_common_flags(margin=self._margin)
        satquats = None
        for detector in tod.local_dets:
            if detector[-1] in "01" and detector[-2] != "-":
                # Single diode, share pointing with the other diode
                # in the same radiometer arm
                if detector[-1] == "1":
                    # We may not need to process this diode
                    detector2 = detector[:-1] + "0"
                    if detector2 in tod.local_dets:
                        continue
                det = to_radiometer(detector)
                diodes = to_diodes(det)
            else:
                det = detector
                diodes = []

            vel = tod.local_velocity()
            if len(vel) != nsamp + 2 * self._margin:
                raise Exception(
                    "Cached velocities do not include margins.")

            if satquats is None:
                pdata, satquats = tod.read_pntg(
                    detector=detector, margin=self._margin,
                    deaberrate=True, velocity=vel, full_output=True)
            else:
                pdata = tod.read_pntg(
                    detector=detector, margin=self._margin,
                    deaberrate=True, velocity=vel, satquats=satquats)
            del vel

            if len(pdata) != nsamp + 2 * self._margin:
                raise Exception("Cached quats do not include margins.")

            if self._apply_flags:
                flags = tod.local_flags(det, margin=self._margin)
                if len(flags) != nsamp + 2 * self._margin:
                    raise Exception(
                        "Cached flags do not include margins.")
                totflags = flags != 0
                totflags[commonflags != 0] = True

            if self._single_precision:
                cachename = "{}_{}".format(tod.POINTING_NAME, det)
                tod.cache.put(cachename, pdata.astype(np.float32),
                              replace=True)
                for diode in diodes:
                    alias = "{}_{}".format(tod.POINTING_NAME, diode)
                    tod.cache.add_alias(alias, cachename)

            if self._apply_flags:
                pdata[totflags, :] = nullquat

            theta, phi, psi = qa.to_angles(pdata)
            pixels = hp.ang2pix(self._nside, theta, phi, nest=True)
            if self._apply_flags:
                pixels[totflags] = -1

            epsilon = self.RIMO[det].epsilon
            eta = (1 - epsilon) / (1 + epsilon)
            weights = None
            if self._mode == "I":
                weights = np.ones([nsamp + 2 * self._margin, 1],
                                  dtype=np.float64)
            elif self._mode == "IQU":
                Ival = np.ones(nsamp + 2 * self._margin)
                Qval = eta * np.cos(2 * psi)
                Uval = eta * np.sin(2 * psi)
                weights = np.column_stack((Ival, Qval, Uval))
            else:
                raise RuntimeError("invalid mode for Planck Pointing")

            pixelsname = "{}_{}".format(tod.PIXEL_NAME, det)
            if self._single_precision:
                tod.cache.put(pixelsname, pixels.astype(np.int32),
                              replace=True)
            else:
                tod.cache.put(pixelsname, pixels.astype(np.int64),
                              replace=True)
            for diode in diodes:
                alias = "{}_{}".format(tod.PIXEL_NAME, diode)
                tod.cache.add_alias(alias, pixelsname)

            weightsname = "{}_{}".format(tod.WEIGHT_NAME, det)
            if self._single_precision:
                tod.cache.put(weightsname, weights.astype(np.float32),
                              replace=True)
            else:
                tod.cache.put(weightsname, weights.astype(np.float64),
                              replace=True)
            for diode in diodes:
                alias = "{}_{}".format(tod.WEIGHT_NAME, diode)
                tod.cache.add_alias(alias, weightsname)

    for obs in data.obs:
        tod = obs["tod"]
        tod.purge_eff_cache()

    if not self._keep_vel:
        for obs in data.obs:
            tod = obs["tod"]
            tod.cache.destroy(tod.VELOCITY_NAME)
    return
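
# The pointing weights above implement the usual linear detector
# response model: w = [1, eta * cos(2 psi), eta * sin(2 psi)], with the
# polarization efficiency eta = (1 - epsilon) / (1 + epsilon) derived
# from the cross-polar leakage epsilon.  A standalone sketch (the
# epsilon value is illustrative):

import numpy as np

def iqu_weights(psi, epsilon):
    """IQU pointing weights for detector position angles psi (radians)."""
    eta = (1 - epsilon) / (1 + epsilon)
    return np.column_stack((np.ones_like(psi),
                            eta * np.cos(2 * psi),
                            eta * np.sin(2 * psi)))

w = iqu_weights(np.linspace(0, np.pi, 5), epsilon=0.05)
# A perfect polarimeter (epsilon == 0) has eta == 1; epsilon == 1 gives
# eta == 0, i.e. no Q/U response.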
def tod_to_frames(tod, start_frame, n_frames, frame_offsets, frame_sizes,
                  cache_signal=None, cache_flags=None,
                  cache_common_flags=None, copy_common=None,
                  copy_detector=None, mask_flag_common=255, mask_flag=255,
                  units=None):
    """Gather all data from the distributed TOD cache for a set of frames.

    Args:
        tod (toast.TOD): instance of a TOD class.
        start_frame (int): the first frame index.
        n_frames (int): the number of frames.
        frame_offsets (array_like): list of the first samples of all frames.
        frame_sizes (list): list of the number of samples in each frame.
        cache_signal (str): if None, read signal from TOD.  Otherwise use
            this cache prefix for the detector signal timestreams.
        cache_flags (str): if None, read det flags from TOD.  Otherwise use
            this cache prefix for the detector flag timestreams.
        cache_common_flags (str): if None, read common flags from TOD.
            Otherwise use this cache prefix.
        copy_common (tuple): (cache name, G3 type, frame name) of each
            extra common field to copy from cache.
        copy_detector (tuple): (cache name prefix, G3 type, G3 map type,
            frame name) of each distributed detector field (excluding the
            "signal") to copy from cache.
        mask_flag_common (int): Bitmask to apply to common flags.
        mask_flag (int): Bitmask to apply to per-detector flags.
        units: G3 units of the detector data.

    Returns:
        (list): List of frames on rank zero.  Other processes have a list
            of None values.

    """
    # Detector names
    detnames = tod.detectors

    # Local sample range
    local_first = tod.local_samples[0]
    nlocal = tod.local_samples[1]

    # The process grid
    detranks, sampranks = tod.grid_size
    rankdet, ranksamp = tod.grid_ranks

    def get_local_cache(prow, fld, cacheoff, ncache):
        """Read a local slice of a cache field.
        """
        mtype = None
        pdata = None
        # Initialize nnz so processes outside row "prow" can still
        # return a well-defined tuple.
        nnz = 0
        if rankdet == prow:
            ref = tod.cache.reference(fld)
            nnz = 1
            if (len(ref.shape) > 1) and (ref.shape[1] > 0):
                nnz = ref.shape[1]
            if ref.dtype == np.dtype(np.float64):
                mtype = MPI.DOUBLE
            elif ref.dtype == np.dtype(np.int64):
                mtype = MPI.INT64_T
            elif ref.dtype == np.dtype(np.int32):
                mtype = MPI.INT32_T
            elif ref.dtype == np.dtype(np.uint8):
                mtype = MPI.UINT8_T
            else:
                msg = "Cannot use cache field {} of type {}"\
                    .format(fld, ref.dtype)
                raise RuntimeError(msg)
            if cacheoff is not None:
                pdata = ref.flatten()[nnz * cacheoff:
                                      nnz * (cacheoff + ncache)]
            else:
                pdata = np.zeros(0, dtype=ref.dtype)
        return (pdata, nnz, mtype)

    def gather_field(prow, pdata, nnz, mpitype, cacheoff, ncache, tag):
        """Gather a single timestream buffer to the root process.
        """
        gdata = None
        # We are going to allreduce this later, so that every process
        # knows the dimensions of the field.
        gproc = 0
        allnnz = 0

        # Size of the local buffer (processes outside row "prow" have
        # no data).
        pz = 0
        if pdata is not None:
            pz = len(pdata)

        if rankdet == prow:
            psizes = tod.grid_comm_row.gather(pz, root=0)
            disp = None
            totsize = None
            if ranksamp == 0:
                # We are the process collecting the gathered data.
                allnnz = nnz
                gproc = tod.mpicomm.rank
                # Compute the displacements into the receive buffer.
                disp = [0]
                for ps in psizes[:-1]:
                    last = disp[-1]
                    disp.append(last + ps)
                totsize = np.sum(psizes)
                # allocate receive buffer
                gdata = np.zeros(totsize, dtype=pdata.dtype)
            tod.grid_comm_row.Gatherv(pdata,
                                      [gdata, psizes, disp, mpitype],
                                      root=0)
            del disp
            del psizes

        # Now send this data to the root process of the whole
        # communicator.  Only one process (the first one in process row
        # "prow") has data to send.

        # All processes find out which one did the gather
        gproc = tod.mpicomm.allreduce(gproc, MPI.SUM)
        # All processes find out the field dimensions
        allnnz = tod.mpicomm.allreduce(allnnz, MPI.SUM)

        mtag = 10 * tag
        rdata = None
        if gproc == 0:
            if gdata is not None:
                if allnnz == 1:
                    rdata = gdata
                else:
                    rdata = gdata.reshape((-1, allnnz))
        else:
            # Data not yet on rank 0
            if tod.mpicomm.rank == 0:
                # Receive data from the first process in this row
                rtype = tod.mpicomm.recv(source=gproc, tag=(mtag + 1))
                rsize = tod.mpicomm.recv(source=gproc, tag=(mtag + 2))
                rdata = np.zeros(rsize, dtype=np.dtype(rtype))
                tod.mpicomm.Recv(rdata, source=gproc, tag=mtag)
                # Reshape if needed
                if allnnz > 1:
                    rdata = rdata.reshape((-1, allnnz))
            elif (tod.mpicomm.rank == gproc):
                # Send our data
                tod.mpicomm.send(gdata.dtype.char, dest=0, tag=(mtag + 1))
                tod.mpicomm.send(len(gdata), dest=0, tag=(mtag + 2))
                tod.mpicomm.Send(gdata, 0, tag=mtag)
        return rdata

    # For efficiency, we are going to gather the data for all frames at
    # once.  Then we will split those up when doing the write.

    # Frame offsets relative to the memory buffers we are gathering
    fdataoff = [0]
    for f in frame_sizes[:-1]:
        last = fdataoff[-1]
        fdataoff.append(last + f)

    # The list of frames - only on the root process.
    fdata = None
    if tod.mpicomm.rank == 0:
        fdata = [
            core3g.G3Frame(core3g.G3FrameType.Scan)
            for f in range(n_frames)
        ]
    else:
        fdata = [None for f in range(n_frames)]

    def flags_to_intervals(flgs):
        """Convert a flag vector to an interval list.
        """
        groups = [
            [i for i, value in it] for key, it in
            itertools.groupby(enumerate(flgs),
                              key=operator.itemgetter(1))
            if key != 0
        ]
        chunks = list()
        for grp in groups:
            chunks.append([grp[0], grp[-1]])
        return chunks

    def split_field(data, g3t, framefield, mapfield=None, g3units=units):
        """Split a gathered data buffer into frames.
        """
        if tod.mpicomm.rank == 0:
            if g3t == core3g.G3VectorTime:
                # Special case for time values stored as int64_t, but
                # wrapped in a class.
                for f in range(n_frames):
                    dataoff = fdataoff[f]
                    ndata = frame_sizes[f]
                    g3times = list()
                    for t in range(ndata):
                        g3times.append(core3g.G3Time(data[dataoff + t]))
                    if mapfield is None:
                        fdata[f][framefield] = \
                            core3g.G3VectorTime(g3times)
                    else:
                        fdata[f][framefield][mapfield] = \
                            core3g.G3VectorTime(g3times)
                    del g3times
            elif g3t == so3g.IntervalsInt:
                # This means that the data is actually flags
                # and we should convert it into a list of intervals.
                fint = flags_to_intervals(data)
                for f in range(n_frames):
                    dataoff = fdataoff[f]
                    ndata = frame_sizes[f]
                    datalast = dataoff + ndata
                    chunks = list()
                    idomain = (0, ndata - 1)
                    for intr in fint:
                        # Interval sample ranges are defined relative
                        # to the frame itself.
                        cfirst = None
                        clast = None
                        if (intr[0] < datalast) and (intr[1] >= dataoff):
                            # there is some overlap...
                            if intr[0] < dataoff:
                                cfirst = 0
                            else:
                                cfirst = intr[0] - dataoff
                            if intr[1] >= datalast:
                                clast = ndata - 1
                            else:
                                clast = intr[1] - dataoff
                            chunks.append([cfirst, clast])
                    if mapfield is None:
                        if len(chunks) == 0:
                            fdata[f][framefield] = so3g.IntervalsInt()
                        else:
                            fdata[f][framefield] = \
                                so3g.IntervalsInt.from_array(
                                    np.array(chunks, dtype=np.int64))
                        fdata[f][framefield].domain = idomain
                    else:
                        if len(chunks) == 0:
                            fdata[f][framefield][mapfield] = \
                                so3g.IntervalsInt()
                        else:
                            fdata[f][framefield][mapfield] = \
                                so3g.IntervalsInt.from_array(
                                    np.array(chunks, dtype=np.int64))
                        fdata[f][framefield][mapfield].domain = idomain
                del fint
            elif g3t == core3g.G3Timestream:
                for f in range(n_frames):
                    dataoff = fdataoff[f]
                    ndata = frame_sizes[f]
                    if mapfield is None:
                        if g3units is None:
                            fdata[f][framefield] = \
                                g3t(data[dataoff:dataoff + ndata])
                        else:
                            fdata[f][framefield] = \
                                g3t(data[dataoff:dataoff + ndata],
                                    g3units)
                    else:
                        if g3units is None:
                            fdata[f][framefield][mapfield] = \
                                g3t(data[dataoff:dataoff + ndata])
                        else:
                            fdata[f][framefield][mapfield] = \
                                g3t(data[dataoff:dataoff + ndata],
                                    g3units)
            else:
                # The bindings of G3Vector seem to only work with
                # lists.  This is probably horribly inefficient.
                for f in range(n_frames):
                    dataoff = fdataoff[f]
                    ndata = frame_sizes[f]
                    if len(data.shape) == 1:
                        fdata[f][framefield] = \
                            g3t(data[dataoff:dataoff + ndata].tolist())
                    else:
                        # We have a 2D quantity
                        fdata[f][framefield] = \
                            g3t(data[dataoff:dataoff + ndata, :]
                                .flatten().tolist())
        return

    # Compute the overlap of all frames with the local process.  We want
    # to find the full sample range that this process overlaps the total
    # set of frames.
    cacheoff = None
    ncache = 0
    for f in range(n_frames):
        # Compute overlap of the frame with the local samples.
        fcacheoff, froff, nfr = s3utils.local_frame_indices(
            local_first, nlocal, frame_offsets[f], frame_sizes[f])
        if fcacheoff is not None:
            if cacheoff is None:
                cacheoff = fcacheoff
                ncache = nfr
            else:
                ncache += nfr

    # Now gather the full sample data one field at a time.  The root
    # process splits up the results into frames.

    # First collect boresight data.  In addition to quaternions for the
    # Az/El pointing, we convert this back into angles that follow the
    # specs for telescope pointing.

    bore = None
    if rankdet == 0:
        bore = tod.read_boresight(local_start=cacheoff, n=ncache).flatten()
    bore = gather_field(0, bore, 4, MPI.DOUBLE, cacheoff, ncache, 0)
    # Only the root process has the gathered buffer.
    if tod.mpicomm.rank == 0:
        split_field(bore.reshape(-1, 4), core3g.G3VectorDouble,
                    "qboresight_radec")

    bore = None
    if rankdet == 0:
        bore = tod.read_boresight_azel(local_start=cacheoff,
                                       n=ncache).flatten()
    bore = gather_field(0, bore, 4, MPI.DOUBLE, cacheoff, ncache, 1)
    if tod.mpicomm.rank == 0:
        split_field(bore.reshape(-1, 4), core3g.G3VectorDouble,
                    "qboresight_azel")

    if tod.mpicomm.rank == 0:
        for f in range(n_frames):
            fdata[f]["boresight"] = core3g.G3TimestreamMap()
        ang_theta, ang_phi, ang_psi = qa.to_angles(bore)
        ang_az = ang_phi
        ang_el = (np.pi / 2.0) - ang_theta
        ang_roll = ang_psi
        split_field(ang_az, core3g.G3Timestream, "boresight", "az", None)
        split_field(ang_el, core3g.G3Timestream, "boresight", "el", None)
        split_field(ang_roll, core3g.G3Timestream, "boresight", "roll",
                    None)

    # Now the position and velocity information

    pos = None
    if rankdet == 0:
        pos = tod.read_position(local_start=cacheoff, n=ncache).flatten()
    pos = gather_field(0, pos, 3, MPI.DOUBLE, cacheoff, ncache, 2)
    if tod.mpicomm.rank == 0:
        split_field(pos.reshape(-1, 3), core3g.G3VectorDouble,
                    "site_position")

    vel = None
    if rankdet == 0:
        vel = tod.read_velocity(local_start=cacheoff, n=ncache).flatten()
    vel = gather_field(0, vel, 3, MPI.DOUBLE, cacheoff, ncache, 3)
    if tod.mpicomm.rank == 0:
        split_field(vel.reshape(-1, 3), core3g.G3VectorDouble,
                    "site_velocity")

    # Now handle the common flags - either from a cache object or from
    # the TOD methods

    cflags = None
    nnz = 1
    mtype = MPI.UINT8_T
    if cache_common_flags is None:
        if rankdet == 0:
            cflags = tod.read_common_flags(local_start=cacheoff, n=ncache)
            cflags &= mask_flag_common
    else:
        cflags, nnz, mtype = get_local_cache(0, cache_common_flags,
                                             cacheoff, ncache)
        if cflags is not None:
            cflags &= mask_flag_common
    cflags = gather_field(0, cflags, nnz, mtype, cacheoff, ncache, 4)
    split_field(cflags, so3g.IntervalsInt, "flags_common")

    # Any extra common fields

    tod.mpicomm.barrier()
    if copy_common is not None:
        for cindx, (cname, g3typ, fname) in enumerate(copy_common):
            cdata, nnz, mtype = get_local_cache(0, cname, cacheoff, ncache)
            cdata = gather_field(0, cdata, nnz, mtype, cacheoff, ncache,
                                 cindx)
            split_field(cdata, g3typ, fname)

    # Now read all per-detector quantities.

    # For each detector field, processes which have the detector in
    # their local_dets should be in the same process row.

    if tod.mpicomm.rank == 0:
        for f in range(n_frames):
            fdata[f]["signal"] = core3g.G3TimestreamMap()
            fdata[f]["flags"] = so3g.MapIntervalsInt()
            if copy_detector is not None:
                for cname, g3typ, g3maptyp, fnm in copy_detector:
                    fdata[f][fnm] = g3maptyp()

    for dindx, dname in enumerate(detnames):
        drow = -1
        if dname in tod.local_dets:
            drow = rankdet
        # As a sanity check, verify that every process which has this
        # detector is in the same process row.
        rowcheck = tod.mpicomm.gather(drow, root=0)
        prow = 0
        if tod.mpicomm.rank == 0:
            rc = np.array([x for x in rowcheck if (x >= 0)],
                          dtype=np.int32)
            prow = np.max(rc)
            if np.min(rc) != prow:
                msg = "Processes with detector {} are not in the "\
                    "same row of the process grid\n".format(dname)
                sys.stderr.write(msg)
                # mpi4py exposes Abort(), not abort()
                tod.mpicomm.Abort()

        # Every process finds out which process row is participating.
        prow = tod.mpicomm.bcast(prow, root=0)

        # "signal"

        detdata = None
        nnz = 1
        mtype = MPI.DOUBLE
        if cache_signal is None:
            if rankdet == prow:
                detdata = tod.read(detector=dname, local_start=cacheoff,
                                   n=ncache)
        else:
            cache_det = "{}_{}".format(cache_signal, dname)
            detdata, nnz, mtype = get_local_cache(prow, cache_det,
                                                  cacheoff, ncache)
        detdata = gather_field(prow, detdata, nnz, mtype, cacheoff,
                               ncache, dindx)
        split_field(detdata, core3g.G3Timestream, "signal",
                    mapfield=dname)

        # "flags"

        detdata = None
        nnz = 1
        mtype = MPI.UINT8_T
        if cache_flags is None:
            if rankdet == prow:
                detdata = tod.read_flags(detector=dname,
                                         local_start=cacheoff, n=ncache)
                detdata &= mask_flag
        else:
            cache_det = "{}_{}".format(cache_flags, dname)
            detdata, nnz, mtype = get_local_cache(prow, cache_det,
                                                  cacheoff, ncache)
            if detdata is not None:
                detdata &= mask_flag
        detdata = gather_field(prow, detdata, nnz, mtype, cacheoff,
                               ncache, dindx)
        split_field(detdata, so3g.IntervalsInt, "flags", mapfield=dname)

        # Now copy any additional fields.

        if copy_detector is not None:
            for cname, g3typ, g3maptyp, fnm in copy_detector:
                cache_det = "{}_{}".format(cname, dname)
                detdata, nnz, mtype = get_local_cache(prow, cache_det,
                                                      cacheoff, ncache)
                detdata = gather_field(prow, detdata, nnz, mtype,
                                       cacheoff, ncache, dindx)
                split_field(detdata, g3typ, fnm, mapfield=dname)

    return fdata
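
# flags_to_intervals() above compresses a per-sample flag vector into
# [first, last] index pairs: the groupby call buckets consecutive
# samples with the same flag value and keeps only the nonzero runs.  A
# quick standalone check of that behavior (same logic, outside the
# closure):

import itertools
import operator

def flags_to_intervals(flgs):
    groups = [
        [i for i, value in it] for key, it in
        itertools.groupby(enumerate(flgs), key=operator.itemgetter(1))
        if key != 0
    ]
    return [[grp[0], grp[-1]] for grp in groups]

assert flags_to_intervals([0, 1, 1, 0, 0, 255, 0]) == [[1, 2], [5, 5]]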
def main():
    log = Logger.get()
    gt = GlobalTimers.get()
    gt.start("toast_planck_reduce (total)")

    mpiworld, procs, rank, comm = get_comm()

    if comm.world_rank == 0:
        print("Running with {} processes at {}".format(
            procs, str(datetime.datetime.now())))

    parser = argparse.ArgumentParser(description='Simple dipole pipeline',
                                     fromfile_prefix_chars='@')
    parser.add_argument('--rimo', required=True, help='RIMO file')
    parser.add_argument('--freq', required=True, type=int,
                        help='Frequency')
    parser.add_argument('--dets', required=False, default=None,
                        help='Detector list (comma separated)')
    parser.add_argument('--effdir', required=True,
                        help='Input Exchange Format File directory')
    parser.add_argument('--effdir_pntg', required=False,
                        help='Input Exchange Format File directory for '
                        'pointing')
    parser.add_argument('--obtmask', required=False, default=1, type=int,
                        help='OBT flag mask')
    parser.add_argument('--flagmask', required=False, default=1, type=int,
                        help='Quality flag mask')
    parser.add_argument('--pntflagmask', required=False, default=0,
                        type=int,
                        help='Which OBT flag bits to raise for HCM '
                        'maneuvers')
    parser.add_argument('--ringdb', required=True, help='Ring DB file')
    parser.add_argument('--odfirst', required=False, default=None,
                        help='First OD to use')
    parser.add_argument('--odlast', required=False, default=None,
                        help='Last OD to use')
    parser.add_argument('--ringfirst', required=False, default=None,
                        help='First ring to use')
    parser.add_argument('--ringlast', required=False, default=None,
                        help='Last ring to use')
    parser.add_argument('--obtfirst', required=False, default=None,
                        help='First OBT to use')
    parser.add_argument('--obtlast', required=False, default=None,
                        help='Last OBT to use')
    parser.add_argument('--out', required=False, default='.',
                        help='Output directory')

    # Dipole parameters
    dipogroup = parser.add_mutually_exclusive_group()
    dipogroup.add_argument('--dipole', dest='dipole', required=False,
                           default=False, action='store_true',
                           help='Simulate dipole')
    dipogroup.add_argument('--solsys-dipole', required=False,
                           default=False, action='store_true',
                           help='Simulate solar system dipole')
    dipogroup.add_argument('--orbital-dipole', required=False,
                           default=False, action='store_true',
                           help='Simulate orbital dipole')
    dipo_parameters_group = parser.add_argument_group('dipole_parameters')
    dipo_parameters_group.add_argument(
        '--solsys_speed', required=False, type=float,
        default=DEFAULT_PARAMETERS["solsys_speed"],
        help='Solar system speed wrt. CMB rest frame in km/s. Default is '
        'Planck 2015 best fit value')
    dipo_parameters_group.add_argument(
        '--solsys-glon', required=False, type=float,
        default=DEFAULT_PARAMETERS["solsys_glon"],
        help='Solar system velocity direction longitude in degrees')
    dipo_parameters_group.add_argument(
        '--solsys-glat', required=False, type=float,
        default=DEFAULT_PARAMETERS["solsys_glat"],
        help='Solar system velocity direction latitude in degrees')

    try:
        args = parser.parse_args()
    except SystemExit:
        sys.exit(0)

    if comm.world_rank == 0:
        print('All parameters:')
        print(args, flush=True)

    timer = Timer()
    timer.start()

    do_dipole = args.dipole or args.solsys_dipole or args.orbital_dipole
    if not do_dipole:
        raise RuntimeError(
            "You have to set dipole, solsys-dipole or orbital-dipole")

    nrange = 1

    odranges = None
    if args.odfirst is not None and args.odlast is not None:
        odranges = []
        firsts = [int(i) for i in str(args.odfirst).split(',')]
        lasts = [int(i) for i in str(args.odlast).split(',')]
        for odfirst, odlast in zip(firsts, lasts):
            odranges.append((odfirst, odlast))
        nrange = len(odranges)

    ringranges = None
    if args.ringfirst is not None and args.ringlast is not None:
        ringranges = []
        firsts = [int(i) for i in str(args.ringfirst).split(',')]
        lasts = [int(i) for i in str(args.ringlast).split(',')]
        for ringfirst, ringlast in zip(firsts, lasts):
            ringranges.append((ringfirst, ringlast))
        nrange = len(ringranges)

    obtranges = None
    if args.obtfirst is not None and args.obtlast is not None:
        obtranges = []
        firsts = [float(i) for i in str(args.obtfirst).split(',')]
        lasts = [float(i) for i in str(args.obtlast).split(',')]
        for obtfirst, obtlast in zip(firsts, lasts):
            obtranges.append((obtfirst, obtlast))
        nrange = len(obtranges)

    if odranges is None:
        odranges = [None] * nrange
    if ringranges is None:
        ringranges = [None] * nrange
    if obtranges is None:
        obtranges = [None] * nrange

    detectors = None
    if args.dets is not None:
        detectors = re.split(',', args.dets)

    # create the TOD for this observation
    tods = []
    for obtrange, ringrange, odrange in zip(obtranges, ringranges,
                                            odranges):
        tods.append(
            tp.Exchange(comm=comm.comm_group, detectors=detectors,
                        ringdb=args.ringdb, effdir_in=args.effdir,
                        effdir_pntg=args.effdir_pntg, obt_range=obtrange,
                        ring_range=ringrange, od_range=odrange,
                        freq=args.freq, RIMO=args.rimo,
                        obtmask=args.obtmask, flagmask=args.flagmask,
                        pntflagmask=args.pntflagmask, do_eff_cache=False))

    # Make output directory
    if not os.path.isdir(args.out) and comm.world_rank == 0:
        os.makedirs(args.out)

    # This is the distributed data, consisting of one or more
    # observations, each distributed over a communicator.
    data = toast.Data(comm)

    for iobs, tod in enumerate(tods):
        ob = {}
        ob['name'] = 'observation{:04}'.format(iobs)
        ob['id'] = 0
        ob['tod'] = tod
        ob['intervals'] = tod.valid_intervals
        ob['baselines'] = None
        ob['noise'] = tod.noise
        ob['noise_simu'] = tod.noise
        data.obs.append(ob)

    rimo = tods[0].rimo

    if mpiworld is not None:
        mpiworld.barrier()
    if comm.world_rank == 0:
        timer.report_clear("Metadata queries")

    # Always read the signal and flags, even if the signal is later
    # overwritten.  There is no overhead for the signal because it is
    # interlaced with the flags.

    tod_name = 'signal'
    timestamps_name = 'timestamps'
    flags_name = 'flags'
    common_flags_name = 'common_flags'
    reader = tp.OpInputPlanck(signal_name=tod_name,
                              flags_name=flags_name,
                              timestamps_name=timestamps_name,
                              commonflags_name=common_flags_name)
    if comm.world_rank == 0:
        print('Reading input signal from {}'.format(args.effdir),
              flush=True)
    reader.exec(data)
    if mpiworld is not None:
        mpiworld.barrier()
    if comm.world_rank == 0:
        timer.report_clear("Read")

    """
    # Clear the signal
    eraser = tp.OpCacheMath(in1=tod_name, in2=0, multiply=True,
                            out=tod_name)
    if comm.world_rank == 0:
        print('Erasing TOD', flush=True)
    eraser.exec(data)
    if mpiworld is not None:
        mpiworld.barrier()
    if comm.world_rank == 0:
        timer.report_clear("Erase")
    """

    # make a planck Healpix pointing matrix
    mode = 'IQU'
    nside = 512
    pointing = tp.OpPointingPlanck(nside=nside, mode=mode, RIMO=rimo,
                                   margin=0, apply_flags=True,
                                   keep_vel=do_dipole, keep_pos=False,
                                   keep_phase=False, keep_quats=do_dipole)
    pointing.exec(data)

    if mpiworld is not None:
        mpiworld.barrier()
    if comm.world_rank == 0:
        timer.report_clear("Pointing Matrix")

    flags_name = 'flags'
    common_flags_name = 'common_flags'

    # Simulate the dipole
    if args.dipole:
        dipomode = 'total'
    elif args.solsys_dipole:
        dipomode = 'solsys'
    else:
        dipomode = 'orbital'
    dipo = tp.OpDipolePlanck(args.freq, solsys_speed=args.solsys_speed,
                             solsys_glon=args.solsys_glon,
                             solsys_glat=args.solsys_glat,
                             mode=dipomode, output='dipole',
                             keep_quats=False, npipe_mode=True)
    dipo.exec(data)
    dipo = tp.OpDipolePlanck(args.freq, solsys_speed=args.solsys_speed,
                             solsys_glon=args.solsys_glon,
                             solsys_glat=args.solsys_glat,
                             mode=dipomode, output='dipole4pi',
                             keep_quats=False, npipe_mode=False,
                             lfi_mode=False)
    dipo.exec(data)

    if mpiworld is not None:
        mpiworld.barrier()
    if comm.world_rank == 0:
        timer.report_clear("Dipole")

    # Write out the values in ASCII
    for iobs, obs in enumerate(data.obs):
        tod = obs["tod"]
        times = tod.local_times()
        velocity = tod.local_velocity()
        for det in tod.local_dets:
            quat = tod.local_pointing(det)
            angles = np.vstack(qarray.to_angles(quat)).T
            signal = tod.local_signal(det)
            dipole = tod.local_signal(det, "dipole")
            dipole4pi = tod.local_signal(det, "dipole4pi")
            fname_out = os.path.join(
                args.out, "{}_dipole.{}.{}.{}.txt".format(
                    dipomode, comm.world_rank, iobs, det))
            with open(fname_out, "w") as fout:
                for t, ang, vel, sig, dipo, dipo4pi in zip(
                        times, angles, velocity, signal, dipole,
                        dipole4pi):
                    fout.write(
                        (10 * " {}" + "\n").format(t, *ang, *vel, sig,
                                                   dipo, dipo4pi))
            print("{} : Wrote {}".format(comm.world_rank, fname_out))

    if comm.world_rank == 0:
        timer.report_clear("Write dipole")

    gt.stop_all()
    if mpiworld is not None:
        mpiworld.barrier()
    timer = Timer()
    timer.start()
    alltimers = gather_timers(comm=mpiworld)
    if comm.world_rank == 0:
        out = os.path.join(args.out, "timing")
        dump_timing(alltimers, out)
        timer.stop()
        timer.report("Gather and dump timing info")
    return
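
# The --odfirst/--odlast (and ring/OBT) options above accept
# comma-separated lists that are zipped into (first, last) range tuples,
# one tuple per observation.  A standalone sketch of that parsing with
# illustrative values:

def parse_ranges(first_str, last_str, cast=int):
    firsts = [cast(i) for i in str(first_str).split(',')]
    lasts = [cast(i) for i in str(last_str).split(',')]
    return list(zip(firsts, lasts))

assert parse_ranges('91,191', '100,200') == [(91, 100), (191, 200)]
# Equivalent command line:  --odfirst 91,191 --odlast 100,200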