def output_tidas(args, comm, data, totalname, common_flag_name, flag_name):
    if args.tidas is None:
        return
    autotimer = timing.auto_timer()
    from toast.tod.tidas import OpTidasExport, TODTidas
    tidas_path = os.path.abspath(args.tidas)
    comm.comm_world.Barrier()
    if comm.comm_world.rank == 0:
        print('Exporting TOD to a TIDAS volume at {}'.format(tidas_path),
              flush=args.flush)
    start = MPI.Wtime()
    export = OpTidasExport(tidas_path, TODTidas, backend="hdf5",
                           use_todchunks=True,
                           ctor_opts={"group_dets": "sim"},
                           cache_name=totalname)
    export.exec(data)
    comm.comm_world.Barrier()
    stop = MPI.Wtime()
    if comm.comm_world.rank == 0:
        print('Wrote simulated TOD to {}:{} in {:.2f} s'
              ''.format(tidas_path, totalname, stop - start),
              flush=args.flush)
    return
def scan_signal(args, comm, data, localsm, subnpix):
    """ Scan time-ordered signal from a map.
    """
    signalname = None
    if args.input_map:
        if comm.comm_world.rank == 0:
            print('Scanning input map', flush=args.flush)
        start = MPI.Wtime()
        autotimer = timing.auto_timer()
        npix = 12 * args.nside ** 2
        # Scan the sky signal
        if comm.comm_world.rank == 0 and not os.path.isfile(args.input_map):
            raise RuntimeError(
                'Input map does not exist: {}'.format(args.input_map))
        distmap = tm.DistPixels(
            comm=comm.comm_world, size=npix, nnz=3, dtype=np.float32,
            submap=subnpix, local=localsm)
        distmap.read_healpix_fits(args.input_map)
        scansim = tt.OpSimScan(distmap=distmap, out='signal')
        scansim.exec(data)
        stop = MPI.Wtime()
        if comm.comm_world.rank == 0:
            print('Read and sampled input map: {:.2f} seconds'
                  ''.format(stop - start), flush=args.flush)
        signalname = 'signal'
    return signalname
def simulate_sky_signal(args, comm, data, mem_counter, focalplanes, subnpix,
                        localsm, signalname):
    """ Use PySM to simulate smoothed sky signal.
    """
    # Convolve a signal TOD from PySM
    start = MPI.Wtime()
    op_sim_pysm = ttm.OpSimPySM(
        comm=comm.comm_rank,
        out=signalname,
        pysm_model=args.input_pysm_model,
        pysm_precomputed_cmb_K_CMB=args.input_pysm_precomputed_cmb_K_CMB,
        focalplanes=focalplanes,
        nside=args.nside,
        subnpix=subnpix,
        localsm=localsm,
        apply_beam=args.apply_beam,
        debug=args.debug,
        coord=args.coord)
    op_sim_pysm.exec(data)
    stop = MPI.Wtime()
    if comm.comm_world.rank == 0:
        print('PySM took {:.2f} seconds'.format(stop - start),
              flush=args.flush)
    tod = data.obs[0]['tod']
    for det in tod.local_dets:
        ref = tod.cache.reference(signalname + "_" + det)
        print('PySM signal first observation min max', det, ref.min(),
              ref.max())
        del ref
    del op_sim_pysm
    mem_counter.exec(data)
def output_tidas(args, comm, data, totalname, common_flag_name, flag_name):
    if args.tidas is None:
        return
    autotimer = timing.auto_timer()
    from toast.tod.tidas import OpTidasExport
    tidas_path = os.path.abspath(args.tidas)
    comm.comm_world.Barrier()
    if comm.comm_world.rank == 0:
        print('Exporting TOD to a TIDAS volume at {}'.format(tidas_path),
              flush=args.flush)
    start = MPI.Wtime()
    export = OpTidasExport(tidas_path, name=totalname,
                           common_flag_name=common_flag_name,
                           flag_name=flag_name, usedist=True)
    export.exec(data)
    comm.comm_world.Barrier()
    stop = MPI.Wtime()
    if comm.comm_world.rank == 0:
        print('Wrote simulated TOD to {}:{} in {:.2f} s'
              ''.format(tidas_path, totalname, stop - start),
              flush=args.flush)
    return
def apply_madam(args, comm, data, madampars, outpath, detweights,
                totalname_madam, flag_name, common_flag_name):
    if args.madam:
        if comm.comm_world.rank == 0:
            print('Destriping signal', flush=args.flush)
        start = MPI.Wtime()
        autotimer = timing.auto_timer()
        # create output directory for this realization
        madampars['path_output'] = outpath
        madam = tm.OpMadam(params=madampars, detweights=detweights,
                           name=totalname_madam,
                           common_flag_name=common_flag_name,
                           flag_name=flag_name,
                           common_flag_mask=args.common_flag_mask,
                           purge_tod=True)
        madam.exec(data)
        comm.comm_world.barrier()
        stop = MPI.Wtime()
        if comm.comm_world.rank == 0:
            print('Madam took {:.3f} s'.format(stop - start),
                  flush=args.flush)
    return
def get_submaps(args, comm, data):
    """ Get a list of locally hit pixels and submaps on every process.
    """
    autotimer = timing.auto_timer()
    if comm.comm_world.rank == 0:
        print('Scanning local pixels', flush=args.flush)
    start = MPI.Wtime()
    # Prepare for using distpixels objects
    nside = args.nside
    subnside = 16
    if subnside > nside:
        subnside = nside
    subnpix = 12 * subnside * subnside
    # get locally hit pixels
    lc = tm.OpLocalPixels()
    localpix = lc.exec(data)
    if localpix is None:
        raise RuntimeError(
            'Process {} has no hit pixels. Perhaps there are fewer '
            'detectors than processes in the group?'.format(
                comm.comm_world.rank))
    # find the locally hit submaps.
    localsm = np.unique(np.floor_divide(localpix, subnpix))
    comm.comm_world.barrier()
    stop = MPI.Wtime()
    elapsed = stop - start
    if comm.comm_world.rank == 0:
        print('Local submaps identified in {:.3f} s'.format(elapsed),
              flush=args.flush)
    return localpix, localsm, subnpix
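
# A minimal standalone sketch (numpy only, not part of the pipeline) of the
# submap bookkeeping used in get_submaps() above: with nside=256 and
# subnside=16 each submap holds 12*16**2 = 3072 pixels, and a pixel's submap
# index is simply floor(pixel / subnpix).  The pixel values are illustrative.
def _example_submap_indexing():
    import numpy as np
    nside, subnside = 256, 16
    npix = 12 * nside ** 2          # 786432 pixels total
    subnpix = 12 * subnside ** 2    # 3072 pixels per submap
    # pretend these pixels were hit locally
    localpix = np.array([0, 5000, 123456, npix - 1])
    localsm = np.unique(np.floor_divide(localpix, subnpix))
    # localsm -> array([  0,   1,  40, 255]); only these submaps need to
    # be allocated locally by DistPixels.
    return localsm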
def load_schedule(args, comm):
    start = MPI.Wtime()
    autotimer = timing.auto_timer()
    if comm.comm_world.rank == 0:
        fn = args.schedule
        if not os.path.isfile(fn):
            raise RuntimeError('No such schedule file: {}'.format(fn))
        start = MPI.Wtime()
        f = open(fn, 'r')
        while True:
            line = f.readline()
            if line.startswith('#'):
                continue
            (site_name, telescope, site_lat, site_lon,
             site_alt) = line.split()
            site_alt = float(site_alt)
            site = [site_name, site_lat, site_lon, site_alt]
            break
        all_ces = []
        for line in f:
            if line.startswith('#'):
                continue
            (start_date, start_time, stop_date, stop_time, mjdstart,
             mjdstop, name, azmin, azmax, el, rs,
             sun_el1, sun_az1, sun_el2, sun_az2,
             moon_el1, moon_az1, moon_el2, moon_az2, moon_phase,
             scan, subscan) = line.split()
            start_time = start_date + ' ' + start_time
            stop_time = stop_date + ' ' + stop_time
            try:
                # Schedule times are interpreted as UTC
                start_time = dateutil.parser.parse(start_time + ' +0000')
                stop_time = dateutil.parser.parse(stop_time + ' +0000')
            except Exception:
                start_time = dateutil.parser.parse(start_time)
                stop_time = dateutil.parser.parse(stop_time)
            start_timestamp = start_time.timestamp()
            stop_timestamp = stop_time.timestamp()
            all_ces.append([
                start_timestamp, stop_timestamp, name, float(mjdstart),
                int(scan), int(subscan), float(azmin), float(azmax),
                float(el)])
        f.close()
        stop = MPI.Wtime()
        print('Load schedule: {:.2f} seconds'.format(stop - start),
              flush=args.flush)
    else:
        site = None
        all_ces = None
    site = comm.comm_world.bcast(site)
    all_ces = comm.comm_world.bcast(all_ces)
    comm.comm_world.barrier()
    stop = MPI.Wtime()
    if comm.comm_world.rank == 0:
        print('Loading schedule {:.3f} s'.format(stop - start),
              flush=args.flush)
    return site, all_ces
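
# A minimal sketch of parsing one CES line in the 22-field schedule format
# consumed by load_schedule() above.  The sample line below is fabricated
# for illustration; real schedules are written by the scheduling tool.
def _example_parse_ces_line():
    import dateutil.parser
    line = ('2019-04-01 00:00:00 2019-04-01 01:00:00 58574.000 58574.042 '
            'FIELD1 120.0 130.0 45.0 R 10.0 80.0 -5.0 85.0 '
            '30.0 100.0 32.0 110.0 0.5 1 0')
    fields = line.split()
    start_date, start_time, stop_date, stop_time = fields[:4]
    name = fields[6]
    azmin, azmax, el = [float(x) for x in fields[7:10]]
    scan, subscan = int(fields[-2]), int(fields[-1])
    # The explicit +0000 pins the timestamps to UTC
    start = dateutil.parser.parse(
        start_date + ' ' + start_time + ' +0000').timestamp()
    stop = dateutil.parser.parse(
        stop_date + ' ' + stop_time + ' +0000').timestamp()
    return [start, stop, name, float(fields[4]), scan, subscan,
            azmin, azmax, el]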
def subtract_signal(self, tod, cworld, rank, masksampler, mapsampler,
                    local_intervals):
    """ Subtract a signal estimate from the TOD and update the
    flags for noise estimation.
    """
    start_signal_subtract = MPI.Wtime()
    for det in tod.local_dets:
        if rank == 0:
            print('Subtracting signal for {}'.format(det), flush=True)
            tod.cache.report()
        fsample = self._rimo[det].fsample
        epsilon = self._rimo[det].epsilon
        eta = (1 - epsilon) / (1 + epsilon)
        signal = tod.local_signal(det, name=self._signal)
        flags = tod.local_flags(det, name=self._flags)
        flags &= self._detmask
        for ival in local_intervals:
            ind = slice(ival.first, ival.last + 1)
            sig = signal[ind]
            flg = flags[ind]
            quat = tod.local_pointing(det)[ind]
            if self._pol:
                theta, phi, psi = qa.to_angles(quat)
                iw = np.ones_like(theta)
                qw = eta * np.cos(2 * psi)
                uw = eta * np.sin(2 * psi)
                iquw = np.column_stack([iw, qw, uw])
            else:
                theta, phi = qa.to_position(quat)
            if masksampler is not None:
                maskflg = masksampler.at(theta, phi) < 0.5
                flg[maskflg] |= 255
            if mapsampler is not None:
                if self._pol:
                    bg = mapsampler.atpol(theta, phi, iquw)
                else:
                    bg = mapsampler.at(theta, phi)
                if self._calibrate_signal_estimate:
                    good = flg == 0
                    ngood = np.sum(good)
                    if ngood > 1:
                        templates = np.vstack([np.ones(ngood), bg[good]])
                        invcov = np.dot(templates, templates.T)
                        cov = np.linalg.inv(invcov)
                        proj = np.dot(templates, sig[good])
                        coeff = np.dot(cov, proj)
                        bg = coeff[0] + coeff[1] * bg
                sig -= bg
    cworld.barrier()
    stop_signal_subtract = MPI.Wtime()
    if rank == 0:
        print('TOD signal-subtracted in {:.2f} s'.format(
            stop_signal_subtract - start_signal_subtract), flush=True)
    return fsample
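
# Standalone sketch (numpy only) of the template regression used above when
# self._calibrate_signal_estimate is set: the background estimate is fit to
# the data with a free offset and gain before it is subtracted.  Synthetic
# data stand in for the real TOD.
def _example_calibrate_estimate():
    import numpy as np
    rng = np.random.default_rng(0)
    bg = np.sin(np.linspace(0, 10, 1000))                 # signal estimate
    sig = 0.1 + 1.5 * bg + rng.normal(0, 0.01, bg.size)   # observed TOD
    templates = np.vstack([np.ones(bg.size), bg])
    invcov = np.dot(templates, templates.T)
    cov = np.linalg.inv(invcov)
    proj = np.dot(templates, sig)
    coeff = np.dot(cov, proj)                 # coeff ~ [offset, gain]
    calibrated = coeff[0] + coeff[1] * bg
    return sig - calibrated                   # near-zero residual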
def elapsed(mcomm, start, msg):
    mcomm.barrier()
    stop = MPI.Wtime()
    dur = stop - start
    if mcomm.rank == 0:
        print("{}: {:.3f} s".format(msg, dur), flush=True)
    return stop
def simulate_sky_signal(args, comm, data, mem_counter, focalplanes,
                        subnpix, localsm, signalname):
    """ Use PySM to simulate smoothed sky signal.
    """
    # Convolve a signal TOD from PySM
    start = MPI.Wtime()
    op_sim_pysm = ttm.OpSimPySM(comm=comm.comm_rank, out=signalname,
                                pysm_model=args.input_pysm_model,
                                focalplanes=focalplanes,
                                nside=args.nside, subnpix=subnpix,
                                localsm=localsm,
                                apply_beam=args.apply_beam)
    op_sim_pysm.exec(data)
    stop = MPI.Wtime()
    if comm.comm_world.rank == 0:
        print('PySM took {:.2f} seconds'.format(stop - start),
              flush=args.flush)
    mem_counter.exec(data)
def expand_pointing(args, comm, data):
    """ Expand the boresight pointing to every detector.
    """
    start = MPI.Wtime()
    autotimer = timing.auto_timer()
    hwprpm = args.hwprpm
    hwpstep = None
    if args.hwpstep is not None:
        hwpstep = float(args.hwpstep)
    hwpsteptime = args.hwpsteptime
    npix = 12 * args.nside ** 2
    if comm.comm_world.rank == 0:
        print('Expanding pointing', flush=args.flush)
    pointing = tt.OpPointingHpix(nside=args.nside, nest=True, mode='IQU',
                                 hwprpm=hwprpm, hwpstep=hwpstep,
                                 hwpsteptime=hwpsteptime)
    pointing.exec(data)
    # Only purge the pointing if we are NOT going to export the
    # data to a TIDAS volume
    if args.tidas is None:
        for ob in data.obs:
            tod = ob['tod']
            tod.free_radec_quats()
    comm.comm_world.barrier()
    stop = MPI.Wtime()
    if comm.comm_world.rank == 0:
        print('Pointing generation took {:.3f} s'.format(stop - start),
              flush=args.flush)
    return
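
# Sketch of the two HWP parametrizations accepted above: continuous rotation
# at `hwprpm`, or discrete steps of `hwpstep` degrees every `hwpsteptime`
# seconds.  This is a pure-numpy illustration of the angle sampling, under
# the assumption of those semantics; OpPointingHpix handles it internally.
def _example_hwp_angle(t, hwprpm=0.0, hwpstep=None, hwpsteptime=1.0):
    import numpy as np
    if hwprpm > 0:
        # continuous: RPM -> radians per second
        return (t * hwprpm * 2 * np.pi / 60) % (2 * np.pi)
    if hwpstep is not None:
        # stepped: assumes hwpsteptime > 0
        nstep = np.floor(t / hwpsteptime)
        return (nstep * np.radians(float(hwpstep))) % (2 * np.pi)
    return np.zeros_like(t)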
def apply_polyfilter(args, comm, data, totalname_freq):
    if args.polyorder:
        if comm.comm_world.rank == 0:
            print('Polyfiltering signal', flush=args.flush)
        start = MPI.Wtime()
        autotimer = timing.auto_timer()
        common_flag_name = 'common_flags'
        flag_name = 'flags'
        polyfilter = tt.OpPolyFilter(
            order=args.polyorder, name=totalname_freq,
            common_flag_name=common_flag_name,
            common_flag_mask=args.common_flag_mask, flag_name=flag_name)
        polyfilter.exec(data)
        comm.comm_world.barrier()
        stop = MPI.Wtime()
        if comm.comm_world.rank == 0:
            print('Polynomial filtering took {:.3f} s'.format(stop - start),
                  flush=args.flush)
    return
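
# Conceptual sketch of what a polynomial filter does to one scan of data:
# fit a low-order polynomial across the samples and subtract it, removing
# slow drifts.  OpPolyFilter does this per detector and per interval with
# flag handling; this bare-numpy version only illustrates the idea.
def _example_polyfilter(sig, order=3):
    import numpy as np
    x = np.linspace(-1, 1, sig.size)
    fit = np.polynomial.legendre.Legendre.fit(x, sig, order)
    return sig - fit(x)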
def apply_groundfilter(args, comm, data, totalname_freq):
    if args.wbin_ground:
        if comm.comm_world.rank == 0:
            print('Ground filtering signal', flush=args.flush)
        start = MPI.Wtime()
        autotimer = timing.auto_timer()
        common_flag_name = 'common_flags'
        flag_name = 'flags'
        groundfilter = tt.OpGroundFilter(
            wbin=args.wbin_ground, name=totalname_freq,
            common_flag_name=common_flag_name,
            common_flag_mask=args.common_flag_mask, flag_name=flag_name)
        groundfilter.exec(data)
        comm.comm_world.barrier()
        stop = MPI.Wtime()
        if comm.comm_world.rank == 0:
            print('Ground filtering took {:.3f} s'.format(stop - start),
                  flush=args.flush)
    return
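
# Conceptual sketch of ground filtering: bin the signal in azimuth with
# `wbin`-degree bins and remove the azimuth-binned average, which absorbs
# ground pickup that is static in azimuth.  OpGroundFilter does this per
# detector with proper flag handling; this is a bare-numpy illustration.
def _example_groundfilter(az_deg, sig, wbin=1.0):
    import numpy as np
    bins = np.floor(az_deg / wbin).astype(int)
    out = sig.copy()
    for b in np.unique(bins):
        ind = bins == b
        out[ind] -= np.mean(sig[ind])
    return out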
def _simulate_atmosphere(self, weather, scan_range, tmin, tmax, comm,
                         key1, key2, counter1, counter2, cachedir, prefix,
                         tmin_tot, tmax_tot, rmin, rmax):
    if self._report_timing:
        comm.Barrier()
        tstart = MPI.Wtime()
    T0_center = weather.air_temperature
    wx = weather.west_wind
    wy = weather.south_wind
    w_center = np.sqrt(wx ** 2 + wy ** 2)
    wdir_center = np.arctan2(wy, wx)
    azmin, azmax, elmin, elmax = scan_range
    sim = atm_sim_alloc(
        azmin, azmax, elmin, elmax, tmin, tmax,
        self._lmin_center, self._lmin_sigma,
        self._lmax_center, self._lmax_sigma,
        w_center, 0, wdir_center, 0,
        self._z0_center, self._z0_sigma, T0_center, 0,
        self._zatm, self._zmax,
        self._xstep, self._ystep, self._zstep,
        self._nelem_sim_max, self._verbosity,
        comm, key1, key2, counter1, counter2, cachedir, rmin, rmax)
    if sim == 0:
        raise RuntimeError(prefix + "Failed to allocate simulation")
    if self._report_timing:
        comm.Barrier()
        tstop = MPI.Wtime()
        if comm.rank == 0 and tstop - tstart > 1:
            print(prefix + "OpSimAtmosphere: Initialized "
                  "atmosphere in {:.2f} s".format(tstop - tstart),
                  flush=self._flush)
        tstart = tstop
    comm.Barrier()
    use_cache = cachedir is not None
    if comm.rank == 0:
        fname = os.path.join(
            cachedir,
            "{}_{}_{}_{}_metadata.txt".format(key1, key2, counter1,
                                              counter2))
        if use_cache and os.path.isfile(fname):
            print(prefix + "Loading the atmosphere for t = {} "
                  "from {}".format(tmin - tmin_tot, fname),
                  flush=self._flush)
            cached = True
        else:
            print(prefix + "Simulating the atmosphere for t = {}"
                  "".format(tmin - tmin_tot), flush=self._flush)
            cached = False
    err = atm_sim_simulate(sim, use_cache)
    if err != 0:
        raise RuntimeError(prefix + "Simulation failed.")
    # Advance the sample counter in case wind_time broke the
    # observation in parts
    counter2 += 100000000
    if self._report_timing:
        comm.Barrier()
        tstop = MPI.Wtime()
        if comm.rank == 0 and tstop - tstart > 1:
            if cached:
                op = "Loaded"
            else:
                op = "Simulated"
            print(prefix + "OpSimAtmosphere: {} atmosphere in "
                  "{:.2f} s".format(op, tstop - tstart), flush=self._flush)
        tstart = tstop
    return sim, counter2
def _observe_atmosphere(self, sim, tod, comm, prefix, common_ref, istart,
                        nind, ind, scan_range, times, absorption):
    azmin, azmax, elmin, elmax = scan_range
    nsamp = tod.local_samples[1]
    if self._report_timing:
        comm.Barrier()
        tstart = MPI.Wtime()
    if comm.rank == 0:
        print(prefix + "Observing the atmosphere", flush=self._flush)
    for det in tod.local_dets:
        # Cache the output signal
        cachename = "{}_{}".format(self._out, det)
        if tod.cache.exists(cachename):
            ref = tod.cache.reference(cachename)
        else:
            ref = tod.cache.create(cachename, np.float64, (nsamp,))
        # Cache the output flags
        flag_ref = tod.local_flags(det, self._flag_name)
        if self._apply_flags:
            good = np.logical_and(
                (common_ref[ind] & self._common_flag_mask) == 0,
                (flag_ref[ind] & self._flag_mask) == 0)
            ngood = np.sum(good)
        else:
            try:
                good = (common_ref[ind] & tod.UNSTABLE) == 0
                ngood = np.sum(good)
            except Exception:
                good = slice(0, nind)
                ngood = nind
        if ngood == 0:
            continue
        try:
            # Some TOD classes provide a shortcut to Az/El
            az, el = tod.read_azel(detector=det, local_start=istart,
                                   n=nind)
            az = az[good]
            el = el[good]
        except Exception:
            azelquat = tod.read_pntg(detector=det, local_start=istart,
                                     n=nind, azel=True)[good]
            # Convert Az/El quaternion of the detector back into
            # angles for the simulation.
            theta, phi = qa.to_position(azelquat)
            # Azimuth is measured in the opposite direction
            # than longitude
            az = 2 * np.pi - phi
            el = np.pi / 2 - theta
        atmdata = np.zeros(ngood, dtype=np.float64)
        if np.ptp(az) < np.pi:
            azmin_det = np.amin(az)
            azmax_det = np.amax(az)
        else:
            # Scanning across the zero azimuth.
            azmin_det = np.amin(az[az > np.pi]) - 2 * np.pi
            azmax_det = np.amax(az[az < np.pi])
        elmin_det = np.amin(el)
        elmax_det = np.amax(el)
        if ((not (azmin <= azmin_det and azmax_det <= azmax)
             and not (azmin <= azmin_det - 2 * np.pi
                      and azmax_det - 2 * np.pi <= azmax))
                or not (elmin <= elmin_det and elmin_det <= elmax)):
            raise RuntimeError(
                prefix + "Detector Az/El: [{:.5f}, {:.5f}], "
                "[{:.5f}, {:.5f}] is not contained in "
                "[{:.5f}, {:.5f}], [{:.5f} {:.5f}]"
                "".format(azmin_det, azmax_det, elmin_det, elmax_det,
                          azmin, azmax, elmin, elmax))
        # Integrate detector signal
        err = atm_sim_observe(sim, times[ind], az, el, atmdata, ngood, 0)
        if err != 0:
            # Observing failed
            print(prefix + "OpSimAtmosphere: Observing FAILED. "
                  "det = {}, rank = {}".format(det, comm.rank),
                  flush=self._flush)
            atmdata[:] = 0
            flag_ref[ind] = 255
        if self._gain:
            atmdata *= self._gain
        if absorption is not None:
            # Apply the frequency-dependent absorption coefficient
            atmdata *= absorption
        ref[ind][good] += atmdata
        del ref
    err = atm_sim_free(sim)
    if err != 0:
        raise RuntimeError(prefix + "Failed to free simulation.")
    if self._report_timing:
        comm.Barrier()
        tstop = MPI.Wtime()
        if comm.rank == 0 and tstop - tstart > 1:
            print(prefix + "OpSimAtmosphere: Observed atmosphere "
                  "in {:.2f} s".format(tstop - tstart), flush=self._flush)
    return
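
# Sketch of the Az/El convention used above: the horizontal quaternion
# yields (theta, phi), and azimuth runs opposite to longitude, so
# az = 2*pi - phi and el = pi/2 - theta.  The wraparound branch mirrors
# the np.ptp() test in _observe_atmosphere().
def _example_azel_range(theta, phi):
    import numpy as np
    az = 2 * np.pi - phi
    el = np.pi / 2 - theta
    if np.ptp(az) < np.pi:
        azmin, azmax = np.amin(az), np.amax(az)
    else:
        # scan straddles az = 0: wrap the high branch to negative values
        azmin = np.amin(az[az > np.pi]) - 2 * np.pi
        azmax = np.amax(az[az < np.pi])
    return azmin, azmax, np.amin(el), np.amax(el)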
def bin_maps(args, comm, data, rootname, zmap, invnpp, zmap_group,
             invnpp_group, detweights, totalname_freq, flag_name,
             common_flag_name, outpath):
    """ Use TOAST facilities to bin stored signal.
    """
    if not args.skip_bin:
        if comm.comm_world.rank == 0:
            print('Binning unfiltered maps', flush=args.flush)
        start0 = MPI.Wtime()
        start = start0
        autotimer = timing.auto_timer()
        # Bin a map using the toast facilities
        zmap.data.fill(0.0)
        build_zmap = tm.OpAccumDiag(
            detweights=detweights, zmap=zmap, name=totalname_freq,
            flag_name=flag_name, common_flag_name=common_flag_name,
            common_flag_mask=args.common_flag_mask)
        build_zmap.exec(data)
        zmap.allreduce()
        comm.comm_world.barrier()
        stop = MPI.Wtime()
        if comm.comm_world.rank == 0:
            print(' - Building noise weighted map took {:.3f} s'
                  ''.format(stop - start), flush=args.flush)
        start = stop
        tm.covariance_apply(invnpp, zmap)
        comm.comm_world.barrier()
        stop = MPI.Wtime()
        if comm.comm_world.rank == 0:
            print(' - Computing {} map took {:.3f} s'
                  ''.format(rootname, stop - start), flush=args.flush)
        start = stop
        fn = os.path.join(outpath, rootname + '.fits')
        if args.zip:
            fn += '.gz'
        zmap.write_healpix_fits(fn)
        comm.comm_world.barrier()
        stop = MPI.Wtime()
        if comm.comm_world.rank == 0:
            print(' - Writing {} map to {} took {:.3f} s'
                  ''.format(rootname, fn, stop - start), flush=args.flush)
        if zmap_group is not None:
            zmap_group.data.fill(0.0)
            build_zmap_group = tm.OpAccumDiag(
                detweights=detweights, zmap=zmap_group,
                name=totalname_freq, flag_name=flag_name,
                common_flag_name=common_flag_name,
                common_flag_mask=args.common_flag_mask)
            build_zmap_group.exec(data)
            zmap_group.allreduce()
            comm.comm_group.barrier()
            stop = MPI.Wtime()
            if comm.comm_group.rank == 0:
                print(' - Building group noise weighted map took '
                      '{:.3f} s'.format(stop - start), flush=args.flush)
            start = stop
            tm.covariance_apply(invnpp_group, zmap_group)
            comm.comm_group.barrier()
            stop = MPI.Wtime()
            if comm.comm_group.rank == 0:
                print(' - Computing {} map took {:.3f} s'
                      ''.format(rootname, stop - start), flush=args.flush)
            start = stop
            fn = os.path.join(
                outpath,
                '{}_group_{:04}.fits'.format(rootname, comm.group))
            if args.zip:
                fn += '.gz'
            zmap_group.write_healpix_fits(fn)
            comm.comm_group.barrier()
            stop = MPI.Wtime()
            if comm.comm_group.rank == 0:
                print(' - Writing group {} map to {} took '
                      '{:.3f} s'.format(rootname, fn, stop - start),
                      flush=args.flush)
        stop = MPI.Wtime()
        if comm.comm_world.rank == 0:
            print('Mapmaking took {:.3f} s'
                  ''.format(stop - start0), flush=args.flush)
    return
def __init__(self, bolo_id, IMO, filterlen=2 ** 20, fsample=180.3737,
             lfer='LFER8', overlap=10000, extra_global_offset=None,
             filterfile=None, tabulated_tf=None, fnorm=0.016, comm=None,
             normalize_filter=True):
    """ Instantiate the deconvolution object.

    bolo_id -- Bolometer ID (e.g. 00_100_1a)
    IMO -- Either an IMO object or a path to an IMO XML dump
    filterlen -- Fourier transform length; the actual length will be a
        power of 2 AT LEAST as long as this
    fsample -- fixed sampling frequency
    lfer -- Transfer function to seek from the IMO and deconvolve
    overlap -- number of samples read for boundary effects.  These are
        not written into the filtered TOI
    extra_global_offset -- add another phase shift by hand, in the same
        units as global_offset in the IMO
    tabulated_tf (None) -- When set, overrides lfer, IMO and filterfile.
        A 3-element tuple containing frequency, real, imaginary
    filterfile (None) -- When set, overrides lfer and IMO.  A 3-column
        ASCII file containing the transfer function to convolve with
    fnorm -- the frequency at which the transfer function is normalized
        to 1.0.  The default is the dipole frequency.
    """
    self.bolo_id = bolo_id
    self.IMO = IMO
    self.filterlen = 2
    while self.filterlen < filterlen or self.filterlen < 3 * overlap:
        self.filterlen *= 2
    self.overlap = overlap
    self.comm = comm
    if self.comm is None:
        self.ntask = 1
        self.rank = 0
    else:
        self.ntask = self.comm.size
        self.rank = self.comm.rank
    self.normalize_filter = normalize_filter
    # DEBUG begin
    if self.rank == 0:
        print("Initializing TauDeconvolver. bolo_id = {}, IMO = {}, "
              "filterlen = {}, fsample = {}, lfer = {}, filterfile = {}"
              "".format(bolo_id, IMO, filterlen, fsample, lfer,
                        filterfile), flush=True)
    # DEBUG end
    freq = np.fft.rfftfreq(self.filterlen, 1. / fsample)
    self.freq = freq
    if tabulated_tf is not None:
        self.tf = np.interp(self.freq, tabulated_tf[0], tabulated_tf[1]) \
            + 1j * np.interp(self.freq, tabulated_tf[0], tabulated_tf[2])
        if self.normalize_filter:
            norm = np.abs(
                np.interp(fnorm, tabulated_tf[0], tabulated_tf[1])
                + 1j * np.interp(fnorm, tabulated_tf[0], tabulated_tf[2]))
            self.tf = self.tf / norm
        self.tfinv = 1. / self.tf
        self.tfinv[np.abs(self.tf) < 1e-4] = 0
        self.lowpass = time_response_tools.filter_function(freq)
        self.filter = self.lowpass * self.tfinv
        self.fsample = fsample
        if extra_global_offset is not None:
            if extra_global_offset != 0.0:
                phase = -2. * np.pi * extra_global_offset * freq / fsample
                shift_tf = np.cos(phase) + 1j * np.sin(phase)
                self.filter /= shift_tf
                self.tf *= shift_tf
    else:
        self.filterfile = filterfile
        if self.filterfile is not None:
            if self.rank == 0:
                try:
                    filt = np.genfromtxt(filterfile).T
                except Exception as e:
                    raise Exception('Failed to load filter function from '
                                    '{}: {}'.format(filterfile, e))
            else:
                filt = None
            if self.comm is not None:
                filt = self.comm.bcast(filt)
            self.filter = np.interp(self.freq, filt[0], filt[1]) \
                + 1j * np.interp(self.freq, filt[0], filt[2])
            if self.normalize_filter:
                norm = np.abs(np.interp(fnorm, filt[0], filt[1])
                              + 1j * np.interp(fnorm, filt[0], filt[2]))
                self.filter = self.filter / norm
            # Invert the filter to allow convolving
            self.tf = self.filter.copy()
            good = self.filter != 0
            self.tf[good] = 1. / self.filter[good]
            self.tf[np.abs(self.filter) < 1e-4] = 0
        else:
            self.global_offset = self.IMO.get(
                'IMO:HFI:DET:Phot_Pixel Name="{}":NoiseAndSyst:TimeResp:'
                'LFER8:global_offset'.format(bolo_id), np.float64)
            if extra_global_offset is not None:
                self.global_offset += extra_global_offset
            self.pars = {}
            npole = 0
            if lfer == 'LFER8':
                prefix = 'IMO:HFI:DET:Phot_Pixel Name="{}":NoiseAndSyst:' \
                         'TimeResp:LFER8:'.format(bolo_id)
                self.pars['a1'] = self.IMO.get(prefix + 'par1', np.float64)
                self.pars['a2'] = self.IMO.get(prefix + 'par2', np.float64)
                self.pars['a3'] = self.IMO.get(prefix + 'par3', np.float64)
                self.pars['a4'] = self.IMO.get(prefix + 'par9', np.float64)
                self.pars['a5'] = self.IMO.get(prefix + 'par11', np.float64)
                self.pars['a6'] = self.IMO.get(prefix + 'par13', np.float64)
                self.pars['a7'] = self.IMO.get(prefix + 'par15', np.float64)
                self.pars['a8'] = self.IMO.get(prefix + 'par17', np.float64)
                self.pars['tau1'] = self.IMO.get(prefix + 'par4',
                                                 np.float64)
                self.pars['tau2'] = self.IMO.get(prefix + 'par5',
                                                 np.float64)
                self.pars['tau3'] = self.IMO.get(prefix + 'par6',
                                                 np.float64)
                self.pars['tau4'] = self.IMO.get(prefix + 'par10',
                                                 np.float64)
                self.pars['tau5'] = self.IMO.get(prefix + 'par12',
                                                 np.float64)
                self.pars['tau6'] = self.IMO.get(prefix + 'par14',
                                                 np.float64)
                self.pars['tau7'] = self.IMO.get(prefix + 'par16',
                                                 np.float64)
                self.pars['tau8'] = self.IMO.get(prefix + 'par18',
                                                 np.float64)
                self.pars['tau_stray'] = self.IMO.get(prefix + 'par7',
                                                      np.float64)
                self.pars['Sphase'] = self.IMO.get(prefix + 'par8',
                                                   np.float64)
                prefix = 'IMO:HFI:DET:Phot_Pixel Name="{}":NoiseAndSyst:' \
                         'TimeResp:SallenKeyHPF:'.format(bolo_id)
                self.pars['tauhp1'] = self.IMO.get(prefix + 'tauhp1',
                                                   np.float64)
                self.pars['tauhp2'] = self.IMO.get(prefix + 'tauhp2',
                                                   np.float64)
                npole = 8
                for i in range(8, 0, -1):
                    if self.pars['tau' + str(i)] != 0:
                        break
                    npole -= 1
                if self.pars['tauhp1'] != self.pars['tauhp2']:
                    raise Exception(
                        'Don\'t know how to handle the case where tauhp1 '
                        '({}) is not equal to tauhp2 ({})'.format(
                            self.pars['tauhp1'], self.pars['tauhp2']))
            elif lfer == 'LFER1':
                npole = 1
                self.pars['a1'] = 1.0
                self.pars['tau1'] = 0.01
                self.pars['tau_stray'] = 2.095108e-03
                self.pars['Sphase'] = 0.0
            else:
                raise Exception(
                    'Don\'t know how to parse {} transfer function '
                    'parameters from IMO'.format(lfer))
            norm_f = np.array([0.0, fnorm])
            norm_tf = time_response_tools.LFERn(norm_f, npole, self.pars)
            phase = -2. * np.pi * self.global_offset * norm_f / fsample
            shift_tf = np.cos(phase) + 1j * np.sin(phase)
            norm_tf = norm_tf * shift_tf
            norm = np.abs(norm_tf[1])
            tstart = MPI.Wtime()
            if self.ntask == 1:
                self.tf = time_response_tools.LFERn(
                    freq, npole, self.pars) / norm
            else:
                nfreq = len(freq)
                nfreq_task = int(np.ceil(nfreq / self.ntask))
                # First frequency must be zero for normalization
                my_freq = np.hstack(
                    [[0.0], freq[nfreq_task * self.rank:
                                 nfreq_task * (self.rank + 1)]])
                # Discard the extra frequency bin here
                my_tf = time_response_tools.LFERn(
                    my_freq, npole, self.pars)[1:] / norm
                self.tf = np.hstack(self.comm.allgather(my_tf))
            tstop = MPI.Wtime()
            if self.rank == 0:
                print('Computed the LFER transfer function in {:.2f} s.'
                      ''.format(tstop - tstart), flush=True)
            self.tfinv = 1. / self.tf
            self.tfinv[np.abs(self.tf) < 1e-4] = 0
            self.lowpass = time_response_tools.filter_function(freq)
            self.filter = self.lowpass * self.tfinv
            self.fsample = fsample
            phase = -2. * np.pi * self.global_offset * freq / fsample
            shift_tf = np.cos(phase) + 1j * np.sin(phase)
            self.filter /= shift_tf
            self.tf *= shift_tf
    self.init_flag_kernels()
    return
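
# Standalone sketch of the core operation TauDeconvolver performs: divide
# the TOD by the bolometer transfer function in the Fourier domain
# (multiplication by filter = lowpass / tf above), discarding an `overlap`
# margin to suppress edge effects.  The one-pole response here is synthetic
# and only stands in for the IMO-derived LFER transfer function.
def _example_fourier_deconvolve(toi, fsample=180.3737, tau=0.01,
                                overlap=1000):
    import numpy as np
    freq = np.fft.rfftfreq(toi.size, 1. / fsample)
    tf = 1. / (1. + 2j * np.pi * freq * tau)   # one-pole bolometer response
    filt = np.zeros_like(tf)
    good = np.abs(tf) > 1e-4
    filt[good] = 1. / tf[good]                 # inverse, with a cutoff
    out = np.fft.irfft(np.fft.rfft(toi) * filt, n=toi.size)
    # assumes toi.size > 2 * overlap; trim the boundary samples
    return out[overlap:-overlap]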
    madam = tm.OpMadam(params=pars, detweights=detweights,
                       name="tot_signal")
    madam.exec(data)

    comm.comm_world.barrier()
    stop = MPI.Wtime()
    elapsed = stop - start
    if comm.comm_world.rank == 0:
        print("Mapmaking took {:.3f} s".format(elapsed), flush=True)

    comm.comm_world.barrier()
    stop = MPI.Wtime()
    elapsed = stop - global_start
    if comm.comm_world.rank == 0:
        print("Total Time: {:.2f} seconds".format(elapsed), flush=True)


if __name__ == "__main__":
    try:
        main()
        tman = timing.timing_manager()
        tman.report()
        MPI.Finalize()
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value,
                                           exc_traceback)
        lines = ["Proc {}: {}".format(MPI.COMM_WORLD.rank, x)
                 for x in lines]
        print("".join(lines), flush=True)
        toast.raise_error(6)  # typical error code for SIGABRT
        MPI.COMM_WORLD.Abort(6)
def main():
    comm = MPI.COMM_WORLD
    if comm.rank == 0:
        print("Running with {} processes".format(comm.size))
    global_start = MPI.Wtime()

    parser = argparse.ArgumentParser(
        description='Read a toast covariance matrix and write the '
        'inverse condition number map')
    parser.add_argument('--input', required=True, default=None,
                        help='The input covariance FITS file')
    parser.add_argument('--output', required=False, default=None,
                        help='The output inverse condition map FITS file.')
    args = timing.add_arguments_and_parse(parser,
                                          timing.FILE(noquotes=True))
    autotimer = timing.auto_timer(timing.FILE())

    # get options
    infile = args.input
    outfile = None
    if args.output is not None:
        outfile = args.output
    else:
        inmat = re.match(r'(.*)\.fits', infile)
        if inmat is None:
            print("input file should have .fits extension")
            sys.exit(1)
        inroot = inmat.group(1)
        outfile = "{}_rcond.fits".format(inroot)

    # We need to read the header to get the size of the matrix.
    # This would be a trivial function call in astropy.fits or
    # fitsio, but we don't want to bring in a whole new dependency
    # just for that.  Instead, we open the file with healpy in memmap
    # mode so that nothing is actually read except the header.
    nside = 0
    nnz = 0
    if comm.rank == 0:
        fake, head = hp.read_map(infile, h=True, memmap=True)
        for key, val in head:
            if key == 'NSIDE':
                nside = int(val)
            if key == 'TFIELDS':
                nnz = int(val)
    nside = comm.bcast(nside, root=0)
    nnz = comm.bcast(nnz, root=0)

    npix = 12 * nside**2
    subnside = int(nside / 16)
    if subnside == 0:
        subnside = 1
    subnpix = 12 * subnside**2
    nsubmap = int(npix / subnpix)

    # divide the submaps as evenly as possible among processes
    dist = toast.distribute_uniform(nsubmap, comm.size)
    local = np.arange(dist[comm.rank][0],
                      dist[comm.rank][0] + dist[comm.rank][1])

    if comm.rank == 0:
        if os.path.isfile(outfile):
            os.remove(outfile)
    comm.barrier()

    # create the covariance and inverse condition number map
    cov = tm.DistPixels(comm=comm, dtype=np.float64, size=npix, nnz=nnz,
                        submap=subnpix, local=local)

    # read the covariance
    cov.read_healpix_fits(infile)

    # every process computes its local piece
    rcond = tm.covariance_rcond(cov)

    # write the map
    rcond.write_healpix_fits(outfile)
def exec(self, data):
    """ Generate atmosphere timestreams.

    This iterates over all observations and detectors and generates
    the atmosphere timestreams.

    Args:
        data (toast.Data): The distributed data.
    """
    autotimer = timing.auto_timer(type(self).__name__)
    group = data.comm.group
    for obs in data.obs:
        try:
            obsname = obs["name"]
        except Exception:
            obsname = "observation"
        prefix = "{} : {} : ".format(group, obsname)
        tod = self._get_from_obs("tod", obs)
        comm = tod.mpicomm
        site = self._get_from_obs("site_id", obs)
        weather = self._get_from_obs("weather", obs)
        # Get the observation time span and initialize the weather
        # object if one is provided.
        times = tod.local_times()
        tmin = times[0]
        tmax = times[-1]
        tmin_tot = comm.allreduce(tmin, op=MPI.MIN)
        tmax_tot = comm.allreduce(tmax, op=MPI.MAX)
        weather.set(site, self._realization, tmin_tot)
        key1, key2, counter1, counter2 = self._get_rng_keys(obs)
        absorption = self._get_absorption_and_loading(obs)
        cachedir = self._get_cache_dir(obs, comm)
        comm.Barrier()
        if comm.rank == 0:
            print(prefix + "Setting up atmosphere simulation",
                  flush=self._flush)
        comm.Barrier()
        # Cache the output common flags
        common_ref = tod.local_common_flags(self._common_flag_name)
        scan_range = self._get_scan_range(obs, comm)
        # Loop over the time span in "wind_time"-sized chunks.
        # wind_time is intended to reflect the correlation length
        # in the atmospheric noise.
        if self._report_timing:
            comm.Barrier()
            tstart = MPI.Wtime()
        tmin = tmin_tot
        istart = 0
        counter1start = counter1
        while tmin < tmax_tot:
            if self._report_timing:
                comm.Barrier()
                tstart = MPI.Wtime()
            comm.Barrier()
            if comm.rank == 0:
                print(prefix + "Instantiating the atmosphere for t = {}"
                      "".format(tmin - tmin_tot), flush=self._flush)
            istart, istop, tmax = self._get_time_range(
                tmin, istart, times, tmax_tot, common_ref, tod, weather)
            ind = slice(istart, istop)
            nind = istop - istart
            comm.Barrier()
            rmin = 0
            rmax = 100
            scale = 10
            counter2start = counter2
            counter1 = counter1start
            xstart, ystart, zstart = self._xstep, self._ystep, self._zstep
            while rmax < 100000:
                sim, counter2 = self._simulate_atmosphere(
                    weather, scan_range, tmin, tmax, comm, key1, key2,
                    counter1, counter2start, cachedir, prefix,
                    tmin_tot, tmax_tot, rmin, rmax)
                if self._verbosity > 15:
                    self._plot_snapshots(sim, prefix, obsname, scan_range,
                                         tmin, tmax, comm, rmin, rmax)
                self._observe_atmosphere(sim, tod, comm, prefix,
                                         common_ref, istart, nind, ind,
                                         scan_range, times, absorption)
                rmin = rmax
                rmax *= scale
                self._xstep *= np.sqrt(scale)
                self._ystep *= np.sqrt(scale)
                self._zstep *= np.sqrt(scale)
                counter1 += 1
            if self._verbosity > 5:
                self._save_tod(obsname, tod, times, istart, nind, ind,
                               comm, common_ref)
            self._xstep, self._ystep, self._zstep = xstart, ystart, zstart
            tmin = tmax
    if self._report_timing:
        comm.Barrier()
        tstop = MPI.Wtime()
        if comm.rank == 0:
            print(prefix + "Simulated and observed atmosphere in "
                  "{:.2f} s".format(tstop - tstart), flush=self._flush)
    return
def binned_map(data, npix, subnpix, out="."):
    """ Make a binned map.

    This function should exist in toast, but all the pieces do.  If we
    are doing MCs we break these operations into two pieces and only
    generate the noise weighted map each realization.
    """
    # The global MPI communicator
    cworld = data.comm.comm_world

    # construct distributed maps to store the covariance,
    # noise weighted map, and hits
    invnpp = tm.DistPixels(data, nnz=6, dtype=np.float64)
    hits = tm.DistPixels(data, nnz=1, dtype=np.int64)
    zmap = tm.DistPixels(data, nnz=3, dtype=np.float64)
    invnpp.data.fill(0.0)
    hits.data.fill(0)
    zmap.data.fill(0.0)

    start = MPI.Wtime()
    if cworld.rank == 0:
        print("Accumulating hits and N_pp'^-1 ...", flush=True)

    # Setting detweights to None gives uniform weighting.
    build_invnpp = tm.OpAccumDiag(
        detweights=None, invnpp=invnpp, hits=hits, zmap=zmap,
        name="signal", pixels="pixels", weights="weights",
        common_flag_name="flags_common", common_flag_mask=1)
    build_invnpp.exec(data)
    invnpp.allreduce()
    hits.allreduce()
    zmap.allreduce()

    cworld.barrier()
    stop = MPI.Wtime()
    elapsed = stop - start
    if cworld.rank == 0:
        print("Building hits and N_pp^-1 took {:.3f} s".format(elapsed),
              flush=True)

    if cworld.rank == 0:
        print("Writing hits and N_pp'^-1 ...", flush=True)
    hits.write_healpix_fits(os.path.join(out, "hits.fits"))
    invnpp.write_healpix_fits(os.path.join(out, "invnpp.fits"))
    start = stop

    if cworld.rank == 0:
        print("Inverting N_pp'^-1 ...", flush=True)
    # invert it
    tm.covariance_invert(invnpp, 1.0e-3)

    cworld.barrier()
    stop = MPI.Wtime()
    elapsed = stop - start
    if cworld.rank == 0:
        print("Inverting N_pp^-1 took {:.3f} s".format(elapsed),
              flush=True)

    if cworld.rank == 0:
        print("Writing N_pp' ...", flush=True)
    invnpp.write_healpix_fits(os.path.join(out, "npp.fits"))
    start = stop

    if cworld.rank == 0:
        print("Computing binned map ...", flush=True)
    tm.covariance_apply(invnpp, zmap)

    cworld.barrier()
    stop = MPI.Wtime()
    elapsed = stop - start
    if cworld.rank == 0:
        print("Computing binned map took {:.3f} s".format(elapsed),
              flush=True)
    start = stop

    if cworld.rank == 0:
        print("Writing binned map ...", flush=True)
    zmap.write_healpix_fits(os.path.join(out, "binned.fits"))
    if cworld.rank == 0:
        print("Binned map done", flush=True)
    return
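
# A toy, dense-numpy version of the binned-map equation that OpAccumDiag,
# covariance_invert and covariance_apply implement in distributed form:
#   m = (P^T N^-1 P)^-1 P^T N^-1 d
# shown here for a single pixel observed many times with IQU weights.
# Requires enough spread in crossing angles psi for the 3x3 to be
# invertible.
def _example_bin_one_pixel(psi, d, detweight=1.0):
    import numpy as np
    # pointing weights for I, Q, U at crossing angles psi
    P = np.column_stack([np.ones_like(psi), np.cos(2 * psi),
                         np.sin(2 * psi)])
    invnpp = detweight * np.dot(P.T, P)     # 3x3 inverse pixel covariance
    zmap = detweight * np.dot(P.T, d)       # noise-weighted map
    return np.linalg.solve(invnpp, zmap)    # [I, Q, U]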
def measure_correction(self, fn=None, gain=None):
    """ Estimate the ADC correction per bin for each 4K phase.
    """
    self.corrections = []
    if fn is not None and self.rank == 0:
        fn_out = os.path.join(self._out, fn)
        hdulist = [pf.PrimaryHDU()]
    else:
        fn_out = None
        hdulist = None
    if gain is not None and gain != 0:
        for phase in self.signal_estimate:
            for estimate in phase:
                mean_before = np.mean(estimate)
                estimate[:] = (estimate - mean_before) / gain + mean_before
    for phase in range(self._nphase4k):
        tstart_phase = MPI.Wtime()
        if self.rank == 0:
            print('Estimating ADC correction for phase {}'.format(phase),
                  flush=True)
        signal_ADU = self.signal_ADU[phase]
        signal_estimate = self.signal_estimate[phase]
        signal_offset = self.signal_offset[phase]
        nl = []
        # Additive offset between input and output signal
        for sig_ADU, estimate in zip(signal_ADU, signal_estimate):
            nl.append(sig_ADU - estimate)
        bins = []
        for sig in signal_estimate:
            bins.append(np.floor(sig / self._wbin).astype(np.int32))
        my_binmin = 99999999
        my_binmax = -99999999
        for binvec in bins:
            my_binmin = min(my_binmin, np.amin(binvec))
            my_binmax = max(my_binmax, np.amax(binvec))
        binmin = self.comm.allreduce(my_binmin, MPI.MIN)
        binmax = self.comm.allreduce(my_binmax, MPI.MAX)
        bin_offset = binmin
        nbin = binmax - binmin + 1
        if nbin < 1:
            raise RuntimeError(
                '{} : ERROR in measure_correction: No valid bins '
                'for phase = {} / {}. binmin, binmax = {}, {}, '
                'my_binmin, my_binmax = {}, {}, nbin = {}'.format(
                    self.rank, phase + 1, self._nphase4k, binmin, binmax,
                    my_binmin, my_binmax, nbin))
        for binvec in bins:
            binvec -= bin_offset
        my_rings = []
        for binvec, sampvec, offset in zip(bins, nl, signal_offset):
            hitmap = np.zeros(nbin, dtype=np.int32)
            sigmap = np.zeros(nbin, dtype=np.float64)
            destripe_tools.fast_hit_binning(binvec, hitmap)
            destripe_tools.fast_binning(sampvec, binvec, sigmap)
            bin_centers = (0.5 + np.arange(nbin) + bin_offset) * self._wbin
            hit = hitmap != 0
            hitmap = hitmap[hit].copy()
            bin_centers = bin_centers[hit].copy()
            sigmap = sigmap[hit].copy()
            sigmap /= hitmap
            # Fit a line to the ring measurement of NL
            coeff, cov = self.fit_line(bin_centers, sigmap, hitmap)
            slope = coeff[1]
            slope_err = np.sqrt(cov[1, 1])
            my_rings.append(
                (bin_centers, sigmap, hitmap, offset, slope, slope_err))
        # The ring offset optimization is done serially until we can
        # find a nonlinear parallel solver
        rings = self.comm.gather(my_rings, root=0)
        if self.rank == 0:
            # Flatten the ring list
            rings = [ring for ringlist in rings for ring in ringlist]
            if fn_out.endswith('.fits'):
                fn = fn_out.replace(
                    '.fits', '_ring_data_phase{:02}.pck'.format(phase))
            else:
                fn = fn_out + '_ring_data_phase{:02}.pck'.format(phase)
            pickle.dump(rings, open(fn, 'wb'), protocol=2)
            print('ADC ring data saved in {}'.format(fn), flush=True)
            polyorder = 10
            ring_rms = []
            for ring in rings:
                ring_rms.append(np.std(ring[1]))
            ring_rms = np.array(ring_rms)
            outliers = np.isnan(ring_rms)
            if ring_rms.size > 100:
                for i in range(10):
                    good = np.logical_not(outliers)
                    mn = np.mean(ring_rms[good])
                    rms = np.std(ring_rms[good])
                    bad = np.abs(ring_rms - mn) > 4 * rms
                    bad[outliers] = False
                    nbad = np.sum(bad)
                    if nbad == 0:
                        break
                    outliers[bad] = True
                    nout = np.sum(outliers)
                    print('iter = {}: Discarding {} outlier RMS out of '
                          '{}. Total outliers: {}.'.format(
                              i, nbad, ring_rms.size, nout))
            ring_len = []
            for ring in rings:
                ring_len.append(ring[0].size)
            ring_len = np.array(ring_len)
            if ring_len.size > 100:
                for i in range(10):
                    good = np.logical_not(outliers)
                    smooth = flagged_running_average(ring_len, outliers,
                                                     100)
                    rms = np.std((ring_len - smooth)[good])
                    bad = np.abs(ring_len - smooth) > 4 * rms
                    bad[outliers] = False
                    nbad = np.sum(bad)
                    if nbad == 0:
                        break
                    outliers[bad] = True
                    nout = np.sum(outliers)
                    print('iter = {}: Discarding {} outlier lengths out '
                          'of {}. Total outliers: {}.'.format(
                              i, nbad, ring_len.size, nout))
            # Build an initial correction by integrating the
            # ring-by-ring derivatives (slopes)
            offset = []
            deriv = []
            err = []
            for ring in rings:
                offset_ring, deriv_ring, deriv_err = ring[3:6]
                offset.append(offset_ring)
                deriv.append(deriv_ring)
                err.append(deriv_err)
            offset = np.hstack(offset)
            deriv = np.hstack(deriv)
            err = np.hstack(err)
            if offset.size > 100:
                for i in range(10):
                    good = np.logical_not(outliers)
                    smooth = flagged_running_average(deriv, outliers, 100)
                    rms = np.std((deriv - smooth)[good])
                    bad = np.abs(deriv - smooth) > 4 * rms
                    bad[outliers] = False
                    nbad = np.sum(bad)
                    if nbad == 0:
                        break
                    outliers[bad] = True
                    nout = np.sum(outliers)
                    print('iter = {}: Discarding {} outlier slopes out of '
                          '{}. Total outliers: {}.'.format(
                              i, nbad, offset.size, nout))
            good = np.logical_not(outliers)
            offset = offset[good]
            deriv = deriv[good]
            ind = np.argsort(offset)
            offset = offset[ind]
            deriv = deriv[ind]
            total_offset = np.zeros(offset.size, dtype=np.float64)
            for i in range(offset.size - 1):
                total_offset[i + 1] = total_offset[i] + deriv[i] * (
                    offset[i + 1] - offset[i])
            w = offset[-1] - offset[0]
            # Domain will shift with the offset correction
            domain = [offset[0] - w / 10, offset[-1] + w / 10]
            polyorder = min(polyorder, offset.size - 1)
            # Omit the offset term
            x0 = np.polynomial.legendre.Legendre.fit(
                offset, total_offset, polyorder, domain=domain).coef[1:]
            # Collapse pointing periods that have the same offset
            collapsed_ring_lists = {}
            wbin_offset = (domain[1] - domain[0]) / 1000
            for (ring, outlier) in zip(rings, outliers):
                if outlier:
                    continue
                bin_centers, sigmap, hitmap, offset = ring[:4]
                offset_bin = int(np.floor(offset / wbin_offset))
                if offset_bin not in collapsed_ring_lists:
                    collapsed_ring_lists[offset_bin] = []
                collapsed_ring_lists[offset_bin].append(ring)
            collapsed_rings = []
            for offset_bin, offset_rings in collapsed_ring_lists.items():
                # co-add the rings that have the same offset
                center_min = offset_rings[0][0][0]
                center_max = offset_rings[0][0][-1]
                for ring in offset_rings[1:]:
                    bin_centers = ring[0]
                    center_min = min(center_min, bin_centers[0])
                    center_max = max(center_max, bin_centers[-1])
                nbin = int(
                    np.rint((center_max - center_min) / self._wbin)) + 1
                all_bin_centers = center_min + np.arange(nbin) * self._wbin
                all_sigmap = np.zeros(nbin, dtype=np.float64)
                all_hitmap = np.zeros(nbin, dtype=np.float64)
                for ring in offset_rings:
                    bin_centers, sigmap, hitmap, offset = ring[:4]
                    ind = np.searchsorted(all_bin_centers, bin_centers,
                                          side='left')
                    all_hitmap[ind] += hitmap
                    all_sigmap[ind] += sigmap * hitmap
                good = all_hitmap != 0
                all_bin_centers = all_bin_centers[good]
                all_sigmap = all_sigmap[good]
                all_hitmap = all_hitmap[good]
                all_sigmap /= all_hitmap
                collapsed_rings.append(
                    (all_bin_centers, all_sigmap, all_hitmap,
                     (offset_bin + .5) * wbin_offset))
            # Collect the ring-by-ring vectors into single vectors
            all_bin_centers = []
            all_sigmaps = []
            all_hitmaps = []
            all_offsets = []
            all_ranges = []
            istart = 0
            for ring in collapsed_rings:
                bin_centers, sigmap, hitmap, offset = ring
                all_bin_centers.append(bin_centers)
                all_sigmaps.append(sigmap)
                all_hitmaps.append(hitmap)
                all_offsets.append(offset)
                all_ranges.append(slice(istart, istart + sigmap.size))
                istart += sigmap.size
            all_bin_centers = np.hstack(all_bin_centers).astype(np.float64)
            all_sigmaps = np.hstack(all_sigmaps).astype(np.float64)
            all_hitmaps = np.hstack(all_hitmaps).astype(np.float64)
            all_offsets = np.hstack(all_offsets).astype(np.float64)

            def get_nl(param, all_bin_centers, all_sigmaps, all_hitmaps,
                       all_offsets, domain):
                # Add zeroth term, this is not a free parameter
                pfull = np.append([0], param)
                get_offset_nl = np.polynomial.legendre.Legendre(
                    pfull, domain=domain)
                # Adjust the zeroth term to minimize offset
                x = np.linspace(domain[0], domain[1], 100)
                get_offset_nl.coef[0] = -np.median(get_offset_nl(x))
                all_bins = all_bin_centers.copy()
                all_nl = all_sigmaps.copy()
                for ind, off in zip(all_ranges, all_offsets):
                    delta = get_offset_nl(off)
                    all_bins[ind] -= delta
                    all_nl[ind] += delta
                all_bins = np.floor(all_bins / self._wbin).astype(np.int64)
                all_hits = all_hitmaps
                binmin = np.amin(all_bins)
                binmax = np.amax(all_bins)
                nbin = binmax - binmin + 1
                all_bins -= binmin
                hitmap = np.zeros(nbin, dtype=np.float64)
                nlmap = np.zeros(nbin, dtype=np.float64)
                destripe_tools.fast_binning(
                    all_hits, all_bins.astype(np.int32), hitmap)
                destripe_tools.fast_binning(
                    all_nl * all_hits, all_bins.astype(np.int32), nlmap)
                hit = hitmap != 0
                nlmap[hit] /= hitmap[hit]
                bin_centers = (0.5 + np.arange(nlmap.size) + binmin) \
                    * self._wbin
                nlmap_offset = get_offset_nl(bin_centers)
                return (nlmap, nlmap_offset, hitmap, all_nl, all_hits,
                        all_bins, binmin)

            def get_nl_resid(param, all_bin_centers, all_sigmaps,
                             all_hitmaps, all_offsets, domain):
                # Measure the residual between signal/estimate
                # difference and a binned+unrolled version of the
                # difference
                (nlmap, nlmap_offset, hitmap, nl, hits, bins,
                 binmin) = get_nl(param, all_bin_centers, all_sigmaps,
                                  all_hitmaps, all_offsets, domain)
                nl_from_map = nlmap[bins]
                nl_from_offset_map = nlmap_offset[bins]
                return np.hstack([(nl - nl_from_map) * np.log(hits),
                                  (nl - nl_from_offset_map)
                                  * np.log(hits)])

            start = MPI.Wtime()
            try:
                xopt, _, infodict, mesg, ierr = scipy.optimize.leastsq(
                    get_nl_resid, x0,
                    args=(all_bin_centers, all_sigmaps, all_hitmaps,
                          all_offsets, domain),
                    full_output=True, Dfun=None, maxfev=1000)
            except Exception as e:
                print('leastsq failed with {}'.format(e))
                raise
            if ierr not in [1, 2, 3, 4]:
                raise RuntimeError('leastsq failed with {}'.format(mesg))
            stop = MPI.Wtime()
            print('Nonlinear optimization finished in {:.2f} s after {} '
                  'evaluations.'.format(stop - start, infodict['nfev']),
                  flush=True)
            print('Uncorrected residual: {}'.format(np.std(
                get_nl_resid(x0 * 0, all_bin_centers, all_sigmaps,
                             all_hitmaps, all_offsets, domain))))
            print('First guess residual: {}'.format(np.std(
                get_nl_resid(x0, all_bin_centers, all_sigmaps,
                             all_hitmaps, all_offsets, domain))))
            print(' Optimized residual: {}'.format(np.std(
                get_nl_resid(xopt, all_bin_centers, all_sigmaps,
                             all_hitmaps, all_offsets, domain))),
                  flush=True)
            nlmap, _, hitmap, nl, _, bins, binmin = get_nl(
                xopt, all_bin_centers, all_sigmaps, all_hitmaps,
                all_offsets, domain)
            bin_centers = (0.5 + np.arange(nlmap.size) + binmin) \
                * self._wbin
            good = hitmap > 100
            nlmap = nlmap[good].copy()
            hitmap = hitmap[good].copy()
            bin_centers = bin_centers[good].copy()
            # Apply a median filter to remove bin-to-bin variation
            # and make the poorly sampled end points usable
            nwin = min(101, (nlmap.size // 8) * 4 + 1)
            good = np.ones(bin_centers.size, dtype=bool)
            good[:nwin] = False
            good[-nwin:] = False
            steps = np.diff(bin_centers)
            step = np.median(steps)
            for i in np.argwhere(steps > nwin * step / 10).ravel():
                good[i - nwin // 2:i + nwin // 2] = False
            bin_centers = scipy.signal.medfilt(bin_centers, nwin)[good]
            nlmap = scipy.signal.medfilt(nlmap, nwin)[good]
        else:
            bin_centers = None
            hitmap = None
            nlmap = None
        bin_centers = self.comm.bcast(bin_centers, root=0)
        hitmap = self.comm.bcast(hitmap, root=0)
        sigmap = self.comm.bcast(nlmap)
        V_in, V_delta = bin_centers, sigmap
        V_out = V_in + V_delta
        # Stretch the last two bins to get a rough extrapolation
        V_out[0] = 0
        V_out[-1] = 1e7
        self.corrections.append([V_out, -V_delta])
        if hdulist is not None:
            phasename = 'phase{:02}'.format(phase)
            cols = []
            cols.append(pf.Column(name='V_out', format='D', array=V_out))
            cols.append(
                pf.Column(name='V_corr', format='D', array=-V_delta))
            hdu = pf.BinTableHDU.from_columns(pf.ColDefs(cols))
            hdu.header['extname'] = phasename
            hdulist.append(hdu)
        tstop_phase = MPI.Wtime()
        if self.rank == 0:
            print('ADC correction for phase {} done in {:.2f} s'.format(
                phase, tstop_phase - tstart_phase), flush=True)
    if hdulist is not None:
        if os.path.isfile(fn_out):
            os.remove(fn_out)
        pf.HDUList(hdulist).writeto(fn_out)
        print('ADC correction saved in {}'.format(fn_out), flush=True)
    return
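
# Sketch of how a correction tabulated above ([V_out, V_corr] per 4K phase)
# could be applied to raw ADU samples: interpolate the correction at the
# observed values and add it.  This helper is hypothetical; the class API
# for applying the correction may differ.
def _example_apply_adc_correction(signal_ADU, V_out, V_corr):
    import numpy as np
    return signal_ADU + np.interp(signal_ADU, V_out, V_corr)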
nside = 256
file_sync = '/Users/reijo/data/PR2/foregroundmaps/' \
    'COM_CompMap_Synchrotron-commander_0256_R2.00.fits'
file_sync_pol = '/Users/reijo/data/PR2/foregroundmaps/' \
    'COM_CompMap_SynchrotronPol-commander_0256_R2.00.fits'
file_freefree = '/Users/reijo/data/PR2/foregroundmaps/' \
    'COM_CompMap_freefree-commander_0256_R2.00.fits'
file_ame = '/Users/reijo/data/PR2/foregroundmaps/' \
    'COM_CompMap_AME-commander_0256_R2.00.fits'
file_dust = '/Users/reijo/data/PR2/foregroundmaps/' \
    'COM_CompMap_dust-commander_0256_R2.00.fits'
file_dust_pol = '/Users/reijo/data/PR2/foregroundmaps/' \
    'COM_CompMap_DustPol-commander_1024_R2.00.fits'

t1 = MPI.Wtime()
skymodel = SkyModel(nside, file_sync, file_sync_pol, file_freefree,
                    file_ame, file_dust, file_dust_pol, MPI.COMM_WORLD)
t2 = MPI.Wtime()
print('{:4} : Initialized sky model in {:.2f} s'.format(
    skymodel.rank, t2 - t1), flush=True)
skymodel.cache.report()

norm = 1e6
amp = 3e2
pol_amp = 5e1
deriv_amp = 10
pol_deriv_amp = 1
freqs = [30, 44, 70, 100, 143, 217, 353]
def load_fp(args, comm):
    start = MPI.Wtime()
    autotimer = timing.auto_timer()
    fp = None
    # Load focalplane information
    nullquat = np.array([0, 0, 0, 1], dtype=np.float64)
    if comm.comm_world.rank == 0:
        if args.fp is None:
            # in this case, create a fake detector at the boresight
            # with a pure white noise spectrum.
            fake = {}
            fake['quat'] = nullquat
            fake['fwhm'] = 30.0
            fake['fknee'] = 0.0
            fake['fmin'] = 1e-9
            fake['alpha'] = 1.0
            fake['NET'] = 1.0
            fake['color'] = 'r'
            fp = {}
            fp['bore1'] = fake
            # Second detector at 22.5 degree polarization angle
            fake2 = {}
            zrot = qa.rotation(ZAXIS, 22.5 * degree)
            fake2['quat'] = qa.mult(fake['quat'], zrot)
            fake2['fwhm'] = 30.0
            fake2['fknee'] = 0.0
            fake2['fmin'] = 1e-9
            fake2['alpha'] = 1.0
            fake2['NET'] = 1.0
            fake2['color'] = 'r'
            fp['bore2'] = fake2
            # Third detector at 45 degree polarization angle
            fake3 = {}
            zrot = qa.rotation(ZAXIS, 45 * degree)
            fake3['quat'] = qa.mult(fake['quat'], zrot)
            fake3['fwhm'] = 30.0
            fake3['fknee'] = 0.0
            fake3['fmin'] = 1e-9
            fake3['alpha'] = 1.0
            fake3['NET'] = 1.0
            fake3['color'] = 'r'
            fp['bore3'] = fake3
            # Fourth detector at 67.5 degree polarization angle
            fake4 = {}
            zrot = qa.rotation(ZAXIS, 67.5 * degree)
            fake4['quat'] = qa.mult(fake['quat'], zrot)
            fake4['fwhm'] = 30.0
            fake4['fknee'] = 0.0
            fake4['fmin'] = 1e-9
            fake4['alpha'] = 1.0
            fake4['NET'] = 1.0
            fake4['color'] = 'r'
            fp['bore4'] = fake4
        else:
            with open(args.fp, 'rb') as p:
                fp = pickle.load(p)
    fp = comm.comm_world.bcast(fp, root=0)
    stop = MPI.Wtime()
    elapsed = stop - start
    if comm.comm_world.rank == 0:
        print('Create focalplane: {:.2f} seconds'.format(stop - start),
              flush=args.flush)
    start = stop
    if args.debug:
        if comm.comm_world.rank == 0:
            outfile = '{}/focalplane.png'.format(args.outdir)
            tt.plot_focalplane(fp, 6, 6, outfile)
    detectors = sorted(fp.keys())
    detweights = {}
    for d in detectors:
        net = fp[d]['NET']
        detweights[d] = 1.0 / (args.samplerate * net * net)
    return fp, detweights
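
# The inverse-variance detector weight computed above: a NET (noise
# equivalent temperature) in K*sqrt(s) gives a per-sample noise variance of
# NET**2 * fsample, hence
#     w = 1 / (fsample * NET**2).
# For example, NET = 1.0 and fsample = 40 Hz yield w = 1 / 40 = 0.025.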
def create_observations(args, comm, fp, all_ces, site):
    """ Simulate constant elevation scans.

    Simulate constant elevation scans at "site" matching entries in
    "all_ces".  Each operational day is assigned to a different process
    group to allow making day maps.
    """
    start = MPI.Wtime()
    autotimer = timing.auto_timer()
    data = toast.Data(comm)
    site_name, site_lat, site_lon, site_alt = site
    detectors = sorted(fp.keys())
    detquats = {}
    for d in detectors:
        detquats[d] = fp[d]['quat']
    nces = len(all_ces)
    breaks = []
    do_break = False
    for i in range(nces - 1):
        # If current and next CES are on different days, insert a break
        tz = args.timezone / 24.
        start1 = all_ces[i][3]  # MJD start
        start2 = all_ces[i + 1][3]  # MJD start
        scan1 = all_ces[i][4]
        scan2 = all_ces[i + 1][4]
        if scan1 != scan2 and do_break:
            breaks.append(i + 1)
            do_break = False
            continue
        day1 = int(start1 + tz)
        day2 = int(start2 + tz)
        if day1 != day2:
            if scan1 == scan2:
                # We want an entire CES, even if it crosses the day
                # bound.  Wait until the scan number changes.
                do_break = True
            else:
                breaks.append(i + 1)
    nbreak = len(breaks)
    if nbreak != comm.ngroups - 1:
        raise RuntimeError(
            'Number of observing days ({}) does not match number of '
            'process groups ({}).'.format(nbreak + 1, comm.ngroups))
    groupdist = toast.distribute_uniform(nces, comm.ngroups, breaks=breaks)
    group_firstobs = groupdist[comm.group][0]
    group_numobs = groupdist[comm.group][1]
    # Create the noise model used by all observations
    fmin = {}
    fknee = {}
    alpha = {}
    NET = {}
    rates = {}
    for d in detectors:
        rates[d] = args.samplerate
        fmin[d] = fp[d]['fmin']
        fknee[d] = fp[d]['fknee']
        alpha[d] = fp[d]['alpha']
        NET[d] = fp[d]['NET']
    noise = tt.AnalyticNoise(rate=rates, fmin=fmin, detectors=detectors,
                             fknee=fknee, alpha=alpha, NET=NET)
    for ices in range(group_firstobs, group_firstobs + group_numobs):
        ces = all_ces[ices]
        (CES_start, CES_stop, name, mjdstart, scan, subscan, azmin, azmax,
         el) = ces
        totsamples = int((CES_stop - CES_start) * args.samplerate)
        # create the single TOD for this observation
        try:
            tod = tt.TODGround(
                comm.comm_group, detquats, totsamples,
                detranks=comm.comm_group.size, firsttime=CES_start,
                rate=args.samplerate, site_lon=site_lon,
                site_lat=site_lat, site_alt=site_alt, azmin=azmin,
                azmax=azmax, el=el, scanrate=args.scanrate,
                scan_accel=args.scan_accel, CES_start=None, CES_stop=None,
                sun_angle_min=args.sun_angle_min, coord=args.coord,
                sampsizes=None)
        except RuntimeError as e:
            print('Failed to create the CES scan: {}'.format(e),
                  flush=args.flush)
            return
        # Create the (single) observation
        ob = {}
        ob['name'] = 'CES-{}-{}-{}'.format(name, scan, subscan)
        ob['tod'] = tod
        if len(tod.subscans) > 0:
            ob['intervals'] = tod.subscans
        else:
            raise RuntimeError('{} has no valid intervals'.format(
                ob['name']))
        ob['baselines'] = None
        ob['noise'] = noise
        ob['id'] = int(mjdstart * 10000)
        data.obs.append(ob)
    for ob in data.obs:
        tod = ob['tod']
        tod.free_azel_quats()
    if comm.comm_group.rank == 0:
        print('Group # {:4} has {} observations.'.format(
            comm.group, len(data.obs)), flush=args.flush)
    if len(data.obs) == 0:
        raise RuntimeError('Too many tasks. Every MPI task must '
                           'be assigned to at least one observation.')
    comm.comm_world.barrier()
    stop = MPI.Wtime()
    if comm.comm_world.rank == 0:
        print('Simulated scans in {:.2f} seconds'
              ''.format(stop - start), flush=args.flush)
    return data
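
# Standalone sketch of the day-break logic above: observation breaks are
# inserted wherever the (timezone-shifted) MJD day changes between
# consecutive CESes, except that a CES sharing a scan number with the next
# one is kept whole and the break is deferred until the scan changes.
def _example_day_breaks(mjd_starts, scans, timezone=0):
    tz = timezone / 24.
    breaks = []
    do_break = False
    for i in range(len(mjd_starts) - 1):
        if scans[i] != scans[i + 1] and do_break:
            breaks.append(i + 1)
            do_break = False
            continue
        if int(mjd_starts[i] + tz) != int(mjd_starts[i + 1] + tz):
            if scans[i] == scans[i + 1]:
                do_break = True   # finish the CES before breaking
            else:
                breaks.append(i + 1)
    return breaks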
def main(): if MPI.COMM_WORLD.rank == 0: print("Running with {} processes".format(MPI.COMM_WORLD.size), flush=True) global_start = MPI.Wtime() parser = argparse.ArgumentParser( description="Simulate satellite " "boresight pointing and make a noise map.", fromfile_prefix_chars="@" ) parser.add_argument( "--groupsize", required=False, type=int, default=0, help="size of processor groups used to distribute observations" ) parser.add_argument( "--samplerate", required=False, type=float, default=40.0, help="Detector sample rate (Hz)" ) parser.add_argument( "--starttime", required=False, type=float, default=0.0, help="The overall start time of the simulation" ) parser.add_argument( "--spinperiod", required=False, type=float, default=10.0, help="The period (in minutes) of the rotation about the " "spin axis" ) parser.add_argument( "--spinangle", required=False, type=float, default=30.0, help="The opening angle (in degrees) of the boresight " "from the spin axis" ) parser.add_argument( "--precperiod", required=False, type=float, default=50.0, help="The period (in minutes) of the rotation about the " "precession axis" ) parser.add_argument( "--precangle", required=False, type=float, default=65.0, help="The opening angle (in degrees) of the spin axis " "from the precession axis" ) parser.add_argument( "--hwprpm", required=False, type=float, default=0.0, help="The rate (in RPM) of the HWP rotation" ) parser.add_argument( "--hwpstep", required=False, default=None, help="For stepped HWP, the angle in degrees of each step" ) parser.add_argument( "--hwpsteptime", required=False, type=float, default=0.0, help="For stepped HWP, the the time in seconds between " "steps" ) parser.add_argument( "--obs", required=False, type=float, default=1.0, help="Number of hours in one science observation" ) parser.add_argument( "--gap", required=False, type=float, default=0.0, help="Cooler cycle time in hours between science obs" ) parser.add_argument( "--numobs", required=False, type=int, default=1, help="Number of complete observations" ) parser.add_argument( "--outdir", required=False, default="out", help="Output directory" ) parser.add_argument( "--debug", required=False, default=False, action="store_true", help="Write diagnostics" ) parser.add_argument( "--nside", required=False, type=int, default=64, help="Healpix NSIDE" ) parser.add_argument( "--subnside", required=False, type=int, default=4, help="Distributed pixel sub-map NSIDE" ) parser.add_argument( "--baseline", required=False, type=float, default=60.0, help="Destriping baseline length (seconds)" ) parser.add_argument( "--noisefilter", required=False, default=False, action="store_true", help="Destripe with the noise filter enabled" ) parser.add_argument( "--madam", required=False, default=False, action="store_true", help="If specified, use libmadam for map-making" ) parser.add_argument( "--madampar", required=False, default=None, help="Madam parameter file" ) parser.add_argument('--flush', required=False, default=False, action='store_true', help='Flush every print statement.') parser.add_argument( "--MC_start", required=False, type=int, default=0, help="First Monte Carlo noise realization" ) parser.add_argument( "--MC_count", required=False, type=int, default=1, help="Number of Monte Carlo noise realizations" ) parser.add_argument( "--fp", required=False, default=None, help="Pickle file containing a dictionary of detector properties. 
" "The keys of this dict are the detector names, and each value is also " "a dictionary with keys \"quat\" (4 element ndarray), \"fwhm\" " "(float, arcmin), \"fknee\" (float, Hz), \"alpha\" (float), and \"NET\" " "(float). For optional plotting, the key \"color\" can specify a " "valid matplotlib color string." ) parser.add_argument('--tidas', required=False, default=None, help='Output TIDAS export path') parser.add_argument('--input_map', required=False, help='Input map for signal') parser.add_argument('--input_pysm_model', required=False, help='Comma separated models for on-the-fly PySM ' 'simulation, e.g. s3,d6,f1,a2"') parser.add_argument('--apply_beam', required=False, action='store_true', help='Apply beam convolution to input map with gaussian ' 'beam parameters defined in focalplane') parser.add_argument('--input_dipole', required=False, help='Simulate dipole, possible values are ' 'total, orbital, solar') parser.add_argument('--input_dipole_solar_speed_kms', required=False, help='Solar system speed [km/s]', type=float, default=369.0) parser.add_argument('--input_dipole_solar_gal_lat_deg', required=False, help='Solar system speed galactic latitude [degrees]', type=float, default=48.26) parser.add_argument('--input_dipole_solar_gal_lon_deg', required=False, help='Solar system speed galactic longitude[degrees]', type=float, default=263.99) args = timing.add_arguments_and_parse(parser, timing.FILE(noquotes=True)) autotimer = timing.auto_timer("@{}".format(timing.FILE())) if args.tidas is not None: if not tt.tidas_available: raise RuntimeError("TIDAS not found- cannot export") groupsize = args.groupsize if groupsize == 0: groupsize = MPI.COMM_WORLD.size # This is the 2-level toast communicator. if MPI.COMM_WORLD.size % groupsize != 0: if MPI.COMM_WORLD.rank == 0: print("WARNING: process groupsize does not evenly divide into " "total number of processes", flush=True) comm = toast.Comm(world=MPI.COMM_WORLD, groupsize=groupsize) # get options hwpstep = None if args.hwpstep is not None: hwpstep = float(args.hwpstep) npix = 12 * args.nside * args.nside subnside = args.subnside if subnside > args.nside: subnside = args.nside subnpix = 12 * subnside * subnside start = MPI.Wtime() fp = None # Load focalplane information if comm.comm_world.rank == 0: if args.fp is None: # in this case, create a fake detector at the boresight # with a pure white noise spectrum. fake = {} fake["quat"] = np.array([0.0, 0.0, 1.0, 0.0]) fake["fwhm"] = 30.0 fake["fknee"] = 0.0 fake["alpha"] = 1.0 fake["NET"] = 1.0 fake["color"] = "r" fp = {} fp["bore"] = fake else: with open(args.fp, "rb") as p: fp = pickle.load(p) fp = comm.comm_world.bcast(fp, root=0) stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print("Create focalplane: {:.2f} seconds".format(stop-start), flush=True) start = stop if args.debug: if comm.comm_world.rank == 0: outfile = "{}_focalplane.png".format(args.outdir) set_backend() tt.plot_focalplane(fp, 10.0, 10.0, outfile) # Since we are simulating noise timestreams, we want # them to be contiguous and reproducible over the whole # observation. We distribute data by detector within an # observation, so ensure that our group size is not larger # than the number of detectors we have. 
if groupsize > len(fp.keys()): if comm.comm_world.rank == 0: print("process group is too large for the number of detectors", flush=True) comm.comm_world.Abort() # Detector information from the focalplane detectors = sorted(fp.keys()) detquats = {} detindx = None if "index" in fp[detectors[0]]: detindx = {} for d in detectors: detquats[d] = fp[d]["quat"] if detindx is not None: detindx[d] = fp[d]["index"] # Distribute the observations uniformly groupdist = toast.distribute_uniform(args.numobs, comm.ngroups) # Compute global time and sample ranges of all observations obsrange = tt.regular_intervals(args.numobs, args.starttime, 0, args.samplerate, 3600*args.obs, 3600*args.gap) # Create the noise model used for all observations fmin = {} fknee = {} alpha = {} NET = {} rates = {} for d in detectors: rates[d] = args.samplerate fmin[d] = fp[d]["fmin"] fknee[d] = fp[d]["fknee"] alpha[d] = fp[d]["alpha"] NET[d] = fp[d]["NET"] noise = tt.AnalyticNoise(rate=rates, fmin=fmin, detectors=detectors, fknee=fknee, alpha=alpha, NET=NET) mem_counter = tt.OpMemoryCounter() # The distributed timestream data data = toast.Data(comm) # Every process group creates its observations group_firstobs = groupdist[comm.group][0] group_numobs = groupdist[comm.group][1] for ob in range(group_firstobs, group_firstobs + group_numobs): tod = tt.TODSatellite( comm.comm_group, detquats, obsrange[ob].samples, firsttime=obsrange[ob].start, rate=args.samplerate, spinperiod=args.spinperiod, spinangle=args.spinangle, precperiod=args.precperiod, precangle=args.precangle, detindx=detindx, detranks=comm.group_size ) obs = {} obs["name"] = "science_{:05d}".format(ob) obs["tod"] = tod obs["intervals"] = None obs["baselines"] = None obs["noise"] = noise obs["id"] = ob data.obs.append(obs) stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print("Read parameters, compute data distribution: " "{:.2f} seconds".format(stop-start), flush=True) start = stop # we set the precession axis now, which will trigger calculation # of the boresight pointing. for ob in range(group_numobs): curobs = data.obs[ob] tod = curobs["tod"] # Get the global sample offset from the original distribution of # intervals obsoffset = obsrange[group_firstobs + ob].first # Constantly slewing precession axis degday = 360.0 / 365.25 precquat = tt.slew_precession_axis(nsim=tod.local_samples[1], firstsamp=obsoffset, samplerate=args.samplerate, degday=degday) tod.set_prec_axis(qprec=precquat) stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print("Construct boresight pointing: " "{:.2f} seconds".format(stop-start), flush=True) start = stop # make a Healpix pointing matrix. 
pointing = tt.OpPointingHpix(nside=args.nside, nest=True, mode="IQU", hwprpm=args.hwprpm, hwpstep=hwpstep, hwpsteptime=args.hwpsteptime) pointing.exec(data) comm.comm_world.barrier() stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print("Pointing generation took {:.3f} s".format(elapsed), flush=True) start = stop localpix, localsm, subnpix = get_submaps(args, comm, data) signalname = "signal" if args.input_pysm_model: simulate_sky_signal(args, comm, data, mem_counter, [fp], subnpix, localsm, signalname=signalname) if args.input_dipole: if comm.comm_world.rank == 0: print("Simulating dipole", flush=True) op_sim_dipole = tt.OpSimDipole(mode=args.input_dipole, solar_speed=args.input_dipole_solar_speed_kms, solar_gal_lat=args.input_dipole_solar_gal_lat_deg, solar_gal_lon=args.input_dipole_solar_gal_lon_deg, out=signalname, keep_quats=True, keep_vel=False, subtract=False, coord="G", freq=0, # we could use frequency for quadrupole correction flag_mask=255, common_flag_mask=255) op_sim_dipole.exec(data) # Mapmaking. For purposes of this simulation, we use detector noise # weights based on the NET (white noise level). If the destriping # baseline is too long, this will not be the best choice. detweights = {} for d in detectors: net = fp[d]["NET"] detweights[d] = 1.0 / (args.samplerate * net * net) if not args.madam: if comm.comm_world.rank == 0: print("Not using Madam, will only make a binned map!", flush=True) # get locally hit pixels lc = tm.OpLocalPixels() localpix = lc.exec(data) # find the locally hit submaps. localsm = np.unique(np.floor_divide(localpix, subnpix)) # construct distributed maps to store the covariance, # noise weighted map, and hits invnpp = tm.DistPixels(comm=comm.comm_world, size=npix, nnz=6, dtype=np.float64, submap=subnpix, local=localsm) hits = tm.DistPixels(comm=comm.comm_world, size=npix, nnz=1, dtype=np.int64, submap=subnpix, local=localsm) zmap = tm.DistPixels(comm=comm.comm_world, size=npix, nnz=3, dtype=np.float64, submap=subnpix, local=localsm) # compute the hits and covariance once, since the pointing and noise # weights are fixed.
invnpp.data.fill(0.0) hits.data.fill(0) build_invnpp = tm.OpAccumDiag(detweights=detweights, invnpp=invnpp, hits=hits) build_invnpp.exec(data) invnpp.allreduce() hits.allreduce() comm.comm_world.barrier() stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print("Building hits and N_pp^-1 took {:.3f} s".format(elapsed), flush=True) start = stop hits.write_healpix_fits("{}_hits.fits".format(args.outdir)) invnpp.write_healpix_fits("{}_invnpp.fits".format(args.outdir)) comm.comm_world.barrier() stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print("Writing hits and N_pp^-1 took {:.3f} s".format(elapsed), flush=True) start = stop # invert it tm.covariance_invert(invnpp, 1.0e-3) comm.comm_world.barrier() stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print("Inverting N_pp^-1 took {:.3f} s".format(elapsed), flush=True) start = stop invnpp.write_healpix_fits("{}_npp.fits".format(args.outdir)) comm.comm_world.barrier() stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print("Writing N_pp took {:.3f} s".format(elapsed), flush=True) start = stop # in debug mode, print out data distribution information if args.debug: handle = None if comm.comm_world.rank == 0: handle = open("{}_distdata.txt".format(args.outdir), "w") data.info(handle) if comm.comm_world.rank == 0: handle.close() comm.comm_world.barrier() stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print("Dumping debug data distribution took " "{:.3f} s".format(elapsed), flush=True) start = stop mcstart = start # Loop over Monte Carlos firstmc = int(args.MC_start) nmc = int(args.MC_count) for mc in range(firstmc, firstmc+nmc): # create output directory for this realization outpath = "{}_{:03d}".format(args.outdir, mc) if comm.comm_world.rank == 0: if not os.path.isdir(outpath): os.makedirs(outpath) comm.comm_world.barrier() stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print("Creating output dir {:04d} took {:.3f} s".format(mc, elapsed), flush=True) start = stop # clear all signal data from the cache, so that we can generate # new noise timestreams. tod.cache.clear("tot_signal_.*") # simulate noise nse = tt.OpSimNoise(out="tot_signal", realization=mc) nse.exec(data) # add sky signal add_sky_signal(args, comm, data, totalname="tot_signal", signalname=signalname) if mc == firstmc: # For the first realization, optionally export the # timestream data to a TIDAS volume. 
if args.tidas is not None: from toast.tod.tidas import OpTidasExport tidas_path = os.path.abspath(args.tidas) export = OpTidasExport(tidas_path, name="tot_signal") export.exec(data) comm.comm_world.barrier() stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print(" Noise simulation {:04d} took {:.3f} s".format(mc, elapsed), flush=True) start = stop zmap.data.fill(0.0) build_zmap = tm.OpAccumDiag(zmap=zmap, name="tot_signal", detweights=detweights) build_zmap.exec(data) zmap.allreduce() comm.comm_world.barrier() stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print(" Building noise weighted map {:04d} took {:.3f} s".format(mc, elapsed), flush=True) start = stop tm.covariance_apply(invnpp, zmap) comm.comm_world.barrier() stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print(" Computing binned map {:04d} took {:.3f} s".format(mc, elapsed), flush=True) start = stop zmap.write_healpix_fits(os.path.join(outpath, "binned.fits")) comm.comm_world.barrier() stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print(" Writing binned map {:04d} took {:.3f} s".format(mc, elapsed), flush=True) elapsed = stop - mcstart if comm.comm_world.rank == 0: print(" Mapmaking {:04d} took {:.3f} s".format(mc, elapsed), flush=True) start = stop else: # Set up MADAM map making. pars = {} cross = args.nside // 2 pars["temperature_only"] = "F" pars["force_pol"] = "T" pars["kfirst"] = "T" pars["concatenate_messages"] = "T" pars["write_map"] = "T" pars["write_binmap"] = "T" pars["write_matrix"] = "T" pars["write_wcov"] = "T" pars["write_hits"] = "T" pars["nside_cross"] = cross pars["nside_submap"] = subnside if args.madampar is not None: pat = re.compile(r"\s*(\S+)\s*=\s*(\S+(\s+\S+)*)\s*") comment = re.compile(r"^#.*") with open(args.madampar, "r") as f: for line in f: if comment.match(line) is None: result = pat.match(line) if result is not None: key, value = result.group(1), result.group(2) pars[key] = value pars["base_first"] = args.baseline pars["nside_map"] = args.nside if args.noisefilter: pars["kfilter"] = "T" else: pars["kfilter"] = "F" pars["fsample"] = args.samplerate # Loop over Monte Carlos firstmc = int(args.MC_start) nmc = int(args.MC_count) for mc in range(firstmc, firstmc + nmc): # clear all total signal data from the cache, so that we can generate # new noise timestreams.
tod.cache.clear("tot_signal_.*") # simulate noise nse = tt.OpSimNoise(out="tot_signal", realization=mc) nse.exec(data) # add sky signal add_sky_signal(args, comm, data, totalname="tot_signal", signalname=signalname) comm.comm_world.barrier() stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print("Noise simulation took {:.3f} s".format(elapsed), flush=True) start = stop # create output directory for this realization pars["path_output"] = "{}_{:03d}".format(args.outdir, mc) if comm.comm_world.rank == 0: if not os.path.isdir(pars["path_output"]): os.makedirs(pars["path_output"]) # in debug mode, print out data distribution information if args.debug: handle = None if comm.comm_world.rank == 0: handle = open(os.path.join(pars["path_output"], "distdata.txt"), "w") data.info(handle) if comm.comm_world.rank == 0: handle.close() madam = tm.OpMadam(params=pars, detweights=detweights, name="tot_signal") madam.exec(data) comm.comm_world.barrier() stop = MPI.Wtime() elapsed = stop - start if comm.comm_world.rank == 0: print("Mapmaking took {:.3f} s".format(elapsed), flush=True) comm.comm_world.barrier() stop = MPI.Wtime() elapsed = stop - global_start if comm.comm_world.rank == 0: print("Total Time: {:.2f} seconds".format(elapsed), flush=True)
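# NOTE (editor): for reference, a minimal sketch of building the focalplane
# pickle that the "--fp" option above expects. The detector name, file name,
# and numeric values below are invented for illustration; only the dictionary
# layout (keys "quat", "fwhm", "fknee", "fmin", "alpha", "NET" and the
# optional "color") follows the help text and the focalplane code above.
def write_example_focalplane(path="example_focalplane.pkl"):
    import pickle
    import numpy as np
    fp = {
        "0A": {
            "quat": np.array([0.0, 0.0, 1.0, 0.0]),  # detector quaternion
            "fwhm": 30.0,    # beam FWHM [arcmin]
            "fknee": 0.02,   # 1/f knee frequency [Hz]
            "fmin": 1.0e-5,  # low-frequency cutoff [Hz]
            "alpha": 1.0,    # 1/f spectral slope
            "NET": 5.0e-5,   # noise equivalent temperature [K*sqrt(s)]
            "color": "b",    # optional, used only for plotting
        },
    }
    with open(path, "wb") as f:
        pickle.dump(fp, f)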
def build_npp(args, comm, data, localsm, subnpix, detweights, flag_name, common_flag_name): """ Build pixel-pixel noise covariance matrices. """ if not args.skip_bin: if comm.comm_world.rank == 0: print('Preparing distributed map', flush=args.flush) start0 = MPI.Wtime() start = start0 autotimer = timing.auto_timer() npix = 12 * args.nside**2 # construct distributed maps to store the covariance, # noise weighted map, and hits invnpp = tm.DistPixels(comm=comm.comm_world, size=npix, nnz=6, dtype=np.float64, submap=subnpix, local=localsm) invnpp.data.fill(0.0) hits = tm.DistPixels(comm=comm.comm_world, size=npix, nnz=1, dtype=np.int64, submap=subnpix, local=localsm) hits.data.fill(0) zmap = tm.DistPixels(comm=comm.comm_world, size=npix, nnz=3, dtype=np.float64, submap=subnpix, local=localsm) comm.comm_world.barrier() stop = MPI.Wtime() if comm.comm_world.rank == 0: print(' - distobjects initialized in {:.3f} s' ''.format(stop - start), flush=args.flush) start = stop invnpp_group = None hits_group = None zmap_group = None if comm.comm_group.size < comm.comm_world.size: invnpp_group = tm.DistPixels(comm=comm.comm_group, size=npix, nnz=6, dtype=np.float64, submap=subnpix, local=localsm) invnpp_group.data.fill(0.0) hits_group = tm.DistPixels(comm=comm.comm_group, size=npix, nnz=1, dtype=np.int64, submap=subnpix, local=localsm) hits_group.data.fill(0) zmap_group = tm.DistPixels(comm=comm.comm_group, size=npix, nnz=3, dtype=np.float64, submap=subnpix, local=localsm) comm.comm_group.barrier() stop = MPI.Wtime() if comm.comm_group.rank == 0: print(' - group distobjects initialized in {:.3f} s' ''.format(stop - start), flush=args.flush) start = stop # compute the hits and covariance once, since the pointing and noise # weights are fixed. build_invnpp = tm.OpAccumDiag(detweights=detweights, invnpp=invnpp, hits=hits, flag_name=flag_name, common_flag_name=common_flag_name, common_flag_mask=args.common_flag_mask) build_invnpp.exec(data) comm.comm_world.barrier() stop = MPI.Wtime() if comm.comm_world.rank == 0: print(' - distobjects accumulated in {:.3f} s' ''.format(stop - start), flush=args.flush) start = stop invnpp.allreduce() if not args.skip_hits: hits.allreduce() comm.comm_world.barrier() stop = MPI.Wtime() if comm.comm_world.rank == 0: print(' - distobjects reduced in {:.3f} s'.format(stop - start), flush=args.flush) start = stop if invnpp_group is not None: build_invnpp_group = tm.OpAccumDiag( detweights=detweights, invnpp=invnpp_group, hits=hits_group, flag_name=flag_name, common_flag_name=common_flag_name, common_flag_mask=args.common_flag_mask) build_invnpp_group.exec(data) comm.comm_group.barrier() stop = MPI.Wtime() if comm.comm_group.rank == 0: print(' - group distobjects accumulated in {:.3f} s' ''.format(stop - start), flush=args.flush) start = stop invnpp_group.allreduce() if not args.skip_hits: hits_group.allreduce() comm.comm_group.barrier() stop = MPI.Wtime() if comm.comm_group.rank == 0: print(' - group distobjects reduced in {:.3f} s' ''.format(stop - start), flush=args.flush) start = stop if not args.skip_hits: fn = '{}/hits.fits'.format(args.outdir) if args.zip: fn += '.gz' hits.write_healpix_fits(fn) comm.comm_world.barrier() stop = MPI.Wtime() if comm.comm_world.rank == 0: print(' - Writing hit map to {} took {:.3f} s' ''.format(fn, stop - start), flush=args.flush) start = stop del hits if hits_group is not None: if not args.skip_hits: fn = '{}/hits_group_{:04}.fits'.format(args.outdir, comm.group) if args.zip: fn += '.gz' hits_group.write_healpix_fits(fn) 
comm.comm_group.barrier() stop = MPI.Wtime() if comm.comm_group.rank == 0: print(' - Writing group hit map to {} took {:.3f} s' ''.format(fn, stop - start), flush=args.flush) start = stop del hits_group if not args.skip_hits: fn = '{}/invnpp.fits'.format(args.outdir) if args.zip: fn += '.gz' invnpp.write_healpix_fits(fn) comm.comm_world.barrier() stop = MPI.Wtime() if comm.comm_world.rank == 0: print(' - Writing N_pp^-1 to {} took {:.3f} s' ''.format(fn, stop - start), flush=args.flush) start = stop if not args.skip_hits: if invnpp_group is not None: fn = '{}/invnpp_group_{:04}.fits'.format( args.outdir, comm.group) if args.zip: fn += '.gz' invnpp_group.write_healpix_fits(fn) comm.comm_group.barrier() stop = MPI.Wtime() if comm.comm_group.rank == 0: print(' - Writing group N_pp^-1 to {} took {:.3f} s' ''.format(fn, stop - start), flush=args.flush) start = stop # invert it tm.covariance_invert(invnpp, 1.0e-3) comm.comm_world.barrier() stop = MPI.Wtime() if comm.comm_world.rank == 0: print(' - Inverting N_pp^-1 took {:.3f} s'.format(stop - start), flush=args.flush) start = stop if not args.skip_hits: fn = '{}/npp.fits'.format(args.outdir) if args.zip: fn += '.gz' invnpp.write_healpix_fits(fn) comm.comm_world.barrier() stop = MPI.Wtime() if comm.comm_world.rank == 0: print(' - Writing N_pp to {} took {:.3f} s' ''.format(fn, stop - start), flush=args.flush) start = stop if invnpp_group is not None: tm.covariance_invert(invnpp_group, 1.0e-3) comm.comm_group.barrier() stop = MPI.Wtime() if comm.comm_group.rank == 0: print(' - Inverting group N_pp^-1 took {:.3f} s' ''.format(stop - start), flush=args.flush) start = stop if not args.skip_hits: fn = '{}/npp_group_{:04}.fits'.format(args.outdir, comm.group) if args.zip: fn += '.gz' invnpp_group.write_healpix_fits(fn) comm.comm_group.barrier() stop = MPI.Wtime() if comm.comm_group.rank == 0: print(' - Writing group N_pp to {} took {:.3f} s'.format( fn, stop - start), flush=args.flush) start = stop stop = MPI.Wtime() if comm.comm_group.rank == 0: print('Building Npp took {:.3f} s'.format(stop - start0), flush=args.flush) return invnpp, zmap, invnpp_group, zmap_group, flag_name, common_flag_name
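# NOTE (editor): the invnpp objects above store nnz=6 values per pixel: the
# upper triangle of the symmetric 3x3 (I, Q, U) inverse covariance block.
# Below is a standalone sketch of what covariance_invert does to one such
# block; the rejection rule shown (eigenvalue ratio against the 1.0e-3
# threshold) is an assumption for illustration, not the library's exact
# criterion.
def invert_iqu_block(upper6, threshold=1.0e-3):
    import numpy as np
    m = np.zeros((3, 3))
    m[np.triu_indices(3)] = upper6   # unpack the 6 stored values
    m = m + np.triu(m, 1).T          # restore the symmetric lower triangle
    evals = np.linalg.eigvalsh(m)    # eigenvalues in ascending order
    if evals[0] <= 0.0 or evals[0] / evals[-1] < threshold:
        return np.zeros(6)           # poorly conditioned pixel: zero it out
    return np.linalg.inv(m)[np.triu_indices(3)]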
def main(): if MPI.COMM_WORLD.rank == 0: print("Running with {} processes".format(MPI.COMM_WORLD.size), flush=True) global_start = MPI.Wtime() parser = argparse.ArgumentParser( description="Read existing data and make a simple map.", fromfile_prefix_chars="@", ) parser.add_argument( "--groupsize", required=False, type=int, default=0, help="size of processor groups used to distribute " "observations", ) parser.add_argument( "--hwprpm", required=False, type=float, default=0.0, help="The rate (in RPM) of the HWP rotation", ) parser.add_argument( "--samplerate", required=False, default=100.0, type=float, help="Detector sample rate (Hz)", ) parser.add_argument("--outdir", required=False, default="out", help="Output directory") parser.add_argument("--nside", required=False, type=int, default=64, help="Healpix NSIDE") parser.add_argument( "--subnside", required=False, type=int, default=8, help="Distributed pixel sub-map NSIDE", ) parser.add_argument("--coord", required=False, default="E", help="Sky coordinate system [C,E,G]") parser.add_argument( "--baseline", required=False, type=float, default=60.0, help="Destriping baseline length (seconds)", ) parser.add_argument( "--noisefilter", required=False, default=False, action="store_true", help="Destripe with the noise filter enabled", ) parser.add_argument( "--madam", required=False, default=False, action="store_true", help="If specified, use libmadam for map-making", ) parser.add_argument("--madampar", required=False, default=None, help="Madam parameter file") parser.add_argument( "--polyorder", required=False, type=int, help="Polynomial order for the polyfilter", ) parser.add_argument( "--wbin_ground", required=False, type=float, help="Ground template bin width [degrees]", ) parser.add_argument( "--flush", required=False, default=False, action="store_true", help="Flush every print statement.", ) parser.add_argument("--tidas", required=False, default=None, help="Input TIDAS volume") parser.add_argument("--tidas_detgroup", required=False, default=None, help="TIDAS detector group") parser.add_argument("--spt3g", required=False, default=None, help="Input SPT3G data directory") parser.add_argument( "--spt3g_prefix", required=False, default=None, help="SPT3G data frame file prefix", ) parser.add_argument( "--common_flag_mask", required=False, default=0, type=np.uint8, help="Common flag mask", ) parser.add_argument( "--debug", required=False, default=False, action="store_true", help="Write data distribution info and focalplane plot", ) args = timing.add_arguments_and_parse(parser, timing.FILE(noquotes=True)) autotimer = timing.auto_timer("@{}".format(timing.FILE())) if (args.tidas is not None) and (args.spt3g is not None): raise RuntimeError("Cannot read two datasets!") if (args.tidas is None) and (args.spt3g is None): raise RuntimeError("No dataset specified!") if args.tidas is not None: if not tt.tidas_available: raise RuntimeError("TIDAS not found, cannot load") if args.spt3g is not None: if not tt.spt3g_available: raise RuntimeError("SPT3G not found, cannot load") groupsize = args.groupsize if groupsize == 0: groupsize = MPI.COMM_WORLD.size # Pixelization nside = args.nside npix = 12 * args.nside * args.nside subnside = args.subnside if subnside > nside: subnside = nside subnpix = 12 * subnside * subnside # This is the 2-level toast communicator.
if MPI.COMM_WORLD.size % groupsize != 0: if MPI.COMM_WORLD.rank == 0: print( "WARNING: process groupsize does not evenly divide into " "total number of processes", flush=True, ) comm = toast.Comm(world=MPI.COMM_WORLD, groupsize=groupsize) # Create output directory mtime = MPI.Wtime() if comm.comm_world.rank == 0: if not os.path.isdir(args.outdir): os.makedirs(args.outdir) mtime = elapsed(comm.comm_world, mtime, "Creating output directory") # The distributed timestream data data = None if args.tidas is not None: if args.tidas_detgroup is None: raise RuntimeError("you must specify the detector group") data = tds.load_tidas( comm, comm.group_size, args.tidas, "r", args.tidas_detgroup, tds.TODTidas, group_dets=args.tidas_detgroup, distintervals="chunks", ) if args.spt3g is not None: if args.spt3g_prefix is None: raise RuntimeError("you must specify the frame file prefix") data = s3g.load_spt3g( comm, comm.group_size, args.spt3g, args.spt3g_prefix, s3g.obsweight_spt3g, s3g.TOD3G, ) mtime = elapsed(comm.comm_world, mtime, "Distribute data") # In debug mode, print out data distribution information if args.debug: handle = None if comm.comm_world.rank == 0: handle = open("{}_distdata.txt".format(args.outdir), "w") data.info(handle) if comm.comm_world.rank == 0: handle.close() mtime = elapsed(comm.comm_world, mtime, "Dumping debug data distribution") if comm.comm_world.rank == 0: outfile = "{}_focalplane.png".format(args.outdir) set_backend() # Just plot the dets from the first TOD temptod = data.obs[0]["tod"] # FIXME: change this once we store det info in the metadata. dfwhm = {x: 10.0 for x in temptod.detectors} tt.plot_focalplane(temptod.detoffset(), 10.0, 10.0, outfile, fwhm=dfwhm) comm.comm_world.barrier() mtime = elapsed(comm.comm_world, mtime, "Plotting debug focalplane") # Compute pointing matrix pointing = tt.OpPointingHpix(nside=args.nside, nest=True, mode="IQU", hwprpm=args.hwprpm) pointing.exec(data) mtime = elapsed(comm.comm_world, mtime, "Expand pointing") # Mapmaking. # FIXME: We potentially have a different noise model for every # observation. We need to have both spt3g and tidas format Noise # classes which read the information from disk. Then the mapmaking # operators need to get these noise weights from each observation. detweights = {d: 1.0 for d in data.obs[0]["tod"].detectors} if not args.madam: if comm.comm_world.rank == 0: print("Not using Madam, will only make a binned map!", flush=True) # Filter data if desired if args.polyorder: polyfilter = tt.OpPolyFilter( order=args.polyorder, common_flag_mask=args.common_flag_mask) polyfilter.exec(data) mtime = elapsed(comm.comm_world, mtime, "Polynomial filtering") if args.wbin_ground: groundfilter = tt.OpGroundFilter( wbin=args.wbin_ground, common_flag_mask=args.common_flag_mask) groundfilter.exec(data) mtime = elapsed(comm.comm_world, mtime, "Ground template filtering") # Compute pixel space distribution lc = tm.OpLocalPixels() localpix = lc.exec(data) if localpix is None: raise RuntimeError( "Process {} has no hit pixels. 
Perhaps there are fewer " "detectors than processes in the group?".format( comm.comm_world.rank)) localsm = np.unique(np.floor_divide(localpix, subnpix)) mtime = elapsed(comm.comm_world, mtime, "Compute local submaps") # construct distributed maps to store the covariance, # noise weighted map, and hits mtime = MPI.Wtime() invnpp = tm.DistPixels( comm=comm.comm_world, size=npix, nnz=6, dtype=np.float64, submap=subnpix, local=localsm, ) hits = tm.DistPixels( comm=comm.comm_world, size=npix, nnz=1, dtype=np.int64, submap=subnpix, local=localsm, ) zmap = tm.DistPixels( comm=comm.comm_world, size=npix, nnz=3, dtype=np.float64, submap=subnpix, local=localsm, ) # compute the hits and covariance. invnpp.data.fill(0.0) hits.data.fill(0) build_invnpp = tm.OpAccumDiag( detweights=detweights, invnpp=invnpp, hits=hits, common_flag_mask=args.common_flag_mask, ) build_invnpp.exec(data) invnpp.allreduce() hits.allreduce() mtime = elapsed(comm.comm_world, mtime, "Building hits and N_pp^-1") hits.write_healpix_fits("{}_hits.fits".format(args.outdir)) invnpp.write_healpix_fits("{}_invnpp.fits".format(args.outdir)) mtime = elapsed(comm.comm_world, mtime, "Writing hits and N_pp^-1") # invert it tm.covariance_invert(invnpp, 1.0e-3) mtime = elapsed(comm.comm_world, mtime, "Inverting N_pp^-1") invnpp.write_healpix_fits("{}_npp.fits".format(args.outdir)) mtime = elapsed(comm.comm_world, mtime, "Writing N_pp") zmap.data.fill(0.0) build_zmap = tm.OpAccumDiag(zmap=zmap, detweights=detweights, common_flag_mask=args.common_flag_mask) build_zmap.exec(data) zmap.allreduce() mtime = elapsed(comm.comm_world, mtime, "Building noise weighted map") tm.covariance_apply(invnpp, zmap) mtime = elapsed(comm.comm_world, mtime, "Computing binned map") zmap.write_healpix_fits(os.path.join(args.outdir, "binned.fits")) mtime = elapsed(comm.comm_world, mtime, "Writing binned map") else: # Set up MADAM map making. pars = {} pars["temperature_only"] = "F" pars["force_pol"] = "T" pars["kfirst"] = "T" pars["concatenate_messages"] = "T" pars["write_map"] = "T" pars["write_binmap"] = "T" pars["write_matrix"] = "T" pars["write_wcov"] = "T" pars["write_hits"] = "T" pars["nside_cross"] = nside // 2 pars["nside_submap"] = subnside if args.madampar is not None: pat = re.compile(r"\s*(\S+)\s*=\s*(\S+(\s+\S+)*)\s*") comment = re.compile(r"^#.*") with open(args.madampar, "r") as f: for line in f: if comment.match(line) is None: result = pat.match(line) if result is not None: key, value = result.group(1), result.group(2) pars[key] = value pars["base_first"] = args.baseline pars["nside_map"] = nside if args.noisefilter: pars["kfilter"] = "T" else: pars["kfilter"] = "F" pars["fsample"] = args.samplerate madam = tm.OpMadam(params=pars, detweights=detweights, common_flag_mask=args.common_flag_mask) madam.exec(data) mtime = elapsed(comm.comm_world, mtime, "Madam mapmaking") comm.comm_world.barrier() stop = MPI.Wtime() dur = stop - global_start if comm.comm_world.rank == 0: print("Total Time: {:.2f} seconds".format(dur), flush=True) return
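# NOTE (editor): the non-Madam branch above is the generalized least squares
# ("binned") map estimate m = (P^T N^-1 P)^-1 P^T N^-1 d, computed blockwise
# in pixel space: OpAccumDiag accumulates P^T N^-1 P (invnpp) and P^T N^-1 d
# (zmap), then covariance_invert and covariance_apply solve pixel by pixel.
# A toy dense-matrix sketch of the same estimator (all inputs invented):
def toy_binned_map(P, d, w):
    import numpy as np
    # P: (nsamp, npix) pointing matrix; d: (nsamp,) time-ordered data;
    # w: (nsamp,) inverse-variance weights, i.e. the diagonal of N^-1.
    invnpp = P.T @ (w[:, None] * P)  # analogous to the accumulated invnpp
    zmap = P.T @ (w * d)             # analogous to the accumulated zmap
    return np.linalg.solve(invnpp, zmap)
# With a trivial one-to-one pointing, e.g.
# toy_binned_map(np.eye(4), np.arange(4.0), np.ones(4)), the estimate
# reproduces the data, as expected for unit weights and no noise averaging.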
def exec(self, data): """ Generate atmosphere timestreams. This iterates over all observations and detectors and generates the atmosphere timestreams. Args: data (toast.Data): The distributed data. """ autotimer = timing.auto_timer(type(self).__name__) group = data.comm.group for obs in data.obs: try: obsname = obs['name'] except Exception: obsname = 'observation' prefix = '{} : {} : '.format(group, obsname) tod = self._get_from_obs('tod', obs) comm = tod.mpicomm obsindx = self._get_from_obs('id', obs) telescope = self._get_from_obs('telescope_id', obs) site = self._get_from_obs('site_id', obs) altitude = self._get_from_obs('altitude', obs) weather = self._get_from_obs('weather', obs) fp_radius = np.radians(self._get_from_obs('fpradius', obs)) # Get the observation time span and initialize the weather # object for this observation. times = tod.local_times() tmin = times[0] tmax = times[-1] tmin_tot = comm.allreduce(tmin, op=MPI.MIN) tmax_tot = comm.allreduce(tmax, op=MPI.MAX) weather.set(site, self._realization, tmin_tot) """ The random number generator accepts a key and a counter, each made of two 64bit integers. Following tod_math.py we set key1 = realization * 2^32 + telescope * 2^16 + component key2 = site * 2^16 + obsindx counter1 = currently unused (0) counter2 = sample in stream (incremented internally in the atm code) """ key1 = self._realization * 2 ** 32 + telescope * 2 ** 16 \ + self._component key2 = site * 2**16 + obsindx counter1 = 0 counter2 = 0 if self._freq is not None: absorption = atm_get_absorption_coefficient( altitude, weather.air_temperature, weather.surface_pressure, weather.pwv, self._freq) loading = atm_get_atmospheric_loading(altitude, weather.air_temperature, weather.surface_pressure, weather.pwv, self._freq) tod.meta['loading'] = loading else: absorption = None if self._cachedir is None: cachedir = None else: # The number of atmospheric realizations can be large. Use # sub-directories under cachedir. subdir = str(int((obsindx % 1000) // 100)) subsubdir = str(int((obsindx % 100) // 10)) subsubsubdir = str(obsindx % 10) cachedir = os.path.join(self._cachedir, subdir, subsubdir, subsubsubdir) if comm.rank == 0: try: os.makedirs(cachedir) except FileExistsError: pass comm.Barrier() if comm.rank == 0: print(prefix + 'Setting up atmosphere simulation', flush=self._flush) comm.Barrier() # Cache the output common flags common_ref = tod.local_common_flags(self._common_flag_name) # Read the extent of the AZ/EL boresight pointing, and use that # to compute the range of angles needed for simulating the slab. (min_az_bore, max_az_bore, min_el_bore, max_el_bore) = tod.scan_range # print("boresight scan range = {}, {}, {}, {}".format( # min_az_bore, max_az_bore, min_el_bore, max_el_bore)) # Use a fixed focal plane radius so that changing the actual # set of detectors will not affect the simulated atmosphere. elfac = 1 / np.cos(max_el_bore + fp_radius) azmin = min_az_bore - fp_radius * elfac azmax = max_az_bore + fp_radius * elfac if azmin < -2 * np.pi: azmin += 2 * np.pi azmax += 2 * np.pi elif azmax > 2 * np.pi: azmin -= 2 * np.pi azmax -= 2 * np.pi elmin = min_el_bore - fp_radius elmax = max_el_bore + fp_radius azmin = comm.allreduce(azmin, op=MPI.MIN) azmax = comm.allreduce(azmax, op=MPI.MAX) elmin = comm.allreduce(elmin, op=MPI.MIN) elmax = comm.allreduce(elmax, op=MPI.MAX) if elmin < 0 or elmax > np.pi / 2: raise RuntimeError( 'Error in CES elevation: elmin = {:.2f}, elmax = {:.2f}' ''.format(elmin, elmax)) comm.Barrier() # Loop over the time span in "wind_time"-sized chunks.
# wind_time is intended to reflect the correlation length # in the atmospheric noise. tmin = tmin_tot istart = 0 while tmin < tmax_tot: while times[istart] < tmin: istart += 1 tmax = tmin + self._wind_time if tmax < tmax_tot: # Extend the scan to the next turnaround istop = istart while istop < times.size and times[istop] < tmax: istop += 1 while istop < times.size and (common_ref[istop] & tod.TURNAROUND) == 0: istop += 1 if istop < times.size: tmax = times[istop] else: tmax = tmax_tot else: tmax = tmax_tot istop = times.size ind = slice(istart, istop) nind = istop - istart if self._report_timing: comm.Barrier() tstart = MPI.Wtime() comm.Barrier() if comm.rank == 0: print(prefix + 'Instantiating the atmosphere for t = {}' ''.format(tmin - tmin_tot), flush=self._flush) comm.Barrier() T0_center = weather.air_temperature wx = weather.west_wind wy = weather.south_wind w_center = np.sqrt(wx**2 + wy**2) wdir_center = np.arctan2(wy, wx) sim = atm_sim_alloc( azmin, azmax, elmin, elmax, tmin, tmax, self._lmin_center, self._lmin_sigma, self._lmax_center, self._lmax_sigma, w_center, 0, wdir_center, 0, self._z0_center, self._z0_sigma, T0_center, 0, self._zatm, self._zmax, self._xstep, self._ystep, self._zstep, self._nelem_sim_max, self._verbosity, comm, self._gangsize, key1, key2, counter1, counter2, cachedir) if sim == 0: raise RuntimeError(prefix + 'Failed to allocate simulation') if self._report_timing: comm.Barrier() tstop = MPI.Wtime() if comm.rank == 0 and tstop - tstart > 1: print(prefix + 'OpSimAtmosphere: Initialized ' 'atmosphere in {:.2f} s'.format(tstop - tstart), flush=self._flush) tstart = tstop comm.Barrier() use_cache = cachedir is not None if comm.rank == 0: fname = os.path.join( cachedir, '{}_{}_{}_{}_metadata.txt'.format( key1, key2, counter1, counter2)) if use_cache and os.path.isfile(fname): print(prefix + 'Loading the atmosphere for t = {} ' 'from {}'.format(tmin - tmin_tot, fname), flush=self._flush) cached = True else: print(prefix + 'Simulating the atmosphere for t = {}' ''.format(tmin - tmin_tot), flush=self._flush) cached = False err = atm_sim_simulate(sim, use_cache) if err != 0: raise RuntimeError(prefix + 'Simulation failed.') # Advance the sample counter in case wind_time broke the # observation in parts counter2 += 100000000 if self._report_timing: comm.Barrier() tstop = MPI.Wtime() if comm.rank == 0 and tstop - tstart > 1: if cached: op = 'Loaded' else: op = 'Simulated' print(prefix + 'OpSimAtmosphere: {} atmosphere in ' '{:.2f} s'.format(op, tstop - tstart), flush=self._flush) tstart = tstop if self._verbosity > 0: self._plot_snapshots(sim, prefix, obsname, azmin, azmax, elmin, elmax, tmin, tmax, comm) nsamp = tod.local_samples[1] if self._report_timing: comm.Barrier() tstart = MPI.Wtime() if comm.rank == 0: print(prefix + 'Observing the atmosphere', flush=self._flush) for det in tod.local_dets: # Cache the output signal cachename = '{}_{}'.format(self._out, det) if tod.cache.exists(cachename): ref = tod.cache.reference(cachename) else: ref = tod.cache.create(cachename, np.float64, (nsamp, )) # Cache the output flags flag_ref = tod.local_flags(det, self._flag_name) if self._apply_flags: good = np.logical_and( common_ref[ind] & self._common_flag_mask == 0, flag_ref[ind] & self._flag_mask == 0) ngood = np.sum(good) if ngood == 0: continue azelquat = tod.read_pntg(detector=det, local_start=istart, n=nind, azel=True)[good] atmdata = np.zeros(ngood, dtype=np.float64) else: ngood = nind azelquat = tod.read_pntg(detector=det, local_start=istart, n=nind, azel=True) atmdata = np.zeros(nind, dtype=np.float64)
# Convert Az/El quaternion of the detector back into # angles for the simulation. theta, phi, _ = qa.to_angles(azelquat) # Azimuth is measured in the opposite direction # from longitude az = 2 * np.pi - phi el = np.pi / 2 - theta if np.ptp(az) < np.pi: azmin_det = np.amin(az) azmax_det = np.amax(az) else: # Scanning across the zero azimuth. azmin_det = np.amin(az[az > np.pi]) - 2 * np.pi azmax_det = np.amax(az[az < np.pi]) elmin_det = np.amin(el) elmax_det = np.amax(el) if ((not (azmin <= azmin_det and azmax_det <= azmax) and not (azmin <= azmin_det - 2 * np.pi and azmax_det - 2 * np.pi <= azmax)) or not (elmin <= elmin_det and elmax_det <= elmax)): raise RuntimeError( prefix + 'Detector Az/El: [{:.5f}, {:.5f}], ' '[{:.5f}, {:.5f}] is not contained in ' '[{:.5f}, {:.5f}], [{:.5f} {:.5f}]' ''.format(azmin_det, azmax_det, elmin_det, elmax_det, azmin, azmax, elmin, elmax)) # Integrate detector signal err = atm_sim_observe(sim, times[ind], az, el, atmdata, ngood, 0) if err != 0: # Observing failed print(prefix + 'OpSimAtmosphere: Observing FAILED. ' 'det = {}, rank = {}'.format(det, comm.rank), flush=self._flush) atmdata[:] = 0 flag_ref[ind] = 255 if self._gain: atmdata *= self._gain if absorption is not None: # Apply the frequency-dependent absorption coefficient atmdata *= absorption if self._apply_flags: ref[ind][good] += atmdata else: ref[ind] += atmdata del ref err = atm_sim_free(sim) if err != 0: raise RuntimeError(prefix + 'Failed to free simulation.') if self._report_timing: comm.Barrier() tstop = MPI.Wtime() if comm.rank == 0 and tstop - tstart > 1: print(prefix + 'OpSimAtmosphere: Observed atmosphere ' 'in {:.2f} s'.format(tstop - tstart), flush=self._flush) tmin = tmax return
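# NOTE (editor): the key1/key2 construction in exec() above packs the
# simulation indices into the two 64-bit keys of the counter-based RNG, so
# that every (realization, telescope, component) and (site, observation)
# combination selects an independent random stream. A sketch using the same
# arithmetic as the code above:
def atm_rng_keys(realization, telescope, component, site, obsindx):
    key1 = realization * 2 ** 32 + telescope * 2 ** 16 + component
    key2 = site * 2 ** 16 + obsindx
    return key1, key2
# e.g. atm_rng_keys(0, 1, 2, 3, 42) -> (65538, 196650); changing any index
# changes a key, so the drawn atmosphere realization changes with it.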
def main(): # We are going to group our processes in a single group. This is fine # if we have fewer processes than detectors. Otherwise we should group # them in a reasonable size that is smaller than the number of detectors # and which divides evenly into the total number of processes. comm = toast.Comm(world=MPI.COMM_WORLD, groupsize=MPI.COMM_WORLD.size) # Make a fake focalplane. Plot it just for fun (don't waste time on this # for very large runs though). fp = fake_focalplane() if comm.comm_world.rank == 0: outfile = "custom_example_focalplane.png" set_backend() tt.plot_focalplane(fp, 6.0, 6.0, outfile) # Read in 2 boresight files borefiles = [ "../data/custom_example_boresight_1.txt", "../data/custom_example_boresight_2.txt" ] # Set up the distributed data rate = 100.0 data = create_observations(comm, rate, fp, borefiles) # Configure the healpix pixelization we will use for map-making and # also the "submap" resolution, which sets granularity of the locally # stored pieces of the sky. map_nside = 512 map_npix = 12 * map_nside**2 sub_nside = 4 sub_npix = 12 * sub_nside**2 # Compute a pointing matrix with healpix pixels and weights. pointing = tt.OpPointingHpix(nside=map_nside, nest=True, mode="IQU", pixels="pixels", weights="weights") pointing.exec(data) # Compute the locally hit submaps local_submaps = pixel_dist(data, sub_npix) # Sources of simulated data: scan from a symmetric beam convolved sky # and then add some simulated noise. signalmap = tm.DistPixels(comm=comm.comm_world, size=map_npix, nnz=3, dtype=np.float64, submap=sub_npix, local=local_submaps) signalmap.read_healpix_fits("../data/custom_example_sky.fits") scanmap = tt.OpSimScan(distmap=signalmap, pixels='pixels', weights='weights', out="sim") scanmap.exec(data) nse = tt.OpSimNoise(out="sim", realization=0) nse.exec(data) # Accumulate the hits and inverse diagonal pixel covariance, as well as the # noise weighted map. Here we simply use inverse noise weighting. detweights = {} for d in fp.keys(): net = fp[d]["NET"] detweights[d] = 1.0 / (rate * net * net) invnpp = tm.DistPixels(comm=comm.comm_world, size=map_npix, nnz=6, dtype=np.float64, submap=sub_npix, local=local_submaps) hits = tm.DistPixels(comm=comm.comm_world, size=map_npix, nnz=1, dtype=np.int64, submap=sub_npix, local=local_submaps) zmap = tm.DistPixels(comm=comm.comm_world, size=map_npix, nnz=3, dtype=np.float64, submap=sub_npix, local=local_submaps) invnpp.data.fill(0.0) hits.data.fill(0) zmap.data.fill(0.0) build_invnpp = tm.OpAccumDiag(detweights=detweights, invnpp=invnpp, hits=hits, zmap=zmap, name="sim") build_invnpp.exec(data) invnpp.allreduce() hits.allreduce() zmap.allreduce() # Write these products out hits.write_healpix_fits("custom_example_hits.fits") invnpp.write_healpix_fits("custom_example_invnpp.fits") zmap.write_healpix_fits("custom_example_zmap.fits") # Invert the covariance and write tm.covariance_invert(invnpp, 1.0e-3) invnpp.write_healpix_fits("custom_example_npp.fits") # Apply covariance to make the binned map tm.covariance_apply(invnpp, zmap) zmap.write_healpix_fits("custom_example_binned.fits") MPI.Finalize() return
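# NOTE (editor): the submap bookkeeping used throughout (pixel_dist,
# OpLocalPixels, DistPixels(local=...)) reduces the full sky to only the
# coarse "submaps" a process actually hits: pixel p belongs to submap
# p // sub_npix. A tiny sketch with made-up pixel numbers:
def example_local_submaps(hit_pixels, sub_npix):
    import numpy as np
    return np.unique(np.floor_divide(hit_pixels, sub_npix))
# With sub_nside = 4 as above (sub_npix = 192), hit pixels [10, 200, 3000]
# fall in submaps [0, 1, 15]:
# example_local_submaps(np.array([10, 200, 3000]), 192) -> array([ 0,  1, 15])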