Example #1
def create_observations(args, comm, schedules):
    """ Create and distribute TOAST observations for every CES in schedules.

    """
    log = Logger.get()
    timer = Timer()
    timer.start()

    data = Data(comm)

    # Loop over the schedules, distributing each schedule evenly across
    # the process groups.  For now, we'll assume that each schedule has
    # the same number of operational days and the number of process groups
    # matches the number of operational days.  Relaxing these constraints
    # will cause the season break to occur on different process groups
    # for different schedules and prevent splitting the communicator.

    for schedule in schedules:

        telescope = schedule.telescope
        all_ces = schedule.ceslist
        nces = len(all_ces)

        breaks = get_breaks(comm, all_ces, args)

        groupdist = distribute_uniform(nces, comm.ngroups, breaks=breaks)
        group_firstobs = groupdist[comm.group][0]
        group_numobs = groupdist[comm.group][1]

        for ices in range(group_firstobs, group_firstobs + group_numobs):
            obs = create_observation(args, comm, telescope, all_ces[ices])
            data.obs.append(obs)

    if comm.comm_world is None or comm.comm_group.rank == 0:
        log.info("Group # {:4} has {} observations.".format(comm.group, len(data.obs)))

    if len(data.obs) == 0:
        raise RuntimeError(
            "Too many tasks. Every MPI task must "
            "be assigned to at least one observation."
        )

    if comm.comm_world is not None:
        comm.comm_world.barrier()
    timer.stop()
    if comm.world_rank == 0:
        timer.report("Simulated scans")

    # Split the data object for each telescope for separate mapmaking.
    # We could also split by site.

    if len(schedules) > 1:
        telescope_data = data.split("telescope")
        if len(telescope_data) == 1:
            # Only one telescope available
            telescope_data = []
    else:
        telescope_data = []
    telescope_data.insert(0, ("all", data))
    return data, telescope_data
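
The (first, count) pairs above come from distribute_uniform.  A minimal pure-Python sketch of that distribution (ignoring the toast-specific breaks argument, so a simplified stand-in rather than the real toast.dist implementation):

def distribute_uniform_simple(nces, ngroups):
    # Split nces observations into ngroups contiguous chunks whose sizes
    # differ by at most one; returns (first, n) pairs like groupdist above.
    out = []
    offset = 0
    for group in range(ngroups):
        n = nces // ngroups + (1 if group < nces % ngroups else 0)
        out.append((offset, n))
        offset += n
    return out

print(distribute_uniform_simple(10, 4))  # [(0, 3), (3, 3), (6, 2), (8, 2)]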
Example #2
def export_TOD(args,
               comm,
               data,
               totalname,
               schedules,
               other=None,
               verbose=True):
    if args.export is None:
        return

    log = Logger.get()
    timer = Timer()

    # Only import spt3g if we are writing out so3g files
    from spt3g import core as core3g
    from ..data.toast_export import ToastExport

    path = os.path.abspath(args.export)

    key = args.export_key
    if key is not None:
        prefix = "{}_{}".format(args.bands, key)
        det_groups = {}
        for schedule in schedules:
            for (
                    det_name,
                    det_data,
            ) in schedule.telescope.focalplane.detector_data.items():
                value = det_data[key]
                if value not in det_groups:
                    det_groups[value] = []
                det_groups[value].append(det_name)
    else:
        prefix = args.bands
        det_groups = None

    if comm.world_rank == 0 and verbose:
        log.info("Exporting data to directory tree at {}".format(path))

    timer.start()
    export = ToastExport(
        path,
        prefix=prefix,
        use_intervals=True,
        cache_name=totalname,
        cache_copy=other,
        mask_flag_common=TODGround.TURNAROUND,
        filesize=2**30,
        units=core3g.G3TimestreamUnits.Tcmb,
        detgroups=det_groups,
        compress=args.compress,
    )
    export.exec(data)
    if comm.comm_world is not None:
        comm.comm_world.Barrier()
    timer.stop()
    if comm.world_rank == 0 and verbose:
        timer.report("Wrote simulated data to {}:{}" "".format(path, "total"))

    return
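
The det_groups construction above is plain grouping by a focalplane key.  A standalone sketch with hypothetical detector data (the real loop reads schedule.telescope.focalplane.detector_data):

from collections import defaultdict

detector_data = {  # hypothetical entries, for illustration only
    "det00A": {"band": "f090", "wafer": "w01"},
    "det00B": {"band": "f090", "wafer": "w02"},
    "det01A": {"band": "f150", "wafer": "w01"},
}

det_groups = defaultdict(list)
for det_name, det_data in detector_data.items():
    det_groups[det_data["wafer"]].append(det_name)

print(dict(det_groups))  # {'w01': ['det00A', 'det01A'], 'w02': ['det00B']}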
Example #3
    def _observe_sso(self, sso_az, sso_el, sso_dist, sso_dia, tod, comm,
                     prefix):
        """
        Observe the SSO with each detector in tod
        """
        log = Logger.get()
        rank = 0
        if comm is not None:
            rank = comm.rank
        tmr = Timer()
        if self._report_timing:
            if comm is not None:
                comm.Barrier()
            tmr.start()

        nsamp = tod.local_samples[1]

        if rank == 0:
            log.info("{}Observing the SSO signal".format(prefix))

        for det in tod.local_dets:
            # Cache the output signal
            cachename = "{}_{}".format(self._out, det)
            if tod.cache.exists(cachename):
                ref = tod.cache.reference(cachename)
            else:
                ref = tod.cache.create(cachename, np.float64, (nsamp, ))

            try:
                # Some TOD classes provide a shortcut to Az/El
                az, el = tod.read_azel(detector=det)
            except Exception:
                azelquat = tod.read_pntg(detector=det, azel=True)
                # Convert Az/El quaternion of the detector back into
                # angles for the simulation.
                theta, phi = qa.to_position(azelquat)
                # Azimuth is measured in the opposite direction
                # than longitude
                az = 2 * np.pi - phi
                el = np.pi / 2 - theta

            beam, radius = self._get_beam_map(det, sso_dia)

            # Interpolate the beam map at appropriate locations
            x = (az - sso_az) * np.cos(el)
            y = el - sso_el
            r = np.sqrt(x**2 + y**2)
            good = r < radius
            sig = beam(x[good], y[good], grid=False)
            ref[good] += sig

            del ref, sig, beam

        if self._report_timing:
            if comm is not None:
                comm.Barrier()
            if rank == 0:
                tmr.stop()
                tmr.report("{}OpSimSSO: Observe signal".format(prefix))
        return
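
_get_beam_map is not shown here; judging from the beam(x, y, grid=False) call it returns a 2-D spline plus a cutoff radius.  A sketch of one plausible implementation, assuming a symmetric Gaussian beam and scipy.interpolate.RectBivariateSpline:

import numpy as np
from scipy.interpolate import RectBivariateSpline

def make_gaussian_beam(fwhm_deg=0.5, n=101, radius_deg=2.0):
    # Tabulate a circular Gaussian on a regular (x, y) grid in radians and
    # wrap it in a spline whose call signature matches the loop above.
    w = np.radians(radius_deg)
    x = np.linspace(-w, w, n)
    y = np.linspace(-w, w, n)
    sigma = np.radians(fwhm_deg) / np.sqrt(8 * np.log(2))
    bmap = np.exp(-0.5 * (x[:, None] ** 2 + y[None, :] ** 2) / sigma ** 2)
    return RectBivariateSpline(x, y, bmap), w

beam, radius = make_gaussian_beam()
print(beam(0.0, 0.0, grid=False))  # ~1.0 at the beam center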
Example #4
def job_size(mpicomm):
    log = Logger.get()

    procs_per_node = 1
    node_rank = 0
    nodecomm = None
    rank = 0
    procs = 1

    if mpicomm is not None:
        rank = mpicomm.rank
        procs = mpicomm.size
        nodecomm = mpicomm.Split_type(MPI.COMM_TYPE_SHARED, 0)
        node_rank = nodecomm.rank
        procs_per_node = nodecomm.size
        min_per_node = mpicomm.allreduce(procs_per_node, op=MPI.MIN)
        max_per_node = mpicomm.allreduce(procs_per_node, op=MPI.MAX)
        if min_per_node != max_per_node:
            raise RuntimeError("Nodes have inconsistent numbers of MPI ranks")

    # One process on each node gets available RAM and communicates it
    avail = get_node_mem(mpicomm, node_rank)

    n_node = procs // procs_per_node

    if rank == 0:
        log.info(
            "Job running on {} nodes each with {} processes ({} total)".format(
                n_node, procs_per_node, procs
            )
        )
    return (procs_per_node, avail)
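
The node-detection idiom above works standalone; a minimal mpi4py sketch:

from mpi4py import MPI

comm = MPI.COMM_WORLD
# Ranks that share memory (i.e. live on the same node) land in the same
# sub-communicator.
nodecomm = comm.Split_type(MPI.COMM_TYPE_SHARED, key=comm.rank)
procs_per_node = nodecomm.size
n_node = comm.size // procs_per_node
if comm.rank == 0:
    print("{} node(s) x {} rank(s) per node".format(n_node, procs_per_node))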
Example #5
def load_observations(args, comm):
    """Load existing data and put it in TOAST observations.
    """
    # This import is not at the top of the file to avoid
    # loading spt3g through so3g unnecessarily
    from ...io.toast_load import load_data
    log = Logger.get()
    if args.import_obs is not None:
        import_obs = args.import_obs.split(",")
    else:
        import_obs = None
    hw, telescope, det_index = get_hardware(args, comm, verbose=True)
    focalplane = get_focalplane(args, comm, hw, det_index, verbose=True)
    detweights = focalplane.detweights
    telescope.focalplane = focalplane

    if comm.world_rank == 0:
        log.info("Loading TOD from {}".format(args.import_dir))
    timer = Timer()
    timer.start()
    data = load_data(
        args.import_dir,
        obs=import_obs,
        comm=comm,
        prefix=args.import_prefix,
        dets=hw,
        detranks=comm.group_size,
        )
    if comm.world_rank == 0:
        timer.report_clear("Load data")
    telescope_data = [("all", data)]
    site = telescope.site
    focalplane = telescope.focalplane
    if args.weather is not None:
        weather = Weather(args.weather)
    else:
        weather = None
    for obs in data.obs:
        #obs["baselines"] = None
        obs["noise"] = focalplane.noise
        #obs["id"] = int(ces.mjdstart * 10000)
        #obs["intervals"] = tod.subscans
        obs["site"] = site.name
        obs["site_id"] = site.id
        obs["telescope"] = telescope.name
        obs["telescope_id"] = telescope.id
        obs["fpradius"] = focalplane.radius
        obs["weather"] = weather
        #obs["start_time"] = ces.start_time
        obs["altitude"] = site.alt
        #obs["season"] = ces.season
        #obs["date"] = ces.start_date
        #obs["MJD"] = ces.mjdstart
        obs["focalplane"] = focalplane.detector_data
        #obs["rising"] = ces.rising
        #obs["mindist_sun"] = ces.mindist_sun
        #obs["mindist_moon"] = ces.mindist_moon
        #obs["el_sun"] = ces.el_sun
    return data, telescope_data, detweights
Example #6
    def _stage_signal(self, detectors, nsamp, ndet, nodecomm, nread):
        """ Stage signal
        """
        log = Logger.get()
        timer = Timer()
        # Determine if we can purge the signal and avoid keeping two
        # copies in memory
        purge = self._name is not None and self._purge_tod
        if not purge:
            nread = 1
            nodecomm = MPI.COMM_SELF

        for iread in range(nread):
            nodecomm.Barrier()
            timer.start()
            if nodecomm.rank % nread == iread:
                self._mappraiser_signal = self._cache.create(
                    "signal", mappraiser.SIGNAL_TYPE, (nsamp * ndet, ))
                self._mappraiser_signal[:] = np.nan

                global_offset = 0
                local_blocks_sizes = []
                for iobs, obs in enumerate(self._data.obs):
                    tod = obs["tod"]

                    for idet, det in enumerate(detectors):
                        # Get the signal.
                        signal = tod.local_signal(det, self._name)
                        signal_dtype = signal.dtype
                        offset = global_offset
                        local_V_size = len(signal)
                        dslice = slice(idet * nsamp + offset,
                                       idet * nsamp + offset + local_V_size)
                        self._mappraiser_signal[dslice] = signal
                        offset += local_V_size
                        local_blocks_sizes.append(local_V_size)

                        del signal
                    # Purge only after all detectors are staged in case some are aliased
                    # cache.clear() will not fail if the object was already
                    # deleted as an alias
                    if purge:
                        for det in detectors:
                            cachename = "{}_{}".format(self._name, det)
                            tod.cache.clear(cachename)
                    global_offset = offset

                local_blocks_sizes = np.array(local_blocks_sizes,
                                              dtype=np.int32)
            if self._verbose and nread > 1:
                nodecomm.Barrier()
                if self._rank == 0:
                    timer.report_clear("Stage signal {} / {}".format(
                        iread + 1, nread))

        return signal_dtype, local_blocks_sizes
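
The nodecomm.rank % nread == iread test implements staggered staging: the ranks on a node allocate and copy in nread waves, so the node never holds every rank's duplicate buffers at once.  A stripped-down, MPI-free sketch of the pattern:

def stage_in_waves(rank, nread, stage_fn):
    # In the MPI version a barrier separates the waves; here the loop just
    # shows which wave a given rank stages in.
    for iread in range(nread):
        if rank % nread == iread:
            stage_fn(iread)

stage_in_waves(rank=3, nread=2, stage_fn=lambda i: print("rank 3 stages in wave", i))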
Example #7
def simulate_hwpss(args, comm, data, mc, name):
    if not args.simulate_hwpss:
        return
    log = Logger.get()
    timer = Timer()
    timer.start()
    hwpssop = OpSimHWPSS(name=name, fname_hwpss=args.hwpss_file, mc=mc)
    hwpssop.exec(data)
    timer.report_clear("Simulate HWPSS")

    return
Example #8
def convolve_time_constant(args, comm, data, name, verbose=True):
    if not args.tau_convolve:
        return
    log = Logger.get()
    timer = Timer()
    timer.start()
    tauop = OpTimeConst(name=name, tau=args.tau_value, inverse=False)
    tauop.exec(data)
    timer.report_clear("Convolve time constant")

    return
Example #9
def create_input_maps(args):
    if not os.path.isfile(args.input_map):
        log = Logger.get()
        log.info("Generating input map {}".format(args.input_map))

        # This is *completely* fake - just to have something on the sky besides zeros.

        ell = np.arange(3 * args.nside - 1, dtype=np.float64)

        sig = 50.0
        numer = ell - 30.0
        tspec = (1.0 / (sig * np.sqrt(2.0 * np.pi))) * np.exp(
            -0.5 * numer**2 / sig**2)
        tspec *= 2000.0

        sig = 100.0
        numer = ell - 500.0
        espec = (1.0 / (sig * np.sqrt(2.0 * np.pi))) * np.exp(
            -0.5 * numer**2 / sig**2)
        espec *= 1.0

        cls = (
            tspec,
            espec,
            np.zeros(3 * args.nside - 1, dtype=np.float32),
            np.zeros(3 * args.nside - 1, dtype=np.float32),
        )
        maps = hp.synfast(
            cls,
            args.nside,
            pol=True,
            pixwin=False,
            sigma=None,
            new=True,
            fwhm=np.radians(3.0 / 60.0),
            verbose=False,
        )
        # for m in maps:
        #     hp.reorder(m, inp="RING", out="NEST")
        hp.write_map(args.input_map,
                     maps,
                     nest=True,
                     fits_IDL=False,
                     dtype=np.float32)

        import matplotlib.pyplot as plt

        hp.mollview(maps[0])
        plt.savefig("{}_fake-T.png".format(args.input_map))
        plt.close()

        hp.mollview(maps[1])
        plt.savefig("{}_fake-E.png".format(args.input_map))
        plt.close()
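
A hypothetical invocation, setting only the two args fields the function reads:

import argparse

args = argparse.Namespace(nside=64, input_map="fake_input_sky.fits")
create_input_maps(args)  # writes the FITS map and two PNG previews if missing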
Example #10
def get_hardware(args, comm, verbose=False):
    """ Get the hardware configuration, either from file or by simulating.
    Then trim it down to the bands that were selected.
    """
    log = Logger.get()
    telescope = get_telescope(args, comm, verbose=verbose)
    if comm.world_rank == 0:
        if args.hardware:
            log.info("Loading hardware configuration from {}..."
                     "".format(args.hardware))
            hw = hardware.Hardware(args.hardware)
        else:
            log.info("Simulating default hardware configuration")
            hw = hardware.get_example()
            hw.data["detectors"] = hardware.sim_telescope_detectors(
                hw, telescope.name)
        # Construct a running index for all detectors across all
        # telescopes for independent noise realizations
        det_index = {}
        for idet, det in enumerate(sorted(hw.data["detectors"])):
            det_index[det] = idet
        match = {"band": args.bands.replace(",", "|")}
        tubes = args.tubes.split(",")
        # If both telescopes and tubes were passed to hw.select, detectors
        # matching *either* selection would be concatenated, so we select on
        # tubes alone.
        #hw = hw.select(telescopes=[telescope.name], tubes=tubes, match=match)
        hw = hw.select(tubes=tubes, match=match)
        if args.thinfp:
            # Only accept a fraction of the detectors for
            # testing and development
            delete_detectors = []
            for det_name in hw.data["detectors"].keys():
                if (det_index[det_name] // 2) % args.thinfp != 0:
                    delete_detectors.append(det_name)
            for det_name in delete_detectors:
                del hw.data["detectors"][det_name]
        ndetector = len(hw.data["detectors"])
        if ndetector == 0:
            raise RuntimeError("No detectors match query: telescope={}, "
                               "tubes={}, match={}".format(
                                   telescope, tubes, match))
        log.info(
            "Telescope = {} tubes = {} bands = {}, thinfp = {} matches {} "
            "detectors".format(telescope.name, args.tubes, args.bands,
                               args.thinfp, ndetector))
    else:
        hw = None
        det_index = None
    if comm.comm_world is not None:
        hw = comm.comm_world.bcast(hw)
        det_index = comm.comm_world.bcast(det_index)
    return hw, telescope, det_index
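
The --thinfp rule above keeps detectors in pairs (det_index // 2) and retains only every thinfp-th pair.  A runnable sketch with hypothetical indices:

det_index = {"det{:02}".format(i): i for i in range(8)}  # hypothetical
thinfp = 2
kept = [d for d, i in sorted(det_index.items()) if (i // 2) % thinfp == 0]
print(kept)  # ['det00', 'det01', 'det04', 'det05']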
Example #11
def main():
    env = Environment.get()
    log = Logger.get()

    parser = argparse.ArgumentParser(
        description="Test the TOAST runtime environment.",
        fromfile_prefix_chars="@")

    parser.add_argument(
        "--groupsize",
        required=False,
        type=int,
        default=0,
        help="size of processor groups used to distribute observations",
    )

    try:
        args = parser.parse_args()
    except SystemExit:
        return

    mpiworld, procs, rank = get_world()
    if rank == 0:
        print(env)
        log.info(
            "Numba threading layer set to '{}'".format(numba_threading_layer))
    if mpiworld is None:
        log.info("Running serially with one process")
    else:
        if rank == 0:
            log.info("Running with {} processes".format(procs))

    groupsize = args.groupsize
    if groupsize <= 0:
        groupsize = procs

    if rank == 0:
        log.info("Using group size of {} processes".format(groupsize))

    comm = Comm(world=mpiworld, groupsize=groupsize)

    log.info(
        "Process {}:  world rank {}, group {} of {}, group rank {}".format(
            rank, comm.world_rank, comm.group + 1, comm.ngroups,
            comm.group_rank))

    return
Example #12
def create_schedules(args, max_ces_seconds, days):
    opts = [
        "--site-lat",
        str(-22.958064),
        "--site-lon",
        str(-67.786222),
        "--site-alt",
        str(5200.0),
        "--site-name",
        "ATACAMA",
        "--telescope",
        "atacama_telescope",
        "--patch-coord",
        "C",
        "--el-min-deg",
        str(30.0),
        "--el-max-deg",
        str(80.0),
        "--sun-el-max-deg",
        str(90.0),
        "--sun-avoidance-angle-deg",
        str(30.0),
        "--moon-avoidance-angle-deg",
        str(10.0),
        "--start",
        "2021-06-01 00:00:00",
        "--gap-s",
        str(600.0),
        "--gap-small-s",
        str(0.0),
        "--fp-radius-deg",
        str(0.0),
        "--patch",
        "BICEP,1,-10,-55,10,-58",
        "--ces-max-time-s",
        str(max_ces_seconds),
        "--operational-days",
        str(days),
    ]

    if not os.path.isfile(args.schedule):
        log = Logger.get()
        log.info("Generating input schedule file {}:".format(args.schedule))

        opts.extend(["--out", args.schedule])
        run_scheduler(opts=opts)
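
A hypothetical invocation, assuming args.schedule names a file that does not exist yet:

import argparse

args = argparse.Namespace(schedule="schedule_atacama.txt")
create_schedules(args, max_ces_seconds=1200, days=1)  # one day of 20-minute CESs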
Example #13
def rotate_focalplane(args, data, comm):
    """ The LAT focalplane projected on the sky rotates as the cryostat
    (co-rotator) tilts.  Usually the tilt is the same as the observing
    elevation to maintain constant angle between the mirror and the cryostat.

    This method must be called *before* expanding the detector pointing
    from boresight.
    """

    log = Logger.get()
    timer = Timer()
    timer.start()

    for obs in data.obs:
        if obs["telescope"] != "LAT":
            continue
        tod = obs["tod"]
        cache_name = "corotator_angle_deg"
        if tod.cache.exists(cache_name):
            corotator_angle = tod.cache.reference(cache_name)
        else:
            # If a vector of co-rotator angles isn't already cached,
            # make one now from the observation metadata.  This will
            # ensure they get recorded in the so3g files.
            corotator_angle = obs["corotator_angle_deg"]
            offset, nsample = tod.local_samples
            tod.cache.put(cache_name, np.zeros(nsample) + corotator_angle)
        el = np.degrees(tod.read_boresight_el())
        rot = qa.rotation(
            ZAXIS, np.radians(corotator_angle + el + LAT_COROTATOR_OFFSET_DEG)
        )
        quats = tod.read_boresight()
        quats[:] = qa.mult(quats, rot)
        try:
            # If there are horizontal boresight quaternions, they need
            # to be rotated as well.
            quats = tod.read_boresight(azel=True)
            quats[:] = qa.mult(quats, rot)
        except Exception:
            pass

    if comm.comm_world is None or comm.comm_world.rank == 0:
        timer.report_clear("Rotate focalplane")

    return
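
The correction above composes a rotation about the boresight Z axis onto each pointing quaternion.  A numpy-only sketch of that composition, using the scalar-last (x, y, z, w) convention of toast.qarray:

import numpy as np

def zrot_quat(ang):
    # Quaternion for a rotation by `ang` radians about the Z axis.
    return np.array([0.0, 0.0, np.sin(ang / 2.0), np.cos(ang / 2.0)])

def qmult(p, q):
    # Hamilton product with scalar-last components.
    px, py, pz, pw = p
    qx, qy, qz, qw = q
    return np.array([
        pw * qx + px * qw + py * qz - pz * qy,
        pw * qy - px * qz + py * qw + pz * qx,
        pw * qz + px * qy - py * qx + pz * qw,
        pw * qw - px * qx - py * qy - pz * qz,
    ])

ident = np.array([0.0, 0.0, 0.0, 1.0])
print(qmult(ident, zrot_quat(np.radians(30.0))))  # a pure Z rotation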
Example #14
def apply_polyfilter(args, comm, data, cache_name=None, verbose=True):
    """Apply the polynomial filter to data under `cache_name`."""
    if not args.apply_polyfilter:
        return
    log = Logger.get()
    timer = Timer()
    timer.start()
    if comm.world_rank == 0 and verbose:
        log.info("Polyfiltering signal")
    polyfilter = OpPolyFilter(order=args.poly_order,
                              name=cache_name,
                              common_flag_mask=args.common_flag_mask)
    polyfilter.exec(data)
    if comm.comm_world is not None:
        comm.comm_world.barrier()
    if comm.world_rank == 0 and verbose:
        timer.report_clear("Polynomial filtering")
    return
Example #15
def apply_common_mode_filter(args, comm, data, cache_name=None, verbose=True):
    """Apply the common mode filter to data under `cache_name`."""
    if not args.apply_common_mode_filter:
        return
    log = Logger.get()
    timer = Timer()
    timer.start()
    if comm.world_rank == 0 and verbose:
        log.info("Common mode filtering signal")
    commonfilter = OpCommonModeFilter(
        name=cache_name,
        common_flag_mask=args.common_flag_mask,
        focalplane_key=args.common_mode_filter_key,
    )
    commonfilter.exec(data)
    if comm.world_rank == 0 and verbose:
        timer.report_clear("Common mode filtering")
    return
Example #16
def compute_h_n(args, comm, data, verbose=True):
    if args.hn_max < args.hn_min:
        return
    log = Logger.get()
    timer = Timer()
    timer.start()
    hnop = OpHn(
        outdir=args.hn_outdir,
        outprefix=args.hn_prefix,
        nmin=args.hn_min,
        nmax=args.hn_max,
        common_flag_mask=args.common_flag_mask,
        flag_mask=255,
        zip_maps=args.hn_zip,
    )
    hnop.exec(data)
    timer.report_clear("Compute h_n")

    return
Example #17
def compute_cadence_map(args, comm, data, verbose=True):
    if not args.write_cadence_map:
        return
    log = Logger.get()
    if comm.world_rank == 0:
        log.info("Computing cadence map")
    timer = Timer()
    timer.start()
    cadence = OpCadenceMap(
        outdir=args.out,
        outprefix=args.cadence_map_prefix,
        common_flag_mask=args.common_flag_mask,
        flag_mask=255,
    )
    cadence.exec(data)
    if comm.world_rank == 0:
        timer.report_clear("Compute cadence map")

    return
Example #18
def demodulate(args,
               comm,
               data,
               name,
               detweights=None,
               madampars=None,
               verbose=True):
    if not args.demodulate:
        return
    log = Logger.get()
    timer = Timer()
    timer.start()

    if detweights is not None:
        # Copy the detector weights to demodulated TOD
        modulated = [
            detname for detname in detweights if "demod" not in detname
        ]
        for detname in modulated:
            detweight = detweights[detname]
            for demodkey in ["demod0", "demod4r", "demod4i"]:
                demod_name = "{}_{}".format(demodkey, detname)
                detweights[demod_name] = detweight
            del detweights[detname]

    if madampars is not None:
        # Filtering will affect the high frequency end of the noise PSD
        madampars["radiometers"] = False
        # Intensity and polarization will be decoupled in the noise matrix
        madampars["allow_decoupling"] = True

    demod = OpDemod(
        name=name,
        wkernel=args.demod_wkernel,
        fmax=args.demod_fmax,
        nskip=args.demod_nskip,
        do_2f=args.demod_2f,
    )
    demod.exec(data)

    timer.report_clear("Demodulate")

    return
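
The weight renaming above copies each modulated detector's weight to its three demodulated streams.  A standalone sketch:

detweights = {"det00": 1.5, "det01": 2.0}  # hypothetical weights

for detname in [d for d in detweights if "demod" not in d]:
    w = detweights.pop(detname)
    for key in ["demod0", "demod4r", "demod4i"]:
        detweights["{}_{}".format(key, detname)] = w

print(sorted(detweights))
# ['demod0_det00', 'demod0_det01', 'demod4i_det00', 'demod4i_det01', ...]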
Example #19
def apply_groundfilter(args, comm, data, cache_name=None, verbose=True):
    if not args.apply_groundfilter:
        return
    log = Logger.get()
    timer = Timer()
    timer.start()
    if comm.world_rank == 0 and verbose:
        log.info("Ground-filtering signal")
    groundfilter = OpGroundFilter(
        filter_order=args.ground_order,
        name=cache_name,
        common_flag_mask=args.common_flag_mask,
    )
    groundfilter.exec(data)
    if comm.comm_world is not None:
        comm.comm_world.barrier()
    if comm.world_rank == 0 and verbose:
        timer.report_clear("Ground filtering")
    return
Example #20
def compute_crosslinking(args, comm, data, detweights=None, verbose=True):
    if not args.write_crosslinking:
        return
    log = Logger.get()
    timer = Timer()
    timer.start()
    crosslinking = OpCrossLinking(
        outdir=args.out,
        outprefix=args.crosslinking_prefix,
        common_flag_mask=args.common_flag_mask,
        flag_mask=255,
        zip_maps=args.hn_zip,
        rcond_limit=1e-3,
        detweights=detweights,
    )
    crosslinking.exec(data)
    timer.report_clear("Compute crosslinking map")

    return
Example #21
    def exec(self, data):
        """Generate and apply flags
        Args:
            data (toast.Data): The distributed data.
        Returns:
            None
        """

        log = Logger.get()
        group = data.comm.group
        for obs in data.obs:
            try:
                obsname = obs["name"]
                focalplane = obs["focalplane"]
            except Exception:
                obsname = "observation"
                focalplane = None

            observer = ephem.Observer()
            observer.lon = obs["site"].lon
            observer.lat = obs["site"].lat
            observer.elevation = obs["site"].alt  # In meters
            observer.epoch = "2000"
            observer.temp = 0  # in Celsius
            observer.compute_pressure()

            tod = obs["tod"]

            # Get the observation time span and compute the horizontal
            # position of the SSO
            times = tod.local_times()
            sso_azs, sso_els = self._get_sso_positions(times, observer)

            self._flag_ssos(sso_azs, sso_els, tod, focalplane)

            del sso_azs, sso_els

        return
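
A standalone pyephem sketch of the observer setup above, computing the horizontal position of one solar-system object at a fixed site and time:

import ephem

observer = ephem.Observer()
observer.lon = "-67.786222"  # degrees, passed as strings per pyephem convention
observer.lat = "-22.958064"
observer.elevation = 5200.0  # meters
observer.epoch = "2000"
observer.temp = 0  # Celsius
observer.compute_pressure()

observer.date = "2021/06/01 00:00:00"
mars = ephem.Mars()
mars.compute(observer)
print(float(mars.az), float(mars.alt))  # radians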
Example #22
def deconvolve_time_constant(args,
                             comm,
                             data,
                             name,
                             realization=0,
                             verbose=True):
    if not args.tau_deconvolve:
        return

    log = Logger.get()
    timer = Timer()
    timer.start()
    tauop = OpTimeConst(
        name=name,
        tau=args.tau_value,
        inverse=True,
        tau_sigma=args.tau_sigma,
        realization=realization,
    )
    tauop.exec(data)
    timer.report_clear("De-convolve time constant")

    return
Example #23
def main():
    log = Logger.get()
    gt = GlobalTimers.get()
    gt.start("toast_planck_reduce (total)")

    mpiworld, procs, rank, comm = get_comm()

    # get_comm returned the 2-level toast communicator.  By default,
    # there is just one group which spans MPI_COMM_WORLD.

    if comm.comm_world.rank == 0:
        print(
            "Running with {} processes at {}".format(
                procs, str(datetime.datetime.now())
            )
        )

    parser = argparse.ArgumentParser(
        description="Simple on-the-fly signal convolution + MADAM Mapmaking",
        fromfile_prefix_chars="@",
    )
    parser.add_argument("--lmax", required=True, type=np.int, help="Simulation lmax")
    parser.add_argument(
        "--fwhm", required=True, type=np.float, help="Sky fwhm [arcmin] to deconvolve"
    )
    parser.add_argument("--beammmax", required=True, type=np.int, help="Beam mmax")
    parser.add_argument("--order", default=11, type=np.int, help="Iteration order")
    parser.add_argument(
        "--pxx",
        required=False,
        default=False,
        action="store_true",
        help="Beams are in Pxx frame, not Dxx",
    )
    parser.add_argument(
        "--normalize",
        required=False,
        default=False,
        action="store_true",
        help="Normalize the beams",
    )
    parser.add_argument(
        "--skyfile",
        required=True,
        help="Path to sky alm files. Tag DETECTOR will be "
        "replaced with detector name.",
    )
    parser.add_argument(
        "--remove_monopole",
        required=False,
        default=False,
        action="store_true",
        help="Remove the sky monopole before convolution",
    )
    parser.add_argument(
        "--remove_dipole",
        required=False,
        default=False,
        action="store_true",
        help="Remove the sky dipole before convolution",
    )
    parser.add_argument(
        "--beamfile",
        required=True,
        help="Path to beam alm files. Tag DETECTOR will be "
        "replaced with detector name.",
    )
    parser.add_argument("--rimo", required=True, help="RIMO file")
    parser.add_argument("--freq", required=True, type=np.int, help="Frequency")
    parser.add_argument(
        "--dets", required=False, default=None, help="Detector list (comma separated)"
    )
    parser.add_argument(
        "--effdir", required=True, help="Input Exchange Format File directory"
    )
    parser.add_argument(
        "--effdir_pntg",
        required=False,
        help="Input Exchange Format File directory " "for pointing",
    )
    parser.add_argument(
        "--effdir_out", required=False, help="Output directory for convolved TOD"
    )
    parser.add_argument(
        "--obtmask", required=False, default=1, type=int, help="OBT flag mask"
    )
    parser.add_argument(
        "--flagmask", required=False, default=1, type=int, help="Quality flag mask"
    )
    parser.add_argument("--ringdb", required=True, help="Ring DB file")
    parser.add_argument(
        "--odfirst", required=False, default=None, type=int, help="First OD to use"
    )
    parser.add_argument(
        "--odlast", required=False, default=None, type=int, help="Last OD to use"
    )
    parser.add_argument(
        "--ringfirst",
        required=False,
        default=None,
        type=int,
        help="First ring to use",
    )
    parser.add_argument(
        "--ringlast", required=False, default=None, type=int, help="Last ring to use"
    )
    parser.add_argument(
        "--obtfirst",
        required=False,
        default=None,
        type=float,
        help="First OBT to use",
    )
    parser.add_argument(
        "--obtlast", required=False, default=None, type=float, help="Last OBT to use"
    )
    parser.add_argument("--madam_prefix", required=False, help="map prefix")
    parser.add_argument(
        "--madampar", required=False, default=None, help="Madam parameter file"
    )
    parser.add_argument(
        "--obtmask_madam", required=False, type=int, help="OBT flag mask for Madam"
    )
    parser.add_argument(
        "--flagmask_madam",
        required=False,
        type=int,
        help="Quality flag mask for Madam",
    )
    parser.add_argument(
        "--skip_madam",
        required=False,
        default=False,
        action="store_true",
        help="Do not run Madam on the convolved timelines",
    )
    parser.add_argument("--out", required=False, default=".", help="Output directory")

    try:
        args = parser.parse_args()
    except SystemExit:
        sys.exit(0)

    timer = Timer()
    timer.start()

    odrange = None
    if args.odfirst is not None and args.odlast is not None:
        odrange = (args.odfirst, args.odlast)

    ringrange = None
    if args.ringfirst is not None and args.ringlast is not None:
        ringrange = (args.ringfirst, args.ringlast)

    obtrange = None
    if args.obtfirst is not None and args.obtlast is not None:
        obtrange = (args.obtfirst, args.obtlast)

    detectors = None
    if args.dets is not None:
        detectors = re.split(",", args.dets)

    # This is the distributed data, consisting of one or
    # more observations, each distributed over a communicator.
    data = toast.Data(comm)

    # Ensure output directory exists

    if not os.path.isdir(args.out) and comm.comm_world.rank == 0:
        os.makedirs(args.out)

    # Read in madam parameter file

    # Allow more than one entry, gather into a list
    repeated_keys = ["detset", "detset_nopol", "survey"]
    pars = {}

    if comm.comm_world.rank == 0:
        pars["kfirst"] = False
        pars["temperature_only"] = True
        pars["base_first"] = 60.0
        pars["nside_map"] = 512
        pars["nside_cross"] = 512
        pars["nside_submap"] = 16
        pars["write_map"] = False
        pars["write_binmap"] = True
        pars["write_matrix"] = False
        pars["write_wcov"] = False
        pars["write_hits"] = True
        pars["kfilter"] = False
        pars["info"] = 3
        if args.madampar:
            pat = re.compile(r"\s*(\S+)\s*=\s*(\S+(\s+\S+)*)\s*")
            comment = re.compile(r"^#.*")
            with open(args.madampar, "r") as f:
                for line in f:
                    if not comment.match(line):
                        result = pat.match(line)
                        if result:
                            key, value = result.group(1), result.group(2)
                            if key in repeated_keys:
                                if key not in pars:
                                    pars[key] = []
                                pars[key].append(value)
                            else:
                                pars[key] = value
        # Command line parameters override the ones in the madam parameter file
        if "file_root" not in pars:
            pars["file_root"] = "madam"
        if args.madam_prefix is not None:
            pars["file_root"] = args.madam_prefix
        sfreq = "{:03}".format(args.freq)
        if sfreq not in pars["file_root"]:
            pars["file_root"] += "_" + sfreq
        try:
            fsample = {30: 32.51, 44: 46.55, 70: 78.77}[args.freq]
        except Exception:
            fsample = 180.3737
        pars["fsample"] = fsample
        pars["path_output"] = args.out

        print("All parameters:")
        print(args, flush=True)

    pars = comm.comm_world.bcast(pars, root=0)

    memreport("after parameters", MPI.COMM_WORLD)

    # madam only supports a single observation.  Normally
    # we would have multiple observations with some subset
    # assigned to each process group.

    # create the TOD for this observation

    tod = tp.Exchange(
        comm=comm.comm_group,
        detectors=detectors,
        ringdb=args.ringdb,
        effdir_in=args.effdir,
        effdir_pntg=args.effdir_pntg,
        obt_range=obtrange,
        ring_range=ringrange,
        od_range=odrange,
        freq=args.freq,
        RIMO=args.rimo,
        obtmask=args.obtmask,
        flagmask=args.flagmask,
        do_eff_cache=False,
    )

    # normally we would get the intervals from somewhere else, but since
    # the Exchange TOD already had to get that information, we can
    # get it from there.

    ob = {}
    ob["name"] = "mission"
    ob["id"] = 0
    ob["tod"] = tod
    ob["intervals"] = tod.valid_intervals
    ob["baselines"] = None
    ob["noise"] = tod.noise

    # Add the bare minimum focal plane information for the conviqt operator
    focalplane = {}
    for det in tod.detectors:
        if args.pxx:
            # Beam is in the polarization basis.
            # No extra rotations are needed
            psipol = tod.rimo[det].psi_pol
        else:
            # Beam is in the detector basis. Convolver needs to remove
            # the last rotation into the polarization sensitive frame.
            psipol = tod.rimo[det].psi_uv + tod.rimo[det].psi_pol
        focalplane[det] = {
            "pol_leakage" : tod.rimo[det].epsilon,
            "pol_angle_deg" : psipol,
        }
    ob["focalplane"] = focalplane

    data.obs.append(ob)

    comm.comm_world.barrier()
    if comm.comm_world.rank == 0:
        timer.report_clear("Metadata queries")

    loader = tp.OpInputPlanck(
        commonflags_name="common_flags", flags_name="flags", margin=0
    )

    loader.exec(data)

    comm.comm_world.barrier()
    if comm.comm_world.rank == 0:
        timer.report_clear("Data read and cache")
        tod.cache.report()

    memreport("after loading", mpiworld)

    # make a planck Healpix pointing matrix
    mode = "IQU"
    if pars["temperature_only"] == "T":
        mode = "I"
    nside = int(pars["nside_map"])
    pointing = tp.OpPointingPlanck(
        nside=nside,
        mode=mode,
        RIMO=tod.RIMO,
        margin=0,
        apply_flags=False,
        keep_vel=False,
        keep_pos=False,
        keep_phase=False,
        keep_quats=True,
    )
    pointing.exec(data)

    comm.comm_world.barrier()
    if comm.comm_world.rank == 0:
        timer.report_clear("Pointing Matrix took, mode = {}".format(mode))

    memreport("after pointing", mpiworld)

    # simulate the TOD by convolving the sky with the beams

    if comm.comm_world.rank == 0:
        print("Convolving TOD", flush=True)

    for pattern in args.beamfile.split(","):
        skyfiles = {}
        beamfiles = {}
        for det in tod.detectors:
            freq = "{:03}".format(tp.utilities.det2freq(det))
            if "LFI" in det:
                psmdet = "{}_{}".format(freq, det[3:])
                if det.endswith("M"):
                    arm = "y"
                else:
                    arm = "x"
                graspdet = "{}_{}_{}".format(freq[1:], det[3:5], arm)
            else:
                psmdet = det.replace("-", "_")
                graspdet = det
            skyfile = (
                args.skyfile.replace("FREQ", freq)
                .replace("PSMDETECTOR", psmdet)
                .replace("DETECTOR", det)
            )
            skyfiles[det] = skyfile
            beamfile = pattern.replace("GRASPDETECTOR", graspdet).replace(
                "DETECTOR", det
            )
            beamfiles[det] = beamfile
            if comm.comm_world.rank == 0:
                print("Convolving {} with {}".format(skyfile, beamfile), flush=True)

        conviqt = OpSimConviqt(
            comm.comm_world,
            skyfiles,
            beamfiles,
            lmax=args.lmax,
            beammmax=args.beammmax,
            pol=True,
            fwhm=args.fwhm,
            order=args.order,
            calibrate=True,
            dxx=True,
            out="conviqt_tod",
            apply_flags=False,
            remove_monopole=args.remove_monopole,
            remove_dipole=args.remove_dipole,
            verbosity=1,
            normalize_beam=args.normalize,
        )
        conviqt.exec(data)

    comm.comm_world.barrier()
    if comm.comm_world.rank == 0:
        timer.report_clear("Convolution")

    memreport("after conviqt", mpiworld)

    if args.effdir_out is not None:
        if comm.comm_world.rank == 0:
            print("Writing TOD", flush=True)

        tod.set_effdir_out(args.effdir_out, None)
        writer = tp.OpOutputPlanck(
            signal_name="conviqt_tod",
            flags_name="flags",
            commonflags_name="common_flags",
        )
        writer.exec(data)

        comm.comm_world.barrier()
        if comm.comm_world.rank == 0:
            timer.report_clear("Conviqt output")

        memreport("after writing", mpiworld)

    # for now, we pass in the noise weights from the RIMO.
    detweights = {}
    for d in tod.detectors:
        net = tod.rimo[d].net
        fsample = tod.rimo[d].fsample
        detweights[d] = 1.0 / (fsample * net * net)

    if not args.skip_madam:
        if comm.comm_world.rank == 0:
            print("Calling Madam", flush=True)

        try:
            if args.obtmask_madam is None:
                obtmask = args.obtmask
            else:
                obtmask = args.obtmask_madam
            if args.flagmask_madam is None:
                flagmask = args.flagmask
            else:
                flagmask = args.flagmask_madam
            madam = OpMadam(
                params=pars,
                detweights=detweights,
                name="conviqt_tod",
                flag_name="flags",
                purge=True,
                name_out="madam_tod",
                common_flag_mask=obtmask,
                flag_mask=flagmask,
            )
        except Exception as e:
            raise Exception(
                "{:4} : ERROR: failed to initialize Madam: {}".format(
                    comm.comm_world.rank, e
                )
            )
        madam.exec(data)

        comm.comm_world.barrier()
        if comm.comm_world.rank == 0:
            timer.report_clear("Madam took {:.3f} s")

        memreport("after madam", mpiworld)

    gt.stop_all()
    if mpiworld is not None:
        mpiworld.barrier()
    timer = Timer()
    timer.start()
    alltimers = gather_timers(comm=mpiworld)
    if comm.world_rank == 0:
        out = os.path.join(args.out, "timing")
        dump_timing(alltimers, out)
        timer.stop()
        timer.report("Gather and dump timing info")
    return
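
The Madam parameter-file parsing in the middle of this main is easy to exercise in isolation; a runnable sketch with in-memory lines instead of a file:

import re

pat = re.compile(r"\s*(\S+)\s*=\s*(\S+(\s+\S+)*)\s*")
comment = re.compile(r"^#.*")
repeated_keys = ["detset", "detset_nopol", "survey"]
pars = {}
lines = ["# a comment", "nside_map = 1024", "detset = LFI18M LFI18S", "detset = LFI19M"]
for line in lines:
    if not comment.match(line):
        result = pat.match(line)
        if result:
            key, value = result.group(1), result.group(2)
            if key in repeated_keys:
                pars.setdefault(key, []).append(value)
            else:
                pars[key] = value
print(pars)  # {'nside_map': '1024', 'detset': ['LFI18M LFI18S', 'LFI19M']}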
Example #24
def parse_arguments(comm, procs):
    log = Logger.get()

    parser = argparse.ArgumentParser(
        description="Simulate satellite boresight pointing and make a map.",
        fromfile_prefix_chars="@",
    )

    add_dist_args(parser)
    add_pointing_args(parser)
    add_tidas_args(parser)
    add_spt3g_args(parser)
    add_dipole_args(parser)
    add_pysm_args(parser)
    add_mc_args(parser)
    add_noise_args(parser)
    add_todsatellite_args(parser)

    parser.add_argument("--outdir",
                        required=False,
                        default="out",
                        help="Output directory")
    parser.add_argument(
        "--debug",
        required=False,
        default=False,
        action="store_true",
        help="Write diagnostics",
    )

    add_madam_args(parser)
    add_binner_args(parser)

    parser.add_argument(
        "--madam",
        required=False,
        action="store_true",
        help="Use libmadam for map-making",
        dest="use_madam",
    )
    parser.add_argument(
        "--no-madam",
        required=False,
        action="store_false",
        help="Do not use libmadam for map-making [default]",
        dest="use_madam",
    )
    parser.set_defaults(use_madam=False)

    parser.add_argument(
        "--focalplane",
        required=False,
        default=None,
        help="Pickle file containing a dictionary of detector properties.  "
        "The keys of this dict are the detector names, and each value is also "
        'a dictionary with keys "quat" (4 element ndarray), "fwhm" '
        '(float, arcmin), "fknee" (float, Hz), "alpha" (float), and "NET" '
        '(float).  For optional plotting, the key "color" can specify a '
        "valid matplotlib color string.",
    )

    parser.add_argument(
        "--gain",
        required=False,
        default=None,
        help="Calibrate the input timelines with a set of gains from a"
        "FITS file containing 3 extensions:"
        "HDU named DETECTORS : table with list of detector names in a column named DETECTORS"
        "HDU named TIME: table with common timestamps column named TIME"
        "HDU named GAINS: 2D image of floats with one row per detector and one column per value.",
    )

    try:
        args = parser.parse_args()
    except SystemExit:
        sys.exit()

    if comm.world_rank == 0:
        log.info("\n")
        log.info("All parameters:")
        for ag in vars(args):
            log.info("{} = {}".format(ag, getattr(args, ag)))
        log.info("\n")

    groupsize = args.group_size
    if groupsize is None or groupsize <= 0:
        groupsize = procs

    # This is the 2-level toast communicator.
    comm = Comm(world=comm.comm_world, groupsize=groupsize)

    return args, comm, groupsize
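
Several of the parsers in these examples pass fromfile_prefix_chars="@", so options can live in a text file with one token per line and be pulled in as script.py @options.txt:

import argparse

parser = argparse.ArgumentParser(fromfile_prefix_chars="@")
parser.add_argument("--outdir", default="out")

with open("options.txt", "w") as f:
    f.write("--outdir\nmaps\n")

args = parser.parse_args(["@options.txt"])
print(args.outdir)  # maps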
Example #25
def main():
    env = Environment.get()
    log = Logger.get()
    gt = GlobalTimers.get()
    gt.start("toast_satellite_sim (total)")
    timer0 = Timer()
    timer0.start()

    mpiworld, procs, rank, comm = get_comm()
    args, comm, groupsize = parse_arguments(comm, procs)

    tmr = Timer()
    tmr.start()

    if comm.world_rank == 0:
        os.makedirs(args.outdir, exist_ok=True)

    focalplane, gain, detweights = load_focalplane(args, comm)

    data = create_observations(args, comm, focalplane, groupsize)

    expand_pointing(args, comm, data)

    localpix, localsm, subnpix = get_submaps(args, comm, data)

    signalname = None
    skyname = simulate_sky_signal(args, comm, data, [focalplane], subnpix,
                                  localsm, "signal")
    if skyname is not None:
        signalname = skyname

    diponame = simulate_dipole(args, comm, data, "signal")
    if diponame is not None:
        signalname = diponame

    # Mapmaking.

    if not args.use_madam:
        if comm.world_rank == 0:
            log.info("Not using Madam, will only make a binned map")

        npp, zmap = init_binner(args,
                                comm,
                                data,
                                detweights,
                                subnpix=subnpix,
                                localsm=localsm)

        # Loop over Monte Carlos

        firstmc = args.MC_start
        nmc = args.MC_count

        for mc in range(firstmc, firstmc + nmc):
            mctmr = Timer()
            mctmr.start()

            outpath = os.path.join(args.outdir, "mc_{:03d}".format(mc))

            simulate_noise(args, comm, data, mc, "tot_signal", overwrite=True)

            # add sky signal
            add_signal(args, comm, data, "tot_signal", signalname)

            if gain is not None:
                timer = Timer()
                timer.start()
                op_apply_gain = OpApplyGain(gain, name="tot_signal")
                op_apply_gain.exec(data)
                if comm.world_rank == 0:
                    timer.report_clear("  Apply gains {:04d}".format(mc))

            if mc == firstmc:
                # For the first realization, optionally export the
                # timestream data.  If we had observation intervals defined,
                # we could pass "use_interval=True" to the export operators,
                # which would ensure breaks in the exported data at
                # acceptable places.
                output_tidas(args, comm, data, "tot_signal")
                output_spt3g(args, comm, data, "tot_signal")

            apply_binner(args, comm, data, npp, zmap, detweights, outpath,
                         "tot_signal")

            if comm.world_rank == 0:
                mctmr.report_clear("  Map-making {:04d}".format(mc))
    else:

        # Initialize madam parameters

        madampars = setup_madam(args)

        # in debug mode, print out data distribution information
        if args.debug:
            handle = None
            if comm.world_rank == 0:
                handle = open(os.path.join(args.outdir, "distdata.txt"), "w")
            data.info(handle)
            if comm.world_rank == 0:
                handle.close()
            if comm.comm_world is not None:
                comm.comm_world.barrier()
            if comm.world_rank == 0:
                tmr.report_clear("Dumping data distribution")

        # Loop over Monte Carlos

        firstmc = args.MC_start
        nmc = args.MC_count

        for mc in range(firstmc, firstmc + nmc):
            mctmr = Timer()
            mctmr.start()

            # create output directory for this realization
            outpath = os.path.join(args.outdir, "mc_{:03d}".format(mc))

            simulate_noise(args, comm, data, mc, "tot_signal", overwrite=True)

            # add sky signal
            add_signal(args, comm, data, "tot_signal", signalname)

            if gain is not None:
                op_apply_gain = OpApplyGain(gain, name="tot_signal")
                op_apply_gain.exec(data)

            if comm.comm_world is not None:
                comm.comm_world.barrier()
            if comm.world_rank == 0:
                tmr.report_clear("  Apply gains {:04d}".format(mc))

            apply_madam(args, comm, data, madampars, outpath, detweights,
                        "tot_signal")

            if comm.comm_world is not None:
                comm.comm_world.barrier()
            if comm.world_rank == 0:
                mctmr.report_clear("  Map-making {:04d}".format(mc))

    gt.stop_all()
    if comm.comm_world is not None:
        comm.comm_world.barrier()
    tmr.stop()
    tmr.clear()
    tmr.start()
    alltimers = gather_timers(comm=comm.comm_world)
    if comm.world_rank == 0:
        out = os.path.join(args.outdir, "timing")
        dump_timing(alltimers, out)
        tmr.stop()
        tmr.report("Gather and dump timing info")
        timer0.report_clear("toast_satellite_sim.py")
    return
Example #26
def main():
    log = Logger.get()

    parser = argparse.ArgumentParser(
        description="Allocate and free cache objects.")

    parser.add_argument("--ndet",
                        required=False,
                        type=int,
                        default=10,
                        help="The number of detectors")

    parser.add_argument("--nobs",
                        required=False,
                        type=int,
                        default=2,
                        help="The number of observations")

    parser.add_argument(
        "--obsminutes",
        required=False,
        type=int,
        default=60,
        help="The number of minutes in each observation.",
    )

    parser.add_argument("--rate",
                        required=False,
                        type=float,
                        default=37.0,
                        help="The sample rate.")

    parser.add_argument(
        "--nloop",
        required=False,
        type=int,
        default=2,
        help="The number of allocate / free loops",
    )

    args = parser.parse_args()

    log.info("Input parameters:")
    log.info("  {} observations".format(args.nobs))
    log.info("  {} minutes per obs".format(args.obsminutes))
    log.info("  {} detectors per obs".format(args.ndet))
    log.info("  {}Hz sample rate".format(args.rate))

    nsampobs = int(args.obsminutes * 60 * args.rate)

    nsamptot = args.ndet * args.nobs * nsampobs

    log.info("{} total samples across all detectors and observations".format(
        nsamptot))

    bytes_sigobs = nsampobs * 8
    bytes_sigtot = nsamptot * 8
    bytes_flagobs = nsampobs * 1
    bytes_flagtot = nsamptot * 1
    bytes_pixobs = nsampobs * 8
    bytes_pixtot = nsamptot * 8
    bytes_wtobs = 3 * nsampobs * 4
    bytes_wttot = 3 * nsamptot * 4

    bytes_tot = bytes_sigtot + bytes_flagtot + bytes_pixtot + bytes_wttot
    bytes_tot_mb = bytes_tot / 2**20
    log.info("{} total bytes ({:0.2f}MB) of data expected".format(
        bytes_tot, bytes_tot_mb))

    for lp in range(args.nloop):
        log.info("Allocation loop {:02d}".format(lp))
        vmem = psutil.virtual_memory()._asdict()
        avstart = vmem["available"]
        avstart_mb = avstart / 2**20
        log.info(
            "  Starting with {:0.2f}MB of available memory".format(avstart_mb))

        # The list of Caches, one per "observation"
        caches = list()

        # This structure holds external references to cache objects, to ensure that we
        # can destroy objects and free memory, even if external references are held.
        refs = list()

        for ob in range(args.nobs):
            ch = Cache()
            rf = dict()
            for det in range(args.ndet):
                dname = "{:04d}".format(det)
                cname = "{}_sig".format(dname)
                rf[cname] = ch.create(cname, np.float64, (nsampobs, ))
                cname = "{}_flg".format(dname)
                rf[cname] = ch.create(cname, np.uint8, (nsampobs, ))
                cname = "{}_pix".format(dname)
                rf[cname] = ch.create(cname, np.int64, (nsampobs, ))
                cname = "{}_wgt".format(dname)
                rf[cname] = ch.create(cname, np.float32, (nsampobs, 3))
            caches.append(ch)
            refs.append(rf)

        vmem = psutil.virtual_memory()._asdict()
        avpost = vmem["available"]
        avpost_mb = avpost / 2**20
        log.info("  After allocation, {:0.2f}MB of available memory".format(
            avpost_mb))

        diff = avstart_mb - avpost_mb
        diffperc = 100.0 * np.absolute(diff - bytes_tot_mb) / bytes_tot_mb
        log.info(
            "  Difference is {:0.2f}MB, expected {:0.2f}MB ({:0.2f}% residual)"
            .format(diff, bytes_tot_mb, diffperc))

        for suf in ["wgt", "pix", "flg", "sig"]:
            for ob, ch in zip(range(args.nobs), caches):
                for det in range(args.ndet):
                    dname = "{:04d}".format(det)
                    ch.destroy("{}_{}".format(dname, suf))

        vmem = psutil.virtual_memory()._asdict()
        avfinal = vmem["available"]
        avfinal_mb = avfinal / 2**20
        log.info("  After destruction, {:0.2f}MB of available memory".format(
            avfinal_mb))

        diff = avfinal_mb - avpost_mb
        diffperc = 100.0 * np.absolute(diff - bytes_tot_mb) / bytes_tot_mb
        log.info(
            "  Difference is {:0.2f}MB, expected {:0.2f}MB ({:0.2f}% residual)"
            .format(diff, bytes_tot_mb, diffperc))

    return
Example #27
def main():
    env = Environment.get()
    env.enable_function_timers()

    log = Logger.get()
    gt = GlobalTimers.get()
    gt.start("toast_benchmark (total)")

    mpiworld, procs, rank = get_world()

    if rank == 0:
        log.info("TOAST version = {}".format(env.version()))
        log.info("Using a maximum of {} threads per process".format(env.max_threads()))
    if mpiworld is None:
        log.info("Running serially with one process at {}".format(str(datetime.now())))
    else:
        if rank == 0:
            log.info(
                "Running with {} processes at {}".format(procs, str(datetime.now()))
            )

    cases = {
        "tiny": 5000000,  # O(1) GB RAM
        "xsmall": 50000000,  # O(10) GB RAM
        "small": 500000000,  # O(100) GB RAM
        "medium": 5000000000,  # O(1) TB RAM
        "large": 50000000000,  # O(10) TB RAM
        "xlarge": 500000000000,  # O(100) TB RAM
        "heroic": 5000000000000,  # O(1000) TB RAM
    }

    args, comm, n_nodes, n_detector, case, group_seconds, n_group = job_config(
        mpiworld, cases
    )

    # Note:  The number of "days" here will just be an approximation of the desired
    # data volume since we are doing a realistic schedule for a real observing site.

    n_days = int(2.0 * (group_seconds * n_group) / (24 * 3600))
    if n_days == 0:
        n_days = 1

    if rank == 0:
        log.info(
            "Using {} detectors for approximately {} days".format(n_detector, n_days)
        )

    # Create the schedule file and input maps on one process
    if rank == 0:
        create_schedules(args, group_seconds, n_days)
        create_input_maps(args)
    if mpiworld is not None:
        mpiworld.barrier()

    if args.dry_run is not None:
        if rank == 0:
            log.info("Exit from dry run")
        # We are done!
        sys.exit(0)

    gt.start("toast_benchmark (science work)")

    # Load and broadcast the schedule file

    schedules = pipeline_tools.load_schedule(args, comm)

    # Load the weather and append to schedules

    pipeline_tools.load_weather(args, comm, schedules)

    # Simulate the focalplane

    detweights = create_focalplanes(args, comm, schedules, n_detector)

    # Create the TOAST data object to match the schedule.  This will
    # include simulating the boresight pointing.

    data, telescope_data, total_samples = create_observations(args, comm, schedules)

    # handle = None
    # if comm.world_rank == 0:
    #     handle = open(os.path.join(args.outdir, "distdata.txt"), "w")
    # data.info(handle)
    # if comm.world_rank == 0:
    #     handle.close()
    # if comm.comm_world is not None:
    #     comm.comm_world.barrier()

    # Split the communicator for day and season mapmaking

    time_comms = pipeline_tools.get_time_communicators(args, comm, data)

    # Expand boresight quaternions into detector pointing weights and
    # pixel numbers

    pipeline_tools.expand_pointing(args, comm, data)

    # Optionally rewrite the noise PSD:s in each observation to include
    # elevation-dependence

    pipeline_tools.get_elevation_noise(args, comm, data)

    # Purge the pointing if we are NOT going to export the
    # data to a TIDAS volume
    if (args.tidas is None) and (args.spt3g is None):
        for ob in data.obs:
            tod = ob["tod"]
            tod.free_radec_quats()

    # Prepare auxiliary information for distributed map objects

    signalname = pipeline_tools.scan_sky_signal(args, comm, data, "signal")

    # Set up objects to take copies of the TOD at appropriate times

    totalname, totalname_freq = setup_sigcopy(args)

    # Loop over Monte Carlos

    firstmc = args.MC_start
    nsimu = args.MC_count

    freqs = [float(freq) for freq in args.freq.split(",")]
    nfreq = len(freqs)

    for mc in range(firstmc, firstmc + nsimu):

        pipeline_tools.simulate_atmosphere(args, comm, data, mc, totalname)

        # Loop over frequencies with identical focal planes and identical
        # atmospheric noise.

        for ifreq, freq in enumerate(freqs):

            if comm.world_rank == 0:
                log.info(
                    "Processing frequency {}GHz {} / {}, MC = {}".format(
                        freq, ifreq + 1, nfreq, mc
                    )
                )

            # Make a copy of the atmosphere so we can scramble the gains and apply
            # frequency-dependent scaling.
            pipeline_tools.copy_signal(args, comm, data, totalname, totalname_freq)

            pipeline_tools.scale_atmosphere_by_frequency(
                args, comm, data, freq=freq, mc=mc, cache_name=totalname_freq
            )

            pipeline_tools.update_atmospheric_noise_weights(args, comm, data, freq, mc)

            # Add previously simulated sky signal to the atmospheric noise.

            pipeline_tools.add_signal(
                args, comm, data, totalname_freq, signalname, purge=(nsimu == 1)
            )

            mcoffset = ifreq * 1000000
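            # The per-frequency offset keeps the noise and gain realizations
            # below statistically independent between frequency channels,
            # while the atmosphere (simulated once per MC above) is shared.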

            pipeline_tools.simulate_noise(
                args, comm, data, mc + mcoffset, totalname_freq
            )

            pipeline_tools.scramble_gains(
                args, comm, data, mc + mcoffset, totalname_freq
            )

            outpath = setup_output(args, comm, mc + mcoffset, freq)

            # Bin and destripe maps

            pipeline_tools.apply_mapmaker(
                args,
                comm,
                data,
                outpath,
                totalname_freq,
                time_comms=time_comms,
                telescope_data=telescope_data,
                first_call=(mc == firstmc),
            )

            if args.apply_polyfilter or args.apply_groundfilter:

                # Filter signal

                pipeline_tools.apply_polyfilter(args, comm, data, totalname_freq)

                pipeline_tools.apply_groundfilter(args, comm, data, totalname_freq)

                # Bin filtered maps

                pipeline_tools.apply_mapmaker(
                    args,
                    comm,
                    data,
                    outpath,
                    totalname_freq,
                    time_comms=time_comms,
                    telescope_data=telescope_data,
                    first_call=False,
                    extra_prefix="filtered",
                    bin_only=True,
                )

    gt.stop_all()
    if mpiworld is not None:
        mpiworld.barrier()

    runtime = gt.seconds("toast_benchmark (science work)")
    prefactor = 1.0e-3
    kilo_samples = 1.0e-3 * total_samples
    sample_factor = 1.2
    det_factor = 2.0
    metric = (
        prefactor
        * n_detector ** det_factor
        * kilo_samples ** sample_factor
        / (n_nodes * runtime)
    )
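    # The metric rewards job size: detector count enters quadratically and
    # sample count superlinearly (exponent 1.2), normalized by node-time so
    # that runs at different scales can be compared.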
    if rank == 0:
        msg = "Science Metric: {:0.1e} * ({:d}**{:0.2f}) * ({:0.3e}**{:0.3f}) / ({:0.1f} * {}) = {:0.2f}".format(
            prefactor,
            n_detector,
            det_factor,
            kilo_samples,
            sample_factor,
            runtime,
            n_nodes,
            metric,
        )
        log.info("")
        log.info(msg)
        log.info("")
        with open(os.path.join(args.outdir, "log"), "a") as f:
            f.write(msg)
            f.write("\n\n")

    timer = Timer()
    timer.start()
    alltimers = gather_timers(comm=mpiworld)
    if comm.world_rank == 0:
        out = os.path.join(args.outdir, "timing")
        dump_timing(alltimers, out)
        with open(os.path.join(args.outdir, "log"), "a") as f:
            f.write("Copy of Global Timers:\n")
            with open("{}.csv".format(out), "r") as t:
                f.write(t.read())
        timer.stop()
        timer.report("Gather and dump timing info")
    return
Example #28
0
def job_config(mpicomm, cases):
    env = Environment.get()
    log = Logger.get()

    class args:
        debug = False
        # TOD Ground options
        el_mod_step_deg = 0.0
        el_mod_rate_hz = 0.0
        el_mod_amplitude_deg = 1.0
        el_mod_sine = False
        el_nod_deg = False
        el_nod_every_scan = False
        start_with_el_nod = False
        end_with_el_nod = False
        scan_rate = 1.0
        scan_rate_el = 0.0
        scan_accel = 1.0
        scan_accel_el = 0.0
        scan_cosecant_modulate = False
        sun_angle_min = 30.0
        schedule = None  # required
        weather = "SIM"
        timezone = 0
        sample_rate = 100.0
        coord = "C"
        split_schedule = None
        sort_schedule = False
        hwp_rpm = 10.0
        hwp_step_deg = None
        hwp_step_time_s = None
        elevation_noise_a = 0.0
        elevation_noise_b = 0.0
        freq = "150"
        do_daymaps = False
        do_seasonmaps = False
        # Pointing options
        nside = 1024
        nside_submap = 16
        single_precision_pointing = False
        common_flag_mask = 1
        # Polyfilter options
        apply_polyfilter = False
        poly_order = 0
        # Ground filter options
        apply_groundfilter = False
        ground_order = 0
        # Atmosphere options
        simulate_atmosphere = False
        simulate_coarse_atmosphere = False
        focalplane_radius_deg = None
        atm_verbosity = 0
        atm_lmin_center = 0.01
        atm_lmin_sigma = 0.001
        atm_lmax_center = 10.0
        atm_lmax_sigma = 10.0
        atm_gain = 2.0e-5
        atm_gain_coarse = 8.0e-5
        atm_zatm = 40000.0
        atm_zmax = 200.0
        atm_xstep = 10.0
        atm_ystep = 10.0
        atm_zstep = 10.0
        atm_nelem_sim_max = 10000
        atm_wind_dist = 3000.0
        atm_z0_center = 2000.0
        atm_z0_sigma = 0.0
        atm_T0_center = 280.0
        atm_T0_sigma = 10.0
        atm_cache = None
        atm_apply_flags = False
        # Noise simulation options
        simulate_noise = False
        # Gain scrambler
        apply_gainscrambler = False
        gain_sigma = 0.01
        # Map maker
        mapmaker_prefix = "toast"
        mapmaker_mask = None
        mapmaker_weightmap = None
        mapmaker_iter_max = 20
        mapmaker_precond_width = 100
        mapmaker_prefilter_order = None
        mapmaker_baseline_length = 200.0
        mapmaker_noisefilter = False
        mapmaker_fourier2D_order = None
        mapmaker_fourier2D_subharmonics = None
        write_hits = True
        write_binmap = True
        write_wcov = False
        write_wcov_inv = False
        zip_maps = False
        # Monte Carlo
        MC_start = 0
        MC_count = 1
        # Sky signal
        input_map = None
        simulate_sky = True
        # Input dir
        auxdir = "toast_inputs"
        # Output
        outdir = "toast"
        tidas = None
        spt3g = None

    parser = argparse.ArgumentParser(
        description="Run a TOAST workflow scaled appropriately to the MPI communicator size and available memory.",
        fromfile_prefix_chars="@",
    )

    parser.add_argument(
        "--node_mem_gb",
        required=False,
        default=None,
        type=float,
        help="Use this much memory per node in GB",
    )

    parser.add_argument(
        "--dry_run",
        required=False,
        default=None,
        type=str,
        help="Comma-separated total_procs,node_procs to simulate.",
    )

    parser.parse_args(namespace=args)

    procs = 1
    rank = 0
    if mpicomm is not None:
        procs = mpicomm.size
        rank = mpicomm.rank

    avail_node_bytes = None
    procs_per_node = None

    if args.dry_run is not None:
        dryrun_total, dryrun_node = args.dry_run.split(",")
        dryrun_total = int(dryrun_total)
        dryrun_node = int(dryrun_node)
        if rank == 0:
            log.info(
                "DRY RUN simulating {} total processes with {} per node".format(
                    dryrun_total, dryrun_node
                )
            )
        procs_per_node = dryrun_node
        procs = dryrun_total
        # We are simulating the distribution
        avail_node_bytes = get_node_mem(mpicomm, 0)

    else:
        # Get information about the actual job size
        procs_per_node, avail_node_bytes = job_size(mpicomm)

    if rank == 0:
        log.info(
            "Minimum detected per-node memory available is {:0.2f} GB".format(
                avail_node_bytes / (1024 ** 3)
            )
        )

    if args.node_mem_gb is not None:
        avail_node_bytes = int((1024 ** 3) * args.node_mem_gb)
        if rank == 0:
            log.info(
                "Setting per-node available memory to {:0.2f} GB as requested".format(
                    avail_node_bytes / (1024 ** 3)
                )
            )

    # Based on the total number of processes and count per node, choose the number of
    # nodes in each observation and a focalplane such that every process has >= 4
    # detectors.

    n_nodes = procs // procs_per_node
    if rank == 0:
        log.info("Job has {} total nodes".format(n_nodes))

    if rank == 0:
        log.info("Examining {} possible cases to run:".format(len(cases)))

    selected_case = None
    selected_nodes = None
    n_detector = None
    time_samples = None
    group_procs = None
    group_nodes = None
    n_group = None
    group_time_samples = None

    for case_name, case_samples in cases.items():
        (
            case_n_detector,
            case_time_samples,
            case_group_procs,
            case_group_nodes,
            case_n_group,
            case_group_time_samples,
        ) = sample_distribution(
            rank, procs_per_node, avail_node_bytes, case_samples, args.sample_rate
        )

        case_min_nodes = case_n_group * case_group_nodes
        if rank == 0:
            log.info(
                "  {:8s}: requires {:d} nodes for {} MPI ranks and {:0.1f}GB per node".format(
                    case_name,
                    case_min_nodes,
                    procs_per_node,
                    avail_node_bytes / (1024 ** 3),
                )
            )

        if selected_nodes is None:
            if case_min_nodes <= n_nodes:
                # First case that fits in our job
                selected_case = case_name
                selected_nodes = case_min_nodes
                n_detector = case_n_detector
                time_samples = case_time_samples
                group_procs = case_group_procs
                group_nodes = case_group_nodes
                n_group = case_n_group
                group_time_samples = case_group_time_samples
        else:
            if (case_min_nodes <= n_nodes) and (case_min_nodes >= selected_nodes):
                # This case fits in our job and is larger than the current one
                selected_case = case_name
                selected_nodes = case_min_nodes
                n_detector = case_n_detector
                time_samples = case_time_samples
                group_procs = case_group_procs
                group_nodes = case_group_nodes
                n_group = case_n_group
                group_time_samples = case_group_time_samples

    if selected_case is None:
        msg = (
            "None of the available cases fit into aggregate memory.  Use a larger job."
        )
        if rank == 0:
            log.error(msg)
        raise RuntimeError(msg)
    else:
        if rank == 0:
            log.info("Selected case '{}'".format(selected_case))

    if rank == 0:
        log.info("Using groups of {} nodes".format(group_nodes))

    # Adjust number of groups

    if n_nodes % group_nodes != 0:
        msg = "Current number of nodes ({}) is not divisible by the required group size ({})".format(
            n_nodes, group_nodes
        )
        if rank == 0:
            log.error(msg)
        raise RuntimeError(msg)

    n_group = n_nodes // group_nodes
    group_time_samples = 1 + time_samples // n_group

    group_seconds = group_time_samples / args.sample_rate
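    # Illustrative arithmetic: 8 nodes with group_nodes = 2 gives n_group = 4,
    # and group_time_samples shrinks accordingly so that the total sample
    # target is preserved.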

    if args.simulate_atmosphere and args.weather is None:
        raise RuntimeError("Cannot simulate atmosphere without a TOAST weather file")

    comm = None
    if mpicomm is None or args.dry_run is not None:
        comm = Comm(world=None)
    else:
        comm = Comm(world=mpicomm, groupsize=group_procs)

    jobdate = datetime.now().strftime("%Y%m%d-%H:%M:%S")

    args.outdir += "_{:06d}_grp-{:04d}p-{:02d}n_{}".format(
        procs, group_procs, group_nodes, jobdate
    )
    args.auxdir = os.path.join(args.outdir, "inputs")

    if rank == 0:
        os.makedirs(args.outdir)
        os.makedirs(args.auxdir, exist_ok=True)

    if rank == 0:
        with open(os.path.join(args.outdir, "log"), "w") as f:
            f.write("Running at {}\n".format(jobdate))
            f.write("TOAST version = {}\n".format(env.version()))
            f.write("TOAST max threads = {}\n".format(env.max_threads()))
            f.write("MPI Processes = {}\n".format(procs))
            f.write("MPI Processes per node = {}\n".format(procs_per_node))
            f.write(
                "Memory per node = {:0.2f} GB\n".format(avail_node_bytes / (1024 ** 3))
            )
            f.write("Number of groups = {}\n".format(n_group))
            f.write("Group nodes = {}\n".format(group_nodes))
            f.write("Group MPI Processes = {}\n".format(group_procs))
            f.write("Case selected = {}\n".format(selected_case))
            f.write("Case number of detectors = {}\n".format(n_detector))
            f.write(
                "Case total samples = {}\n".format(
                    n_group * group_time_samples * n_detector
                )
            )
            f.write(
                "Case samples per group = {}\n".format(group_time_samples * n_detector)
            )
            f.write("Case data seconds per group = {}\n".format(group_seconds))
            f.write("Parameters:\n")
            for k, v in vars(args).items():
                if re.match(r"_.*", k) is None:
                    f.write("  {} = {}\n".format(k, v))

    args.schedule = os.path.join(args.auxdir, "schedule.txt")
    args.input_map = os.path.join(args.auxdir, "cmb.fits")

    return args, comm, n_nodes, n_detector, selected_case, group_seconds, n_group
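job_config relies on a less common argparse idiom: parse_args(namespace=args)
writes the parsed options onto the args class, so class attributes act as
defaults for every option the parser does not define.  A minimal
self-contained sketch of the pattern (the option names are hypothetical):

import argparse

class defaults:
    verbose = False  # plain attribute, untouched by the parser

parser = argparse.ArgumentParser()
parser.add_argument("--level", type=int, default=1)
parser.parse_args(["--level", "3"], namespace=defaults)

print(defaults.level, defaults.verbose)  # -> 3 False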
Example #29
0
    def exec(self, data):
        """Generate timestreams.
        Args:
            data (toast.Data): The distributed data.
        Returns:
            None
        """

        log = Logger.get()
        group = data.comm.group
        for obs in data.obs:
            try:
                obsname = obs["name"]
            except Exception:
                obsname = "observation"

            observer = ephem.Observer()
            observer.lon = obs['site'].lon
            observer.lat = obs['site'].lat
            observer.elevation = obs['site'].alt  # In meters
            observer.epoch = "2000"
            observer.temp = 0  # in Celsius
            observer.compute_pressure()

            prefix = "{} : {} : ".format(group, obsname)
            tod = obs['tod']
            comm = tod.mpicomm
            rank = 0
            if comm is not None:
                rank = comm.rank
            site = obs['site'].id

            if comm is not None:
                comm.Barrier()
            if rank == 0:
                log.info("{}Setting up SSO simulation".format(prefix))

            # Get the observation time span and compute the horizontal
            # position of the SSO
            times = tod.local_times()
            sso_az, sso_el, sso_dist, sso_dia = self._get_sso_position(
                times, observer)

            tmr = Timer()
            if self._report_timing:
                if comm is not None:
                    comm.Barrier()
                tmr.start()

            self._observe_sso(sso_az, sso_el, sso_dist, sso_dia, tod, comm,
                              prefix)

            del sso_az, sso_el, sso_dist, sso_dia

        if self._report_timing:
            if comm is not None:
                comm.Barrier()
            if rank == 0:
                tmr.stop()
                tmr.report(
                    "{}Simulated and observed SSO signal".format(prefix))
        return
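The _get_sso_position helper called above is not part of this fragment.  A
minimal sketch of what such a helper might do with pyephem, assuming the
object is Mars and the timestamps are UNIX seconds (the name and defaults are
illustrative):

import numpy as np
import ephem
from datetime import datetime

def get_sso_position_sketch(times, observer, body=None):
    # Horizontal coordinates of a solar-system object at each timestamp.
    if body is None:
        body = ephem.Mars()
    n = len(times)
    az = np.zeros(n)
    el = np.zeros(n)
    dist = np.zeros(n)  # distance from Earth in AU
    dia = np.zeros(n)  # apparent diameter in arc seconds
    for i, t in enumerate(times):
        observer.date = ephem.Date(datetime.utcfromtimestamp(t))
        body.compute(observer)
        az[i] = body.az  # radians
        el[i] = body.alt  # radians
        dist[i] = body.earth_distance
        dia[i] = body.size
    return az, el, dist, dia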
Example #30
0
def sample_distribution(
    rank, procs_per_node, bytes_per_node, total_samples, sample_rate
):
    # For this benchmark, we start by ramping up to a realistic number of detectors for
    # one day of data. Then we extend the timespan to achieve the desired number of
    # samples.

    log = Logger.get()

    # Hex-packed 127 pixels (6 rings) times two dets per pixel.
    # max_detector = 254

    # Hex-packed 1027 pixels (18 rings) times two dets per pixel.
    max_detector = 2054

    # Minimum time span (one day)
    min_time_samples = int(24 * 3600 * sample_rate)
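    # At the default 100 Hz sample rate this is 24 * 3600 * 100 = 8,640,000
    # samples per detector per day.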

    # For the minimum time span, scale up the number of detectors to reach the
    # requested total sample size.

    n_detector = 1
    test_samples = n_detector * min_time_samples

    while test_samples < total_samples and n_detector < max_detector:
        n_detector += 1
        test_samples = n_detector * min_time_samples

    if rank == 0:
        log.debug(
            "  Dist total = {}, using {} detectors at min time samples = {}".format(
                total_samples, n_detector, min_time_samples
            )
        )

    # For this number of detectors, determine the group size needed to fit the
    # minimum number of samples in memory.  In practice, one day will actually be
    # split up into multiple observations.  However, sizing the groups this way
    # ensures that each group has multiple observations, which improves the load
    # balancing.

    # At most 2 cached copies of each detector timestream.
    det_bytes_per_sample = 2 * (
        8 * (1 + 4)  # 64-bit values: timestream + pixel index and 3 IQU weights
        + 1  # one byte per sample for flags
    )

    common_bytes_per_sample = (
        8 * 4  # 64-bit floats: one boresight quaternion per sample
        + 1  # one byte per sample for the common flag
    )
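    # With these constants: det_bytes_per_sample = 2 * (8 * 5 + 1) = 82 bytes
    # and common_bytes_per_sample = 8 * 4 + 1 = 33 bytes per process.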

    group_nodes = 0
    group_mem = 0.0

    # This just ensures we go through the loop once.
    min_time_mem = 1.0

    while group_mem < min_time_mem:
        group_nodes += 1
        group_procs = group_nodes * procs_per_node
        group_mem = group_nodes * bytes_per_node

        # NOTE:  change this when moving to toast-3, since common data is in shared mem.
        # So the prefactor should be nodes per group, not group_procs.
        bytes_per_samp = (
            n_detector * det_bytes_per_sample + group_procs * common_bytes_per_sample
        )
        # bytes_per_samp = (
        #     n_detector * det_bytes_per_sample + group_nodes * common_bytes_per_sample
        # )
        min_time_mem = min_time_samples * bytes_per_samp
        if rank == 0:
            log.verbose(
                "  Dist testing {} group nodes, {} proc/node, group mem = {}, comparing to minimum = {} ({} samp * {} bytes/samp)".format(
                    group_nodes,
                    procs_per_node,
                    group_mem,
                    min_time_mem,
                    min_time_samples,
                    bytes_per_samp,
                )
            )

    if rank == 0:
        log.debug("  Dist selecting {} nodes per group".format(group_nodes))

    # Now set the number of groups to get the target number of total samples.

    group_time_samples = min_time_samples
    group_samples = n_detector * group_time_samples

    n_group = 1 + (total_samples // group_samples)
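    # The "1 +" rounds the group count up, so the realized total of
    # n_group * group_samples is always at least total_samples.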

    time_samples = n_group * group_time_samples

    if rank == 0:
        log.debug(
            "  Dist using {} groups, each with {} / {} (time / total) samples".format(
                n_group, group_time_samples, group_samples
            )
        )
        log.debug("  Dist using {} total samples".format(n_detector * time_samples))

    return (
        n_detector,
        time_samples,
        group_procs,
        group_nodes,
        n_group,
        group_time_samples,
    )
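A hypothetical call, sizing the "small" case (5e8 samples) on nodes with 32
processes and 90 GB of usable memory each, at the default 100 Hz sample rate
(all numbers are made up for illustration):

n_det, time_samp, g_procs, g_nodes, n_grp, g_time_samp = sample_distribution(
    rank=0,
    procs_per_node=32,
    bytes_per_node=90 * 1024 ** 3,
    total_samples=500 * 10 ** 6,
    sample_rate=100.0,
)
print(n_det, n_grp, g_nodes)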