Example #1
#  grad   10.0   0.25
#  bilin   0.1   1.00 0.208 0.176
#  bilin   1.0   0.01 0.444 0.387

#
# So for the same grid size, bilinear is about 50% slower than gradient,
# but is 100 times more accurate.
# bilinear is memory-limited from grid sizes of about 0.05 or so.
# gradient is memory-limited from about 0.02.
# So for reasonable accuracies, gradient is so memory-limited that
# the quite significant difference in flops is drowned in memory
# overhead.
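#
# A commented-out sketch (pure numpy, not part of the benchmark above) of why a
# one-sided Taylor ("gradient") scheme loses accuracy to linear interpolation at
# the same grid spacing: the Taylor error grows over the whole cell width, while
# linear interpolation pins down both cell ends. The 1D analogue:
#   import numpy as np
#   h, x0 = 0.1, 0.4                            # grid spacing and cell corner
#   f     = lambda x: np.sin(3*x)
#   xs    = np.linspace(x0, x0+h, 100)          # points inside one cell
#   lin   = f(x0) + (f(x0+h)-f(x0))/h*(xs-x0)   # linear: uses both corners
#   grad  = f(x0) + 3*np.cos(3*x0)*(xs-x0)      # Taylor from one corner
#   print(np.abs(lin -f(xs)).max())             # ~ h**2/8 * |f''|
#   print(np.abs(grad-f(xs)).max())             # ~ h**2/2 * |f''|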

import numpy as np, argparse, os, time, sys
from enlib import pmat, config, utils, interpol, coordinates, bench, enmap, bunch
config.default("map_bits", 32, "Bits to use for maps")
parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("--t", type=float, default=56935, help="mjd")
parser.add_argument("--wt", type=float, default=15, help="minutes")
parser.add_argument("--az", type=float, default=90, help="degrees")
parser.add_argument("--waz", type=float, default=80, help="degrees")
parser.add_argument("--el", type=float, default=50, help="degrees")
parser.add_argument("--wel", type=float, default=0, help="degrees")
parser.add_argument("--res", type=float, default=0.5, help="arcmin")
parser.add_argument("--dir",
                    type=str,
                    default="1",
                    help="1 (forward) or -1 (backward), or list")
parser.add_argument("--nsamp", type=int, default=250000)
parser.add_argument("--ndet", type=int, default=1000)
parser.add_argument("--ntime", type=int, default=3)
Example #2
	"syear":  lambda id: 2013+np.searchsorted(season_ends, id2ts(id)),
	"t5":     lambda id: id[:5],
	"t":      lambda id: id[:id.index(".")],
	"date":   lambda id: ctime2date(id2ts(id), -9),
	"year":   lambda id: ctime2date(id2ts(id), -9, "%Y"),
	"month":  lambda id: ctime2date(id2ts(id), -9, "%m"),
	"day":    lambda id: ctime2date(id2ts(id), -9, "%d"),
	"Udate":  lambda id: ctime2date(id2ts(id),  0),
	"Uyear":  lambda id: ctime2date(id2ts(id),  0, "%Y"),
	"Umonth": lambda id: ctime2date(id2ts(id),  0, "%m"),
	"Uday":   lambda id: ctime2date(id2ts(id),  0, "%d"),
}
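
# Illustration of the extractors above on a hypothetical id (real ids have the
# form "<ctime>.<ctime>.<array>", cf. entry.id.split(".") elsewhere in these
# examples):
#   id = "1411262399.1411262547.ar1"
#   id[:id.index(".")]  ->  "1411262399"   (the "t" field)
#   id[:5]              ->  "14112"        (the "t5" field)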

# Try to set up default databases. This is optional, and the databases
# will be None if it fails.
config.default("root", ".", "Path to directory where the different metadata sets are")
config.default("dataset", ".", "Path to data set directory relative to data_root")
config.default("filevars", "filevars.py", "File with common definitions for filedbs")
config.default("filedb", "filedb.txt", "File describing the location of the TOD and their metadata. Relative to dataset path.")
config.default("todinfo", "todinfo.hdf","File describing location of the TOD id lists. Relative to dataset path.")
config.default("file_override", "none", "Comma-separated list of field:file, or none to disable")
config.default("patch_dir", "area", "Directory where standard patch geometries are stored.")
config.init()
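
# A note on the config machinery used throughout these examples (semantics
# inferred from the call sites, e.g. config.get("gapfill_context", overlap)
# further down): config.default(name, value, desc) registers a default with
# help text, config.init() finalizes the registry, and config.get(name,
# override) returns override when it is not None, falling back to the
# configured value otherwise.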

#class ACTFiles(filedb.FormatDB):
#	def __init__(self, file=None, data=None, override=None):
#		if file is None and data is None: file = cjoin(["root","dataset","filedb"])
#		override = config.get("file_override", override)
#		filedb.FormatDB.__init__(self, file=file, data=data, funcs=extractors, override=override)

def setup_filedb():
Example #3
"""
import numpy as np, time, sys
from enlib import enmap, interpol, utils, coordinates, config, errors, array_ops
from enlib import parallax, bunch, pointsrcs
import pmat_core_32
import pmat_core_64


def get_core(dtype):
    if dtype == np.float32:
        return pmat_core_32.pmat_core
    else:
        return pmat_core_64.pmat_core


config.default("pmat_map_order", 0,
               "The interpolation order of the map pointing matrix.")
config.default(
    "pmat_cut_type", "full",
    "The cut sample representation used. 'full' uses one degree of freedom for each cut sample. 'bin:N' uses one degree of freedom for every N samples. 'exp' used one degree of freedom for the first sample, then one for the next two, one for the next 4, and so on, giving high resoultion at the edges of each cut range, and low resolution in the middle."
)
config.default(
    "map_sys", "equ",
    "The coordinate system of the maps. Can be eg. 'hor', 'equ' or 'gal'.")
config.default(
    "pmat_accuracy", 1.0,
    "Factor by which to lower accuracy requirement in pointing interpolation. 1.0 corresponds to 1e-3 pixels and 0.1 arc minute in polangle"
)
config.default(
    "pmat_interpol_max_size", 100000,
    "Maximum mesh size in pointing interpolation. Worst-case time and memory scale at most proportionally with this."
)
Example #4
# are Nd[nbin,ndet], U[nmode,ndet], S[nbin,nmode],
# and bins[nbin,2].
#
# Noise will be measured in units of uK sqrt(sample)
# (or the square of that for power, which is what
# we measure here), in time domain. We want compatible
# units in frequency domain, such that a flat spectrum
# with amplitude s**2 results in a time series with
# stddev s. The fourier array passed in here must
# already be normalized such that this holds. Compared
# to numpy's ffts, this means dividing the fourier
# array by sqrt(n).
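#
# A commented-out numpy check of that convention: for white noise of stddev s,
# the spectrum normalized this way is flat at amplitude s**2.
#   import numpy as np
#   n, s = 100000, 3.0
#   d    = np.random.standard_normal(n)*s
#   ft   = np.fft.rfft(d)/np.sqrt(n)       # the normalization assumed here
#   print(np.mean(np.abs(ft)**2))          # ~ s**2
#   print(np.std(d))                       # ~ s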


# Our main noise model
config.default("nmat_jon_apod", 0, "Apodization factor to apply for Jon's noise model")
config.default("nmat_jon_downweight", True, "Whether to downweight the lowest frequencies in the noise model.")
config.default("nmat_jon_amp_threshold", "16,16", "low,high threshold (in power) for accepting eigenmodes, relative to median")
config.default("nmat_jon_single_threshold", 0.55, "reject modes that have more than this fraction of its amplitude in a single detector")
config.default("nmat_spike_suppression", 1e-2, "How much to suppress spikes by. This multiplies the uncorrelated noise in those bins")

def detvecs_jon(ft, srate, dets=None, shared=False, cut_bins=None, apodization=None, cut_unit="freq", verbose=False):
	"""Build a Detvecs noise matrix based on Jon's noise model.
	ft is the *normalized* fourier-transform of a TOD: ft = fft.rfft(d)/nsamp.
	srate is the sampling rate, dets is the list of detectors, shared specifies
	whether the Detvecs object should use the compressed "shared" layout or not,
	and cut_bins is a [nbin,{freq_from,freq_to}] array of frequencies
	to completely cut."""
	apodization = config.get("nmat_jon_apod", apodization) or None
	downweight  = config.get("nmat_jon_downweight")
	spike_suppression = config.get("nmat_spike_suppression")
Example #5
import numpy as np, os
from enlib import config, utils, mpi, enmap, dmap, mapmaking, todfilter, log, scanutils
from enact import filedb, actdata, actscan
config.default("verbosity", 2, "Verbosity")
parser = config.ArgumentParser()
parser.add_argument("imap")
parser.add_argument("idlist")
parser.add_argument("omap")
parser.add_argument("-s", "--sys", type=str, default="cel")
parser.add_argument("--daz", type=float, default=3.0)
parser.add_argument("--nt", type=int, default=10)
parser.add_argument("--dets", type=str, default=0)
parser.add_argument("--ntod", type=int, default=0)
parser.add_argument("-w", "--weighted", type=int, default=1)
parser.add_argument("-D", "--deslope", type=int, default=0)
args = parser.parse_args()

comm = mpi.COMM_WORLD
filedb.init()

ids = [line.split()[0] for line in open(args.idlist, "r")]
if args.ntod: ids = ids[:args.ntod]

is_dmap = os.path.isdir(args.imap)
log_level = log.verbosity2level(config.get("verbosity"))
L = log.init(level=log_level, rank=comm.rank)
tshape = (720, 720)

# Read in all our scans
L.info("Reading %d scans" % len(ids))
myinds = np.arange(len(ids))[comm.rank::comm.size]
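# e.g. with len(ids) = 10 and comm.size = 4, rank 1 gets indices [1, 5, 9]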
Example #6
# 3. May want to encode detector covariance somehow too. Full covmats are
#    out of the picture - they would be far too big. But could store something
#    like average correlation per bin. How would I measure that?
#    Calculate the correlation (would be very noisy in such small bins), then take
#    the mean of the off-diagonal elements. This mean would be much less noisy.
#    It would be bad for correlations of mixed sign, though. For those,
#    one could take the square first, and then subtract a noise bias (1).
#    Could do both. I'm worried that all those covs would be slow.
#
# To avoid needing to keep many gigs in memory, we do this in chunks
# of e.g. 250 tods, making each file just 324 MB large.

import numpy as np, argparse, h5py, os, sys
from enlib import fft, utils, errors, config, mpi, colors, bench
from enact import filedb, actdata
config.default("cut_mostly_cut",False)
parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("sel")
parser.add_argument("odir")
parser.add_argument("prefix", nargs="?", default=None)
parser.add_argument("-b", "--nbin",       type=int,   default=20000)
parser.add_argument("-f", "--fmax",       type=float, default=200)
parser.add_argument("-B", "--nbin-det",   type=int,   default=100)
parser.add_argument("-Z", "--nbin-zoom",  type=int,   default=100)
parser.add_argument("-F", "--fmax-zoom",  type=float, default=10)
parser.add_argument("-C", "--chunk-size", type=int,   default=250)
parser.add_argument("--no-autocut", action="store_true")
args = parser.parse_args()

filedb.init()
ids   = filedb.scans[args.sel]
Example #7
# -*- coding: utf-8 -*-
import numpy as np, sys, os, h5py, copy, time
from scipy import optimize
from enlib import fft
from enlib import config, mpi, errors, log, utils, coordinates, pmat, zipper
from enlib import wcs as enwcs, enmap, array_ops
from enlib.cg import CG
from enact import filedb, actdata, actscan
import astropy.io.fits
config.default("verbosity", 1, "Verbosity of output")
config.default("work_az_step", 0.1, "Az resolution for workspace tagging in degrees")
config.default("work_el_step", 0.1, "El resolution for workspace tagging in degrees")
config.default("work_ra_step", 10,  "RA resolution for workspace tagging in degrees")
config.default("work_tag_fmt", "%04d_%04d_%03d_%02d", "Format to use for workspace tags")
config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default("downsample", 1, "Factor with which to downsample the TOD")
fft.engine = "fftw"

# Fast and incremental mapping program.
# Idea:
#  1. Map in 2 steps: tod -> work and work -> tod
#  2. Work coordinates are pixel-shifted versions of sky coordinates,
#     with a different shift for each scanning pattern. Defined in
#     3 steps:
#      1. Shift in RA based on example sweep (fwd and back separately)
#      2. Subdivide pixels in dec to make them approximately equi-spaced
#         in azimuth.
#      3. Transpose for memory access efficiency.
#  3. Ignore detector correlations. That way the work-space noise matrix
#     is purely horizontal.
#  4. Each tod-detector makes an almost horizontal line in horizontal
Example #8
# -*- coding: utf-8 -*-
import numpy as np, sys, os, h5py, copy, time
from scipy import optimize
from enlib import fft
from enlib import config, mpi, errors, log, utils, coordinates, pmat, zipper
from enlib import wcs as enwcs, enmap, array_ops
from enlib.cg import CG
from enact import filedb, actdata, actscan
import astropy.io.fits
config.default("verbosity", 1, "Verbosity of output")
config.default("work_az_step", 0.1,
               "Az resolution for workspace tagging in degrees")
config.default("work_el_step", 0.1,
               "El resolution for workspace tagging in degrees")
config.default("work_ra_step", 10,
               "RA resolution for workspace tagging in degrees")
config.default("work_tag_fmt", "%04d_%04d_%03d_%02d",
               "Format to use for workspace tags")
config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default("downsample", 1, "Factor with which to downsample the TOD")
fft.engine = "fftw"

# Fast and incremental mapping program.
# Idea:
#  1. Map in 2 steps: tod -> work and work -> tod
#  2. Work coordinates are pixel-shifted versions of sky coordinates,
#     with a different shift for each scanning pattern. Defined in
#     3 steps:
#      1. Shift in RA based on example sweep (fwd and back separately)
#      2. Subdivide pixels in dec to make them approximately equi-spaced
#         in azimuth.
Example #9
import numpy as np, time, h5py
from scipy import signal
from enlib import config, fft, utils, gapfill, todops, pmat

config.default(
    "gfilter_jon_naz", 16,
    "The number of azimuth modes to fit/subtract in Jon's polynomial ground filter."
)
config.default(
    "gfilter_jon_nt", 10,
    "The number of time modes to fit/subtract in Jon's polynomial ground filter."
)
config.default(
    "gfilter_jon_nhwp", 0,
    "The number of hwp modes to fit/subtract in Jon's polynomial ground filter."
)
config.default(
    "gfilter_jon_niter", 3,
    "The number of iterations of fitting/subtraction in Jon's polynomial ground filter."
)
config.default(
    "gfilter_jon_phase", False,
    "Modify Jon's polynomial ground filter to use phase instead of azimuth.")


def filter_poly_jon(tod,
                    az,
                    weights=None,
                    naz=None,
                    nt=None,
                    niter=None,
Example #10
	"syear":  lambda id: 2013+np.searchsorted(season_ends, id2ts(id)),
	"t5":     lambda id: id[:5],
	"t":      lambda id: id[:id.index(".")],
	"date":   lambda id: ctime2date(id2ts(id), -9),
	"year":   lambda id: ctime2date(id2ts(id), -9, "%Y"),
	"month":  lambda id: ctime2date(id2ts(id), -9, "%m"),
	"day":    lambda id: ctime2date(id2ts(id), -9, "%d"),
	"Udate":  lambda id: ctime2date(id2ts(id),  0),
	"Uyear":  lambda id: ctime2date(id2ts(id),  0, "%Y"),
	"Umonth": lambda id: ctime2date(id2ts(id),  0, "%m"),
	"Uday":   lambda id: ctime2date(id2ts(id),  0, "%d"),
}

# Try to set up default databases. This is optional, and the databases
# will be None if it fails.
config.default("root", ".", "Path to directory where the different metadata sets are")
config.default("dataset", ".", "Path to data set directory relative to data_root")
config.default("filevars", "filevars.py", "File with common definitions for filedbs")
config.default("filedb", "filedb.txt", "File describing the location of the TOD and their metadata. Relative to dataset path.")
config.default("todinfo", "todinfo.hdf","File describing location of the TOD id lists. Relative to dataset path.")
config.default("file_override", "none", "Comma-separated list of field:file, or none to disable")
config.init()

#class ACTFiles(filedb.FormatDB):
#	def __init__(self, file=None, data=None, override=None):
#		if file is None and data is None: file = cjoin(["root","dataset","filedb"])
#		override = config.get("file_override", override)
#		filedb.FormatDB.__init__(self, file=file, data=data, funcs=extractors, override=override)

def setup_filedb():
	"""Create a default filedb based on the root, dataset and filedb config
Example #11
mode = sys.argv[1]
mjd0 = 57174

# Handle each mode. These are practically separate programs, but I keep them in one
# command to reduce clutter.
if mode == "map":
    # Map mode. Process the actual time-ordered data, producing rhs.fits, div.fits and info.fits
    # for each time-chunk.
    import numpy as np, os, time
    from enlib import utils
    with utils.nowarn():
        import h5py
    from enlib import planet9, enmap, dmap, config, mpi, scanutils, sampcut, pmat, mapmaking
    from enlib import log, pointsrcs, gapfill, ephemeris
    from enact import filedb, actdata, actscan, cuts as actcuts
    config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
    config.default("downsample", 1, "Factor with which to downsample the TOD")
    config.default("map_sys", "cel", "Coordinate system for the maps")
    config.default("verbosity", 1, "Verbosity")
    parser = config.ArgumentParser()
    parser.add_argument("map", help="dummy")
    parser.add_argument("sel")
    parser.add_argument("area")
    parser.add_argument("odir")
    parser.add_argument("prefix", nargs="?", default=None)
    parser.add_argument("--dt", type=float, default=3)
    parser.add_argument("-T", "--Tref", type=float, default=40)
    parser.add_argument("--fref", type=float, default=150)
    parser.add_argument("--srcs", type=str, default=None)
    parser.add_argument("--srclim", type=float, default=200)
    parser.add_argument("-S", "--corr-spacing", type=float, default=2)
Example #12
# Scans through the indicated tods and computes the ratio
# between the power at mid and high frequency to determine
# how white the white noise floor is. Cuts detectors that
# aren't white enough. Also cuts detectors that have suspiciously
# low white noise floors.

import numpy as np, argparse, h5py, os, sys, shutil
from enlib import fft, utils, enmap, errors, config, mpi, todfilter
from enact import filedb, actdata, filters
config.default("gfilter_jon_nhwp", 200, "The number of hwp modes to fit/subtract in Jon's polynomial ground filter.")
parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("sel")
parser.add_argument("odir")
parser.add_argument("-f", type=str, default="10:1,100:1")
parser.add_argument("-R", type=str, default="0.5:3")
parser.add_argument("--max-sens", type=float, default=20, help="Reject detectors more than this times more sensitive than the median at any of the indicated frequencies. Set to 0 to disable.")
parser.add_argument("--full-stats", action="store_true")
args = parser.parse_args()

comm  = mpi.COMM_WORLD
srate = 400.
fmax  = srate/2
ndet  = 32*33

utils.mkdir(args.odir)

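# Parse the frequency spec: each "center:width" pair in -f becomes a
# [center-width/2, center+width/2] bin, so the default "10:1,100:1"
# yields bins = [[9.5, 10.5], [99.5, 100.5]] (Hz).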
tmp  = [[float(tok) for tok in word.split(":")] for word in args.f.split(",")]
bins = np.array([[t[0]-t[1]/2,t[0]+t[1]/2] for t in tmp])
rate = [float(w) for w in args.R.split(":")]

filedb.init()
Example #13
#  grad   10.0   0.25
#  bilin   0.1   1.00 0.208 0.176
#  bilin   1.0   0.01 0.444 0.387

#
# So for the same grid size, bilinear is about 50% slower than gradient,
# but is 100 times more accurate.
# bilinear is memory-limited from grid sizes of about 0.05 or so.
# gradient is memory-limited from about 0.02.
# So for reasonable accuracies, gradient is so memory-limited that
# the quite significant difference in flops is drowned in memory
# overhead.

import numpy as np, argparse, os, time, sys
from enlib import pmat, config, utils, interpol, coordinates, bench, enmap, bunch
config.default("map_bits", 32, "Bits to use for maps")
parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("--t",   type=float, default=56935, help="mjd")
parser.add_argument("--wt",  type=float, default=15,    help="minutes")
parser.add_argument("--az",  type=float, default=90,    help="degrees")
parser.add_argument("--waz", type=float, default=80,    help="degrees")
parser.add_argument("--el",  type=float, default=50,    help="degrees")
parser.add_argument("--wel", type=float, default=0,     help="degrees")
parser.add_argument("--res", type=float, default=0.5,   help="arcmin")
parser.add_argument("--dir", type=str,   default="1",   help="1 (forward) or -1 (backward), or list")
parser.add_argument("--nsamp", type=int, default=250000)
parser.add_argument("--ndet",  type=int, default=1000)
parser.add_argument("--ntime", type=int, default=3)
#parser.add_argument("-T", action="store_true")
parser.add_argument("-H", "--hwp", action="store_true")
parser.add_argument("-i", "--interpolator", type=str, default="all")
Example #14
import numpy as np, argparse, os, sys, pipes, shutil, warnings
from enlib import utils, pmat, config, errors, mpi, bunch
from enlib import log, bench, scan, ptsrc_data, pointsrcs
from enact import actscan, filedb, todinfo

warnings.filterwarnings("ignore")

config.default("filedb", "filedb.txt", "File describing the location of the TOD and their metadata")
config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default("verbosity", 1, "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages.")

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("filelist")
parser.add_argument("srcs")
parser.add_argument("odir")
parser.add_argument("--ncomp",      type=int,   default=3)
parser.add_argument("--ndet",       type=int,   default=0)
parser.add_argument("--minamp",     type=float, default=100)
parser.add_argument("-c",           action="store_true")
parser.add_argument("--oldformat",  action="store_true")
args = parser.parse_args()

dtype = np.float32 if config.get("map_bits") == 32 else np.float64
comm  = mpi.COMM_WORLD
myid  = comm.rank
nproc = comm.size

filedb.init()
db = filedb.data
filelist = todinfo.get_tods(args.filelist, filedb.scans)
Example #15
import numpy as np, time, copy, argparse, os, sys, pipes, shutil, re
from enlib import utils
with utils.nowarn(): import h5py
from enlib import enmap, pmat, fft, config, array_ops, mapmaking, nmat, errors, mpi
from enlib import log, bench, dmap, coordinates, scan as enscan, scanutils
from enlib import pointsrcs, bunch, planet9, ephemeris, parallax
from enlib.cg import CG
from enlib.source_model import SourceModel
from enact import actscan, nmat_measure, filedb, todinfo
from enact import actdata

config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default("downsample", 1, "Factor with which to downsample the TOD")
config.default("hwp_resample", False, "Whether to resample the TOD to make the HWP equispaced")
config.default("map_cg_nmax", 500, "Max number of CG steps to perform in map-making")
config.default("verbosity", 1, "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages.")
config.default("task_dist", "size", "How to assign scans to each mpi task. Can be 'plain' for comm.rank:n:comm.size-type assignment, 'size' for equal-total-size assignment. The optimal would be 'time', for equal total time for each, but that's not implemented currently.")
config.default("gfilter_jon", False, "Whether to enable Jon's ground filter.")
config.default("map_ptsrc_handling", "subadd", "How to handle point sources in the map. Can be 'none' for no special treatment, 'subadd' to subtract from the TOD and readd in pixel space, and 'sim' to simulate a pointsource-only TOD.")
config.default("map_ptsrc_sys", "cel", "Coordinate system the point source positions are specified in. Default is 'cel'")
config.default("map_format", "fits", "File format to use when writing maps. Can be 'fits', 'fits.gz' or 'hdf'.")
config.default("resume", 0, "Interval at which to write the internal CG information to allow for restarting. If 0, this will never be written. Also controls whether existing information on disk will be used for restarting if avialable. If negative, restart information will be written, but not used.")

# Special source handling
config.default("src_handling", "none", "Special source handling. 'none' to disable, 'inpaint' to inpaint, 'full' to map srcs with a white noise model and 'solve' to solve for the model errors jointly with the map.")
config.default("src_handling_lim", 10000, "Minimum source amplitude to apply special source handling to it")
config.default("src_handling_list", "", "Override source list")

# Default signal parameters
config.default("signal_sky_default",   "use=no,type=map,name=sky,sys=cel,prec=bin", "Default parameters for sky map")
config.default("signal_hor_default",   "use=no,type=map,name=hor,sys=hor,prec=bin", "Default parameters for ground map")
Example #16
	# Then read the actual tod
	_, tod = try_read(files.read_tod, "dark_tod", entry.tod, ids=dark_dets)
	samples = [0,tod.shape[-1]]
	return dataset.DataSet([
		dataset.DataField("dark_dets", dark_dets),
		#dataset.DataField("dark_cut", cuts, samples=samples, sample_index=1),
		dataset.DataField("dark_tod", tod, samples=samples, sample_index=1)])

def read_buddies(entry):
	dets, buddies = try_read(files.read_buddies, "buddies", entry.buddies)
	return dataset.DataSet([
		dataset.DataField("buddies_raw", data=buddies),
		dataset.DataField("buddies_raw_dets", data=build_detname(dets, entry)),
	])

config.default("hwp_fallback", "none", "How to handle missing HWP data. 'none' skips the tod (it it is supposed to have hwp data), while 'raw' falls back on the native hwp data.")
def read_hwp(entry):
	dummy = dataset.DataSet([
		dataset.DataField("hwp", 0),
		dataset.DataField("hwp_id", "none"),
		dataset.DataField("hwp_source", "none")])
	epochs = try_read(files.read_hwp_epochs, "hwp_epochs", entry.hwp_epochs)
	t, _, ar = entry.id.split(".")
	t = float(t)
	if ar not in epochs: return dummy
	for epoch in epochs[ar]:
		if t >= epoch[0] and t < epoch[1]:
			# Ok, the HWP was active during this period. Try to read it. It can be in
			# several different formats.
			if   entry.hwp_format == "tod":
				# HWP angles in the tod, in the new, high-quality format
Example #17
import numpy as np, time, h5py, copy, argparse, os, sys, pipes, shutil, re
from enlib import enmap, utils, pmat, fft, config, array_ops, mapmaking, nmat, errors, mpi
from enlib import log, bench, dmap, coordinates, scan as enscan, rangelist, scanutils
from enlib import pointsrcs, bunch
from enlib.cg import CG
from enlib.source_model import SourceModel
from enact import actscan, nmat_measure, filedb, todinfo

config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default("verbosity", 1, "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages.")

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("sel")
parser.add_argument("srcs")
parser.add_argument("area")
parser.add_argument("odir")
parser.add_argument("--nmax", type=int, default=10)
parser.add_argument("-s", "--src", type=int, default=None, help="Only analyze given source")
parser.add_argument("-c", "--cont", action="store_true")
args = parser.parse_args()

dtype = np.float32 if config.get("map_bits") == 32 else np.float64
comm  = mpi.COMM_WORLD
tcomm = mpi.COMM_SELF
nmax  = args.nmax
ncomp = 3
isys  = "hor"

utils.mkdir(args.odir)
log_level = log.verbosity2level(config.get("verbosity"))
L = log.init(level=log_level, rank=comm.rank, shared=False)
Example #18
# 2. For each scanning pattern, define a phase pixelization [ndet,{+,-},naz]
# 3. Sort tods in each scanning pattern by date. These tods collectively
#    make up an [ntod,ndet,nphase] data cube, but this would have a size of
#    roughly 1e3*1e3*1e3 = 1e9 pixels, which is a bit too big. So we will
#    output individual enmaps instead, as map_{el}_{az0}_{az1}_{pattern}_{id}.fits,
#    where id is the TOD id and pattern is the index into the list of patterns.
# 4. For each tod in a scanning pattern, read a partially calibrated TOD
#    and project it onto our map.

import numpy as np, os, h5py, sys, pipes, shutil, warnings
from enlib import config, errors, utils, log, bench, enmap, pmat, mapmaking, mpi, todfilter
from enlib.cg import CG
from enact import actdata, actscan, filedb, todinfo
warnings.filterwarnings("ignore")

config.default("downsample", 1, "Factor with which to downsample the TOD")
config.default("verbosity", 1, "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages.")
config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default("noise_model", "uncorr", "Noise model. Defaults to uncorr, since detector correlations have no effect when one is mapping each detector to separate pixels")

parser = config.ArgumentParser(os.environ["HOME"]+"/.enkirc")
parser.add_argument("sel")
parser.add_argument("odir")
parser.add_argument("prefix", nargs="?")
parser.add_argument("--tol",  type=float, default=10, help="Tolerance in arcmin for separating scanning patterns")
parser.add_argument("--daz",  type=float, default=2,  help="Pixel size in azimuth, in arcmin")
parser.add_argument("--nrow", type=int,   default=33)
parser.add_argument("--ncol", type=int,   default=32)
parser.add_argument("--nstep",type=int,   default=20)
parser.add_argument("--nsub", type=int,   default=2)
parser.add_argument("--i0", type=int, default=None)
Example #19
from __future__ import division, print_function
import numpy as np, time, os, sys
from scipy import integrate
from enlib import utils
with utils.nowarn():
    import h5py
from enlib import mpi, errors, fft, mapmaking, config, pointsrcs
from enlib import pmat, coordinates, enmap, bench, bunch, nmat, sampcut, gapfill, wcsutils, array_ops
from enact import filedb, actdata, actscan, nmat_measure

config.set("downsample", 1, "Amount to downsample tod by")
config.set("gapfill", "linear",
           "Gapfiller to use. Can be 'linear' or 'joneig'")
config.default(
    "pmat_interpol_pad", 10.0,
    "Number of arcminutes to pad the interpolation coordinate system by")

parser = config.ArgumentParser()
parser.add_argument("catalog")
parser.add_argument("sel")
parser.add_argument("odir")
parser.add_argument("-s", "--srcs", type=str, default=None)
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("-q", "--quiet", action="count", default=0)
parser.add_argument("-H", "--highpass", type=float, default=None)
parser.add_argument("--minamp", type=float, default=None)
parser.add_argument("--minsn", type=float, default=1)
parser.add_argument("--sys", type=str, default="cel")
args = parser.parse_args()
Example #20
from __future__ import division, print_function
import numpy as np, warnings, time, copy, argparse, os, sys, pipes, shutil, re
from enlib import config, coordinates, mapmaking, bench, scanutils, log, sampcut, dmap
from pixell import utils, enmap, pointsrcs, bunch, mpi, fft
from enact import filedb, actdata, actscan, files, todinfo
from scipy import ndimage

config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default("downsample", 1, "Factor with which to downsample the TOD")
config.default(
    "verbosity", 1,
    "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages."
)
config.default("tod_window", 5.0,
               "Number of samples to window the tod by on each end")
config.default(
    "eig_limit", 0.1,
    "Pixel condition number below which polarization is dropped to make total intensity more stable. Should be a high value for single-tod maps to avoid thin stripes with really high noise"
)
config.default(
    "map_sys", "equ",
    "The coordinate system of the maps. Can be eg. 'hor', 'equ' or 'gal'.")
config.default("map_dist", False, "Whether to use distributed maps")

parser = config.ArgumentParser()
parser.add_argument("sel", help="TOD selction query")
parser.add_argument("area", help="Geometry to map")
parser.add_argument("odir", help="Output directory")
parser.add_argument("prefix", nargs="?", help="Output file name prefix")
parser.add_argument("--dets", type=str, default=0, help="Detector slice")
args = parser.parse_args()
Example #21
import numpy as np, time, h5py, copy, argparse, os, sys, pipes, shutil, re
from enlib import enmap, utils, pmat, fft, config, array_ops, mapmaking, nmat, errors, mpi
from enlib import log, bench, dmap, coordinates, scan as enscan, rangelist, scanutils
from enlib import pointsrcs, bunch
from enlib.cg import CG
from enlib.source_model import SourceModel
from enact import actscan, nmat_measure, filedb, todinfo

config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default("downsample", 1, "Factor with which to downsample the TOD")
config.default("verbosity", 1, "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages.")
config.default("map_format", "fits", "File format to use when writing maps. Can be 'fits', 'fits.gz' or 'hdf'.")
config.default("tod_window", 5.0, "Number of samples to window the tod by on each end")

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("sel")
parser.add_argument("area")
parser.add_argument("odir")
parser.add_argument("prefix",nargs="?")
parser.add_argument("--ndet",       type=int, default=0,  help="Max number of detectors")
args = parser.parse_args()
filedb.init()

utils.mkdir(args.odir)
root      = args.odir + "/" + (args.prefix + "_" if args.prefix else "")
log_level = log.verbosity2level(config.get("verbosity"))
dtype     = np.float32 if config.get("map_bits") == 32 else np.float64
area      = enmap.read_map(args.area)
comm      = mpi.COMM_WORLD
ids       = filedb.scans[args.sel]
L = log.init(level=log_level, rank=comm.rank)
Example #22
"""This module provides functions for filling gaps in an array based on ranges or masks."""
import numpy as np
from enlib import fft, config, resample, utils
from enlib.utils import repeat_filler
from enlib.rangelist import Rangelist, Multirange, multify

config.default("gapfill", "linear",
               "TOD gapfill method. Can be 'copy' or 'linear'")
config.default("gapfill_context", 10,
               "Samples of context to use for matching up edges of cuts.")


def gapfill(arr, ranges, inplace=False, overlap=None):
    gapfiller = {
        "linear": gapfill_linear,
        "joneig": gapfill_joneig,
        "copy": gapfill_copy,
        "cubic": gapfill_cubic
    }[config.get("gapfill")]
    overlap = config.get("gapfill_context", overlap)
    return gapfiller(arr, ranges, inplace=inplace, overlap=overlap)
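

# A minimal usage sketch (commented out; assumes enlib is importable and ranges
# given as an [nrange,{from,to}] index array, as documented in gapfill_linear):
#   import numpy as np
#   from enlib import gapfill
#   tod = np.arange(100.)
#   tod[40:50] = 0                         # pretend these samples were cut
#   fixed = gapfill.gapfill_linear(tod, np.array([[40, 50]]))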


@multify
def gapfill_linear(arr, ranges, inplace=False, overlap=None):
    """Returns arr with the ranges given by ranges, which can be [:,{from,to}] or
	a Rangelist, filled using linear interpolation."""
    ranges = Rangelist(ranges, len(arr), copy=False)
    overlap = config.get("gapfill_context", overlap)
    if not inplace: arr = np.array(arr)
    nr = len(ranges.ranges)
Example #23
#	# define areas of significant acceleration
#	lim    = config.get("cut_turnaround_lim", lim)
#	addaz  = np.abs(ddaz)
#	sigma  = np.std(ddaz)
#	for i in range(3):
#		sigma = np.std(ddaz[addaz < sigma*4])
#	mask  = addaz > sigma*lim
#	# Build the cut, and grow it by the margin
#	cut   = sampcut.from_mask(mask)
#	margin= utils.nint(config.get("cut_turnaround_margin", margin)*srate/2)
#	cut   = cut.widen(margin)
#	return cut

# New, simpler turnaround cut. Simply cuts a given number of degrees away from the
# extrema.
config.default("cut_turnaround_margin", 0.2,
               "Margin for turnaround cut in degrees.")


def turnaround_cut(az, margin=None):
    margin = config.get("cut_turnaround_margin", margin) * utils.degree
    # Use percentiles just in case there are some outliers (for example a scan
    # that's a bit higher than the others).
    az1 = np.percentile(az, 0.1) + margin
    az2 = np.percentile(az, 99.9) - margin
    mask = (az < az1) | (az > az2)
    cut = sampcut.from_mask(mask)
    return cut
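

# Commented-out illustration of the mask logic on a synthetic triangle-wave
# azimuth scan (sampcut.from_mask would then compress the mask into a cut):
#   az = np.abs(((np.arange(10000)/2000.) % 2) - 1)   # sweeps between 0 and 1
#   az1 = np.percentile(az, 0.1) + 0.05               # pretend margin = 0.05
#   az2 = np.percentile(az, 99.9) - 0.05
#   mask = (az < az1) | (az > az2)                    # samples near turnarounds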


#	return res
Example #24
import numpy as np, os
from enlib import config, utils, mpi, enmap, dmap, mapmaking, todfilter, log, scanutils
from enact import filedb, actdata, actscan
config.default("verbosity", 2, "Verbosity")
parser = config.ArgumentParser()
parser.add_argument("imap")
parser.add_argument("idlist")
parser.add_argument("omap")
parser.add_argument("-s", "--sys", type=str,   default="cel")
parser.add_argument(      "--daz", type=float, default=3.0)
parser.add_argument(      "--nt",  type=int,   default=10)
parser.add_argument(      "--dets",type=str,   default=0)
parser.add_argument(      "--ntod",type=int,   default=0)
parser.add_argument("-w", "--weighted", type=int, default=1)
parser.add_argument("-D", "--deslope",  type=int, default=0)
args = parser.parse_args()

comm = mpi.COMM_WORLD
filedb.init()

ids = [line.split()[0] for line in open(args.idlist,"r")]
if args.ntod: ids = ids[:args.ntod]

is_dmap = os.path.isdir(args.imap)
log_level = log.verbosity2level(config.get("verbosity"))
L = log.init(level=log_level, rank=comm.rank)
tshape= (720,720)

# Read in all our scans
L.info("Reading %d scans" % len(ids))
myinds = np.arange(len(ids))[comm.rank::comm.size]
Example #25
from __future__ import division, print_function
import numpy as np, time, os, sys
from scipy import integrate
from enlib import utils
with utils.nowarn():
    import h5py
from enlib import mpi, errors, fft, mapmaking, config, pointsrcs
from enlib import pmat, coordinates, enmap, bench, bunch, nmat, sampcut, gapfill, wcsutils, array_ops
from enact import filedb, actdata, actscan, nmat_measure

config.set("downsample", 1, "Amount to downsample tod by")
config.set("gapfill", "linear",
           "Gapfiller to use. Can be 'linear' or 'joneig'")
config.default(
    "pmat_interpol_pad", 10.0,
    "Number of arcminutes to pad the interpolation coordinate system by")
config.default(
    "pmat_ptsrc_rsigma", 3,
    "Max number of standard deviations away from a point source to compute the beam profile. Larger values are slower but more accurate, but may lead to misleading effective times in cases where a large region around a source is cut."
)

parser = config.ArgumentParser()
parser.add_argument("catalog")
parser.add_argument("sel")
parser.add_argument("odir")
parser.add_argument("-s", "--srcs", type=str, default=None)
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("-q", "--quiet", action="count", default=0)
parser.add_argument("-H", "--highpass", type=float, default=None)
parser.add_argument("--minamp", type=float, default=None)
Example #26
# Use joneig filtering to clean the area around each point source position.
# Treat the remainder as white noise. Make thumbnail maps in horizontal coordinates
# centered on the fiducial source position. Output as fully self-contained hdf files
# that can be analyzed by the fitter program.
import numpy as np, os, time, h5py, warnings
from astropy.io import fits
from enlib import utils, config, mpi, errors, sampcut, gapfill, cg
from enlib import pmat, coordinates, enmap, bench, bunch
from enact import filedb, actdata, actscan, nmat_measure
config.default("pmat_accuracy", 20.0, "Factor by which to lower accuracy requirement in pointing interpolation. 1.0 corresponds to 1e-3 pixels and 0.1 arc minute in polangle")
config.default("pmat_interpol_max_size", 1000000, "Maximum mesh size in pointing interpolation. Worst-case time and memory scale at most proportionally with this.")
config.default("gapfill", "linear", "TOD gapfill method. Can be 'copy', 'linear' or 'cubic'")
config.set("pmat_cut_type", "full", "Controls the degrees of freedom for the cut pmat. Using 'full' is stronly adviced to avoid unreasonable slowdown for src_thumb_build2")
parser = config.ArgumentParser(os.environ["HOME"] + "./enkirc")
parser.add_argument("srclist")
parser.add_argument("sel")
parser.add_argument("odir")
parser.add_argument("-A", "--minamp",    type=float, default=0)
parser.add_argument("-b", "--box",       type=str,   default="-10:10,-10:10")
parser.add_argument("-p", "--pad",       type=float, default=2)
parser.add_argument("-r", "--res",       type=float, default=0.1)
parser.add_argument("-s", "--restrict",  type=str,   default=None)
parser.add_argument("-m", "--minimaps",  action="store_true")
parser.add_argument("-c", "--cont",      action="store_true")
parser.add_argument("-C", "--cols",      type=str, default="0:1:2")
parser.add_argument(      "--nref",      type=int, default=2)
args = parser.parse_args()

# The joneig approach requires the mask to be as small as possible, especially in the scan
# direction, but the source must be entirely contained inside it. Previous tests have shown
# that x position can vary between -2' to 2'. If we want 3 sigma margin, then we need to add
Example #27
import numpy as np, time, h5py
from enlib import config, fft, utils, gapfill, todops, pmat, rangelist

config.default("gfilter_jon_naz", 8, "The number of azimuth modes to fit/subtract in Jon's polynomial ground filter.")
config.default("gfilter_jon_nt",  10, "The number of time modes to fit/subtract in Jon's polynomial ground filter.")
config.default("gfilter_jon_nhwp", 0, "The number of hwp modes to fit/subtract in Jon's polynomial ground filter.")
config.default("gfilter_jon_niter", 3, "The number of time modes to fit/subtract in Jon's polynomial ground filter.")

def filter_poly_jon(tod, az, weights=None, naz=None, nt=None, niter=None, cuts=None, hwp=None, nhwp=None, deslope=True, inplace=True):
	"""Fix naz Legendre polynomials in az and nt other polynomials
	in t jointly. Then subtract the best fit from the data.
	The subtraction is inplace, so tod is modified. If naz or nt are
	negative, they are fit for, but not subtracted.
	NOTE: This function may leave tod nonperiodic.
	"""
	#moomoo = tod[:8].copy()
	naz = config.get("gfilter_jon_naz", naz)
	nt  = config.get("gfilter_jon_nt", nt)
	nhwp= config.get("gfilter_jon_nhwp", nhwp)
	niter = config.get("gfilter_jon_niter", niter)
	if not inplace: tod = tod.copy()
	do_gapfill = cuts is not None
	#print "Mos", naz, nt, nhwp
	#print hwp
	# No point in iterating if we aren't gapfilling
	if not do_gapfill: niter = 1
	if hwp is None or np.all(hwp==0): nhwp = 0
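	# Negative naz/nt/nhwp mean "fit these modes but do not subtract them"
	# (see the docstring), so split each into magnitude and sign: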
	naz, asign = np.abs(naz), np.sign(naz)
	nt,  tsign = np.abs(nt),  np.sign(nt)
	nhwp,hsign = np.abs(nhwp),np.sign(nhwp)
	d   = tod.reshape(-1,tod.shape[-1])
Example #28
# 2. For each scanning pattern, define a phase pixelization [ndet,{+,-},naz]
# 3. Sort tods in each scanning pattern by date. These tods collectively
#    make up an [ntod,ndet,nphase] data cube, but this would have a size of
#    roughly 1e3*1e3*1e3 = 1e9 pixels, which is a bit too big. So we will
#    output individual enmaps instead, as map_{el}_{az0}_{az1}_{pattern}_{id}.fits,
#    where id is the TOD id and pattern is the index into the list of patterns.
# 4. For each tod in a scanning pattern, read a partially calibrated TOD
#    and project it onto our map.

import numpy as np, os, h5py, sys, pipes, shutil, warnings
from enlib import config, errors, utils, log, bench, enmap, pmat, mapmaking, mpi, todfilter
from enlib.cg import CG
from enact import actdata, actscan, filedb, todinfo
warnings.filterwarnings("ignore")

config.default("downsample", 1, "Factor with which to downsample the TOD")
config.default(
    "verbosity", 1,
    "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages."
)
config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default(
    "noise_model", "uncorr",
    "Noise model. Defaults to uncorr, since detector correlations have no effect when one is mapping each detector to separate pixels"
)

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("sel")
parser.add_argument("odir")
parser.add_argument("prefix", nargs="?")
parser.add_argument(
Example #29
# are Nd[nbin,ndet], U[nmode,ndet], S[nbin,nmode],
# and bins[nbin,2].
#
# Noise will be measured in units of uK sqrt(sample)
# (or the square of that for power, which is what
# we measure here), in time domain. We want compatible
# units in frequency domain, such that a flat spectrum
# with amplitude s**2 results in a time series with
# stddev s. The fourier array passed in here must
# already be normalized such that this holds. Compared
# to numpy's ffts, this means dividing the fourier
# array by sqrt(n).


# Our main noise model
config.default("nmat_jon_apod", 0, "Apodization factor to apply for Jon's noise model")
config.default("nmat_jon_downweight", True, "Whether to downweight the lowest frequencies in the noise model.")
config.default("nmat_jon_amp_threshold", "16,16", "low,high threshold (in power) for accepting eigenmodes, relative to median")
config.default("nmat_jon_single_threshold", 0.55, "reject modes that have more than this fraction of its amplitude in a single detector")
config.default("nmat_spike_suppression", 1e-2, "How much to suppress spikes by. This multiplies the uncorrelated noise in those bins")

def detvecs_jon(ft, srate, dets=None, shared=False, cut_bins=None, apodization=None, cut_unit="freq", verbose=False):
	"""Build a Detvecs noise matrix based on Jon's noise model.
	ft is the *normalized* fourier-transform of a TOD: ft = fft.rfft(d)/nsamp.
	srate is the sampling rate, dets is the list of detectors, shared specifies
	whether the Detvecs object should use the compressed "shared" layout or not,
	and cut_bins is a [nbin,{freq_from,freq_to}] array of frequencies
	to completely cut."""
	apodization = config.get("nmat_jon_apod", apodization) or None
	downweight  = config.get("nmat_jon_downweight")
	spike_suppression = config.get("nmat_spike_suppression")
Example #30
import numpy as np, time, h5py, copy, argparse, os, mpi4py.MPI, sys, pipes, shutil
from enlib import enmap, utils, pmat, fft, config, array_ops, map_equation, nmat, errors
from enlib import log, bench, scan
from enlib.cg import CG
from enact import data, nmat_measure, filedb, todinfo

config.default("filedb", "filedb.txt",
               "File describing the location of the TOD and their metadata")
config.default("todinfo", "todinfo.txt",
               "File describing location of the TOD id lists")
config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default("downsample", 1, "Factor with which to downsample the TOD")
config.default("map_precon", "bin", "Preconditioner to use for map-making")
config.default("map_cg_nmax", 1000,
               "Max number of CG steps to perform in map-making")
config.default(
    "verbosity", 1,
    "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages."
)
config.default(
    "task_dist", "size",
    "How to assign scans to each mpi task. Can be 'plain' for myid:n:nproc-type assignment, 'size' for equal-total-size assignment. The optimal would be 'time', for equal total time for each, but that's not implemented currently."
)

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("filelist")
parser.add_argument("area")
parser.add_argument("odir")
parser.add_argument("prefix", nargs="?")
parser.add_argument("-d", "--dump", type=int, default=10)
parser.add_argument("--ncomp", type=int, default=3)
Example #31
import numpy as np, argparse, enlib.scan, os
from enlib import enmap, utils, config, scansim, log, powspec, fft, bunch
from enact import actscan, filedb, nmat_measure

config.default("verbosity", 1, "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages.")

parser = config.ArgumentParser()
parser.add_argument("odir")
parser.add_argument("--area",  type=str)
parser.add_argument("--bore",  type=str, default="grid:2:0.2:0.8")
parser.add_argument("--dets",  type=str, default="scattered:3:3:2.0")
parser.add_argument("--signal",type=str, default="ptsrc:100:1e3:-3")
parser.add_argument("--noise", type=str, default="1/f:20:2:0.5")
parser.add_argument("--seed",  type=int, default=1)
parser.add_argument("--measure", type=float, default=None)
parser.add_argument("--real",  type=str, default=None)
args = parser.parse_args()

log_level = log.verbosity2level(config.get("verbosity"))
L = log.init(level=log_level)
utils.mkdir(args.odir)

if args.area:
	area = enmap.read_map(args.area)
	if area.ndim == 2: area = area[None]
else:
	shape, wcs = enmap.geometry(pos=np.array([[-1,-1],[1,1]])*np.pi/180, shape=(600,600), pre=(3,), proj="car", ref=[0,0])
	area = enmap.zeros(shape, wcs)

def get_scans(area, signal, bore, dets, noise, seed=0, real=None, noise_override=None):
	scans = []
Example #32
import numpy as np, argparse, os, sys, pipes, shutil, warnings
from enlib import utils, pmat, config, errors, mpi, bunch
from enlib import log, bench, scan, ptsrc_data, pointsrcs
from enact import actscan, filedb, todinfo

warnings.filterwarnings("ignore")

config.default("filedb", "filedb.txt", "File describing the location of the TOD and their metadata")
config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default("verbosity", 1, "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages.")

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("filelist")
parser.add_argument("srcs")
parser.add_argument("odir")
parser.add_argument("--ncomp",      type=int,   default=3)
parser.add_argument("--ndet",       type=int,   default=0)
parser.add_argument("--minamp",     type=float, default=100)
parser.add_argument("-c",           action="store_true")
parser.add_argument("--oldformat",  action="store_true")
args = parser.parse_args()

dtype = np.float32 if config.get("map_bits") == 32 else np.float64
comm  = mpi.COMM_WORLD
myid  = comm.rank
nproc = comm.size

filedb.init()
db = filedb.data
filelist = todinfo.get_tods(args.filelist, filedb.scans)
Example #33
import numpy as np, time, h5py, copy, argparse, os, sys, pipes, shutil, re
from enlib import enmap, utils, pmat, fft, config, array_ops, mapmaking, nmat, errors, mpi
from enlib import log, bench, dmap, coordinates, scan as enscan, rangelist, scanutils
from enlib import pointsrcs, bunch
from enlib.cg import CG
from enact import actscan, nmat_measure, filedb, todinfo

config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default("downsample", 1, "Factor with which to downsample the TOD")
config.default(
    "verbosity", 1,
    "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages."
)
config.default(
    "map_format", "fits",
    "File format to use when writing maps. Can be 'fits', 'fits.gz' or 'hdf'.")
config.default("tod_window", 5.0,
               "Number of samples to window the tod by on each end")

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("sel")
parser.add_argument("area")
parser.add_argument("odir")
parser.add_argument("prefix", nargs="?")
parser.add_argument("--ndet",
                    type=int,
                    default=0,
                    help="Max number of detectors")
args = parser.parse_args()
filedb.init()
Example #34
        self.pix, self.phase = self.data[scan].get_pix_phase()

    def free(self):
        del self.pix, self.phase

    def forward(self, scan, tod, work):
        if scan not in self.data: return
        self.data[scan].forward(tod, work, self.pix, self.phase)

    def backward(self, scan, tod, work):
        if scan not in self.data: return
        self.data[scan].backward(tod, work, self.pix, self.phase)
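
    # (forward applies the pointing matrix P, projecting the work-space map into
    # the TOD; backward applies its transpose P^T, accumulating TOD samples back
    # into the map. This duality is how the mapmaking Signal classes are used
    # throughout these examples.)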


config.default(
    "dmap_format", "merged",
    "How to store dmaps on disk. 'merged': combine into a single fits file before writing. This is memory intensive. 'tiles': Write the tiles that make up the dmap directly to disk."
)


class SignalDmap(Signal):
    def __init__(self,
                 scans,
                 subinds,
                 area,
                 cuts=None,
                 name="main",
                 ofmt="{name}",
                 output=True,
                 ext="fits",
                 pmat_order=None,
                 sys=None,
Example #35
parser.add_argument("odir")
parser.add_argument("--rcol", type=int, default=6)
parser.add_argument("--dcol", type=int, default=7)
parser.add_argument("--acol", type=int, default=12)
parser.add_argument("-A", "--minamp", type=float, default=0)
parser.add_argument("-f", "--fknee-mul", type=float, default=1.5)
parser.add_argument("-a", "--alpha", type=float, default=5)
parser.add_argument("-R", "--radius", type=float, default=10)
parser.add_argument("-r", "--res", type=float, default=0.1)
parser.add_argument("-s", "--restrict", type=str, default=None)
parser.add_argument("-m", "--minimaps", action="store_true")
parser.add_argument("-c", "--cont", action="store_true")
args = parser.parse_args()

config.default(
    "pmat_accuracy", 10.0,
    "Factor by which to lower accuracy requirement in pointing interpolation. 1.0 corresponds to 1e-3 pixels and 0.1 arc minute in polangle"
)

filedb.init()
ids = filedb.scans[args.sel]
comm = mpi.COMM_WORLD
R = args.radius * utils.arcmin
res = args.res * utils.arcmin
dtype = np.float32
bsize_fknee = 100
bsize_ivar = 400
utils.mkdir(args.odir)

rfreqs = [12, 80]
drfreqs = [1, 1]
Example #36
import numpy as np, argparse, mpi4py.MPI, os, h5py
from enlib import utils, config, ptsrc_data, log

config.default("verbosity", 1, "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages.")

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("filelist")
parser.add_argument("srcs")
parser.add_argument("odir")
parser.add_argument("-R", "--radius", type=float, default=5.0)
parser.add_argument("-r", "--resolution", type=float, default=0.25)
args = parser.parse_args()

comm  = mpi4py.MPI.COMM_WORLD
myid  = comm.rank
nproc = comm.size

log_level = log.verbosity2level(config.get("verbosity"))
L = log.init(level=log_level, rank=myid)

# Allow filelist to take the format filename:[slice], e.g. "ids.txt:[:100]" keeps the first 100 entries
toks = args.filelist.split(":")
filelist, fslice = toks[0], ":".join(toks[1:])
filelist = [line.split()[0] for line in open(filelist,"r") if line[0] != "#"]
filelist = eval("filelist"+fslice)

utils.mkdir(args.odir)
srcs = np.loadtxt(args.srcs).T

# create minimaps around each source
nsrc  = srcs.shape[1]
Example #37
parser.add_argument("sel")
parser.add_argument("odir")
parser.add_argument("--rcol", type=int, default=6)
parser.add_argument("--dcol", type=int, default=7)
parser.add_argument("--acol", type=int, default=12)
parser.add_argument("-A", "--minamp",    type=float, default=0)
parser.add_argument("-f", "--fknee-mul", type=float, default=1.5)
parser.add_argument("-a", "--alpha",     type=float, default=5)
parser.add_argument("-R", "--radius",    type=float, default=10)
parser.add_argument("-r", "--res",       type=float, default=0.1)
parser.add_argument("-s", "--restrict",  type=str,   default=None)
parser.add_argument("-m", "--minimaps",  action="store_true")
parser.add_argument("-c", "--cont",      action="store_true")
args = parser.parse_args()

config.default("pmat_accuracy", 10.0, "Factor by which to lower accuracy requirement in pointing interpolation. 1.0 corresponds to 1e-3 pixels and 0.1 arc minute in polangle")

filedb.init()
ids  = filedb.scans[args.sel]
comm = mpi.COMM_WORLD
R    = args.radius*utils.arcmin
res  = args.res*utils.arcmin
dtype= np.float32
bsize_fknee = 100
bsize_ivar  = 400
utils.mkdir(args.odir)

rfreqs  = [12, 80]
drfreqs = [1,   1]

def find_scan_vel(scan, ipos, aspeed, dt=0.1):
Example #38
#  4. Read off the focalplane correlation function at that position.
#
# No matter which approach I choose, the simplest thing to do for the
# build-up program is to store
#  1. Offsets for all relevant detectors.
#  2. Covariance for all those detectors relative to the center.
#     This will just be a constant vector for common mode subtraction.

import numpy as np, os, h5py
from enlib import config, utils, mapmaking, scanutils, mpi, log, pmat, enmap, bench, fft
from enact import filedb, actscan, actdata, nmat_measure

# We want a detector-uncorrelated noise model because we will
# assume that when combining the maps later. This lets us choose
# smaller noise bins because the number of DOF is smaller.
config.default("nmat_uncorr_type", "lin")
config.default("nmat_uncorr_nbin", 4000)
config.default("nmat_uncorr_nmin", 1)

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("sel")
parser.add_argument("area")
parser.add_argument("odir")
parser.add_argument("prefix", nargs="?")
parser.add_argument("-b", "--bsize", type=float, default=1)
parser.add_argument("-v", "--verbosity", type=int, default=1)
parser.add_argument("-d", "--dets", type=str, default=None)
args = parser.parse_args()

comm = mpi.COMM_WORLD
level = log.verbosity2level(args.verbosity)
Example #39
# which would be 8-42 minutes. And that's using 16 cores! That's too slow. So this one is
# useful for comparing with faster methods for a few reference tods, but not in general.
# Currently N and P take similar time. P can be optimized more with some effort, but N is
# dominated by ffts and can't be improved much.
from __future__ import division, print_function
import numpy as np, time, astropy.io.fits, os, sys
from scipy import optimize, integrate
from enlib import utils
with utils.nowarn(): import h5py
from enlib import mpi, errors, fft, mapmaking, config, jointmap, pointsrcs
from enlib import pmat, coordinates, enmap, bench, bunch, nmat, sampcut, gapfill, wcsutils, array_ops
from enact import filedb, actdata, actscan, nmat_measure

config.set("downsample", 1, "Amount to downsample tod by")
config.set("gapfill", "linear", "Gapfiller to use. Can be 'linear' or 'joneig'")
config.default("pmat_interpol_pad", 10.0, "Number of arcminutes to pad the interpolation coordinate system by")
config.default("pmat_interpol_max_size", 4000000, "Maximum mesh size in pointing interpolation. Worst-case time and memory scale at most proportionally with this.")

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("mode", help="Mode to use. Can be srcs or planet. This sets up useful defaults for other arguments")
parser.add_argument("srcdb_or_planet")
parser.add_argument("sel")
parser.add_argument("odir")
parser.add_argument("-R", "--radius",    type=float, default=12)
parser.add_argument("-r", "--res",       type=float, default=0.1)
parser.add_argument("-m", "--method",    type=str,   default="fixamp")
parser.add_argument("-M", "--minimaps",  action="store_true")
parser.add_argument("-c", "--cont",      action="store_true")
parser.add_argument("-s", "--srcs",      type=str,   default=None)
parser.add_argument("-A", "--minamp",    type=float, default=None)
parser.add_argument("-v", "--verbose",   action="count", default=0)
Exemplo n.º 40
0
import numpy as np, time
from enact import nmat_measure, actdata
from enlib import utils, scan, nmat, resample, config, errors, bench, sampcut

config.default("cut_noise_whiteness", False, "Whether to apply the noise_cut or not")
config.default("cut_spikes", True, "Whether to apply the spike cut or not")
config.default("tod_sys", "hor", "Coordinate system the TOD is in. 'hor': Ideal horizontal coordinates. 'tele': non-ideal telescope coordinates.")
config.default("downsample_method", "fft", "Method to use when downsampling the TOD")
config.default("noise_model", "jon", "Which noise model to use. Can be 'file' or 'jon'")
config.default("tod_skip_deconv", False, "Whether to skip the time constant and butterworth deconvolution in actscan")
config.default("dummy_cut", 0.0, "Fraction of dummy cuts to inject *after* gapfilling and noise estimation")
config.default("dummy_cut_len", 1000, "Dummy cuts will be exponentially distributed up to this length")
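
# A hedged sketch of how such dummy cuts could be drawn (hypothetical helper,
# not necessarily what the real injection code does): cut starts are uniform
# over the samples, with lengths exponentially distributed and capped at maxlen.
def draw_dummy_cuts(nsamp, frac, maxlen, scale=100):
	"""Return a boolean mask cutting roughly a fraction frac of nsamp samples."""
	mask = np.zeros(nsamp, bool)
	while frac > 0 and mask.mean() < frac:
		start = np.random.randint(nsamp)
		n     = min(int(np.random.exponential(scale))+1, maxlen, nsamp-start)
		mask[start:start+n] = True
	return mask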
class ACTScan(scan.Scan):
	def __init__(self, entry, subdets=None, d=None, verbose=False, dark=False):
		self.fields = ["gain","mce_filter","tags","polangle","tconst","hwp","cut","point_offsets","boresight","site","tod_shape","array_info","beam","pointsrcs", "buddies"]
		if dark: self.fields += ["dark"]
		if config.get("noise_model") == "file":
			self.fields += ["noise"]
		else:
			if config.get("cut_noise_whiteness"):
				self.fields += ["noise_cut"]
			if config.get("cut_spikes"):
				self.fields += ["spikes"]
		if d is None:
			d = actdata.read(entry, self.fields, verbose=verbose)
			d = actdata.calibrate(d, verbose=verbose)
			if subdets is not None:
				d.restrict(dets=d.dets[subdets])
		if d.ndet == 0 or d.nsamp == 0: raise errors.DataMissing("No data in scan")
		ndet = d.ndet
		# Necessary components for Scan interface
Exemplo n.º 41
0
import numpy as np, time, h5py, copy, argparse, os, sys, pipes, shutil, re
from enlib import enmap, utils, pmat, fft, config, array_ops, mapmaking, nmat, errors, mpi
from enlib import log, bench, dmap, coordinates, scan as enscan, rangelist, scanutils
from enlib import pointsrcs, bunch
from enlib.cg import CG
from enact import actscan, nmat_measure, filedb, todinfo

config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default(
    "verbosity", 1,
    "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages."
)

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("sel")
parser.add_argument("srcs")
parser.add_argument("area")
parser.add_argument("odir")
parser.add_argument("--nmax", type=int, default=10)
parser.add_argument("-s",
                    "--src",
                    type=int,
                    default=None,
                    help="Only analyze given source")
parser.add_argument("-c", "--cont", action="store_true")
args = parser.parse_args()

dtype = np.float32 if config.get("map_bits") == 32 else np.float64
comm = mpi.COMM_WORLD
tcomm = mpi.COMM_SELF
nmax = args.nmax
Exemplo n.º 42
0
# 3. May want to encode detector covariance somehow too. Full covmats are
#    out of the picture - they would be far too big. But we could store
#    something like the average correlation per bin. How would I measure that?
#    Calc corr (would be very noisy in such small bins). Mean of
#    off-diagonal elements. This mean would be much less noisy.
#    Would be bad for correlations of mixed sign, though. For these,
#    one could take the square first, and then subtract a noise bias (1).
#    Could do both (see the sketch after the imports below). I'm worried
#    that all those covs would be slow.
#
# To avoid needing to keep many gigs in memory, we do this in chunks
# of e.g. 250 tods, making each file only about 324 MB.

import numpy as np, argparse, h5py, os, sys
from enlib import fft, utils, errors, config, mpi, colors, bench
from enact import filedb, actdata
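
# A minimal sketch of the "mean off-diagonal correlation per bin" statistic
# discussed above (hypothetical helper, pure numpy; not from this codebase).
# For mixed-sign correlations one would square the entries first and subtract
# a noise bias before averaging, as noted in the header comment.
def mean_offdiag_corr(ftod):
    """ftod: (ndet, nval) Fourier coefficients in one frequency bin. Returns
    the mean off-diagonal element of the detector correlation matrix."""
    corr = np.corrcoef(ftod.real)
    ndet = corr.shape[0]
    return corr[~np.eye(ndet, dtype=bool)].mean()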
config.default("cut_mostly_cut", False)
parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("sel")
parser.add_argument("odir")
parser.add_argument("prefix", nargs="?", default=None)
parser.add_argument("-b", "--nbin", type=int, default=20000)
parser.add_argument("-f", "--fmax", type=float, default=200)
parser.add_argument("-B", "--nbin-det", type=int, default=100)
parser.add_argument("-Z", "--nbin-zoom", type=int, default=100)
parser.add_argument("-F", "--fmax-zoom", type=float, default=10)
parser.add_argument("-C", "--chunk-size", type=int, default=250)
parser.add_argument("--tconst", action="store_true")
parser.add_argument("--no-autocut", action="store_true")
args = parser.parse_args()

filedb.init()
Exemplo n.º 43
0
#	# define areas of significant acceleration
#	lim    = config.get("cut_turnaround_lim", lim)
#	addaz  = np.abs(ddaz)
#	sigma  = np.std(ddaz)
#	for i in range(3):
#		sigma = np.std(ddaz[addaz < sigma*4])
#	mask  = addaz > sigma*lim
#	# Build the cut, and grow it by the margin
#	cut   = sampcut.from_mask(mask)
#	margin= utils.nint(config.get("cut_turnaround_margin", margin)*srate/2)
#	cut   = cut.widen(margin)
#	return cut

# New, simpler turnaround cuts. Simply cuts a given number of degrees away from the
# extrema.
config.default("cut_turnaround_margin", 0.2, "Margin for turnaround cut in degrees.")
def turnaround_cut(az, margin=None):
	margin = config.get("cut_turnaround_margin", margin)*utils.degree
	# Use percentiles just in case there are outliers (for example a scan
	# that's a bit higher than the others).
	az1    = np.percentile(az,  0.1)
	az2    = np.percentile(az, 99.9)
	# Cut samples within margin of the azimuth extremes
	mask   = (az<az1+margin)|(az>az2-margin)
	cut    = sampcut.from_mask(mask)
	return cut


config.default("cut_ground_az", "57:62,-62:-57,73:75", "Az ranges to consider for ground cut")
config.default("cut_ground_el", "0:38", "El ranges to consider for ground cut")
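
# The az/el range strings above have the form "lo1:hi1,lo2:hi2,...". A small
# hypothetical parser, for illustration only (the real parsing is done inside
# ground_cut or a helper not shown here):
def parse_ranges(desc):
	"""Parse e.g. "57:62,-62:-57" into [(57.0, 62.0), (-62.0, -57.0)]."""
	return [tuple(float(w) for w in part.split(":")) for part in desc.split(",")]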
def ground_cut(bore, det_offs, az_ranges=None, el_ranges=None):
Exemplo n.º 44
0
                                 mapping.oimap[None],
                                 order=1)
        res.hwp_phase = np.ascontiguousarray(
            utils.interpol(res.hwp_phase.T, mapping.oimap[None], order=1).T)
        try:
            res.dark_tod = utils.interpol(res.dark_tod, mapping.oimap[None])
            res.dark_cut = resample_cut(res.dark_cut, mapping)
        except AttributeError as e:
            pass
        res.cut = resample_cut(res.cut, mapping)
        res.cut_noiseest = resample_cut(res.cut_noiseest, mapping)
        res.noise = resample_noise(res.noise, mapping)
        return res


config.default("downsample_method", "fft",
               "Method to use when downsampling the TOD")


class H5Scan(Scan):
    def __init__(self, fname):
        self.fname = fname
        with h5py.File(fname, "r") as hfile:
            for k in ["boresight", "offsets", "comps", "sys", "mjd0", "dets"]:
                # dataset.value was removed in h5py 3.0; [()] reads the full dataset
                setattr(self, k, hfile[k][()])
            n = self.boresight.shape[0]
            neach = hfile["cut/neach"][()]
            flat = hfile["cut/flat"][()]
            self.cut = sampcut.Sampcut(flat, utils.cumsum(neach,
                                                          endpoint=True), n)
            self.cut_noiseest = self.cut.copy()
            self.noise = nmat.read_nmat(hfile, "noise")
Exemplo n.º 45
0
import numpy as np, time, h5py, copy, argparse, os, sys, pipes, shutil, re
from enlib import enmap, utils, pmat, fft, config, array_ops, mapmaking, nmat, errors, mpi
from enlib import log, bench, dmap, coordinates, scan as enscan, scanutils
from enlib import pointsrcs, bunch
from enlib.cg import CG
from enlib.source_model import SourceModel
from enact import actscan, nmat_measure, filedb, todinfo

config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default("downsample", 1, "Factor with which to downsample the TOD")
config.default("map_cg_nmax", 500,
               "Max number of CG steps to perform in map-making")
config.default(
    "verbosity", 1,
    "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages."
)
config.default(
    "task_dist", "size",
    "How to assign scans to each mpi task. Can be 'plain' for comm.rank:n:comm.size-type assignment, 'size' for equal-total-size assignment. The optimal would be 'time', for equal total time for each, but that's not implemented currently."
)
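
# A hedged sketch of the 'size' strategy above (hypothetical helper, not
# necessarily how the task distribution is really implemented): greedily
# assign each scan, largest first, to the task with the smallest total so far.
def assign_by_size(sizes, ntask):
    """Return one list of scan indices per mpi task, balancing total size."""
    order  = np.argsort(sizes)[::-1]
    totals = np.zeros(ntask)
    groups = [[] for _ in range(ntask)]
    for i in order:
        t = int(np.argmin(totals))
        groups[t].append(int(i))
        totals[t] += sizes[i]
    return groups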
config.default("gfilter_jon", False, "Whether to enable Jon's ground filter.")
config.default(
    "map_ptsrc_handling", "subadd",
    "How to handle point sources in the map. Can be 'none' for no special treatment, 'subadd' to subtract them from the TOD and re-add them in pixel space, and 'sim' to simulate a point-source-only TOD."
)
config.default(
    "map_ptsrc_sys", "cel",
    "Coordinate system the point source positions are specified in. Default is 'cel'"
)
config.default(
    "map_format", "fits",
Exemplo n.º 46
0
import numpy as np, os, sys, zipfile
from enlib import config, utils, coordinates, targets, mpi
from enact import filedb, files

config.default("filedb", "filedb.txt",
               "File describing the location of the TOD and their metadata")
parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("query")
parser.add_argument("-r", "--focalplane-radius", type=float, default=1.0)
parser.add_argument("-A", "--all-dists", action="store_true")
args = parser.parse_args()

comm = mpi.COMM_WORLD

filedb.init()
db = filedb.data
filelist = filedb.scans[args.query]
hprint = False
for ind in range(comm.rank, len(filelist), comm.size):
    id = filelist[ind]
    entry = db.query(id, multi=False)

    # Get a few representative samples
    site = files.read_site(entry.site)
    try:
        bore = files.read_boresight(entry.tod)[0]
    except zipfile.BadZipfile:
        print("%s %9.3f %9.3f %9.3f %9.3f %s" % (id, np.nan, np.nan, np.nan,
                                                 np.nan, "badzip"))
        continue
    if bore.shape[0] < 3 or bore.shape[1] < 1:
Exemplo n.º 47
0
import numpy as np, sys, os
from enlib import enmap, config, log, pmat, mpi, utils, scan as enscan, errors
from enact import actscan, filedb, todinfo

config.default("downsample", 1, "Factor with which to downsample the TOD")
config.default(
    "verbosity", 1,
    "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages."
)

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("sel")
parser.add_argument("area")
parser.add_argument("odir")
parser.add_argument("prefix", nargs="?")
args = parser.parse_args()

filedb.init()
ids = filedb.scans[args.sel]

comm = mpi.COMM_WORLD
dtype = np.float64
area = enmap.read_map(args.area).astype(dtype)

utils.mkdir(args.odir)
root = args.odir + "/" + (args.prefix + "_" if args.prefix else "")

# Set up logging
utils.mkdir(root + "log")
logfile = root + "log/log%03d.txt" % comm.rank
log_level = log.verbosity2level(config.get("verbosity"))
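
# The excerpt ends before the logger itself is constructed from log_level and
# logfile. A hedged, stdlib-only sketch of the same idea (the real code uses
# enlib.log, whose exact interface is not shown in this excerpt):
import logging

def init_rank_logger(logfile, level, rank):
    """Write log messages at the given level to a per-rank file."""
    logger  = logging.getLogger("rank%03d" % rank)
    logger.setLevel(level)
    handler = logging.FileHandler(logfile)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    logger.addHandler(handler)
    return logger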
Exemplo n.º 48
0
# Scans through the indicated tods and computes the ratio
# between the power at mid and high frequency to determine
# how white the white noise floor is. Cuts detectors that
# aren't white enough. Also cuts detectors that have suspiciously
# low white noise floors. (A hedged sketch of the ratio test
# follows the imports below.)

import numpy as np, argparse, h5py, os, sys, shutil
from enlib import fft, utils, enmap, errors, config, mpi, todfilter
from enact import filedb, actdata, filters
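
# A minimal sketch of the whiteness test described in the header comment
# (hypothetical helper; the band edges are illustrative, cf. the -f and -R
# arguments below). numpy's fft is used here for self-containedness.
def whiteness_ratio(tod, srate, band_mid=(10, 20), band_high=(100, 150)):
    """Per-detector ratio of mean power in a mid band to a high band.
    Ratios far from 1 indicate a noise floor that isn't white."""
    ps   = np.abs(np.fft.rfft(tod, axis=-1))**2
    freq = np.fft.rfftfreq(tod.shape[-1], 1.0/srate)
    mid  = ps[:, (freq >= band_mid[0])  & (freq < band_mid[1])].mean(-1)
    high = ps[:, (freq >= band_high[0]) & (freq < band_high[1])].mean(-1)
    return mid/high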

config.default(
    "gfilter_jon_nhwp", 200,
    "The number of hwp modes to fit/subtract in Jon's polynomial ground filter."
)
parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("sel")
parser.add_argument("odir")
parser.add_argument("-f", type=str, default="10:1,100:1")
parser.add_argument("-R", type=str, default="0.5:3")
parser.add_argument(
    "--max-sens",
    type=float,
    default=20,
    help=
    "Reject detectors more than this times more sensitive than the median at any of the indicated frequencies. Set to 0 to disable."
)
parser.add_argument("--full-stats", action="store_true")
args = parser.parse_args()

comm = mpi.COMM_WORLD
srate = 400.
Exemplo n.º 49
0
# which would be 8-42 minutes. And that's using 16 cores! That's too slow. So this one is
# useful for comparing with faster methods for a few reference tods, but not in general.
# Currently N and P take similar time. P can be optimized further with some effort, but N is
# dominated by ffts and can't be improved much.
from __future__ import division, print_function
import numpy as np, time, astropy.io.fits, os, sys
from scipy import optimize
from enlib import utils
with utils.nowarn(): import h5py
from enlib import mpi, errors, fft, mapmaking, config, jointmap, pointsrcs
from enlib import pmat, coordinates, enmap, bench, bunch, nmat, sampcut, gapfill, wcsutils
from enact import filedb, actdata, actscan, nmat_measure

config.set("downsample", 1, "Amount to downsample tod by")
config.set("gapfill", "linear", "Gapfiller to use. Can be 'linear' or 'joneig'")
config.default("pmat_interpol_pad", 10.0, "Number of arcminutes to pad the interpolation coordinate system by")
config.default("pmat_interpol_max_size", 4000000, "Maximum mesh size in pointing interpolation. Worst-case time and memory scale at most proportionally with this.")

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("mode", help="Mode to use. Can be srcs or planet. This sets up useful defaults for other arguments")
parser.add_argument("srcdb_or_planet")
parser.add_argument("sel")
parser.add_argument("odir")
parser.add_argument("-R", "--radius",    type=float, default=12)
parser.add_argument("-r", "--res",       type=float, default=0.1)
parser.add_argument("-m", "--method",    type=str,   default="fixamp")
parser.add_argument("-M", "--minimaps",  action="store_true")
parser.add_argument("-c", "--cont",      action="store_true")
parser.add_argument("-s", "--srcs",      type=str,   default=None)
parser.add_argument("-A", "--minamp",    type=float, default=None)
parser.add_argument("-v", "--verbose",   action="count", default=0)