Example #1
def get_pix_ranges(shape, wcs, horbox, daz, nt=4, azdown=1, ndet=1.0):
	(t1,t2),(az1,az2),el = horbox[:,0], horbox[:,1], np.mean(horbox[:,2])
	nphi = np.abs(utils.nint(360/wcs.wcs.cdelt[0]))
	# Find the pixel coordinates of first az sweep
	naz  = utils.nint(np.abs(az2-az1)/daz)//azdown
	ahor = np.zeros([3,naz])
	ahor[0] = utils.ctime2mjd(t1)
	ahor[1] = np.linspace(az1,az2,naz)
	ahor[2] = el
	acel    = coordinates.transform("hor","cel",ahor[1:],time=ahor[0],site=site)
	y, x1   = upscale(fixx(utils.nint(enmap.sky2pix(shape, wcs, acel[::-1])),nphi),azdown)
	# Reduce to unique y values
	_, uinds, hits = np.unique(y, return_index=True, return_counts=True)
	y, x1 = y[uinds], x1[uinds]
	# Find the pixel coordinates of time drift
	thor = np.zeros([3,nt])
	thor[0] = utils.ctime2mjd(np.linspace(t1,t2,nt))
	thor[1] = az1
	thor[2] = el
	tcel    = coordinates.transform("hor","cel",thor[1:],time=thor[0],site=site)
	_, tx   = utils.nint(fixx(enmap.sky2pix(shape, wcs, tcel[::-1]),nphi))
	x2 = x1 + tx[-1]-tx[0]
	x1, x2  = np.minimum(x1,x2), np.maximum(x1,x2)
	pix_ranges = np.concatenate([y[:,None],x1[:,None],x2[:,None]],1)
	# Weight per pixel in pix ranges. If ndet=1 this corresponds to
	# telescope time per output pixel
	weights = (t2-t1)/(naz*azdown)/(x2-x1)*ndet * hits
	return pix_ranges, weights
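Note: every example on this page uses utils.ctime2mjd to convert unix time (ctime) to Modified Julian Date before transforming coordinates. As a rough sketch of what such a helper computes, assuming a plain UTC-based MJD with no leap-second handling (ctime2mjd_sketch is a hypothetical stand-in, not necessarily enlib's exact implementation):

import numpy as np

def ctime2mjd_sketch(ctime):
	# The unix epoch 1970-01-01T00:00:00 UTC falls on MJD 40587,
	# and MJD advances by one per 86400 seconds.
	return np.asarray(ctime)/86400.0 + 40587.0

ctime2mjd_sketch(1500000000.0)  # -> about 57948.11 (mid-July 2017)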
Example #2
def calc_driftangle(hor, t, site):
	hor = np.atleast_2d(hor).T
	t   = np.atleast_1d(t)
	equ = coordinates.transform("hor", "equ", hor, time=utils.ctime2mjd(t), site=site)
	hor_drift = utils.rewind(coordinates.transform("equ","hor", equ, time=utils.ctime2mjd(t+1), site=site),hor,2*np.pi)
	vec_drift = hor_drift-hor
	# Compute angle between this vector and the az axis
	angle = np.arctan2(vec_drift[1],vec_drift[0]*np.cos(hor[1]))%np.pi
	return angle
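A hypothetical call, assuming the enlib-style utils module used above is imported and site holds the telescope site parameters (the values below are made up for illustration):

import numpy as np
hor   = np.array([60.0, 45.0])*utils.degree  # [az, el] in radians
t     = 1500000000.0                         # unix time (ctime)
angle = calc_driftangle(hor, t, site)        # drift direction relative to the az axis, in [0, pi)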
Example #3
def setup_extra_transforms(param):
	extra = []
	if "p9" in param:
		# Planet 9 search coordinate system: p9=elemfile:tref. Includes
		# both parallax and motion compensation
		toks     = param["p9"].split(":")
		elemfile = toks[0]
		tref     = float(toks[1]) if len(toks)>1 else 1380000000.0
		tref     = utils.ctime2mjd(tref)
		obj      = ephemeris.read_object(elemfile)
		p9       = planet9.MotionCompensator(obj)
		def trf(pos, time):
			# We ignore the polarization rotation for now
			opos = pos.copy()
			opos[:2] = p9.compensate(pos[:2], time, tref)
			return opos
		extra.append(trf)
	if "parallax" in param:
		# Simple parallax compensation. parallax=dist, with dist in AU
		dist = float(param["parallax"])
		def trf(pos, time):
			opos = pos.copy()
			opos[:2] = parallax.earth2sun(pos[:2], time, dist)
			return opos
		extra.append(trf)
	return extra
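The returned transforms are meant to be chained: each trf maps (pos, time) to a compensated position, leaving any extra rows (e.g. polarization) untouched. A hedged usage sketch, with a made-up parameter value:

extra = setup_extra_transforms({"parallax": "500"})  # compensate parallax for an object 500 AU away
for trf in extra:
	pos = trf(pos, time)  # pos[:2] replaced by the compensated coordinates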
Example #4
 def __call__(self, sys1, sys2, pos):
     mjd = utils.ctime2mjd(self.data["t"])
     bore = np.array([self.data["az"], self.data["el"]]) * utils.degree
     hor = coordinates.transform(
         sys1, sys2, np.asarray(pos) * utils.degree, mjd,
         bore=bore) / utils.degree
     return hor
Example #5
def avoidance_cut_old(bore, det_offs, site, name_or_pos, margin):
    """Cut samples that get too close to the specified object
	(e.g. "Sun" or "Moon") or celestial position ([ra,dec] in racians).
	Margin specifies how much to avoid the object by."""
    cmargin = np.cos(margin)
    mjd = utils.ctime2mjd(bore[0])
    obj_pos = coordinates.interpol_pos("cel", "hor", name_or_pos, mjd, site)
    obj_rect = utils.ang2rect(obj_pos, zenith=False)
    # Only cut if above horizon
    above_horizon = obj_pos[1] > 0
    if np.all(~above_horizon):
        return sampcut.empty(det_offs.shape[0], bore.shape[1])
    cuts = []
    for di, off in enumerate(det_offs):
        det_pos = bore[1:] + off[:, None]
        #det_rect1 = utils.ang2rect(det_pos, zenith=False) # slow
        # Not sure defining an ang2rect in array_ops is worth it... It's ugly
        # (angle convention hard-coded) and only gives a factor 2 speedup on a laptop.
        det_rect = array_ops.ang2rect(np.ascontiguousarray(det_pos.T)).T
        cdist = np.sum(obj_rect * det_rect, 0)
        # Cut samples above horizon that are too close
        bad = (cdist > cmargin) & above_horizon
        cuts.append(sampcut.from_mask(bad))
    res = sampcut.stack(cuts)
    return res
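The test cdist > cmargin works because the dot product of two unit vectors is the cosine of their angular separation, so separation < margin is equivalent to cos(separation) > cos(margin). A self-contained numpy sketch of the same test, using one common (az, el) convention (an illustration, not enlib's actual ang2rect):

import numpy as np

def ang2rect_sketch(ang):
    # ang = [az, el] in radians -> unit vector on the sphere
    az, el = ang
    return np.array([np.cos(el)*np.cos(az), np.cos(el)*np.sin(az), np.sin(el)])

def too_close(pos1, pos2, margin):
    # True where the angular separation between pos1 and pos2 is below margin
    return np.sum(ang2rect_sketch(pos1)*ang2rect_sketch(pos2), 0) > np.cos(margin)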
Example #6
 def __call__(self, fname):
     mjd = utils.ctime2mjd(self.data["t"])
     mjd[~np.isfinite(mjd)] = 0
     obj = ephemeris.read_object(fname)
     pos = ephemeris.ephem_pos(obj, mjd)[:2]
     pos /= utils.degree
     return pos
Example #7
def calc_driftangle(hor, t, site):
    hor = np.atleast_2d(hor).T
    t = np.atleast_1d(t)
    equ = coordinates.transform("hor",
                                "equ",
                                hor,
                                time=utils.ctime2mjd(t),
                                site=site)
    hor_drift = utils.rewind(
        coordinates.transform("equ",
                              "hor",
                              equ,
                              time=utils.ctime2mjd(t + 1),
                              site=site), hor, 2 * np.pi)
    vec_drift = hor_drift - hor
    # Compute angle between this vector and the az axis
    angle = np.arctan2(vec_drift[1], vec_drift[0] * np.cos(hor[1])) % np.pi
    return angle
Example #8
    def summarize(self, chain):
        """Given a chain of lpars, compute a summary in the
		same format as returned by SrcFitterML."""
        dposs = np.array([c.dpos for c in chain])
        poss_cel = np.array([c.poss for c in chain])
        poss_hor = poss_cel * 0
        for i in range(len(self.sdata)):
            poss_hor[:, i] = coordinates.transform("cel",
                                                   "hor",
                                                   poss_cel[:, i].T,
                                                   time=utils.ctime2mjd(
                                                       self.sdata[i].ctime),
                                                   site=self.sdata[i].site).T
        ampss = np.array([c.amps for c in chain])
        vampss = np.array([c.vamps for c in chain])
        chisq0 = chain[0].chisq0
        chisq = np.mean([c.chisq for c in chain])
        # Compute means
        dpos = np.mean(dposs, 0)
        pos_cel = np.mean(poss_cel, 0)
        pos_hor = np.mean(poss_hor, 0)
        amps = np.mean(ampss, 0)
        # And uncertainties
        dpos_cov = np.cov(dposs.T)
        ddpos = np.diag(dpos_cov)**0.5
        pcorr = dpos_cov[0, 1] / ddpos[0] / ddpos[1]
        # mean([da0_i + da_ij]**2). Uncorrelated, so this is
        # mean(da0**2) + mean(da**2)
        arel = ampss + np.random.standard_normal(ampss.shape) * vampss**0.5
        amps = np.mean(arel, 0)
        vamps = np.var(arel, 0)
        #vamps   = np.var(ampss,0)+np.mean(vampss,0)
        damps = vamps**0.5
        models = self.lik.eval(dpos).models
        nsigma = (chisq0 - chisq)**0.5
        # We want how much to offset detector by, not how much to offset
        # source by
        res = bunch.Bunch(dpos=dpos,
                          poss_cel=pos_cel,
                          poss_hor=pos_hor,
                          ddpos=ddpos,
                          amps=amps,
                          damps=damps,
                          pcorr=pcorr,
                          nsrc=len(self.sdata),
                          models=models,
                          nsigma=nsigma,
                          chisq0=chisq0,
                          chisq=chisq,
                          npix=self.lik.npix)
        return res
Example #9
def get_sids_in_tod(id, src_pos, bounds, ind, isids=None, src_sys="cel"):
	if isids is None: isids = list(range(src_pos.shape[-1]))
	if bounds is not None:
		poly      = bounds[:,:,ind]*utils.degree
		poly[0]   = utils.rewind(poly[0],poly[0,0])
		# bounds are defined in celestial coordinates. Must convert srcpos for comparison
		mjd       = utils.ctime2mjd(float(id.split(".")[0]))
		srccel    = coordinates.transform(src_sys, "cel", src_pos, time=mjd)
		srccel[0] = utils.rewind(srccel[0], poly[0,0])
		poly      = pad_polygon(poly.T, poly_pad).T
		accepted  = np.where(utils.point_in_polygon(srccel.T, poly.T))[0]
		sids      = [isids[i] for i in accepted]
	else:
		sids = isids
	return sids
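utils.rewind appears throughout these examples to avoid spurious 360-degree jumps when comparing angles. A sketch of the wrapping it performs, assuming the usual center-on-a-reference semantics:

import numpy as np

def rewind_sketch(a, ref=0, period=2*np.pi):
	# Shift a by whole periods so it lands within period/2 of ref
	return ref + (np.asarray(a) - ref + period/2) % period - period/2

rewind_sketch(7.0)  # -> 7 - 2*pi, about 0.717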
Example #10
def build_rangedata(tod, rcut, d, ivar):
    nmax = np.max(rcut.ranges[:, 1] - rcut.ranges[:, 0])
    nrange = rcut.nrange

    rdata = bunch.Bunch()
    rdata.detmap = np.zeros(nrange, int)
    rdata.tod = np.zeros([nrange, nmax], dtype)
    rdata.pos = np.zeros([nrange, nmax, 2])
    rdata.ivar = np.zeros([nrange, nmax], dtype)
    # Build our detector mapping
    for di in range(rcut.ndet):
        rdata.detmap[rcut.detmap[di]:rcut.detmap[di + 1]] = di
    rdata.n = rcut.ranges[:, 1] - rcut.ranges[:, 0]
    # Extract our tod samples and coordinates
    for i, r in enumerate(rcut.ranges):
        di = rdata.detmap[i]
        rn = r[1] - r[0]
        rdata.tod[i, :rn] = tod[di, r[0]:r[1]]
        bore = d.boresight[:, r[0]:r[1]]
        mjd = utils.ctime2mjd(bore[0])
        pos_hor = bore[1:] + d.point_offset[di, :, None]
        pos_rel = coordinates.transform(tod_sys,
                                        sys,
                                        pos_hor,
                                        time=mjd,
                                        site=d.site)
        rdata.pos[i, :rn] = pos_rel.T
        # Expand noise ivar too, including the effect of our normal data cut
        rdata.ivar[i, :rn] = ivar[di] * (1 - d.cut[di:di + 1, r[0]:r[1]].to_mask()[0])

    # Precompute our fourier space units
    rdata.freqs = fft.rfftfreq(nmax, 1 / d.srate)
    # And precompute our Butterworth filter
    rdata.butter = filters.mce_filter(rdata.freqs, d.mce_fsamp, d.mce_params)
    # Store the fiducial time constants for reference
    rdata.tau = d.tau
    # These are also nice to have
    rdata.dsens = ivar**-0.5 / d.srate**0.5
    rdata.asens = np.sum(ivar)**-0.5 / d.srate**0.5
    rdata.srate = d.srate
    rdata.dets = d.dets
    rdata.beam = d.beam
    rdata.id = d.entry.id
    return rdata
Example #11
 def calc_full_result(self, dpos, marginalize=True):
     res = self.lik.eval(dpos)
     res.poss_cel = res.poss
     res.poss_hor = np.array([
         coordinates.transform("cel",
                               "hor",
                               res.poss_cel[i],
                               utils.ctime2mjd(self.sdata[i].ctime),
                               site=self.sdata[i].site)
         for i in range(self.nsrc)
     ])
     # Get the position uncertainty
     hess = self.calc_hessian(dpos, step=0.1 * utils.arcmin)
     hess = 0.5 * (hess + hess.T)
     try:
         pcov = np.linalg.inv(0.5 * hess)
     except np.linalg.LinAlgError:
         pcov = np.diag([np.inf, np.inf])
     dchisq = res.chisq0 - res.chisq
     # Apply marginalization correction
     if marginalize:
         R = self.lik.rmax
         A = np.pi * R**2
         Abeam = 2 * np.pi * self.fwhm**2 / (8 * np.log(2))
         npoint = int(np.round(A / Abeam * self.nsrc))
         # Correct position and uncertainty
         dpos, pcov = marg_pos(dpos, pcov, res.chisq0 - res.chisq, npoint,
                               R)
         # Correct total chisquare
         prob = stats.norm.cdf(-dchisq**0.5)
         if prob > 1e-10:
             prob = 1 - (1 - prob)**npoint
             dchisq = stats.norm.ppf(prob)**2
     ddpos = np.diag(pcov)**0.5
     pcorr = pcov[0, 1] / ddpos[0] / ddpos[1]
     res.dpos = dpos
     res.ddpos = ddpos
     res.damps = res.vamps**0.5
     res.pcorr = pcorr
     res.pcov = pcov
     res.nsrc = self.nsrc
     res.dchisq = dchisq
     res.nsigma = dchisq**0.5
     return res
Example #12
	def __init__(self, sdat):
		self.mjd  = utils.ctime2mjd(sdat.ctime)
		self.site = sdat.site
		self.ref_cel = sdat.srcpos
		# Find the boresight pointing that defines our focalplane
		# coordinates. This is not exact for two reasons:
		# 1. The array center is offset from the boresight, by about 1 degree.
		# 2. The detectors are offset from the array center by half that.
		# We could store the former to improve our accuracy a bit, but
		# to get #2 we would need a time-domain fit, which is what we're
		# trying to avoid here. The typical error from using the array center
		# instead would be about 1' offset * 1 degree error = 1.05 arcsec error.
		# We *can* easily get the boresight elevation since we have constant
		# elevation scans, so that removes half the error. That should be
		# good enough.
		self.bore= [
				coordinates.transform("cel","hor",sdat.srcpos,time=self.mjd,site=self.site)[0],
				sdat.el ]
		self.ref_foc = cel2foc(self.ref_cel, self.site, self.mjd, self.bore)
Example #13
def avoidance_cut(bore, det_offs, site, name_or_pos, margin):
    """Cut samples that get too close to the specified object
	(e.g. "Sun" or "Moon") or celestial position ([ra,dec] in radians).
	Margin specifies how much to avoid the object by."""
    cmargin = np.cos(margin)
    mjd = utils.ctime2mjd(bore[0])
    obj_pos = coordinates.interpol_pos("cel", "hor", name_or_pos, mjd, site)
    obj_pos[0] = utils.rewind(obj_pos[0], bore[1])
    cosel = np.cos(obj_pos[1])
    # Only cut if above horizon
    above_horizon = obj_pos[1] > 0
    null_cut = sampcut.empty(det_offs.shape[0], bore.shape[1])
    if np.all(~above_horizon): return null_cut
    # Find center of array, and radius
    arr_center = np.mean(det_offs, 0)
    arr_rad = np.max(np.sum((det_offs - arr_center)**2, 1)**0.5)

    def calc_mask(det_pos, rad, mask=slice(None)):
        offs = (det_pos - obj_pos[:, mask])
        offs[0] *= cosel[mask]
        dists2 = np.sum(offs**2, 0)
        return dists2 < rad**2

    # Find samples that could possibly be near object for any detector
    cand_mask = calc_mask(arr_center[:, None] + bore[1:], margin + arr_rad)
    cand_mask &= above_horizon
    cand_inds = np.where(cand_mask)[0]
    if len(cand_inds) == 0: return null_cut
    # Loop through all detectors and find out if each candidate actually intersects
    cuts = []
    for di, off in enumerate(det_offs):
        det_pos = bore[1:, cand_inds] + off[:, None]
        det_mask = calc_mask(det_pos, margin, cand_inds)
        # Expand mask to full set
        det_mask_full = np.zeros(bore.shape[1], bool)
        det_mask_full[cand_inds] = det_mask
        # And use this to build actual cuts
        cuts.append(sampcut.from_mask(det_mask_full))
    res = sampcut.stack(cuts)
    return res
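sampcut.from_mask converts a per-sample boolean mask into ranges of cut samples. For readers without the sampcut module, the core mask-to-ranges step can be sketched in plain numpy (an illustration of the idea, not the real API):

import numpy as np

def mask2ranges(mask):
    # Boolean mask -> [:, {start, end}] index ranges of consecutive True runs
    padded = np.concatenate([[0], mask.astype(np.int8), [0]])
    edges = np.flatnonzero(np.diff(padded))
    return edges.reshape(-1, 2)

mask2ranges(np.array([0, 1, 1, 0, 1], bool))  # -> [[1, 3], [4, 5]]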
Example #14
def build_rangedata(tod, rcut, d, ivar):
	nmax   = np.max(rcut.ranges[:,1]-rcut.ranges[:,0])
	nrange = rcut.nrange

	rdata  = bunch.Bunch()
	rdata.detmap = np.zeros(nrange,int)
	rdata.tod = np.zeros([nrange,nmax],dtype)
	rdata.pos = np.zeros([nrange,nmax,2])
	rdata.ivar= np.zeros([nrange,nmax],dtype)
	# Build our detector mapping
	for di in range(rcut.ndet):
		rdata.detmap[rcut.detmap[di]:rcut.detmap[di+1]] = di
	rdata.n = rcut.ranges[:,1]-rcut.ranges[:,0]
	# Extract our tod samples and coordinates
	for i, r in enumerate(rcut.ranges):
		di  = rdata.detmap[i]
		rn  = r[1]-r[0]
		rdata.tod[i,:rn] = tod[di,r[0]:r[1]]
		bore = d.boresight[:,r[0]:r[1]]
		mjd  = utils.ctime2mjd(bore[0])
		pos_hor = bore[1:] + d.point_offset[di,:,None]
		pos_rel = coordinates.transform(tod_sys, sys, pos_hor, time=mjd, site=d.site)
		rdata.pos[i,:rn] = pos_rel.T
		# Expand noise ivar too, including the effect of our normal data cut
		rdata.ivar[i,:rn] = ivar[di] * (1-d.cut[di:di+1,r[0]:r[1]].to_mask()[0])
	
	# Precompute our fourier space units
	rdata.freqs  = fft.rfftfreq(nmax, 1/d.srate)
	# And precompute our Butterworth filter
	rdata.butter = filters.mce_filter(rdata.freqs, d.mce_fsamp, d.mce_params)
	# Store the fiducial time constants for reference
	rdata.tau    = d.tau
	# These are also nice to have
	rdata.dsens = ivar**-0.5 / d.srate**0.5
	rdata.asens = np.sum(ivar)**-0.5 / d.srate**0.5
	rdata.srate = d.srate
	rdata.dets  = d.dets
	rdata.beam  = d.beam
	rdata.id    = d.entry.id
	return rdata
Example #15
	def summarize(self, chain):
		"""Given a chain of lpars, compute a summary in the
		same format as returned by SrcFitterML."""
		dposs    = np.array([c.dpos for c in chain])
		poss_cel = np.array([c.poss for c in chain])
		poss_hor = poss_cel*0
		for i in range(len(self.sdata)):
			poss_hor[:,i] = coordinates.transform("cel","hor",poss_cel[:,i].T,
					time=utils.ctime2mjd(self.sdata[i].ctime), site=self.sdata[i].site).T
		ampss  = np.array([c.amps for c in chain])
		vampss = np.array([c.vamps for c in chain])
		chisq0 = chain[0].chisq0
		chisq  = np.mean([c.chisq for c in chain])
		# Compute means
		dpos    = np.mean(dposs,0)
		pos_cel = np.mean(poss_cel,0)
		pos_hor = np.mean(poss_hor,0)
		amps    = np.mean(ampss,0)
		# And uncertainties
		dpos_cov= np.cov(dposs.T)
		ddpos   = np.diag(dpos_cov)**0.5
		pcorr   = dpos_cov[0,1]/ddpos[0]/ddpos[1]
		# mean([da0_i + da_ij]**2). Uncorrelated, so this is
		# mean(da0**2) + mean(da**2)
		arel    = ampss + np.random.standard_normal(ampss.shape)*vampss**0.5
		amps    = np.mean(arel,0)
		vamps   = np.var(arel,0)
		#vamps   = np.var(ampss,0)+np.mean(vampss,0)
		damps   = vamps**0.5
		models  = self.lik.eval(dpos).models
		nsigma  = (chisq0-chisq)**0.5
		# We want how much to offset detector by, not how much to offset
		# source by
		res  = bunch.Bunch(
				dpos = dpos,  poss_cel=pos_cel, poss_hor=pos_hor,
				ddpos= ddpos, amps=amps, damps=damps, pcorr=pcorr,
				nsrc = len(self.sdata), models=models,
				nsigma = nsigma, chisq0 = chisq0, chisq = chisq, npix=self.lik.npix)
		return res
Example #16
	def calc_full_result(self, dpos, marginalize=True):
		res = self.lik.eval(dpos)
		res.poss_cel = res.poss
		res.poss_hor = np.array([
				coordinates.transform("cel","hor",res.poss_cel[i],utils.ctime2mjd(self.sdata[i].ctime),site=self.sdata[i].site) for i in range(self.nsrc)
			])
		# Get the position uncertainty
		hess  = self.calc_hessian(dpos, step=0.1*utils.arcmin)
		hess  = 0.5*(hess+hess.T)
		try:
			pcov  = np.linalg.inv(0.5*hess)
		except np.linalg.LinAlgError:
			pcov  = np.diag([np.inf,np.inf])
		dchisq  = res.chisq0-res.chisq
		# Apply marginalization correction
		if marginalize:
			R = self.lik.rmax
			A = np.pi*R**2
			Abeam = 2*np.pi*self.fwhm**2/(8*np.log(2))
			npoint= int(np.round(A/Abeam * self.nsrc))
			# Correct position and uncertainty
			dpos, pcov = marg_pos(dpos, pcov, res.chisq0-res.chisq, npoint, R)
			# Correct total chisquare
			prob   = stats.norm.cdf(-dchisq**0.5)
			if prob > 1e-10:
				prob   = 1-(1-prob)**npoint
				dchisq = stats.norm.ppf(prob)**2
		ddpos = np.diag(pcov)**0.5
		pcorr = pcov[0,1]/ddpos[0]/ddpos[1]
		res.dpos  = dpos
		res.ddpos = ddpos
		res.damps = res.vamps**0.5
		res.pcorr = pcorr
		res.pcov  = pcov
		res.nsrc  = self.nsrc
		res.dchisq= dchisq
		res.nsigma= dchisq**0.5
		return res
Example #17
def avoidance_cut(bore, det_offs, site, name_or_pos, margin):
	"""Cut samples that get too close to the specified object
	(e.g. "Sun" or "Moon") or celestial position ([ra,dec] in radians).
	Margin specifies how much to avoid the object by."""
	cmargin = np.cos(margin)
	mjd = utils.ctime2mjd(bore[0])
	obj_pos    = coordinates.interpol_pos("cel","hor",name_or_pos,mjd,site)
	obj_pos[0] = utils.rewind(obj_pos[0], bore[1])
	cosel      = np.cos(obj_pos[1])
	# Only cut if above horizon
	above_horizon = obj_pos[1]>0
	null_cut = sampcut.empty(det_offs.shape[0], bore.shape[1])
	if np.all(~above_horizon): return null_cut
	# Find center of array, and radius
	arr_center = np.mean(det_offs,0)
	arr_rad    = np.max(np.sum((det_offs-arr_center)**2,1)**0.5)
	def calc_mask(det_pos, rad, mask=slice(None)):
		offs  = (det_pos-obj_pos[:,mask])
		offs[0] *= cosel[mask]
		dists2= np.sum(offs**2,0)
		return dists2 < rad**2
	# Find samples that could possibly be near object for any detector
	cand_mask  = calc_mask(arr_center[:,None] + bore[1:], margin+arr_rad)
	cand_mask &= above_horizon
	cand_inds  = np.where(cand_mask)[0]
	if len(cand_inds) == 0: return null_cut
	# Loop through all detectors and find out if each candidate actually intersects
	cuts = []
	for di, off in enumerate(det_offs):
		det_pos  = bore[1:,cand_inds]+off[:,None]
		det_mask = calc_mask(det_pos, margin, cand_inds)
		# Expand mask to full set
		det_mask_full = np.zeros(bore.shape[1], bool)
		det_mask_full[cand_inds] = det_mask
		# And use this to build actual cuts
		cuts.append(sampcut.from_mask(det_mask_full))
	res = sampcut.stack(cuts)
	return res
Example #18
 def __init__(self, sdat):
     self.mjd = utils.ctime2mjd(sdat.ctime)
     self.site = sdat.site
     self.ref_cel = sdat.srcpos
     # Find the boresight pointing that defines our focalplane
     # coordinates. This is not exact for two reasons:
     # 1. The array center is offset from the boresight, by about 1 degree.
     # 2. The detectors are offset from the array center by half that.
     # We could store the former to improve our accuracy a bit, but
     # to get #2 we would need a time-domain fit, which is what we're
     # trying to avoid here. The typical error from using the array center
     # instead would be about 1' offset * 1 degree error = 1.05 arcsec error.
     # We *can* easily get the boresight elevation since we have constant
     # elevation scans, so that removes half the error. That should be
     # good enough.
     self.bore = [
         coordinates.transform("cel",
                               "hor",
                               sdat.srcpos,
                               time=self.mjd,
                               site=self.site)[0], sdat.el
     ]
     self.ref_foc = cel2foc(self.ref_cel, self.site, self.mjd, self.bore)
Example #19
def avoidance_cut_old(bore, det_offs, site, name_or_pos, margin):
	"""Cut samples that get too close to the specified object
	(e.g. "Sun" or "Moon") or celestial position ([ra,dec] in radians).
	Margin specifies how much to avoid the object by."""
	cmargin = np.cos(margin)
	mjd = utils.ctime2mjd(bore[0])
	obj_pos  = coordinates.interpol_pos("cel","hor",name_or_pos,mjd,site)
	obj_rect = utils.ang2rect(obj_pos, zenith=False)
	# Only cut if above horizon
	above_horizon = obj_pos[1]>0
	if np.all(~above_horizon): return sampcut.empty(det_offs.shape[0], bore.shape[1])
	cuts = []
	for di, off in enumerate(det_offs):
		det_pos  = bore[1:]+off[:,None]
		#det_rect1 = utils.ang2rect(det_pos, zenith=False) # slow
		# Not sure defining an ang2rect in array_ops is worth it... It's ugly
		# (angle convention hard-coded) and only gives a factor 2 speedup on a laptop.
		det_rect = array_ops.ang2rect(np.ascontiguousarray(det_pos.T)).T
		cdist = np.sum(obj_rect*det_rect,0)
		# Cut samples above horizon that are too close
		bad  = (cdist > cmargin) & above_horizon
		cuts.append(sampcut.from_mask(bad))
	res = sampcut.stack(cuts)
	return res
Example #20
    only = [int(word) for word in args.only.split(",")] if args.only else []

    # Should we use distributed maps?
    npix = shape[-2] * shape[-1]
    use_dmap = npix > 5e7

    utils.mkdir(root + "log")
    logfile = root + "log/log%03d.txt" % comm.rank
    log_level = log.verbosity2level(config.get("verbosity"))
    L = log.init(level=log_level, file=logfile, rank=comm.rank)

    filedb.init()
    db = filedb.scans.select(args.sel)
    ids = db.ids
    mjd = utils.ctime2mjd(db.data["t"])
    chunks = utils.find_equal_groups(mjd // args.dt)
    chunks = [np.sort(chunk) for chunk in chunks]
    chunks = [chunks[i] for i in np.argsort([c[0] for c in chunks])]
    corr_pos = planet9.choose_corr_points(shape, wcs,
                                          args.corr_spacing * utils.degree)

    if args.inject:
        inject_params = np.loadtxt(args.inject,
                                   ndmin=2)  # [:,{ra0,dec0,R,vx,vy,flux}]

    asteroids = planet9.get_asteroids(args.asteroid_file, args.asteroid_list)

    # How to parallelize? Could do it over chunks. Usually there will be more chunks than
    # mpi tasks. But there will still be many tods per chunk too (about 6 tods per hour
    # and 72 hours per chunk gives 432 tods per chunk). That's quite a bit for one mpi
Example #21
        return res


srcs = load_srcs(args.srcs)

# Find out which sources are hit by each tod
db = filedb.scans.select(filedb.scans[args.sel])
tod_srcs = {}
for sid, src in enumerate(srcs):
    if args.src is not None and sid != args.src: continue
    if src.type == "planet":
        # This is a bit hacky, but sometimes the "t" member is unavailable
        # in the database. This also ignores the planet movement during the
        # TOD. We will take that into account in the final step in the mapping, though.
        t = np.char.partition(db.ids, ".")[:, 0].astype(float) + 300
        ra, dec = ephemeris.ephem_pos(src.name, utils.ctime2mjd(t), dt=0)[:2]
    else:
        ra, dec = src.ra, src.dec
    points = np.array([ra, dec])
    polys = db.data["bounds"] * utils.degree
    polys[0] = utils.rewind(polys[0], points[0])
    polys[0] = utils.rewind(polys[0], polys[0, 0])
    inside = utils.point_in_polygon(points.T, polys.T)
    dists = utils.poly_edge_dist(points.T, polys.T)
    dists = np.where(inside, 0, dists)
    hit = np.where(dists < args.hit_tol * utils.degree)[0]
    for id in db.ids[hit]:
        if not id in tod_srcs: tod_srcs[id] = []
        tod_srcs[id].append(sid)

# Prune those that are done
Example #22
		res.type = "fixed"
		return res

srcs = load_srcs(args.srcs)

# Find out which sources are hit by each tod
db = filedb.scans.select(filedb.scans[args.sel])
tod_srcs = {}
for sid, src in enumerate(srcs):
	if args.src is not None and sid != args.src: continue
	if src.type == "planet":
		# This is a bit hacky, but sometimes the "t" member is unavailable
		# in the database. This also ignores the planet movement during the
		# TOD. We will take that into account in the final step in the mapping, though.
		t = np.char.partition(db.ids,".")[:,0].astype(float)+300
		ra, dec = ephemeris.ephem_pos(src.name, utils.ctime2mjd(t), dt=0)[:2]
	else: ra, dec = src.ra, src.dec
	points = np.array([ra, dec])
	polys  = db.data["bounds"]*utils.degree
	polys[0] = utils.rewind(polys[0], points[0])
	polys[0] = utils.rewind(polys[0], polys[0,0])
	inside   = utils.point_in_polygon(points.T, polys.T)
	dists    = utils.poly_edge_dist(points.T, polys.T)
	dists    = np.where(inside, 0, dists)
	hit      = np.where(dists < args.hit_tol*utils.degree)[0]
	for id in db.ids[hit]:
		if not id in tod_srcs: tod_srcs[id] = []
		tod_srcs[id].append(sid)

# Prune those that are done
if args.cont:
Example #23
		try:
			d = actdata.read(entry, ["point_offsets","boresight","site","array_info"])
			d = actdata.calibrate(d, exclude=["autocut"])
		except (errors.DataMissing, AttributeError) as e:
			print("Skipping %s (%s)" % (id, e))
			continue
		# Build a projector between samples and mask. This
		# requires us to massage d into scan form. It's getting
		# annoying that scan and data objects aren't compatible.
		bore = d.boresight.T.copy()
		bore[:,0] -= bore[0,0]
		scan = enscan.Scan(
			boresight = bore,
			offsets = np.concatenate([np.zeros(d.ndet)[:,None],d.point_offset],1),
			comps = np.concatenate([np.ones(d.ndet)[:,None],np.zeros((d.ndet,3))],1),
			mjd0 = utils.ctime2mjd(d.boresight[0,0]),
			sys = "hor", site = d.site)
		scan.hwp_phase = np.zeros([len(bore),2])
		bore_box = np.array([np.min(d.boresight,1),np.max(d.boresight,1)])
		bore_corners = utils.box2corners(bore_box)
		scan.entry = d.entry
		# Is the source above the horizon? If not, it doesn't matter how close
		# it is.
		mjd = utils.ctime2mjd(utils.mjd2ctime(scan.mjd0)+scan.boresight[::100,0])
		try:
			object_pos = coordinates.interpol_pos("cel","hor", args.objname, mjd, site=scan.site)
		except AttributeError as e:
			print("Unexpected error in interpol_pos for %s. mid time was %.5f. message: %s. skipping" % (id, mjd[len(mjd)//2], e))
			continue
		visible = np.any(object_pos[1] >= -margin)
		if not visible:
Example #24
def build_tod_stats(entry, Naz=8, Nt=2):
    """Collect summary information for the tod in the given entry, returning
	it as a bunch. If some information can't be found, then those fields will
	be set to a placeholder value (usually NaN), but the fields will still all
	be present."""
    # At the very least we need the pointing, so no try catch around this
    d = actdata.read(entry, ["boresight", "site"])
    d += actdata.read_point_offsets(entry, no_correction=True)
    d = actdata.calibrate(d, exclude=["autocut"])

    # Get the array center and radius
    acenter = np.mean(d.point_offset, 0)
    arad = np.mean((d.point_offset - acenter)**2, 0)**0.5

    t, baz, bel = 0.5 * (np.min(d.boresight, 1) + np.max(d.boresight, 1))
    #t, baz, bel = np.mean(d.boresight,1)
    az = baz + acenter[0]
    el = bel + acenter[1]
    dur, waz, wel = np.max(d.boresight, 1) - np.min(d.boresight, 1)
    if waz > 180 * utils.degree:
        print("bad waz %8.3f for %s" % (waz / utils.degree, entry.id))
    mjd = utils.ctime2mjd(t)
    hour = t / 3600. % 24
    day = hour >= day_range[0] and hour < day_range[1]
    night = not day
    jon = (t - jon_ref) / (3600 * 24)

    ra, dec = coordinates.transform(tsys, "cel", [az, el], mjd, site=d.site)
    # Get the array center bounds on the sky, assuming constant elevation
    ts = utils.ctime2mjd(t + dur / 2 * np.linspace(-1, 1, Nt))
    azs = az + waz / 2 * np.linspace(-1, 1, Naz)
    E1 = coordinates.transform(tsys,
                               "cel", [azs, [el] * Naz],
                               time=[ts[0]] * Naz,
                               site=d.site)[:, 1:]
    E2 = coordinates.transform(tsys,
                               "cel", [[azs[-1]] * Nt, [el] * Nt],
                               time=ts,
                               site=d.site)[:, 1:]
    E3 = coordinates.transform(tsys,
                               "cel", [azs[::-1], [el] * Naz],
                               time=[ts[-1]] * Naz,
                               site=d.site)[:, 1:]
    E4 = coordinates.transform(tsys,
                               "cel", [[azs[0]] * Nt, [el] * Nt],
                               time=ts[::-1],
                               site=d.site)[:, 1:]
    bounds = np.concatenate([E1, E2, E3, E4], 1)
    bounds[0] = utils.rewind(bounds[0])
    ## Grow bounds by array radius
    #bmid = np.mean(bounds,1)
    #for i in range(2):
    #	bounds[i,bounds[i]<bmid[i]] -= arad[i]
    #	bounds[i,bounds[i]>bmid[i]] += arad[i]
    tot_id = entry.id + (":" + entry.tag if entry.tag else "")
    res = bunch.Bunch(id=tot_id,
                      nsamp=d.nsamp,
                      t=t,
                      mjd=mjd,
                      jon=jon,
                      hour=hour,
                      day=day,
                      night=night,
                      dur=dur,
                      az=az / utils.degree,
                      el=el / utils.degree,
                      baz=baz / utils.degree,
                      bel=bel / utils.degree,
                      waz=waz / utils.degree,
                      wel=wel / utils.degree,
                      ra=ra / utils.degree,
                      dec=dec / utils.degree,
                      bounds=bounds / utils.degree)

    if "gseason" in entry:
        res[entry.gseason] = True

    # Planets
    for obj in [
            "Sun", "Moon", "Mercury", "Venus", "Mars", "Jupiter", "Saturn",
            "Uranus", "Neptune"
    ]:
        res[obj] = coordinates.ephem_pos(obj,
                                         utils.ctime2mjd(t)) / utils.degree

    # Get our weather information, if available
    try:
        d += actdata.read(entry, ["apex"])
        d = actdata.calibrate_apex(d)
        res["pwv"] = d.apex.pwv
        res["wx"] = d.apex.wind[0]
        res["wy"] = d.apex.wind[1]
        res["wind_speed"] = d.apex.wind_speed
        res["T"] = d.apex.temperature
    except errors.DataMissing:
        res["pwv"] = np.NaN
        res["wx"] = np.NaN
        res["wy"] = np.NaN
        res["wind_speed"] = np.NaN
        res["T"] = np.NaN

    # Try to get our cut info, so that we can select on
    # number of detectors and cut fraction
    try:
        npre = d.nsamp * d.ndet
        d += actdata.read(entry, ["cut"])
        res["ndet"] = d.ndet
        res["cut"] = 1 - d.nsamp * d.ndet / float(npre)
    except errors.DataMissing:
        res["ndet"] = 0
        res["cut"] = 1.0

    # Try to get hwp info
    res["hwp"] = False
    res["hwp_name"] = "none"
    try:
        epochs = actdata.try_read(files.read_hwp_epochs, "hwp_epochs",
                                  entry.hwp_epochs)
        t, _, ar = entry.id.split(".")
        t = float(t)
        if ar in epochs:
            for epoch in epochs[ar]:
                if t >= epoch[0] and t < epoch[1]:
                    res["hwp"] = True
                    res["hwp_name"] = epoch[2]
    except errors.DataMissing:
        pass

    return res
Example #25
 def __call__(self, pos):
     mjd = utils.ctime2mjd(self.data["t"])
     hor = coordinates.transform("cel", "hor", pos * utils.degree,
                                 mjd) / utils.degree
     return hor
Example #26
def get_pix_ranges(shape, wcs, horbox, daz, nt=4, ndet=1.0, site=None):
    """An appropriate daz for this function is about 1 degree"""
    # For each row in the map we want to know the hit density for that row,
    # as well as its start and end. In the original function we got one
    # sample per row by oversampling and then using unique. This is unreliable,
    # and also results in quantized steps in the depth. We can instead
    # do a coarse equispaced az -> ra,dec -> y,x. We can then interpolate
    # this to get exactly one sample per y. To get the density properly,
    # we just need dy/dt = dy/daz * daz/dt, where we assume daz/dt is constant.
    # We get dy/daz from the coarse stuff, and interpolate that too, which gives
    # the density per row.
    (t1, t2), (az1, az2), el = horbox[:, 0], horbox[:, 1], np.mean(horbox[:, 2])
    nphi = np.abs(utils.nint(360 / wcs.wcs.cdelt[0]))
    # First produce the coarse single scan
    naz = utils.nint(np.abs(az2 - az1) / daz)
    if naz <= 1: return None, None
    ahor = np.zeros([3, naz])
    ahor[0] = utils.ctime2mjd(t1)
    ahor[1] = np.linspace(az1, az2, naz)
    ahor[2] = el
    acel = coordinates.transform("hor",
                                 "cel",
                                 ahor[1:],
                                 time=ahor[0],
                                 site=site)
    ylow, x1low = fixx(enmap.sky2pix(shape, wcs, acel[::-1]), nphi)
    if ylow[1] < ylow[0]:
        ylow, x1low = ylow[::-1], x1low[::-1]
    # Find dy/daz for these points
    glow = np.gradient(ylow) * (naz - 1) / (az2 - az1)
    # Now interpolate to full resolution
    y = np.arange(utils.nint(ylow[0]), utils.nint(ylow[-1]) + 1)
    if len(y) == 0:
        print "Why is y empty?", naz, ylow[0], ylow[1]
        return None, None
    x1 = np.interp(y, ylow, x1low)
    grad = np.interp(y, ylow, glow)
    # Now we just need the width of the rows, x2, which comes
    # from the time drift
    thor = np.zeros([3, nt])
    thor[0] = utils.ctime2mjd(np.linspace(t1, t2, nt))
    thor[1] = az1
    thor[2] = el
    tcel = coordinates.transform("hor",
                                 "cel",
                                 thor[1:],
                                 time=thor[0],
                                 site=site)
    _, tx = utils.nint(fixx(enmap.sky2pix(shape, wcs, tcel[::-1]), nphi))
    x2 = x1 + tx[-1] - tx[0]
    x1, x2 = np.minimum(x1, x2), np.maximum(x1, x2)
    pix_ranges = utils.nint(
        np.concatenate([y[:, None], x1[:, None], x2[:, None]], 1))
    # Weight per pixel. We want this to be in units of seconds of
    # observing time per pixel if ndet=1. We know the total number of pixels
    # hit (ny*nx) and the total time (t2-t1), and we know the relative
    # weight per row (1/grad), so we can just normalize things
    ny, nx = len(y), x2[0] - x1[0]
    npix = ny * nx
    if npix == 0 or np.any(grad <= 0):
        return pix_ranges, grad * 0
    else:
        weights = 1 / grad
        weights *= (t2 - t1) / (np.sum(weights) *
                                nx) * ndet  # *nx because weight is per row
        return pix_ranges, weights
Example #27
            entry,
            fields=["gain", "cut", "point_offsets", "boresight", "site"])
        d = data.calibrate(d)
    except (zipfile.BadZipfile, errors.DataMissing) as e:
        print "#%s error: %s" % (id, e.message)
        #print "%s %8.3f %7.3f %8.3f %7.3f %s" % (id, np.nan, np.nan, np.nan, np.nan, "nodata")
        continue

    hour = info[ind].fields.hour
    tags = sorted(list(set(info[ind].tags) - set([id])))

    # Get input pointing
    bore = d.boresight[:, ::sstep]
    offs = d.point_offset.T[:, ::dstep]
    ipoint = np.zeros(bore.shape + offs.shape[1:])
    ipoint[0] = utils.ctime2mjd(bore[0, :, None])
    ipoint[1:] = bore[1:, :, None] + offs[:, None, :]
    ipoint = ipoint.reshape(3, -1)
    iref = np.mean(ipoint[1:], 1)

    # Transform to equ
    opoint = coordinates.transform("hor",
                                   "equ",
                                   ipoint[1:],
                                   time=ipoint[0],
                                   site=d.site)
    oref = np.mean(opoint, 1)

    print "%s %4.1f %8.3f %7.3f %8.3f %7.3f" % (
        id, hour, iref[0] / utils.degree, iref[1] / utils.degree,
        oref[0] / utils.degree, oref[1] / utils.degree),
Example #28
def build_workspace_geometry(wid, bore, point_offset, global_wcs, site=None, tagger=None,
		padding=100, max_ra_width=2.5*utils.degree, ncomp=3, dtype=np.float64):
	if tagger is None: tagger = WorkspaceTagger()
	if isinstance(wid, str): wid = tagger.analyze(wid)
	if not valid_az_range(wid[0], wid[1]): raise WorkspaceError("Azimuth crosses north/south")

	trans = TransformPos2Pospix(global_wcs, site=site)
	az1, az2, el, ra1 = wid
	# Extract the workspace definition of the tag name
	ra_ref = ra1 + tagger.ra_step/2
	# We want ra(dec) for up- and down-sweeps for the middle of
	# the workspace. First find a t that will result in a sweep
	# that crosses through the middle of the workspace.
	foc_offset = np.mean(point_offset,0)
	t0   = utils.ctime2mjd(bore[0,0])
	t_ref = find_t_giving_ra(az1+foc_offset[0], el+foc_offset[1], ra_ref, site=site, t0=t0)
	# We also need the corners of the full workspace area.
	t1   = find_t_giving_ra(az1+foc_offset[0], el+foc_offset[1], ra1, site=site, t0=t0)
	t2   = find_t_giving_ra(az1+foc_offset[0], el+foc_offset[1], ra1+tagger.ra_step+max_ra_width, site=site, t0=t0)
	#print "t1", t1, "t2", t2
	#print "az1", az1/utils.degree, "az2", az2/utils.degree
	#print "ra", ra1/utils.degree, (ra1+tagger.ra_step+max_ra_width)/utils.degree
	bore_box_hor = np.array([[t1,az1,el],[t2,az2,el]])
	bore_corners_hor = utils.box2corners(bore_box_hor)
	work_corners_hor = bore_corners_hor[None,:,:] + (point_offset[:,[0,0,1]] * [0,1,1])[:,None,:]
	work_corners_hor = work_corners_hor.T.reshape(3,-1)
	work_corners     = trans(work_corners_hor[1:], time=work_corners_hor[0])
	ixcorn, iycorn   = np.round(work_corners[2:]).astype(int)
	iybox = np.array([np.min(iycorn)-padding,np.max(iycorn)+1+padding])
	# Generate an up and down sweep
	srate  = get_srate(bore[0])
	period = pmat.get_scan_period(bore[1], srate)
	dmjd   = period/2./24/3600
	xshifts = []
	yshifts = []
	work_dazs = []
	nwxs, nwys = [], []
	for si, (afrom,ato) in enumerate([[az1,az2],[az2,az1]]):
		sweep = generate_sweep_by_dec_pix(
				[[ t_ref,     afrom+foc_offset[0],el+foc_offset[1]],
					[t_ref+dmjd,ato  +foc_offset[0],el+foc_offset[1]]
				],iybox,trans)
		# Get the shift in ra pix per dec pix. At this point,
		# the shifts are just relative to the lowest-dec pixel
		xshift = np.round(sweep[5]-sweep[5,0,None]).astype(int)
		# Get the shift in dec pix per dec pix. These tell us where
		# each working pixel starts as a function of normal dec pixel.
		# For example [0,1,3,6] would mean that the work to normal pixel
		# mapping is [0,1,1,2,2,2]. This is done to make dwdec/daz approximately
		# constant
		daz = np.abs(sweep[1,1:]-sweep[1,:-1])
		daz_ratio = np.maximum(1,daz/np.min(daz[1:-1]))
		yshift  = np.round(utils.cumsum(daz_ratio, endpoint=True)).astype(int)
		yshift -= yshift[0]
		# Now that we have the x and y mapping, we can determine the
		# bounds of our workspace by transforming the corners of our
		# input coordinates.
		#print "iyc", iycorn-iybox[0]
		#print "ixc", ixcorn
		#for i in np.argsort(iycorn):
		#	print "A %6d %6d" % (iycorn[i], ixcorn[i])
		#print "min(ixc)", np.min(ixcorn)
		#print "max(ixc)", np.max(ixcorn)
		#print "xshift", xshift[iycorn-iybox[0]]
		wycorn = ixcorn - xshift[iycorn-iybox[0]]
		#print "wycorn", wycorn
		#print "min(wyc)", np.min(wycorn)
		#print "max(wyc)", np.max(wycorn)
		# Modify the shifts so that any scan in this workspace is always transformed
		# to valid positions. wx and wy are transposed relative to x and y.
		# Padding is needed because of the rounding involved in recovering the
		# az and el from the wid.
		xshift += np.min(wycorn)
		xshift -= padding
		wycorn2= ixcorn - xshift[iycorn-iybox[0]]
		#print "wycorn2", wycorn2
		#print "min(wyc2)", np.min(wycorn2)
		#print "max(wyc2)", np.max(wycorn2)
		#sys.stdout.flush()
		nwy = np.max(wycorn)-np.min(wycorn)+1 + 2*padding
		nwx = yshift[-1]+1
		# Get the average azimuth spacing in wx
		work_daz = (sweep[1,-1]-sweep[1,0])/(yshift[-1]-yshift[0])
		print(work_daz/utils.degree)
		# And collect so we can pass them to the Workspace constructor later
		xshifts.append(xshift)
		yshifts.append(yshift)
		nwxs.append(nwx)
		nwys.append(nwy)
		work_dazs.append(work_daz)
	# The shifts from each sweep are guaranteed to have the same length,
	# since they are based on the same iybox.
	nwx = np.max(nwxs)
	# To translate the noise properties, we need a mapping from the x and t
	# fourier spaces. For this we need the azimuth scanning speed.
	scan_speed = 2*(az2-az1)/period
	work_daz  = np.mean(work_dazs)
	wgeo = WorkspaceGeometry(nwys, nwx, xshifts, yshifts, iybox[0], scan_speed, work_daz, global_wcs, ncomp=ncomp, dtype=dtype)
	return wgeo
Example #29
    def __init__(self, entry, subdets=None, d=None, verbose=False, dark=False):
        self.fields = [
            "gain", "mce_filter", "tags", "polangle", "tconst", "hwp", "cut",
            "point_offsets", "boresight", "site", "tod_shape", "array_info",
            "beam", "pointsrcs", "buddies"
        ]
        if dark: self.fields += ["dark"]
        if config.get("noise_model") == "file":
            self.fields += ["noise"]
        else:
            if config.get("cut_noise_whiteness"):
                self.fields += ["noise_cut"]
            if config.get("cut_spikes"):
                self.fields += ["spikes"]
        if d is None:
            d = actdata.read(entry, self.fields, verbose=verbose)
            d = actdata.calibrate(d, verbose=verbose)
            if subdets is not None:
                d.restrict(dets=d.dets[subdets])
        if d.ndet == 0 or d.nsamp == 0:
            raise errors.DataMissing("No data in scan")
        ndet = d.ndet
        # Necessary components for Scan interface
        self.mjd0 = utils.ctime2mjd(d.boresight[0, 0])
        self.boresight = np.ascontiguousarray(
            d.boresight.T.copy())  # [nsamp,{t,az,el}]
        self.boresight[:, 0] -= self.boresight[0, 0]
        self.offsets = np.zeros([ndet, self.boresight.shape[1]])
        self.offsets[:, 1:] = d.point_offset
        self.cut = d.cut.copy()
        self.cut_noiseest = d.cut_noiseest.copy()
        self.comps = np.zeros([ndet, 4])
        self.beam = d.beam
        self.pointsrcs = d.pointsrcs
        self.comps = d.det_comps
        self.hwp = d.hwp
        self.hwp_phase = d.hwp_phase
        self.dets = d.dets
        self.dgrid = (d.array_info.nrow, d.array_info.ncol)
        self.array_info = d.array_info
        self.sys = config.get("tod_sys",
                              entry.tod_sys if "tod_sys" in entry else None)
        self.site = d.site
        self.speed = d.speed
        if "noise" in d:
            self.noise = d.noise
        else:
            spikes = d.spikes[:2].T if "spikes" in d else None
            self.noise = nmat_measure.NmatBuildDelayed(
                model=config.get("noise_model"),
                spikes=spikes,
                cut=self.cut_noiseest)
        if "dark_tod" in d:
            self.dark_tod = d.dark_tod
        if "dark_cut" in d:
            self.dark_cut = d.dark_cut
        if "buddy_comps" in d:
            # Expand buddy_offs to {dt,daz,ddec}
            self.buddy_comps = d.buddy_comps
            self.buddy_offs = np.concatenate(
                [d.buddy_offs[..., :1] * 0, d.buddy_offs], -1)
        self.autocut = d.autocut if "autocut" in d else []
        # Implementation details. d is our DataSet, which we keep around in
        # because we need it to read tod consistently later. It will *not*
        # take part in any sample slicing operations, as that might make the
        # delayed tod read inconsistent with the rest. It could take part in
        # detector slicing as long as calibrate_tod operates on each detector
        # independently. This is true now, but would not be so if we did stuff
        # like common mode subtraction there. On the other hand, not doing this
        # would prevent slicing before reading from giving any speedup or memory
        # savings. I don't think allowing this should be a serious problem.
        self.d = d
        self.entry = entry

        def fmt_id(entry):
            if isinstance(entry, list):
                return "+".join([fmt_id(e) for e in entry])
            else:
                if entry.tag: return entry.id + ":" + entry.tag
                else: return entry.id

        self.id = fmt_id(entry)
        self.sampslices = []
        self.mapping = None

        # FIXME: debug test
        if config.get("dummy_cut") > 0:
            nmax = int(config.get("dummy_cut_len"))
            # Power law between 1 and nmax, with slope -1.
            # C(w) = log(w)/log(nmax)
            # P(w) = w**-1/log(nmax)
            # w(C) = n**C
            # Mean: (nmax-1)/log(nmax)
            nmean = (nmax - 1) / np.log(nmax)
            ncut = int(self.nsamp * config.get("dummy_cut") / nmean)
            cut_ranges = np.zeros([self.ndet, ncut, 2], int)
            w = (nmax**np.random.uniform(0, 1, size=[self.ndet,
                                                     ncut])).astype(int)
            np.clip(w, 1, nmax, out=w)
            cut_ranges[:, :, 0] = np.random.uniform(0,
                                                    self.nsamp,
                                                    size=[self.ndet,
                                                          ncut]).astype(int)
            cut_ranges[:, :, 0] = np.sort(cut_ranges[:, :, 0], 1)
            cut_ranges[:, :, 1] = cut_ranges[:, :, 0] + w
            np.clip(cut_ranges[:, :, 1], 0, self.nsamp, out=cut_ranges[:, :, 1])
            cut_dummy = sampcut.from_list(cut_ranges, self.nsamp)
            print(np.mean(w), nmean, nmax, ncut)
            print("cut fraction before", float(self.cut.sum()) / self.cut.size)
            self.cut *= cut_dummy
            print("cut fraction after", float(self.cut.sum()) / self.cut.size)
Example #30
	def __init__(self, entry, subdets=None, d=None, verbose=False, dark=False):
		self.fields = ["gain","mce_filter","tags","polangle","tconst","hwp","cut","point_offsets","boresight","site","tod_shape","array_info","beam","pointsrcs", "buddies"]
		if dark: self.fields += ["dark"]
		if config.get("noise_model") == "file":
			self.fields += ["noise"]
		else:
			if config.get("cut_noise_whiteness"):
				self.fields += ["noise_cut"]
			if config.get("cut_spikes"):
				self.fields += ["spikes"]
		if d is None:
			d = actdata.read(entry, self.fields, verbose=verbose)
			d = actdata.calibrate(d, verbose=verbose)
			if subdets is not None:
				d.restrict(dets=d.dets[subdets])
		if d.ndet == 0 or d.nsamp == 0: raise errors.DataMissing("No data in scan")
		ndet = d.ndet
		# Necessary components for Scan interface
		self.mjd0      = utils.ctime2mjd(d.boresight[0,0])
		self.boresight = np.ascontiguousarray(d.boresight.T.copy()) # [nsamp,{t,az,el}]
		self.boresight[:,0] -= self.boresight[0,0]
		self.offsets   = np.zeros([ndet,self.boresight.shape[1]])
		self.offsets[:,1:] = d.point_offset
		self.cut       = d.cut.copy()
		self.cut_noiseest = d.cut_noiseest.copy()
		self.comps     = np.zeros([ndet,4])
		self.beam      = d.beam
		self.pointsrcs = d.pointsrcs
		self.comps     = d.det_comps
		self.hwp = d.hwp
		self.hwp_phase = d.hwp_phase
		self.dets  = d.dets
		self.dgrid = (d.array_info.nrow, d.array_info.ncol)
		self.array_info = d.array_info
		self.sys = config.get("tod_sys")
		self.site = d.site
		self.speed = d.speed
		if "noise" in d:
			self.noise = d.noise
		else:
			spikes = d.spikes[:2].T if "spikes" in d else None
			self.noise = nmat_measure.NmatBuildDelayed(model = config.get("noise_model"), spikes=spikes,
					cut=self.cut_noiseest)
		if "dark_tod" in d:
			self.dark_tod = d.dark_tod
		if "dark_cut" in d:
			self.dark_cut = d.dark_cut
		if "buddy_comps" in d:
			# Expand buddy_offs to {dt,daz,ddec}
			self.buddy_comps = d.buddy_comps
			self.buddy_offs  = np.concatenate([d.buddy_offs[...,:1]*0,d.buddy_offs],-1)
		self.autocut = d.autocut if "autocut" in d else []
		# Implementation details. d is our DataSet, which we keep around in
		# because we need it to read tod consistently later. It will *not*
		# take part in any sample slicing operations, as that might make the
		# delayed tod read inconsistent with the rest. It could take part in
		# detector slicing as long as calibrate_tod operates on each detector
		# independently. This is true now, but would not be so if we did stuff
		# like common mode subtraction there. On the other hand, not doing this
		# would prevent slicing before reading from giving any speedup or memory
		# savings. I don't think allowing this should be a serious problem.
		self.d = d
		self.entry = entry
		def fmt_id(entry):
			if isinstance(entry, list): return "+".join([fmt_id(e) for e in entry])
			else:
				if entry.tag: return entry.id + ":" + entry.tag
				else: return entry.id
		self.id = fmt_id(entry)
		self.sampslices = []
		self.mapping = None

		# FIXME: debug test
		if config.get("dummy_cut") > 0:
			nmax  = int(config.get("dummy_cut_len"))
			# Power law between 1 and nmax, with slope -1.
			# C(w) = log(w)/log(nmax)
			# P(w) = w**-1/log(nmax)
			# w(C) = n**C
			# Mean: (nmax-1)/log(nmax)
			nmean = (nmax-1)/np.log(nmax)
			ncut = int(self.nsamp * config.get("dummy_cut") / nmean)
			cut_ranges = np.zeros([self.ndet, ncut, 2],int)
			w = (nmax**np.random.uniform(0, 1, size=[self.ndet, ncut])).astype(int)
			np.clip(w, 1, nmax, out=w)
			cut_ranges[:,:,0] = np.random.uniform(0, self.nsamp, size=[self.ndet, ncut]).astype(int)
			cut_ranges[:,:,0] = np.sort(cut_ranges[:,:,0],1)
			cut_ranges[:,:,1] = cut_ranges[:,:,0] + w
			np.clip(cut_ranges[:,:,1], 0, self.nsamp, out=cut_ranges[:,:,1])
			cut_dummy = sampcut.from_list(cut_ranges, self.nsamp)
			print(np.mean(w), nmean, nmax, ncut)
			print("cut fraction before", float(self.cut.sum())/self.cut.size)
			self.cut *= cut_dummy
			print("cut fraction after", float(self.cut.sum())/self.cut.size)
Example #31
        # Get the scan el and az bounds
        az1 = np.min(d.boresight[1])
        az2 = np.max(d.boresight[1])
        el = np.mean(d.boresight[2])

        if not valid_az_range(az1, az2):
            L.debug("Skipped %s (%s)" % (id, "Azimuth crosses poles"))
            continue

        # Then get the ra block we live in. This is set by the lowest RA-
        # detector at the lowest az of the scan at the earliest time in
        # the scan. So transform all the detectors.
        ipoint = np.zeros([2, d.ndet])
        ipoint[0] = az1 + d.point_offset[:, 0]
        ipoint[1] = el + d.point_offset[:, 1]
        mjd = utils.ctime2mjd(d.boresight[0, 0])
        opoint = coordinates.transform("hor",
                                       "cel",
                                       ipoint,
                                       time=mjd,
                                       site=d.site)
        ra1 = np.min(opoint[0])
        wid = tagger.build(az1, az2, el, ra1)
        print "%s %s" % (id, wid)
        sys.stdout.flush()

elif command == "build":
    # Given a list of id tag, loop over tags, and project tods on
    # a work space per tag.
    parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
    parser.add_argument("command")
Example #32
                      ["point_offsets", "boresight", "site", "array_info"])
     d = actdata.calibrate(d, exclude=["autocut"])
 except (errors.DataMissing, AttributeError) as e:
     print "Skipping %s (%s)" % (id, e.message)
     continue
 # Build a projector between samples and mask. This
 # requires us to massage d into scan form. It's getting
 # annoying that scan and data objects aren't compatible.
 bore = d.boresight.T.copy()
 bore[:, 0] -= bore[0, 0]
 scan = enscan.Scan(
     boresight=bore,
     offsets=np.concatenate([np.zeros(d.ndet)[:, None], d.point_offset], 1),
     comps=np.concatenate([np.ones(d.ndet)[:, None],
                           np.zeros((d.ndet, 3))], 1),
     mjd0=utils.ctime2mjd(d.boresight[0, 0]),
     sys="hor",
     site=d.site)
 scan.hwp_phase = np.zeros([len(bore), 2])
 bore_box = np.array([np.min(d.boresight, 1), np.max(d.boresight, 1)])
 bore_corners = utils.box2corners(bore_box)
 scan.entry = d.entry
 # Is the source above the horizon? If not, it doesn't matter how close
 # it is.
 mjd = utils.ctime2mjd(
     utils.mjd2ctime(scan.mjd0) + scan.boresight[::100, 0])
 object_pos = coordinates.interpol_pos("cel",
                                       "hor",
                                       args.objname,
                                       mjd,
                                       site=scan.site)
Example #33
def build_workspace_geometry(wid,
                             bore,
                             point_offset,
                             global_wcs,
                             site=None,
                             tagger=None,
                             padding=100,
                             max_ra_width=2.5 * utils.degree,
                             ncomp=3,
                             dtype=np.float64):
    if tagger is None: tagger = WorkspaceTagger()
    if isinstance(wid, str): wid = tagger.analyze(wid)
    if not valid_az_range(wid[0], wid[1]):
        raise WorkspaceError("Azimuth crosses north/south")

    trans = TransformPos2Pospix(global_wcs, site=site)
    az1, az2, el, ra1 = wid
    # Extract the workspace definition of the tag name
    ra_ref = ra1 + tagger.ra_step / 2
    # We want ra(dec) for up- and down-sweeps for the middle of
    # the workspace. First find a t that will result in a sweep
    # that crosses through the middle of the workspace.
    foc_offset = np.mean(point_offset, 0)
    t0 = utils.ctime2mjd(bore[0, 0])
    t_ref = find_t_giving_ra(az1 + foc_offset[0],
                             el + foc_offset[1],
                             ra_ref,
                             site=site,
                             t0=t0)
    # We also need the corners of the full workspace area.
    t1 = find_t_giving_ra(az1 + foc_offset[0],
                          el + foc_offset[1],
                          ra1,
                          site=site,
                          t0=t0)
    t2 = find_t_giving_ra(az1 + foc_offset[0],
                          el + foc_offset[1],
                          ra1 + tagger.ra_step + max_ra_width,
                          site=site,
                          t0=t0)
    #print "t1", t1, "t2", t2
    #print "az1", az1/utils.degree, "az2", az2/utils.degree
    #print "ra", ra1/utils.degree, (ra1+tagger.ra_step+max_ra_width)/utils.degree
    bore_box_hor = np.array([[t1, az1, el], [t2, az2, el]])
    bore_corners_hor = utils.box2corners(bore_box_hor)
    work_corners_hor = bore_corners_hor[None, :, :] + (
        point_offset[:, [0, 0, 1]] * [0, 1, 1])[:, None, :]
    work_corners_hor = work_corners_hor.T.reshape(3, -1)
    work_corners = trans(work_corners_hor[1:], time=work_corners_hor[0])
    ixcorn, iycorn = np.round(work_corners[2:]).astype(int)
    iybox = np.array([np.min(iycorn) - padding, np.max(iycorn) + 1 + padding])
    # Generate an up and down sweep
    srate = get_srate(bore[0])
    period = pmat.get_scan_period(bore[1], srate)
    dmjd = period / 2. / 24 / 3600
    xshifts = []
    yshifts = []
    work_dazs = []
    nwxs, nwys = [], []
    for si, (afrom, ato) in enumerate([[az1, az2], [az2, az1]]):
        sweep = generate_sweep_by_dec_pix(
            [[t_ref, afrom + foc_offset[0], el + foc_offset[1]],
             [t_ref + dmjd, ato + foc_offset[0], el + foc_offset[1]]], iybox,
            trans)
        # Get the shift in ra pix per dec pix. At this point,
        # the shifts are just relative to the lowest-dec pixel
        xshift = np.round(sweep[5] - sweep[5, 0, None]).astype(int)
        # Get the shift in dec pix per dec pix. These tell us where
        # each working pixel starts as a function of normal dec pixel.
        # For example [0,1,3,6] would mean that the work-to-normal pixel
        # mapping is [0,1,1,2,2,2] (see the standalone sketch after this
        # function). This is done to keep dwdec/daz approximately constant.
        daz = np.abs(sweep[1, 1:] - sweep[1, :-1])
        daz_ratio = np.maximum(1, daz / np.min(daz[1:-1]))
        yshift = np.round(utils.cumsum(daz_ratio, endpoint=True)).astype(int)
        yshift -= yshift[0]
        # Now that we have the x and y mapping, we can determine the
        # bounds of our workspace by transforming the corners of our
        # input coordinates.
        #print "iyc", iycorn-iybox[0]
        #print "ixc", ixcorn
        #for i in np.argsort(iycorn):
        #	print "A %6d %6d" % (iycorn[i], ixcorn[i])
        #print "min(ixc)", np.min(ixcorn)
        #print "max(ixc)", np.max(ixcorn)
        #print "xshift", xshift[iycorn-iybox[0]]
        wycorn = ixcorn - xshift[iycorn - iybox[0]]
        #print "wycorn", wycorn
        #print "min(wyc)", np.min(wycorn)
        #print "max(wyc)", np.max(wycorn)
        # Modify the shifts so that any scan in this workspace is always transformed
        # to valid positions. wx and wy are transposed relative to x and y.
        # Padding is needed because of the rounding involved in recovering the
        # az and el from the wid.
        xshift += np.min(wycorn)
        xshift -= padding
        nwy = np.max(wycorn) - np.min(wycorn) + 1 + 2 * padding
        nwx = yshift[-1] + 1
        # Get the average azimuth spacing in wx
        work_daz = (sweep[1, -1] - sweep[1, 0]) / (yshift[-1] - yshift[0])
        # And collect so we can pass them to the Workspace constructor later
        xshifts.append(xshift)
        yshifts.append(yshift)
        nwxs.append(nwx)
        nwys.append(nwy)
        work_dazs.append(work_daz)
    # The shifts from each sweep are guaranteed to have the same length,
    # since they are based on the same iybox.
    nwx = np.max(nwxs)
    # To translate the noise properties, we need a mapping between the x
    # and t Fourier spaces. For this we need the azimuth scanning speed.
    scan_speed = 2 * (az2 - az1) / period
    work_daz = np.mean(work_dazs)
    wgeo = WorkspaceGeometry(nwys,
                             nwx,
                             xshifts,
                             yshifts,
                             iybox[0],
                             scan_speed,
                             work_daz,
                             global_wcs,
                             ncomp=ncomp,
                             dtype=dtype)
    return wgeo
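
The yshift construction above is the subtle step: consecutive differences of yshift give the number of work pixels each normal dec pixel occupies. A standalone sketch of that expansion, using the [0,1,3,6] example from the comment (plain numpy, independent of the function above):

import numpy as np

yshift = np.array([0, 1, 3, 6])
counts = np.diff(yshift)                    # [1 2 3]: work pixels per dec pixel
work_to_normal = np.repeat(np.arange(len(counts)), counts)
print(work_to_normal)                       # [0 1 1 2 2 2]

The scan_speed computed at the end plays the matching role in time: one work pixel spans work_daz of azimuth, so a temporal frequency f_t in the scan corresponds to roughly f_t * work_daz / scan_speed cycles per work pixel when translating noise properties between the t and x Fourier spaces.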
Example No. 34
		# Get the scan el and az bounds
		az1 = np.min(d.boresight[1])
		az2 = np.max(d.boresight[1])
		el  = np.mean(d.boresight[2])

		if not valid_az_range(az1, az2):
			L.debug("Skipped %s (%s)" % (id, "Azimuth crosses poles"))
			continue

		# Then get the ra block we live in. This is set by the lowest-RA
		# detector at the lowest az of the scan, at the earliest time in
		# the scan, so transform all the detectors.
		ipoint = np.zeros([2, d.ndet])
		ipoint[0] = az1 + d.point_offset[:,0]
		ipoint[1] = el  + d.point_offset[:,1]
		mjd    = utils.ctime2mjd(d.boresight[0,0])
		opoint = coordinates.transform("hor","cel",ipoint,time=mjd,site=d.site)
		ra1    = np.min(opoint[0])
		wid    = tagger.build(az1,az2,el,ra1)
		print "%s %s" % (id, wid)
		sys.stdout.flush()

elif command == "build":
	# Given a list of id tags, loop over the tags, and project the tods
	# onto one workspace per tag.
	parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
	parser.add_argument("command")
	parser.add_argument("todtags")
	parser.add_argument("odir")
	args = parser.parse_args()
	filedb.init()
Example No. 35
	try:
		d = data.read(entry, fields=["gain","cut","point_offsets","boresight","site"])
		d = data.calibrate(d)
	except (zipfile.BadZipfile, errors.DataMissing) as e:
		print "#%s error: %s" % (id,e.message)
		#print "%s %8.3f %7.3f %8.3f %7.3f %s" % (id, np.nan, np.nan, np.nan, np.nan, "nodata")
		continue

	hour = info[ind].fields.hour
	tags = sorted(list(set(info[ind].tags)-set([id])))

	# Get input pointing
	bore = d.boresight[:,::sstep]
	offs = d.point_offset.T[:,::dstep]
	ipoint = np.zeros(bore.shape + offs.shape[1:])
	ipoint[0] = utils.ctime2mjd(bore[0,:,None])
	ipoint[1:]= bore[1:,:,None]+offs[:,None,:]
	ipoint = ipoint.reshape(3,-1)
	iref = np.mean(ipoint[1:],1)

	# Transform to equ
	opoint = coordinates.transform("hor","equ", ipoint[1:], time=ipoint[0], site=d.site)
	oref = np.mean(opoint,1)

	print "%s %4.1f %8.3f %7.3f %8.3f %7.3f" % (id, hour, iref[0]/utils.degree, iref[1]/utils.degree, oref[0]/utils.degree, oref[1]/utils.degree),

	# Compute position of each object, and distance to it
	orect = coordinates.ang2rect(opoint, zenith=False)
	for obj in objs:
		objpos  = coordinates.ephem_pos(obj, ipoint[0,0])
		objrect = coordinates.ang2rect(objpos, zenith=False)
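
The fragment ends right after building the unit vectors, before the distance itself is formed. A hedged sketch of the standard completion (the dot-product form and the printed column are assumptions, not the original code):

# Angular distance from every sample to the object:
# cos(dist) = orect . objrect for unit vectors.
cosd = np.sum(orect * objrect[:, None], 0)
dist = np.arccos(np.clip(cosd, -1, 1))
print " %7.3f" % (np.min(dist) / utils.degree),  # continue the row started above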
Example No. 36
 def __call__(self, name):
     mjd = utils.ctime2mjd(self.data["t"])
     mjd[~np.isfinite(mjd)] = 0
     pos = coordinates.ephem_pos(name, mjd)
     pos /= utils.degree
     return pos
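
A hedged usage sketch of this accessor (the instance name is hypothetical): given an object whose data["t"] array holds unix times, calling it with an ephemeris name returns the object's [ra, dec] track in degrees, with non-finite times mapped to mjd 0 so the ephemeris lookup cannot fail.

pos = lookup("Sun")   # hypothetical instance; [ra, dec] in degrees per time sample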
Example No. 37
        print "%s %9.3f %9.3f %9.3f %9.3f %s" % (id, np.nan, np.nan, np.nan,
                                                 np.nan, "badzip")
        continue
    if bore.shape[0] < 3 or bore.shape[1] < 1:
        print "%s %9.3f %9.3f %9.3f %9.3f %s" % (id, np.nan, np.nan, np.nan,
                                                 np.nan, "nobore")
        continue

    # Compute mean pointing in hor and equ
    t = np.median(bore[0, ::10])
    hor = np.median(bore[1:, ::10], 1)
    try:
        equ = coordinates.transform("hor",
                                    "equ",
                                    hor[None] * utils.degree,
                                    time=utils.ctime2mjd(t[None]),
                                    site=site)[0] / utils.degree
    except AttributeError:
        equ = np.array([np.nan, np.nan])
    hour = t / 3600 % 24

    bsub = bore[:, 50::100].copy()
    bsub = bsub[:, np.any(~np.isnan(bsub), 0)]
    bsub[0] = utils.ctime2mjd(bsub[0])
    bsub[1:3] = coordinates.transform("hor",
                                      "equ",
                                      bsub[1:3] * utils.degree,
                                      time=bsub[0],
                                      site=site)
    # Compute matching object
    targdb = targets.TargetDB(entry.targets)
Example No. 38
f = open(args.odir + "/fits_%03d.txt" % comm.rank, "w")

# Iterate over tods
for ind in range(comm.rank, len(ids), comm.size):
	id    = ids[ind]
	oid   = id.replace(":","_")

	# Check if we hit any of the sources. We first make sure
	# there are no angle wraps in the bounds, and then move the sources
	# to the same side of the sky (see the standalone sketch after this
	# example). The bounds are pretty approximate, so we might not
	# actually hit all of these sources.
	if bounds is not None:
		poly      = bounds[:,:,ind]*utils.degree
		poly[0]   = utils.rewind(poly[0],poly[0,0])
		# bounds are defined in celestial coordinates. Must convert srcpos for comparison
		mjd       = utils.ctime2mjd(float(id.split(".")[0]))
		srccel    = coordinates.transform(src_sys, "cel", srcpos, time=mjd)
		srccel[0] = utils.rewind(srccel[0], poly[0,0])
		poly      = pad_polygon(poly.T, poly_pad).T
		sids      = np.where(utils.point_in_polygon(srccel.T, poly.T))[0]
		sids      = sorted(list(set(sids)&allowed))
	else:
		sids = sorted(list(allowed))
	if len(sids) == 0:
		print("%s has 0 srcs: skipping" % id)
		continue
	try:
		nsrc = len(sids)
		print("%s has %d srcs: %s" % (id,nsrc,", ".join(["%d (%.1f)" % (i,a) for i,a in zip(sids,amps[sids])])))
	except TypeError as e:
		print("Weird: %s" % e)