Example #1
 def __init__(self, template, mask=None):
     zipper.SingleZipper.__init__(self, False, template.comm)
     self.template, self.mask = template, mask
     if self.mask is None:
         cum = utils.cumsum([t.size for t in self.template.tiles],
                            endpoint=True)
     else:
         cum = utils.cumsum([np.sum(m) for m in self.mask.tiles],
                            endpoint=True)
     self.n = cum[-1]
     self.bins = np.array([cum[:-1], cum[1:]]).T
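utils.cumsum itself is never shown on this page, but every example leans on it. Below is a minimal sketch of the behavior the calls here assume (an exclusive cumulative sum starting at 0, where endpoint=True appends the grand total), together with the bins construction from Example #1. This is an inference from the call sites, not the library's actual source; the later sketches on this page reuse these definitions.

import numpy as np

def cumsum(a, endpoint=False):
    # Exclusive cumulative sum: res[i] = sum(a[:i]), so it starts at 0.
    # endpoint=True appends the grand total, giving len(a)+1 boundaries.
    res = np.concatenate([[0], np.cumsum(a)])
    return res if endpoint else res[:-1]

sizes = [3, 5, 2]                        # e.g. per-tile sample counts
cum   = cumsum(sizes, endpoint=True)     # [0, 3, 8, 10]
bins  = np.array([cum[:-1], cum[1:]]).T  # [[0, 3], [3, 8], [8, 10]]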
Example #2
 def __init__(self, scan, params=None):
     params = config.get("pmat_cut_type", params)
     # Extract the cut parameters. E.g. poly:foo_secs -> [4,foo_samps]
     par = np.array(self.parse_params(params, scan.srate))
     # Meaning of cuts array: [:,{dets,offset,length,out_length,type,args..}]
     self.cuts = np.zeros([scan.cut.nrange, 5 + len(par)], dtype=np.int32)
     # Detector each cut belongs to
     self.cuts[:, 0] = np.concatenate([
         np.full(nr, i, np.int32) for i, nr in enumerate(scan.cut.nranges)
     ])
     # Start of each cut
     self.cuts[:, 1] = scan.cut.ranges[:, 0]
     # Length of each cut
     self.cuts[:, 2] = scan.cut.ranges[:, 1] - scan.cut.ranges[:, 0]
     # Set up the parameter arguments
     self.cuts[:, 5:] = par[None, :]
     assert np.all(self.cuts[:, 2] > 0), \
         "Empty cut range detected in %s" % scan.entry.id
     assert np.all(self.cuts[:, 1] >= 0) and np.all(scan.cut.ranges[:, 1] <= scan.nsamp), \
         "Out of bounds cut range detected in %s" % scan.entry.id
     if self.cuts.size > 0:
         get_core(np.float32).measure_cuts(self.cuts.T)
     self.cuts[:, 3] = utils.cumsum(self.cuts[:, 4])
     # njunk is the number of cut parameters for *this scan*
     self.njunk = np.sum(self.cuts[:, 4])
     self.params = params
     self.scan = scan
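The last few lines are the same pattern in miniature: an exclusive cumsum over the per-cut output lengths in cuts[:, 4] (presumably filled in by measure_cuts) gives each cut's offset into the junk-parameter array. A toy illustration, reusing the cumsum sketch from Example #1:

out_lens = np.array([4, 4, 6])  # hypothetical cuts[:, 4]
offsets  = cumsum(out_lens)     # [0, 4, 8] -> cuts[:, 3]
njunk    = np.sum(out_lens)     # 14 cut parameters for this scan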
Example #3
def compress_ranges(ranges, nrange, cut, nsamp):
    """Given ranges[nsrc,ndet,nmax,2], nrange[nsrc,ndet] where ranges has
	det-local numbering, return the same information in a compressed format
	ranges[nr,2], rangesets[nind], offsets[nsrc,ndet,2], where ranges still has
	per-detector ordering. It used to be in global sample ordering, but I always
	ended up converting back afterwards."""
    nsrc, ndet = nrange.shape

    # Special case: nothing is hit. We represent this as a single range
    # hitting no samples, which isn't used by any of the srcs.
    def dummy():
        ranges = np.array([[0, 0]], dtype=np.int32)
        rangesets = np.array([0], dtype=np.int32)
        offsets = np.zeros([nsrc, ndet, 2], dtype=np.int32)
        return ranges, rangesets, offsets

    if np.sum(nrange) == 0: return dummy()
    # First collapse ranges,nrange to flat ranges and indices into it
    det_ranges = []
    maps = []
    nflat = 0
    offsets = np.zeros([nsrc, ndet, 2], dtype=np.int32)
    for di in xrange(ndet):
        # Collect the sample ranges for all the sources for a given detector
        src_ranges = []
        for si in xrange(nsrc):
            # Offsets holds the indices to the first and last+1 range for each
            # source and detector. We get this simply by counting how many ranges
            # we have processed so far. After merging, these will be indices into
            # the map array instead.
            offsets[si, di, 0] = nflat
            if nrange[si, di] > 0:
                current_ranges = ranges[si, di, :nrange[si, di]]
                cutsplit_ranges = utils.range_sub(current_ranges,
                                                  cut[di].ranges)
                nflat += len(cutsplit_ranges)
                if len(cutsplit_ranges) > 0:
                    src_ranges.append(cutsplit_ranges)
            offsets[si, di, 1] = nflat
        if len(src_ranges) > 0:
            src_ranges = np.concatenate(src_ranges)
            # Merge overlapping ranges for this detector. Map maps from
            # indices into the unmerged array to indices into the merged array.
            # We merge at this step rather than at the end to avoid merging
            # samples from one detector with samples from the next.
            src_merged, map = utils.range_union(src_ranges, mapping=True)
            det_ranges.append(src_merged)
            maps.append(map)
    # Concatenate the detector ranges into one long list. Make sure
    # that we actually have some ranges left. While we did check at the
    # start, the cuts may have eliminated the ranges we started with.
    if sum([len(r) for r in det_ranges]) == 0: return dummy()
    oranges = np.concatenate(det_ranges)
    moffs = utils.cumsum([len(r) for r in det_ranges])
    map = np.concatenate([m + o for m, o in zip(maps, moffs)])
    return oranges, map, offsets
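The final re-basing is easy to miss: each per-detector map indexes into that detector's own merged ranges, so an exclusive cumsum over the merged counts (moffs) shifts them to positions in the concatenated oranges array. A toy case, reusing the cumsum sketch from Example #1:

# Detector 0: 3 unmerged ranges collapsing to 2 merged ones (map [0,0,1]);
# detector 1: 2 ranges with nothing to merge (map [0,1]).
maps  = [np.array([0, 0, 1]), np.array([0, 1])]
moffs = cumsum([2, 2])  # merged counts per detector -> [0, 2]
gmap  = np.concatenate([m + o for m, o in zip(maps, moffs)])
# gmap == [0, 0, 1, 2, 3]: indices into the concatenated merged ranges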
Example #4
def from_sampcut(scut, dets=None, name="cut", sample_offset=0):
    # To zeroth order, flags simply become 1 when we enter a cut range and 0 when we exit
    flag_stack = scut.ranges.copy()
    flag_stack[:] = [1, 0]
    flag_stack = flag_stack.reshape(-1)
    index_stack = scut.ranges.reshape(-1)
    # However, each detector starts out uncut by default, so we must insert an uncut
    # at all the beginnings
    stack_bounds = utils.cumsum(2 * scut.nranges, True)
    flag_stack = np.insert(flag_stack, stack_bounds[:-1], 0)
    index_stack = np.insert(index_stack, stack_bounds[:-1], 0)
    stack_bounds = utils.cumsum(2 * scut.nranges + 1, True)
    # At this point we may have some empty ranges. I think that's acceptable
    # Expand flag_stack to full dimensionality
    flag_stack = flag_stack[:, None]
    return Flagrange(scut.nsamp,
                     index_stack,
                     flag_stack,
                     stack_bounds,
                     dets=dets,
                     flag_names=[name],
                     derived_names=["cuts"],
                     derived_masks=[[[1], [0]]],
                     sample_offset=sample_offset)
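The double cumsum/np.insert dance is compact enough to deserve a worked toy case (two detectors with 2 and 1 cut ranges, reusing the cumsum sketch from Example #1):

nranges    = np.array([2, 1])
flag_stack = np.tile([1, 0], 3)                     # [1,0, 1,0, 1,0]
bounds     = cumsum(2 * nranges, endpoint=True)     # [0, 4, 6]
# Insert an "uncut" entry at each detector's start:
flag_stack = np.insert(flag_stack, bounds[:-1], 0)  # [0,1,0,1,0, 0,1,0]
bounds     = cumsum(2 * nranges + 1, endpoint=True) # [0, 5, 8]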
Example #5
 def __init__(self, rangelists, copy=True):
     # Todo: Handle (neach, flat) inputs
     if rangelists is None:
         self.data = np.zeros([], dtype=np.object)
     elif isinstance(rangelists, Multirange):
         if copy: rangelists = rangelists.copy()
         self.data = rangelists.data
     elif isinstance(rangelists, tuple):
         n, neach, flat = rangelists
         ncum = cumsum(neach, True)
         self.data = np.asarray(
             [Rangelist(flat[a:b], n) for a, b in zip(ncum[:-1], ncum[1:])])
     else:
         # List or array input. Constructing directly via array constructor
         # is suddenly broken - it tries to iterate through every index
         if copy: rangelists = np.array(rangelists)
         self.data = np.asarray(rangelists)
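The tuple branch decodes the (neach, flat) packing that several other examples write out. A standalone sketch with plain slices standing in for Rangelist (which is not defined on this page), reusing the cumsum sketch from Example #1:

neach = np.array([2, 0, 1])                      # ranges per detector
flat  = np.array([[0, 10], [50, 60], [20, 30]])  # all ranges, concatenated
ncum  = cumsum(neach, endpoint=True)             # [0, 2, 2, 3]
per_det = [flat[a:b] for a, b in zip(ncum[:-1], ncum[1:])]
# per_det[1] is empty: detector 1 has no cut ranges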
Example #6
	def __init__(self, fname):
		self.fname = fname
		with h5py.File(fname, "r") as hfile:
			for k in ["boresight","offsets","comps","sys","mjd0","dets"]:
				setattr(self, k, hfile[k].value)
			n = self.boresight.shape[0]
			neach = hfile["cut/neach"].value
			flat  = hfile["cut/flat"].value
			self.cut  = sampcut.Sampcut(flat, utils.cumsum(neach, endpoint=True), n)
			self.cut_noiseest = self.cut.copy()
			self.noise= nmat.read_nmat(hfile, "noise")
			self.site = bunch.Bunch({k:hfile["site/"+k].value for k in hfile["site"]})
			self.subdets = np.arange(self.ndet)
			self.hwp = np.zeros(n)
			self.hwp_phase = np.zeros([n,2])
			self.sampslices = []
			self.id = os.path.basename(fname)
			self.entry = bunch.Bunch(id=self.id)
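A portability note on this example: hfile[k].value is the legacy h5py accessor, removed in h5py 3.x. On a modern h5py the equivalent full-dataset read is the [()] indexing:

neach = hfile["cut/neach"][()]  # modern spelling of hfile["cut/neach"].value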
Example #7
def merge_data(ds, verbose=False):
	if len(ds) == 1: return ds[0]
	# Offset samples to align them, and make detector ids unique
	#det_offs = utils.cumsum([d.array_info.ndet for d in ds])
	offs_real = measure_offsets([d.boresight[0] for d in ds])
	offs = np.round(offs_real).astype(int)
	assert np.all(np.abs(offs-offs_real) < 0.1), "Non-integer sample offset in read_combo"
	if verbose: print "offsets: " + ",".join([str(off) for off in offs])
	if verbose: print "shifting"
	for d, off in zip(ds, offs):
		d.shift(sample_shift=off)
	# Find the common samples, as we must restrict to these before
	# we can take the union
	samples_list = np.array([d.samples for d in ds])
	samples = np.array([np.max(samples_list[:,0]),np.min(samples_list[:,1])])
	if verbose: print "restricting"
	for d in ds: d.restrict(samples=samples)
	# Ok, all datasets have the same sample range, and non-overlapping detectors.
	# Merge into a union dataset
	if verbose: print "union"
	dtot = dataset.detector_union(ds)
	# Array info cannot be automatically merged, so do it manually. We
	# assume that all have the same rectangular layout with the same number
	# of columns.
	row_offs = utils.cumsum([d.array_info.nrow for d in ds])
	infos = []
	for i, d in enumerate(ds):
		info = d.array_info.info.copy()
		#info.det_uid += det_offs[i]
		info.row     += row_offs[i]
		infos.append(info)
	info = np.rec.array(np.concatenate(infos))
	dtot.array_info.info = info
	dtot.array_info.ndet = len(info)
	dtot.array_info.nrow = np.max(info.row)+1
	# Dark detectors must also be handled manually, since they don't
	# follow the normal det slicing
	if "dark_tod"  in dtot: dtot.dark_tod  = np.concatenate([d.dark_tod for d in ds],0)
	if "dark_dets" in dtot: dtot.dark_dets = np.concatenate([d.dark_dets for i,d in enumerate(ds)],0)
	if "dark_cut"  in dtot: dtot.dark_cut = rangelist.stack_ranges([d.dark_cut for d in ds],0)
	if "tag_defs"  in dtot:
		for key in dtot.tag_defs:
			dtot.tag_defs[key] = np.concatenate([d.tag_defs[key] for i,d in enumerate(ds) if key in d.tag_defs],0)
	return dtot
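Two invariants in merge_data are worth spelling out: the measured offsets must round cleanly to whole samples, and the common sample range runs from the latest start to the earliest end. A minimal sketch with made-up numbers:

offs_real = np.array([0.0, 1200.02])
offs      = np.round(offs_real).astype(int)
assert np.all(np.abs(offs - offs_real) < 0.1)     # near-integer shifts only
samples_list = np.array([[0, 10000], [5, 9990]])  # per-dataset [start, end]
samples = np.array([samples_list[:, 0].max(), samples_list[:, 1].min()])
# samples == [5, 9990]: the only span present in every dataset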
Example #8
def build_noise_stats(myscans, comm):
	ids    = utils.allgatherv([scan.id    for scan in myscans], comm)
	ndets  = utils.allgatherv([scan.ndet  for scan in myscans], comm)
	srates = utils.allgatherv([scan.srate for scan in myscans], comm)
	gdets  = utils.allgatherv(safe_concat([scan.dets       for scan in myscans],int),   comm)
	ivars  = utils.allgatherv(safe_concat([scan.noise.ivar for scan in myscans],float), comm)
	offs   = utils.cumsum(ndets, endpoint=True)
	res    = []
	for i, id in enumerate(ids):
		o1, o2 = offs[i], offs[i+1]
		dsens = (ivars[o1:o2]*srates[i])**-0.5
		asens = (np.sum(ivars[o1:o2])*srates[i])**-0.5
		dets  = gdets[o1:o2]
		# We want sorted dets
		inds  = np.argsort(dets)
		dets, dsens = dets[inds], dsens[inds]
		line = {"id": id, "asens": asens, "dsens": dsens, "dets": dets}
		res.append(line)
	inds = np.argsort(ids)
	res = [res[ind] for ind in inds]
	return res
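The gathered arrays are laid out back to back, so offs brackets scan i's detectors as a slice, and the sensitivities follow from the inverse variances and the sample rate. A toy version (the srate of 400 Hz is made up), reusing the cumsum sketch from Example #1:

ndets = np.array([3, 2])                        # detectors per scan
ivars = np.array([1., 2., 4., 8., 16.])         # gathered inverse variances
offs  = cumsum(ndets, endpoint=True)            # [0, 3, 5]
dsens = (ivars[offs[0]:offs[1]] * 400.0)**-0.5  # per-detector sensitivity
asens = (np.sum(ivars[offs[0]:offs[1]]) * 400.0)**-0.5  # whole-array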
Example #9
def build_workspace_geometry(wid,
                             bore,
                             point_offset,
                             global_wcs,
                             site=None,
                             tagger=None,
                             padding=100,
                             max_ra_width=2.5 * utils.degree,
                             ncomp=3,
                             dtype=np.float64):
    if tagger is None: tagger = WorkspaceTagger()
    if isinstance(wid, basestring): wid = tagger.analyze(wid)
    if not valid_az_range(wid[0], wid[1]):
        raise WorkspaceError("Azimuth crosses north/south")

    trans = TransformPos2Pospix(global_wcs, site=site)
    az1, az2, el, ra1 = wid
    # Extract the workspace definition from the tag name
    ra_ref = ra1 + tagger.ra_step / 2
    # We want ra(dec) for up- and down-sweeps for the middle of
    # the workspace. First find a t that will result in a sweep
    # that crosses through the middle of the workspace.
    foc_offset = np.mean(point_offset, 0)
    t0 = utils.ctime2mjd(bore[0, 0])
    t_ref = find_t_giving_ra(az1 + foc_offset[0],
                             el + foc_offset[1],
                             ra_ref,
                             site=site,
                             t0=t0)
    # We also need the corners of the full workspace area.
    t1 = find_t_giving_ra(az1 + foc_offset[0],
                          el + foc_offset[1],
                          ra1,
                          site=site,
                          t0=t0)
    t2 = find_t_giving_ra(az1 + foc_offset[0],
                          el + foc_offset[1],
                          ra1 + tagger.ra_step + max_ra_width,
                          site=site,
                          t0=t0)
    #print "t1", t1, "t2", t2
    #print "az1", az1/utils.degree, "az2", az2/utils.degree
    #print "ra", ra1/utils.degree, (ra1+tagger.ra_step+max_ra_width)/utils.degree
    bore_box_hor = np.array([[t1, az1, el], [t2, az2, el]])
    bore_corners_hor = utils.box2corners(bore_box_hor)
    work_corners_hor = bore_corners_hor[None, :, :] + (
        point_offset[:, [0, 0, 1]] * [0, 1, 1])[:, None, :]
    work_corners_hor = work_corners_hor.T.reshape(3, -1)
    work_corners = trans(work_corners_hor[1:], time=work_corners_hor[0])
    ixcorn, iycorn = np.round(work_corners[2:]).astype(int)
    iybox = np.array([np.min(iycorn) - padding, np.max(iycorn) + 1 + padding])
    # Generate an up and down sweep
    srate = get_srate(bore[0])
    period = pmat.get_scan_period(bore[1], srate)
    dmjd = period / 2. / 24 / 3600
    xshifts = []
    yshifts = []
    work_dazs = []
    nwxs, nwys = [], []
    for si, (afrom, ato) in enumerate([[az1, az2], [az2, az1]]):
        sweep = generate_sweep_by_dec_pix(
            [[t_ref, afrom + foc_offset[0], el + foc_offset[1]],
             [t_ref + dmjd, ato + foc_offset[0], el + foc_offset[1]]], iybox,
            trans)
        # Get the shift in ra pix per dec pix. At this point,
        # the shifts are just relative to the lowest-dec pixel
        xshift = np.round(sweep[5] - sweep[5, 0, None]).astype(int)
        # Get the shift in dec pix per dec pix. These tell us where
        # each working pixel starts as a function of normal dec pixel.
        # For example [0,1,3,6] would mean that the work to normal pixel
        # mapping is [0,1,1,2,2,2]. This is done to make dwdec/daz approximately
        # constant
        daz = np.abs(sweep[1, 1:] - sweep[1, :-1])
        daz_ratio = np.maximum(1, daz / np.min(daz[1:-1]))
        yshift = np.round(utils.cumsum(daz_ratio, endpoint=True)).astype(int)
        yshift -= yshift[0]
        # Now that we have the x and y mapping, we can determine the
        # bounds of our workspace by transforming the corners of our
        # input coordinates.
        #print "iyc", iycorn-iybox[0]
        #print "ixc", ixcorn
        #for i in np.argsort(iycorn):
        #	print "A %6d %6d" % (iycorn[i], ixcorn[i])
        #print "min(ixc)", np.min(ixcorn)
        #print "max(ixc)", np.max(ixcorn)
        #print "xshift", xshift[iycorn-iybox[0]]
        wycorn = ixcorn - xshift[iycorn - iybox[0]]
        #print "wycorn", wycorn
        #print "min(wyc)", np.min(wycorn)
        #print "max(wyc)", np.max(wycorn)
        # Modify the shifts so that any scan in this workspace is always transformed
        # to valid positions. wx and wy are transposed relative to x and y.
        # Padding is needed because of the rounding involved in recovering the
        # az and el from the wid.
        xshift += np.min(wycorn)
        xshift -= padding
        wycorn2 = ixcorn - xshift[iycorn - iybox[0]]
        #print "wycorn2", wycorn2
        #print "min(wyc2)", np.min(wycorn2)
        #print "max(wyc2)", np.max(wycorn2)
        #sys.stdout.flush()
        nwy = np.max(wycorn) - np.min(wycorn) + 1 + 2 * padding
        nwx = yshift[-1] + 1
        # Get the average azimuth spacing in wx
        work_daz = (sweep[1, -1] - sweep[1, 0]) / (yshift[-1] - yshift[0])
        print work_daz / utils.degree
        # And collect so we can pass them to the Workspace constructor later
        xshifts.append(xshift)
        yshifts.append(yshift)
        nwxs.append(nwx)
        nwys.append(nwy)
        work_dazs.append(work_daz)
    # The shifts from each sweep are guaranteed to have the same length,
    # since they are based on the same iybox.
    nwx = np.max(nwxs)
    # To translate the noise properties, we need a mapping from the x and t
    # fourier spaces. For this we need the azimuth scanning speed.
    scan_speed = 2 * (az2 - az1) / period
    work_daz = np.mean(work_dazs)
    wgeo = WorkspaceGeometry(nwys,
                             nwx,
                             xshifts,
                             yshifts,
                             iybox[0],
                             scan_speed,
                             work_daz,
                             global_wcs,
                             ncomp=ncomp,
                             dtype=dtype)
    return wgeo
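The yshift construction is the least obvious cumsum use on this page: accumulating the per-pixel stretch factors daz_ratio yields the work-pixel start index for each normal dec pixel, i.e. the [0,1,3,6] -> [0,1,1,2,2,2] mapping the comment describes. In isolation, with hypothetical spacings and the cumsum sketch from Example #1:

daz       = np.array([1.0, 1.0, 2.0, 3.0])
daz_ratio = np.maximum(1, daz / np.min(daz[1:-1]))  # [1, 1, 2, 3]
yshift    = np.round(cumsum(daz_ratio, endpoint=True)).astype(int)
yshift   -= yshift[0]                               # [0, 1, 2, 4, 7]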
Example #10
			print "Skipped %s: To short tod" % id
			continue
		tod   = d.tod[:,:nseg*seg_size]
		stat  = np.zeros([2,nstat,ndet,nseg],dtype=dtype)
		for si in range(nstat):
			sub  = tod.reshape(ndet,nseg,-1,nmed,nrms[si])
			rmss = np.median(np.std(sub,-1),-1)
			stat[0,si] = np.mean(rmss,-1)
			stat[1,si] = np.std(rmss,-1)
		lens[i] = nseg
		mystats.append(stat)
		myinds.append(i)
		del d, tod, sub
	# Collect everybody's lengths
	lens = utils.allreduce(lens, comm)
	offs = utils.cumsum(lens, endpoint=True)
	# Allocate output stat buffer. This is a bit inefficient, since
	# only really the root should need to do this. But the stat arrays
	# aren't that big.
	stats = np.zeros([2,nstat,ndet,offs[-1]],dtype=dtype)
	for li, gi in enumerate(myinds):
		stats[:,:,:,offs[gi]:offs[gi+1]] = mystats[li]
	del mystats
	stats = utils.allreduce(stats, comm)
	# And output
	if comm.rank == 0:
		print "Writing %s" % ofile
		with h5py.File(ofile, "w") as hfile:
			hfile["stats"]= stats
			hfile["lens"] = lens
			hfile["ids"]  = ids[ind1:ind2]
Example #11
			# Define our phase maps
			nrow, ncol = active_scans[0].dgrid
			array_dets = np.arange(nrow*ncol)
			if col_major: array_dets = array_dets.reshape(nrow,ncol).T.reshape(-1)
			det_unit   = nrow if col_major else ncol
			areas      = mapmaking.PhaseMap.zeros(patterns, array_dets, res=res, det_unit=det_unit, dtype=dtype)
			signal     = mapmaking.SignalPhase(active_scans, areas, mypids, comm, name=effname, ofmt=param["ofmt"], output=param["output"]=="yes")
		elif param["type"] == "noiserect":
			ashape, awcs = enmap.read_map_geometry(param["value"])
			leftright = int(param["leftright"]) > 0
			# Drift is in degrees per hour, but we want it per second
			drift = float(param["drift"])/3600
			area = enmap.zeros((args.ncomp*(1+leftright),)+ashape[-2:], awcs, dtype)
			# Find the duration of each tod. We need this for the y offsets
			nactive = utils.allgather(np.array(len(active_scans)), comm)
			offs    = utils.cumsum(nactive, endpoint=True)
			durs    = np.zeros(np.sum(nactive))
			for i, scan in enumerate(active_scans): durs[offs[comm.rank]+i] = scan.nsamp/scan.srate
			durs    = utils.allreduce(durs, comm)
			ys      = utils.cumsum(durs)*drift
			my_ys   = ys[offs[comm.rank]:offs[comm.rank+1]]
			# That was surprisingly cumbersome
			signal  = mapmaking.SignalNoiseRect(active_scans, area, drift, my_ys, comm, name=effname, mode=param["mode"], ofmt=param["ofmt"], output=param["output"]=="yes")
		elif param["type"] == "srcsamp":
			if param["srcs"] == "none": srcs = None
			else: srcs = pointsrcs.read(param["srcs"])
			minamp = float(param["minamp"])
			signal = mapmaking.SignalSrcSamp(active_scans, dtype=dtype, comm=comm,
					srcs=srcs, amplim=minamp)
			signal_srcsamp = signal
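The noiserect branch uses the same gather-offset bookkeeping plus one more cumsum: accumulated tod durations times the drift rate give each tod's starting y position. A toy version with a made-up drift, reusing the cumsum sketch from Example #1:

drift = 1.5 / 3600                    # deg/hour -> deg/s
durs  = np.array([600., 900., 600.])  # seconds per tod
ys    = cumsum(durs) * drift          # [0.0, 0.25, 0.625]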
Example #12
			# Define our phase maps
			nrow, ncol = active_scans[0].dgrid
			array_dets = np.arange(nrow*ncol)
			if col_major: array_dets = array_dets.reshape(nrow,ncol).T.reshape(-1)
			det_unit   = nrow if col_major else ncol
			areas      = mapmaking.PhaseMap.zeros(patterns, array_dets, res=res, det_unit=det_unit, dtype=dtype)
			signal     = mapmaking.SignalPhase(active_scans, areas, mypids, comm, name=effname, ofmt=param["ofmt"], output=param["output"]=="yes")
		elif param["type"] == "noiserect":
			ashape, awcs = enmap.read_map_geometry(get_map_path(param["value"]))
			leftright = int(param["leftright"]) > 0
			# Drift is in degrees per hour, but we want it per second
			drift = float(param["drift"])/3600
			area = enmap.zeros((args.ncomp*(1+leftright),)+ashape[-2:], awcs, dtype)
			# Find the duration of each tod. We need this for the y offsets
			nactive = utils.allgather(np.array(len(active_scans)), comm)
			offs    = utils.cumsum(nactive, endpoint=True)
			durs    = np.zeros(np.sum(nactive))
			for i, scan in enumerate(active_scans): durs[offs[comm.rank]+i] = scan.nsamp/scan.srate
			durs    = utils.allreduce(durs, comm)
			ys      = utils.cumsum(durs)*drift
			my_ys   = ys[offs[comm.rank]:offs[comm.rank+1]]
			# That was surprisingly cumbersome
			signal  = mapmaking.SignalNoiseRect(active_scans, area, drift, my_ys, comm, name=effname, mode=param["mode"], ofmt=param["ofmt"], output=param["output"]=="yes")
		elif param["type"] == "srcsamp":
			if param["srcs"] == "none": srcs = None
			else: srcs = pointsrcs.read(param["srcs"])
			minamp = float(param["minamp"])
			if "mask" in param: m = enmap.read_map(param["mask"]).astype(dtype)
			else: m = None
			signal = mapmaking.SignalSrcSamp(active_scans, dtype=dtype, comm=comm,
					srcs=srcs, amplim=minamp, mask=m)