Example #1
File: scanutils.py Project: amaurea/enlib
def scan_iterator(filelist, inds, reader, db=None, dets=None, quiet=False, downsample=1, hwp_resample=False):
	"""Given a set of ids/files and a set of indices into that list. Try
	to read each of these scans. Returns a list of successfully read scans
	and a list of their indices."""
	for ind in inds:
		try:
			if not isinstance(filelist[ind], str): raise IOError
			d = enscan.read_scan(filelist[ind])
			#actdata.read(filedb.data[filelist[ind]])
		except (IOError, OSError):
			try:
				entry = db[filelist[ind]]
				d = reader(entry)
				if d.ndet == 0 or d.nsamp == 0:
					raise errors.DataMissing("Tod contains no valid data")
			except errors.DataMissing as e:
				if not quiet: L.debug("Skipped %s (%s)" % (str(filelist[ind]), e.args[0]))
				continue
		if dets:
			if dets.startswith("@"):
				uids = [int(line.split()[0]) for line in open(dets[1:],"r")]
				_, duids = actdata.split_detname(d.dets)
				_,det_inds = utils.common_inds([uids,duids])
				d = d[det_inds]
			else:
				d = eval("d[%s]" % dets)
		hwp_active = np.any(d.hwp_phase[0] != 0)
		if hwp_resample and hwp_active:
			mapping = enscan.build_hwp_sample_mapping(d.hwp)
			d = d.resample(mapping)
		d = d[:,::downsample]
		if not quiet: L.debug("Read %s" % str(filelist[ind]))
		yield ind, d
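
A minimal usage sketch of the generator above, assuming that filelist, inds, a reader function and a filedb-style database come from the surrounding script (none of them are defined here); it only illustrates the (index, scan) pairs the generator yields.

# Hypothetical usage: collect the scans that could be read, together with
# their original indices, downsampling each by a factor of 2.
scans, kept = [], []
for ind, scan in scan_iterator(filelist, inds, reader, db=filedb, downsample=2):
	scans.append(scan)
	kept.append(ind)
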
Example #2
 print "Processing %s" % id
 srates[i] = d.srate
 mce_fsamps[i] = d.mce_fsamp
 mce_params[i] = d.mce_params[:4]
 # Compute the power spectrum
 d.tod = d.tod.astype(dtype)
 nsamp = d.nsamp
 srate = d.srate
 ifmax = d.srate / 2
 ft = fft.rfft(d.tod) / (nsamp * srate)**0.5
 nfreq = ft.shape[-1]
 del d.tod
 ps = np.abs(ft)**2
 # Det specs
 zoom = int(round(ifmax / args.fmax_zoom))
 dets = actdata.split_detname(d.dets)[1]
 dspecs[i, dets] = bin(ps, args.nbin_det)
 dzooms[i, dets] = bin(ps, args.nbin_zoom, zoom=zoom)
 # Aggregate specs. First bin in small bins
 dhigh, binds = bin(ps, args.nbin, return_inds=True)
 nhits[i] = np.bincount(binds, minlength=args.nbin)
 # Then compute the median and the +-1 sigma quantiles (the 15.87% and 84.13% percentiles)
 tspecs[0, i] = np.median(dhigh, 0)
 tspecs[1, i] = np.percentile(dhigh, 15.86553, 0)
 tspecs[2, i] = np.percentile(dhigh, 84.13447, 0)
 tspecs[3, i] = np.min(dhigh, 0)
 tspecs[4, i] = np.max(dhigh, 0)
 del ps
 # Normalize ft in bins, since we want correlations
 for di in range(d.ndet):
     ft[di] /= (dhigh[di]**0.5)[binds]
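
The snippet above relies on a bin helper that is not shown. A minimal sketch of what such a helper could look like, assuming simple equal-width frequency bins and an optional zoom factor that restricts the range; the actual function in the source script may differ.

import numpy as np

def bin(ps, nbin, zoom=1, return_inds=False):
    """Average the per-detector spectra ps (ndet, nfreq) into nbin equal-width
    frequency bins, optionally keeping only the first 1/zoom of the range."""
    nfreq = ps.shape[-1] // zoom
    binds = np.arange(nfreq) * nbin // nfreq            # bin index per frequency
    hits  = np.maximum(np.bincount(binds, minlength=nbin), 1)
    res   = np.zeros(ps.shape[:-1] + (nbin,), ps.dtype)
    for di in range(len(ps)):
        res[di] = np.bincount(binds, ps[di, :nfreq], minlength=nbin) / hits
    if return_inds: return res, binds
    return res
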
Example #3
		#data.tod -= np.mean(data.tod,1)[:,None]
		data.tod -= data.tod[:,None,0].copy()
		data.tod  = data.tod.astype(dtype)
		# Set up our likelihood
		L = Likelihood(data, srcpos[:,sids], amps[sids], filter=highpass)
		# Find out which sources are reliable, so we don't waste time on bad ones
		if prune_unreliable_srcs:
			_, aicov = L.fit_amp()
			good = amps[sids]**2*aicov[:,0,0,0,0] > args.minsn**2
			sids = [sid for sid,g in zip(sids,good) if g]
			nsrc = len(sids)
			print("Restricted to %d srcs: %s" % (nsrc,", ".join(["%d (%.1f)" % (i,a) for i,a in zip(sids,amps[sids])])))
		if nsrc == 0: continue
		L    = Likelihood(data, srcpos[:,sids], amps[sids], perdet=perdet, thumbs=True, N=L.N, method=args.method, filter=highpass)
		beam_area = get_beam_area(data.beam)
		_, uids   = actdata.split_detname(data.dets) # Argh, stupid detnames
		freq      = data.array_info.info.nom_freq[uids[0]]
		fluxconv  = utils.flux_factor(beam_area, freq*1e9)
		group_data.append(bunch.Bunch(data=data, sids=sids, lik=L, id=id, oid=oid, ind=ind, beam_area=beam_area, freq=freq, fluxconv=fluxconv))

	if len(group_data) == 0:
		print("No usable tods in group %s. Skipping" % ",".join([ids[i] for i in group]))
		continue

	# Set up the full likelihood
	progress_thumbs = args.minimaps and verbose >= 3
	chisq_wrappers  = [data.lik.chisq_wrapper(thumb_path=args.odir + "/" + data.oid + "_thumb%03d.fits", thumb_interval=progress_thumbs) for data in group_data]
	def likfun(off): return sum([chisq_wrapper(off) for chisq_wrapper in chisq_wrappers])
	# Representative individual lik for stuff that's common to them. This is a bit hacky.
	# A single joint lik class would have been cleaner.
	L = group_data[0].lik
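
For context, likfun above sums the chi-square of every tod in the group as a function of a single trial offset, so the joint fit amounts to minimizing that sum. A minimal sketch of such a minimization; the optimizer, starting point and offset dimensionality are assumptions, not taken from the source.

import numpy as np
from scipy import optimize

# Minimize the summed chi-square over an (assumed 2-component) pointing offset.
off0 = np.zeros(2)
res  = optimize.minimize(likfun, off0, method="Nelder-Mead")
best_off, best_chisq = res.x, res.fun
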
Example #4
    # Build the noise model
    N = NmatTot(scan, model="uncorr", window=2.0, filter=highpass)
    P = PmatTot(scan, srcpos[:, sids], perdet=True, sys=sys)

    # rhs
    N.apply(scan.tod)
    rhs = P.backward(scan.tod, ncomp=1)
    # div
    scan.tod[:] = 0
    P.forward(scan.tod, rhs * 0 + 1)
    N.apply(scan.tod)
    div = P.backward(scan.tod, ncomp=1)

    # Use beam to turn amp into flux. We want the flux in mJy, so divide by 1e3
    beam_area = get_beam_area(scan.beam)
    _, uids = actdata.split_detname(scan.dets)  # Argh, stupid detnames
    freq = scan.array_info.info.nom_freq[uids[0]]
    fluxconv = utils.flux_factor(beam_area, freq * 1e9) / 1e3

    div /= fluxconv**2
    rhs /= fluxconv

    # Solve. Unhit sources will be nan with errors inf
    with utils.nowarn():
        flux = rhs / div
        dflux = div**-0.5
    del rhs, div

    # Get the mean time for each source-detector. This will be nan for unhit sources
    scan.tod[:] = scan.boresight[None, :, 0]
    N.white(scan.tod)
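
The rhs/div passes above implement the usual maximum-likelihood amplitude estimate amp = (P^T N^-1 d) / (P^T N^-1 P), with 1-sigma error div**-0.5. A self-contained toy version with scalar stand-ins for the pointing and noise operators, purely to illustrate the algebra:

import numpy as np

rng      = np.random.default_rng(0)
profile  = np.full(1000, 0.3)                    # toy "pointing matrix" P (beam samples)
sigma    = 2.0                                   # white noise standard deviation
amp_true = 5.0
tod      = profile * amp_true + rng.normal(0, sigma, profile.size)
rhs      = np.sum(profile * tod) / sigma**2      # P^T N^-1 d
div      = np.sum(profile * profile) / sigma**2  # P^T N^-1 P
amp, damp = rhs / div, div**-0.5                 # ML amplitude and its 1-sigma error
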
Example #5
            tod = np.zeros((d.ndet, d.nsamp), dtype=dtype)
            # And project
            pmap.forward(tod, mask)
            # Any nonzero samples should be cut
            tod = tod != 0
            cut = sampcut.from_mask(tod)
            del tod
        progress = 100.0 * (ind - comm.rank * ntod // comm.size) / (
            (comm.rank + 1) * ntod // comm.size -
            comm.rank * ntod // comm.size)
        print("%3d %5.1f %s %6.4f %d  %8.3f %8.3f" %
              (comm.rank, progress, id, float(cut.sum()) / cut.size, visible,
               memory.current() / 1024.**3, memory.max() / 1024.**3))
        mystats.append([ind, float(cut.sum()) / cut.size, visible])
        # Add to my work file
        _, uids = actdata.split_detname(d.dets)
        flags = flagrange.from_sampcut(cut, dets=uids)
        flags.write(hfile, group=id)
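
The sampcut.from_mask call above presumably turns the boolean (ndet, nsamp) mask into per-detector ranges of cut samples. A pure-numpy sketch of that idea, independent of enlib's actual sampcut implementation:

import numpy as np

def mask_to_ranges(mask_row):
    """Return (start, end) pairs for each contiguous run of True in a 1D boolean array."""
    padded = np.concatenate(([False], mask_row, [False])).astype(int)
    edges  = np.flatnonzero(np.diff(padded))
    return edges.reshape(-1, 2)

# Example: samples 2-4 and 7 of a 9-sample detector row are flagged.
row = np.array([0, 0, 1, 1, 1, 0, 0, 1, 0], dtype=bool)
print(mask_to_ranges(row))   # [[2 5] [7 8]]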

# Merge all the individual cut files into a single big one.
comm.Barrier()
if comm.rank == 0:
    with h5py.File(args.odir + "/cuts.hdf", "w") as ofile:
        for i in range(comm.size):
            print("Reducing %3d" % i)
            with h5py.File(args.odir + "/work_%03d.hdf" % i, "r") as ifile:
                for key in sorted(ifile.keys()):
                    ifile.copy(key, ofile)
    print("Done")

# Output the overall statistics
Example #6
File: mask2cut2.py Project: amaurea/tenki
			cut = sampcut.empty(d.ndet, d.nsamp)
		else:
			pmap = pmat.PmatMap(scan, mask, sys="sidelobe:%s" % args.objname)
			# Build a tod to project onto.
			tod = np.zeros((d.ndet, d.nsamp), dtype=dtype)
			# And project
			pmap.forward(tod, mask)
			# Any nonzero samples should be cut
			tod = tod != 0
			cut = sampcut.from_mask(tod)
			del tod
		progress = 100.0*(ind-comm.rank*ntod//comm.size)/((comm.rank+1)*ntod//comm.size-comm.rank*ntod//comm.size)
		print("%3d %5.1f %s %6.4f %d  %8.3f %8.3f" % (comm.rank, progress, id, float(cut.sum())/cut.size, visible, memory.current()/1024.**3, memory.max()/1024.**3))
		mystats.append([ind, float(cut.sum())/cut.size, visible])
		# Add to my work file
		_, uids  = actdata.split_detname(d.dets)
		flags = flagrange.from_sampcut(cut, dets=uids)
		flags.write(hfile, group=id)

# Merge all the individual cut files into a single big one.
comm.Barrier()
if comm.rank == 0:
	with h5py.File(args.odir + "/cuts.hdf", "w") as ofile:
		for i in range(comm.size):
			print("Reducing %3d" % i)
			with h5py.File(args.odir + "/work_%03d.hdf" % i, "r") as ifile:
				for key in sorted(ifile.keys()):
					ifile.copy(key, ofile)
	print("Done")

# Output the overall statistics
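
The statistics output itself is not part of the excerpt. A minimal sketch of how the per-rank mystats entries could be combined on rank 0, assuming comm is an mpi4py communicator and each entry is [ind, cut_fraction, visible]; this is illustrative, not the script's actual reduction.

import numpy as np

all_stats = comm.gather(np.array(mystats), root=0)
if comm.rank == 0:
    stats = np.concatenate([s for s in all_stats if len(s)], 0)
    stats = stats[np.argsort(stats[:, 0])]   # order by tod index
    print("mean cut fraction %.4f over %d tods" % (stats[:, 1].mean(), len(stats)))
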
Example #7
            pixbox = enmap.pixbox_of(swcs, shape, wcs)
            if not use_dmap:
                refmap = enmap.read_map(args.mapsub,
                                        pixbox=pixbox).astype(dtype)
            else:
                refmap = dmap.read_map(args.mapsub,
                                       pixbox=pixbox,
                                       bbox=mybbox,
                                       comm=comm).astype(dtype)
            refmap = signal.prepare(refmap)

        # Get the frequency and beam for this chunk. We assume that
        # this is the same for every member of the chunk, so we only need
        # to do this for one scan
        scan = actscan.ACTScan(filedb.data[chunk_ids[inds[0]]])
        _, dets = actdata.split_detname(scan.dets)
        beam = scan.beam
        freq = scan.array_info.info.nom_freq[dets[0]]
        barea = planet9.calc_beam_area(scan.beam)
        # Get the conversion from ref-freq flux to observed amplitude. This includes
        # dilution by the beam area
        flux2amp = 1 / utils.flux_factor(barea, args.fref * 1e9, utils.T_cmb)
        fref2freq = utils.planck(freq * 1e9, args.Tref) / utils.planck(
            args.fref * 1e9, args.Tref)
        rfact = flux2amp * fref2freq * 1e3  # 1e3 for flux in mJy and amp in uK

        # only work will be 3,ny,nx. The rest are scalar. Will copy in-out as necessary
        work = signal.work()
        rhs = area[0]
        div = rhs.copy()
        wrhs = signal.prepare(rhs)
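
The rfact chain above converts a flux quoted at the reference frequency into an observed amplitude: flux2amp handles the beam dilution and CMB-unit conversion, while fref2freq rescales along a blackbody spectrum at Tref. A standalone sketch of that frequency rescaling, with an explicit Planck function standing in for whatever utils.planck does; the constants and example numbers are assumptions.

import numpy as np

h, k_B, c = 6.62607015e-34, 1.380649e-23, 2.99792458e8   # SI constants

def planck(nu, T):
    """Planck spectral radiance B_nu(T) in SI units."""
    return 2 * h * nu**3 / c**2 / np.expm1(h * nu / (k_B * T))

# Rescale a flux quoted at reference frequency fref to the observing frequency
# freq, assuming the source spectrum is a blackbody at temperature Tref.
fref, freq, Tref = 150e9, 98e9, 120.0      # Hz, Hz, K (illustrative values)
fref2freq = planck(freq, Tref) / planck(fref, Tref)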