Example #1
File: actdata.py  Project: amaurea/enact
def calibrate_boresight(data):
	"""Calibrate the boresight by converting to radians and
	interpolating across missing samples linearly. Note that
	this won't give reasonable results for gaps of length
	similar to the scan period. Also adds a srate field containing
	the sampling rate."""
	require(data, ["boresight","flags"])
	# Convert angles to radians
	if data.nsamp is None or data.nsamp <= 0: raise errors.DataMissing("nsamp")
	data.boresight[1]  = robust_unwind(data.boresight[1], period=360, tol=0.1)
	data.boresight[1:]*= np.pi/180
	#data.boresight[1:] = utils.unwind(data.boresight[1:] * np.pi/180)
	# Find unreliable regions
	bad_flag   = (data.flags!=0)&(data.flags!=0x10)
	bad_value  = find_boresight_jumps(data.boresight)
	bad_value |= find_elevation_outliers(data.boresight[2])
	bad = bad_flag | bad_value
	#bad += srate_mask(data.boresight[0])
	# Interpolate through the bad regions. This won't work for long
	# regions, so those should instead be cut. Possible approaches:
	#  1. Raise an exception
	#  2. Construct a cut on the fly
	#  3. Handle it in the autocuts
	# The last option is the cleanest in my opinion.
	cut = sampcut.from_mask(bad)
	gapfill.gapfill_linear(data.boresight, cut, inplace=True)
	srate = 1/utils.medmean(data.boresight[0,1:]-data.boresight[0,:-1])
	data += dataset.DataField("srate", srate)
	# Get the scanning speed too
	speed = calc_scan_speed(data.boresight[0], data.boresight[1])
	data += dataset.DataField("speed", speed)
	return data
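
The gap-filling step above goes through enlib's sampcut/gapfill machinery. As a standalone illustration of the same idea, here is a minimal linear gap-filler over a boolean mask (a hypothetical sketch, not the project's API):

import numpy as np

def gapfill_linear_sketch(signal, bad):
	# Linearly interpolate `signal` across the samples flagged in the
	# boolean mask `bad`, in place. Short gaps are bridged well; long
	# gaps (comparable to the scan period) should be cut instead.
	idx  = np.arange(len(signal))
	good = ~bad
	signal[bad] = np.interp(idx[bad], idx[good], signal[good])
	return signal

sig = np.sin(np.linspace(0, 10, 1000))
bad = np.zeros(1000, bool)
bad[300:310] = True
gapfill_linear_sketch(sig, bad)  # the 10-sample gap becomes a straight line
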
Example #2
# Assumed context: import numpy as np; import scipy.ndimage;
# utils and rangelist here come from the enlib library.
def find_jumps(tod, bsize=100, nsigma=10, margin=50, step=50, margin_step=1000):
	ndet = tod.shape[0]
	cuts = []
	for det, det_tod in enumerate(tod):
		n = len(det_tod)
		# Compute the difference tod
		dtod = det_tod[1:]-det_tod[:-1]
		nsamp = dtod.size
		# Find the typical standard deviation
		nblock = nsamp//bsize
		sigma = utils.medmean(np.var(dtod[:nblock*bsize].reshape(nblock,bsize),-1))**0.5
		# Look for samples that deviate too much from 0
		bad = np.abs(dtod) > sigma*nsigma
		bad = np.concatenate([bad[:1],bad])
		# Look for steps: regions where the mean level changes dramatically on
		# each side of the jump. First find the center of each bad region.
		# Label 0 is the background, so only labels 1..nlabel are real regions.
		steps = np.zeros(bad.shape, int)
		labels, nlabel = scipy.ndimage.label(bad)
		centers = np.array(scipy.ndimage.center_of_mass(
			bad, labels, np.arange(1, nlabel+1))).reshape(-1,1)[:,0].astype(int)
		# Find the mean to the left and right of each bad region
		for i, pos in enumerate(centers):
			m1 = np.mean(det_tod[max(0,pos-step*3//2):max(1,pos-step//2)])
			m2 = np.mean(det_tod[min(n-2,pos+step//2):min(n-1,pos+step*3//2)])
			if np.abs(m2-m1) > sigma*nsigma:
				steps[pos] = 1
		# Grow each cut by a margin
		bad   = scipy.ndimage.distance_transform_edt(1-bad)   <= margin
		steps = scipy.ndimage.distance_transform_edt(1-steps) <= margin_step
		cuts.append(rangelist.Rangelist(bad|steps))
	return rangelist.Multirange(cuts)
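
To see the core threshold-and-grow idea in isolation, here is a synthetic sketch (standalone; a plain np.std stands in for the blockwise medmean estimate):

import numpy as np
import scipy.ndimage

# Synthetic detector tod: white noise with a large jump at sample 500.
rng = np.random.default_rng(0)
tod = rng.standard_normal(1000)
tod[500:] += 20
dtod  = tod[1:] - tod[:-1]
sigma = np.std(dtod)                  # crude stand-in for the blockwise estimate
bad   = np.abs(dtod) > 10*sigma      # flags the sample at the jump
bad   = np.concatenate([bad[:1], bad])  # pad back to the original length
# Grow the cut by a 50-sample margin on each side, as in find_jumps
grown = scipy.ndimage.distance_transform_edt(~bad) <= 50
print(np.where(bad)[0], grown.sum())  # expect roughly: [500] 101
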
Example #3
def srate(self):
    # Estimate the sampling rate from every 100th boresight timestamp.
    step = self.boresight.shape[0] // 100
    return float(step) / utils.medmean(self.boresight[::step, 0][1:] -
                                       self.boresight[::step, 0][:-1])
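
The step cancels between the stride and the numerator: each subsampled difference is step times the raw sample spacing. A quick check with hypothetical numbers, using np.median as a stand-in for the robust medmean:

import numpy as np

t = np.arange(10000) / 200.0          # hypothetical 200 Hz timestamps
step = t.shape[0] // 100              # = 100
d = t[::step][1:] - t[::step][:-1]    # each diff is step/200 = 0.5 s
print(float(step) / np.median(d))     # 200.0
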
Example #4
文件: actdata.py 项目: amaurea/enact
def calc_scan_speed(t, az, step=40):
	# Quick and dirty scan speed calculation. It suffers from a noise bias,
	# but the bias should be small as long as step isn't close to 1.
	tsub = t[::step]
	asub = az[::step]
	return utils.medmean(np.abs(asub[1:]-asub[:-1])/np.abs(tsub[1:]-tsub[:-1]))
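
A usage sketch on synthetic data. The medmean stand-in below is an assumption: enlib's utils.medmean is a robust mean, approximated here as the mean of the central half of the sorted samples:

import numpy as np

def medmean(x):
	# Hypothetical stand-in for enlib's utils.medmean: the mean of the
	# central half of the sorted samples, robust to outliers.
	x = np.sort(np.ravel(x))
	i = len(x)//4
	return np.mean(x[i:len(x)-i]) if i > 0 else np.mean(x)

t    = np.arange(4000)/400.0   # 10 s of timestamps at 400 Hz
az   = 1.5*t                   # constant 1.5 deg/s sweep
step = 40
tsub, asub = t[::step], az[::step]
print(medmean(np.abs(asub[1:]-asub[:-1])/np.abs(tsub[1:]-tsub[:-1])))  # ~1.5
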
Example #5
ratios = []
for i, (mapfile, hitfile) in enumerate(zip(mapfiles[1:], hitfiles[1:])):
	print "Reading map %s" % mapfile
	map2 = enmap.read_map(mapfile).preflat[args.component]
	print "Reading hit %s" % hitfile
	hit2 = enmap.read_map(hitfile)
	# Compute variances for the current map minus the previous map
	dmap = (map2-map)/2
	dhit = hit2+hit
	vmap = calc_map_block_ivar(dmap, bs)
	# Keep only regions that are reasonably well hit in both maps, but
	# not extreme outliers
	mask  = (hit>quant(hit,qlim)*hitlim) & (hit2>quant(hit2,qlim)*hitlim)
	mask &= (hit<quant(hit,qlim)) & (hit2<quant(hit2,qlim))
	# Reduce dhit and mask to vmap's resolution
	dhit = calc_map_block_mean(dhit, bs)
	mask = calc_map_block_mean(mask, bs)>0
	ratio = utils.medmean(vmap[mask]/dhit[mask])
	# And compute the sensitivity
	ratio *= get_bias(bs)**2
	ratios.append(ratio)
	sens = (ratio*args.srate)**-0.5
	print "%d-%d  %7.2f" % (i+1,i, sens)
	map, hit = map2, hit2

# Ratio has units 1/(uK^2*sample), and gives us the conversion
# factor between hitmaps and inverse variance maps, which we call
# div maps by convention from tenki.
ratio = np.mean(ratios)
print "mean %7.2f" % (ratio*args.srate)**-0.5

if args.dry_run: sys.exit()
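
As a unit check on that conversion (the numbers below are made up): ratio is in 1/(uK^2*sample) and srate in samples/s, so ratio*srate is in 1/(uK^2*s) and the sensitivity comes out in uK*sqrt(s):

ratio = 1e-6                # hypothetical, in 1/(uK^2*sample)
srate = 400.0               # hypothetical, in samples/s
sens  = (ratio*srate)**-0.5
print("%7.2f" % sens)       # 50.00, in uK*sqrt(s)
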
Example #6
File: fastmap.py  Project: amaurea/tenki
def get_srate(ctime):
	# Estimate the sampling rate from every 10th timestamp.
	step = ctime.size//10
	ctime = ctime[::step]
	return float(step)/utils.medmean(ctime[1:]-ctime[:-1])
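
A self-contained test on synthetic timestamps; the utils namespace and the medmean stand-in below are assumptions so the snippet runs as-is:

import numpy as np
from types import SimpleNamespace

def medmean(x):
	# Hypothetical stand-in for enlib's utils.medmean (robust mean of
	# the central half of the sorted samples).
	x = np.sort(np.ravel(x))
	i = len(x)//4
	return np.mean(x[i:len(x)-i]) if i > 0 else np.mean(x)

utils = SimpleNamespace(medmean=medmean)  # lets get_srate run unmodified

ctime = 1.4e9 + np.arange(40000)/400.0    # 400 Hz timestamps
print(get_srate(ctime))                   # ~400.0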