# Example #1
def firstlook(cloud, line='NH3_11'):
	"""First-look reduction of one spectral line for a given cloud.

	Locates the line-free baseline windows via ``basebox``, builds the
	peak/rms channel indices around the bandwidth midpoint, subtracts a
	first-order baseline, and writes peak/rms maps through the
	``first_look`` helpers.
	"""
	print('Now %s' % line)
	a_rms, b_rms = basebox(cloud, line)
	cmin, cmax = chanmin(cloud, line), chanmax(cloud, line)
	# Assume the emission peak sits roughly mid-bandwidth, then find
	# the basebox gap that brackets that midpoint channel.
	cmid = (cmin + cmax) / 2
	gap_pairs = [[j, j + 1] for j in range(len(a_rms) - 1)
		     if b_rms[j] < cmid < a_rms[j + 1]]
	bcen, acen = gap_pairs[0]
	index_peak = first_look.create_index([b_rms[bcen]], [a_rms[acen]])
	index_rms = first_look.create_index(a_rms, b_rms)

	file_in = '../%s/%s_%s.fits' % (keys[cloud]['region'],
					keys[cloud]['region'], line)
	# NOTE(review): `s` is never used below — presumably the read/unit
	# conversion doubles as a sanity check on the input cube; confirm.
	s = SpectralCube.read(file_in)
	s = s.with_spectral_unit(u.km/u.s, velocity_convention='radio')

	file_out = file_in.replace('.fits', '_base1.fits')
	file_new = first_look.baseline(file_in, file_out,
				       index_clean=index_rms, polyorder=1)
	first_look.peak_rms(file_new, index_rms=index_rms,
			    index_peak=index_peak)
# Example #2
def map_cloud(cloud, do_sdfits=False, do_calibration=False, do_imaging=True, keys=keys):
	"""Run sdfits conversion, calibration, and/or imaging for one cloud.

	Parameters
	----------
	cloud : str
		Key into `keys` selecting the cloud to process.
	do_sdfits : bool
		Convert raw VEGAS data to sdfits via the external `sdfits-test` tool.
	do_calibration : bool
		Run the GAS pipeline wrapper over each spectral window / scan block.
	do_imaging : bool
		Grid the calibrated data into a cube per spectral line.
	keys : dict
		Per-cloud configuration (source, region, windows, scans, ...).
	"""
	source  = keys[cloud]['source' ]
	region  = keys[cloud]['region' ]
	windows = keys[cloud]['windows']
	nblocks = keys[cloud]['nblocks']
	scans   = keys[cloud]['scans'  ]
	gains   = keys[cloud]['gains'  ]
	beam    = keys[cloud]['beam'   ]

	# TODO: make --windows (or better yet, --lines) an cmd argument
	# Unique line names from the {'ifnum': 'lineName', ...} mapping,
	# preserving first-seen order (was a side-effect comprehension).
	lines = []
	for ifn in WindowDict:
		if WindowDict[ifn] not in lines:
			lines.append(WindowDict[ifn])
	# NOTE: "lines" list controls the imaging loop,
	#	while "windows" list controls calibration!
	# TODO: resolve lines/windows ambiguity!

	# Convert VEGAS raw data to sdfits
	if do_sdfits:
		import subprocess
		# BUGFIX: was `assert type(scans) is list` — asserts are
		# stripped under -O, so validate explicitly.
		if not isinstance(scans, list):
			raise TypeError('scans must be a list of scan-block dicts')
		unique_sessions = set([s['session'] for s in scans])
		for session in unique_sessions:
			# all scan blocks within one session are
			# parsed in a "s1:s2,s3:s4,..." format and
			# then sent to sdfits-test for data-crunching.
			# BUGFIX: compare session numbers with ==, not `is`
			# (identity of equal ints is an impl. detail).
			scan_blocks = ','.join(['%i:%i' % (s['start'], s['end'])
						for s in scans
						if s['session'] == session])
			# BUGFIX: Popen on one concatenated string with
			# shell=False execs the whole string as a program name
			# and fails; pass an argument list instead (the shell
			# quoting around -scans is then unnecessary).
			subprocess.Popen(['sdfits-test',
					  '-backends=vegasi',
					  '-scans=%s' % scan_blocks,
					  'AGBT15B_313_%.2i' % session])
			# TODO: oops I also need to properly set the output dir!

	# it's being quite slow on the import, moved inside the script
	import GAS_gridregion
	data_dir = '/lustre/pipeline/scratch/vsokolov/'
	# Run the GAS pipeline wrapper
	if do_calibration:
		for window in windows:
			# TODO: this is way too slow; gbtpipeline can accept
			# arguments like -m "50:60,80:90", rewrite the
			# GAS wrapper to accept faster arguments
			for block in range(nblocks):
				GAS_gridregion.doPipeline(
				   SessionNumber = scans[block]['session'],
				   StartScan     = scans[block]['start'],
				   EndScan       = scans[block]['end'  ],
				   Source        = source,
				   Gains         = gains,
				   Region        = region,
				   Window        = str(window),
				   OutputRoot    = data_dir+region+'/',
				   overwrite     = True                   )

	# Image the calibrated data
	if do_imaging:
		# TODO: trim and implement proper vlsr corrections
		# cloud 'I' had a somewhat mismatched vlsr.
		# BUGFIX: compare strings with ==, not `is`.
		startChannel, endChannel = (2800, 4600) \
			if cloud == 'I' else (3200, 5000)
		for line in lines:
			GAS_gridregion.griddata(rootdir=data_dir,
						region=region,
						indir=region+'_'+line,
						outfile=region+'_'+line,
						startChannel=startChannel,
						endChannel=endChannel,
						doBaseline=True,
						# shift the line-free windows into
						# the trimmed channel range
						baselineRegion=basebox(cloud, line) + startChannel,
						useBeam=beam,
						file_extension='')