Example #1
def do_dataset(dataset,fileroot='observations',local='no'):
	'''
	This is a routine to set up the peak finding routines
	based on a dataset name and a list file.  

	The variable local indicates where the output files should be
	stored in the same sense as all of the persist programs
	
	This is the way the routine is called for pipeline processing.

	101214	ksl	Added so that we could run peaks from the top level directory
	110511	ksl	Modified so that if there is external persistence associated with the dataset
			it is used for finding the peaks
	111011	ksl	Fixed an error in the modification above
	'''
	# Get information about this dataset, principally the path so that
	# we can identify where material is.

	record=per_list.read_ordered_list_one(fileroot,dataset)
	try:
		mask_file=record[0]
	except IndexError:
		print 'NOK: dataset %s does not exist in %s.ls' % (dataset,fileroot)
		return 'NOK: dataset %s does not exist in %s.ls' % (dataset,fileroot)



	work_dir=per_list.set_path(record[0],'no',local)  # This will be the Persist directory for the dataset
	fig_dir=work_dir+'/Figs'               # This will be the directory where figures are stored
	history_file=work_dir+dataset+'.txt'

	extper_file=work_dir+dataset+'_extper.fits'
	per_file=work_dir+dataset+'_persist.fits'

	# Fixed 111011
	if os.path.exists(extper_file):
		print 'Using external persistence file for dataset %s' % dataset
		string=doit(per_file,mask_file,history_file=history_file,local=local)
	elif os.path.exists(per_file):
		print 'Using internal persistence file for dataset %s' % dataset
		string=doit(per_file,mask_file,history_file=history_file,local=local)
	else:
		print 'Dataset %s does not have either an external or internal persistence file' % dataset


	# print 'Finished peaks for dataset %s' % dataset
	return 'OK: peaks.do_dataset: Finished for dataset %s' %  dataset
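A minimal usage sketch for the routine above, assuming it lives in a module named peaks (as the return string suggests) and that an observations.ls list file is present in the working directory; the dataset name is only illustrative:

# Hypothetical call, mirroring how the pipeline would invoke the routine above
import peaks

status = peaks.do_dataset('ia21h2e9q', fileroot='observations', local='no')
if status.startswith('NOK'):
	print 'Peak finding failed: %s' % status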
Example #2
def do_dataset(dataset='ia21h2e9q',model_type=0,norm=0.3,alpha=0.2,gamma=0.8,e_fermi=80000,kT=20000,fileroot='observations',ds9='yes',local='no',parameter_file='persist.pf',lookback_time=16):
	'''
	Create a persistence image for this dataset.  This version works by applying the
	persistence model to each of the previous images.  It assumes that the only one which
	matters is the image which created the most persistence according to the model.

	model_type=0 is our original model, which has a modified fermi distribution controlled by the values of norm, etc.
	model_type=1 is our purely descriptive model defined in terms of amplitudes and power laws
	model_type=2 is the fermi model that interpolates between curves for different exposure times

	All returns from this program should be:
		OK:  something
		NOK: something

	Outputs:
		If there were IR observations that preceded the observation being analyzed,
		then this routine creates various fits files:
			rootname_persist.fits  - The model for the total of internal and external persistence
			rootname_extper.fits   - The model for persistence from earlier visits, aka
			                         external persistence
			rootname_flt_cor.fits  - The corrected flt file
			rootname_stim.fits     - The stimulus that caused the persistence
			rootname_dt.fits       - The time at which the stimulus occurred
		Plots of the various images are also created, as well as a log file for
		the data set
	Notes:
		This is really the main routine of the program.  When run_persist.py calls this module,
		this is the routine that is called.

		In practice, the only difference between model types 1 and 2 is the calibration file that is read in.  The same interpolation
		routine is used for both.  At present the calibration file names for this are hardwired.

	History

	100905	ksl	Added diagnostic files to record the time of the stimulus and the
			value of the stimulus
	101015	ksl	Moved the location of where the output files are stored and 
			added a history file, so we would know what had happened
	110122	ksl	Added a switch to turn off displaying with ds9
	110324	ksl	Changed call to accommodate new persistence model
	110602	ksl	Incorporated the possibility of providing a correction file xynorm to account
			for spatial variations across the detector persistence model
	140606	ksl	Added correction which puts the correct units in the stimulus file.
	140611	ksl	Began to add in new persistence model, initially just by short-circuiting everything
	140803	ksl	Switched to fits version of data files
	141124	ksl	Small change to handle situations where the flat field correction is not found
	'''

	cur_time=date.get_gmt()

	xfile=locate_file(parameter_file)
	if xfile=='':
		print '# Error: Could not locate parameter file %s ' % parameter_file
	else:
		parameter_file=xfile


	if model_type==0:
		print '# Processing dataset %s with fermi model: Norm %4.2f alpha %4.2f gamma %4.2f e_fermi %6.0f kT %6.0f ' % (dataset,norm,alpha,gamma,e_fermi,kT)
	elif model_type==1:
		print '# Processing dataset %s with A gamma model' % dataset
	elif model_type==2:
		print '# Processing dataset %s with a time-variable fermi model' % dataset
	else:
		print '# Error: run_persist: Unknown model type %d' % model_type
		return 'NOK'

	# Read the observations file to get the data set of interest


	delta=lookback_time  # Datasets which occurred more than delta hours earlier are not considered.


	records=per_list.read_ordered_list2(fileroot,dataset,interval=[-delta,0],outroot='none')

	# Check the length of records


	if len(records)==0:
		string = 'NOK: subtract_persist.do_dataset: There are no records associated with dataset %s.  Check name in %s.ls' % (dataset,fileroot)
		sum_string='NOK - No record associated with this dataset'
		per_list.update_summary(dataset,'ERROR',sum_string,append='no')
		return string

	# So now we have the list that we need.

	

	science_record=records[len(records)-1]  # The science record is the last record
	sci_progid=science_record[2]
	words=science_record[3].split('.')
	sci_visit=words[0]
	sci_fil=science_record[10]
	sci_exp=eval(science_record[11])
	sci_obj=science_record[14]
	
	# Create the Persist directory if it does not already exist
	path=per_list.set_path(science_record[0],'yes',local)
	if path.count('NOK'):  # Then we were not able to create a plausible directory to put the data in
		return path

	# Open a history file.  Note that one needs the path before one can do this
	# The reason for calling this via the per_list routine, is that this routine sets the permissions
	# for the file

	history=per_list.open_file(path+science_record[1]+'.txt')


	history.write('START:  Persistence processing of file %s\n\n' % science_record[1])
	history.write('! Processed: %s\n' % date.get_gmt())
	history.write('! ProgramID: %s\n' % sci_progid)
	history.write('! Visit:     %s\n' % sci_visit)
	history.write('! FltFile:   %s\n' % science_record[1])
	history.write('! Filter:    %s\n' % sci_fil)
	history.write('! Exposure:  %6.1f\n' % sci_exp)
	history.write('! Object:    %s\n' % sci_obj)

	history.write('\n! Using Version %s of the persistence S/W' % VERSION)
	history.write('\n! Using a lookback time for observations that might cause persistence of %.1f hours\n' %  lookback_time)

	if model_type==0:
		history.write('\n! Processing  dataset %s with fermi model:  norm %6.2f alpha %6.2f e_fermi %6.0f kT %6.0f\n' % (dataset,norm,alpha,e_fermi,kT)) 
	elif model_type==1:
		history.write('\n! Processing dataset %s with A gamma model\n' % dataset)
	elif model_type==2:
		history.write('\n! Processing dataset %s with time-variable fermi  model\n' % dataset)


	# Check whether there is anything to do

	if len(records)==1:
		string='subtract_persist: No persistence for this dataset.  No earlier observations within %4.1f hours\n' % (delta)
		history.write('%s\n' % string)
		history.write('! Persistence:  None\n')
		string='OK: subtract_persist: -- None'
		print string
		history.close()
		xstring='  %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % (0,0,0,0,0,0)
		per_list.update_summary(dataset,'Persist',xstring,append='no')
		return string



	# Persistence is calculated for the middle of the interval in which the exposure
	# was taking place

	[t1,t2]=get_times(science_record[0])
	tscience=0.5*(t1+t2)  

	# establish which record is the last record that comes from a different visit than the current one

	i=0
	while i<len(records)-1:
		cur_progid=records[i][2]
		words=records[i][3].split('.')
		cur_visit=words[0]
		# print 'OK',cur_progid,cur_visit
		if cur_progid==sci_progid and cur_visit==sci_visit:
			# then we are now into earlier exposures of the same visit
			break
		i=i+1

	# if i = 0, all the exposures being evaluated for persistence create self_persistence
	# if i= len(records), all exposures being evaluated for persistence are from other visits

	last_external=i-1  # This is the last record that created external persistence
	ext_values=[]  # A place to store information about the persistence due to other observers
	ext_persist=[] # This is a place holder for storing the external persistence

	xynorm=read_parameter(parameter_file,'xynorm')
	if xynorm!='':
		xynorm=locate_file(xynorm)
		xcorr=get_image(xynorm,1)
		if len(xcorr)==0:
			history.write('! Error: Could not find correction file %s containing spatial dependence. Continuing anyway\n' % xynorm)
			xynorm=''  # This is an error because we were unable to find the file
		else:
			history.write('! Reference file containing spatial dependence:  %s\n' % xynorm)
	else:
		string='! Processing without spatially dependent correction'
		print string
		history.write('%s\n' % string )




	# This is the beginning of the loop for calculating the persistence model
	i=0
	while i<len(records)-1:
		record=records[i]
		# print 'subtract: %30s %6.1f model_type %d' % (record[0],eval(record[11]),model_type)
		# dt is measured from the end of the stimulus image to the middle of the
		# science image
		[t1,t2]=get_times(record[0])
		dt=(tscience-t2)*86400

		cur_progid=record[2]
		words=record[3].split('.')
		cur_visit=words[0]
		cur_sci_fil=record[10]
		cur_sci_exp=eval(record[11])
		cur_sci_obj=record[14]
		scan=record[4]

		xfile=record[0]
		# Use the ima file if the file causing persistence is a scan object
		if scan=='scan':
			xfile=xfile.replace('flt','ima')
			print 'Using ima file for ',record[0],xfile,scan


		x=get_image(xfile,1,'e',fileref=science_record[0])  # Convert this to electrons
		if len(x)==0:
			xstring='NOK: Problem with science extension of %s' % record[0]
			history.write('%s\n' % xstring)
			print xstring
			return xstring

		dq=get_image(xfile,3,fileref=science_record[0])     # Get the dq 
		if len(dq)==0:
			xstring = 'NOK: Problem with dq extension of %s' % record[0]
			history.write('%s\n' % xstring)
			print xstring
			# 110926 - ksl - modified to allow this to process the image even if there was no dq array
			# return xstring

		if model_type==0:
			model_persistence=calc_persist(x,dq,dt,norm,alpha,gamma,e_fermi,kT)
		elif model_type==1:
			# print 'Model type is 1'
			xfile=read_parameter(parameter_file,'a_gamma')
			# The next lines are awkward, because the parameter file is read multiple times
			if i==0:
				history.write('! Reference file containing spatially-averaged persistence model: %s' %  xfile)
			model_persistence=make_persistence_image(x,cur_sci_exp,dt,xfile)
		elif model_type==2:
			# print 'Model type is 2'
			xfile=read_parameter(parameter_file,'fermi')
			if i==0:
				history.write('! Reference file containing spatially-averaged persistence model: %s' %  xfile)
			model_persistence=make_persistence_image(x,cur_sci_exp,dt,xfile)
		else:
			print 'Error: subtract_persist: Unknown model type %d' % model_type
			return 'NOK'

		values=how_much(model_persistence)
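		# Keep, pixel by pixel, the largest predicted persistence seen so far, together
		# with the stimulus value and the elapsed time that produced it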

		if i==0:
			persist=model_persistence
			stimulus=x   # This is an array which contains the maximum counts in a pixel
			xtimes=numpy.ones_like(persist)
			xtimes=xtimes*dt # This is an array containing the delta time at which the stimulus occurred
		else:
			xpersist=model_persistence
			stimulus=numpy.select([xpersist>persist],[x],default=stimulus)
			xtimes=numpy.select([xpersist>persist],[dt],default=xtimes)
			persist=numpy.select([xpersist>persist],[xpersist],default=persist)
		

		# Get some elementary statistics on the stimulus
		xvalues=get_stats(x,70000)

		history.write('\nsubtract_persist: Stimulus by %30s from program %s Visit %s\n' % (record[0],cur_progid,cur_visit))
		history.write('\nsubtract_persist: The filter was %s and exposure was %6.1f for target %s\n' % (cur_sci_fil,cur_sci_exp,cur_sci_obj))
		history.write('subtract_persist: The exposure was %8.0f s earlier than the current exposure\n' % dt)
		history.write('subtract_persist: The median value in the stimulus image was %6.1f and the number of saturated pixels was %d\n' % (xvalues[0],xvalues[1]))
		history.write('subtract_persist:   The maximum value for persistence is %f\n'   % values[4])
		history.write('subtract_persist:    The median value for persistence is %f\n'   % values[5])
		history.write('subtract_persist: 90 percent of persist values less than %f\n'   % values[6]) 
		history.write('subtract_persist: 99 percent of persist values less than %f\n'   % values[7]) 
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.10 e/s\n' % (values[1],values[1]*100./values[0]))
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.03 e/s\n' % (values[2],values[2]*100./values[0]))
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.01 e/s\n' % (values[3],values[3]*100./values[0]))
		string = 'subtract_persist: Finished (%2d of %2d) %s' % (i+1,len(records)-1,record[0])
		#130909 Removed print statement as unnecessary.  The string still goes to the history file
		# print string
		history.write('%s\n' % string)

		# Summarize the stimulus, printing out the filename, prog_id, visit_name, target, dt and the number of pixels above saturation of 70000
		scan='No'
		if record[4]=='scan':
			scan='Yes'

		stimulus_summary='Stimulus: %40s %10s %10s %20s %8.0f %3d %3s %6s\n' % (record[0],cur_progid,cur_visit,cur_sci_obj,dt,xvalues[1],record[9],scan)
		history.write('! %s\n' % stimulus_summary)

		if i==last_external:
			ext_values=how_much(persist)
			ext_persist=numpy.copy(persist)

		i=i+1
	
	# This is the end of the loop where the persistence model is calculated

	# First report on the external persistence for this file;

	# Now apply the fix to account for spatial variations in persistence
	if xynorm !='' and numpy.shape(xcorr)==numpy.shape(persist):
		persist=persist*xcorr


	if len(ext_values)>0:

		f1=ext_values[1]*100./ext_values[0]
		f2=ext_values[2]*100./ext_values[0]
		f3=ext_values[3]*100./ext_values[0]
		emeasure='%6.2f %6.2f %6.2f' % (f1,f2,f3)

		history.write('\nsubtract_persist: Estimate of persistence from earlier visits\n')
		history.write('subtract_persist:   The maximum value for persistence is %f\n'   % ext_values[4])
		history.write('subtract_persist:    The median value for persistence is %f\n'   % ext_values[5])
		history.write('subtract_persist: 90 percent of persist values less than %f\n'   % ext_values[6]) 
		history.write('subtract_persist: 99 percent of persist values less than %f\n'   % ext_values[7]) 
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.10 e/s\n' % (ext_values[1],f1))
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.03 e/s\n' % (ext_values[2],f2))
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.01 e/s\n' % (ext_values[3],f3))
	else:
		emeasure='%6.2f %6.2f %6.2f' % (0 ,0,0)
		history.write('\nsubtract_persist: This exposure has no persistence from earlier visits.  All persistence is self-induced\n')



	# Now evaluate the total persistence

	values=how_much(persist)


	f1=values[1]*100./values[0]
	f2=values[2]*100./values[0]
	f3=values[3]*100./values[0]
	measure='%6.2f %6.2f %6.2f' % (f1,f2,f3)


	history.write('\nsubtract_persist: Estimate of total persistence for this file\n')
	history.write('subtract_persist:   The maximum value for persistence is %f\n'   % values[4])
	history.write('subtract_persist:    The median value for persistence is %f\n'   % values[5])
	history.write('subtract_persist: 90 percent of persist values less than %f\n'   % values[6]) 
	history.write('subtract_persist: 99 percent of persist values less than %f\n'   % values[7]) 
	history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.10 e/s\n' % (values[1],f1))
	history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.03 e/s\n' % (values[2],f2))
	history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.01 e/s\n' % (values[3],f3))




	history.write('! PersistenceSum: External   %s\n'% emeasure)
	history.write('! PersistenceSum: Total      %s\n'% measure)


	# Now write out all of the new fits files

	# First find out the units of the science image

	units=get_keyword(science_record[0],1,'bunit')
	# print 'test',units
	if units[0]=='COUNTS/S':
		print 'Reducing model to match units for dataset %s to match %s ' % (dataset,units)
		persist/=2.4

	# subtract and write out the corrected image
	science=get_image(science_record[0],1,'no')
	original=numpy.copy(science)
	science=science-persist


	xname=parse_fitsname(science_record[0])

	persist_file=path+dataset+'_persist.fits'
	ext_persist_file=path+dataset+'_extper.fits'

	# Note: Do not use a name with an extension like flt.fits because it will interfere with making lists
	corrected_file=path+dataset+'_flt_cor.fits'
	stimulus_file=path+dataset+'_stim.fits'
	time_file=path+dataset+'_dt.fits'

	rewrite_fits(xname[0],persist_file,1,persist)
	rewrite_fits(xname[0],corrected_file,1,science)
	rewrite_fits(xname[0],stimulus_file,1,stimulus)
	# 140606 - Fix added to put stimulus file in the correct units.
	put_keyword(stimulus_file,1,'bunit','ELECTRONS')
	rewrite_fits(xname[0],time_file,1,xtimes)
	if len(ext_persist)>0:
		rewrite_fits(xname[0],ext_persist_file,1,ext_persist)

	# This completes the section which writes out all of the fits files
		
	# Get statistics on the images and make the 4 panel plot 

	xmed=numpy.median(original)
	zmed=numpy.median(persist)
	zmax=numpy.max(persist)


	pylab.figure(1,[12,12])
	pylab.title(dataset)
	pylab.subplot(221)
	pylab.imshow(original,origin='lower',cmap=pylab.cm.gray,vmin=xmed-0.1,vmax=xmed+0.1)
	pylab.title('Original')

	pylab.subplot(222)
	pylab.imshow(science,origin='lower',cmap=pylab.cm.gray,vmin=xmed-0.1,vmax=xmed+0.1)
	pylab.title('Subtracted')

	pylab.subplot(223)
	pylab.imshow(persist,origin='lower',cmap=pylab.cm.gray,vmin=zmed-0.1,vmax=zmed+0.1)
	pylab.title('Total Persistence')

	if len(ext_persist)>0:
		pylab.subplot(224)
		pylab.imshow(ext_persist,origin='lower',cmap=pylab.cm.gray,vmin=zmed-0.1,vmax=zmed+0.1)
		pylab.title('External Persistence')
	else:
		pylab.subplot(224)
		pylab.imshow(stimulus,origin='lower',cmap=pylab.cm.gray,vmin=0.0,vmax=200000)
		pylab.title('Stimulus')

	fig1=path+'Figs/'+dataset+'_subtract.png'


	if os.path.isfile(fig1):
		os.remove(fig1)
	pylab.savefig(fig1)
	os.chmod(fig1,0770)

	# Eliminated to prevent an error on linux having to do with tkinter
	# pylab.close('all')

	if ds9=='yes':
		LoadFrame(science_record[0],1,0,2,'histequ')
		LoadFrame(persist_file,2,0,2,'histequ')
		LoadFrame(corrected_file,3,0,2,'histequ')
		if len(ext_persist)>0:
			LoadFrame(ext_persist_file,4,0,2,'histequ')
		else:
			LoadFrame(stimulus_file,4,0,1e5,'histequ')

	# Finished plots

	# Finished everything so wrap it up
	history.write('# Finished Persistence processing of file %s\n' % science_record[1])
	history.close()

	# Update the summary file
	string='%20s %20s' % (emeasure,measure)
	per_list.update_summary(dataset,'Persist',string,fileroot,append='no')

	return string
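The calc_persist routine called above for model_type=0 is not included in these examples.  The sketch below is only a hedged guess at its form, assuming a modified Fermi distribution in the stimulus x (in electrons) times a power-law decay in the time dt since the stimulus; the 1000 s reference time, the exponents, and the DQ handling are assumptions, not taken from the source.

import numpy

def calc_persist_sketch(x, dq, dt, norm, alpha, gamma, e_fermi, kT):
	'''Hypothetical modified-Fermi persistence model in e/s for a stimulus image x (electrons)
	observed dt seconds after the stimulus ended.  Not the pipeline's actual calc_persist.'''
	x = numpy.maximum(x, 0.)                            # guard against negative pixel values
	fermi = 1. / (numpy.exp((e_fermi - x) / kT) + 1.)   # turn-on near full well
	shape = (x / e_fermi) ** alpha                      # slow growth above full well
	decay = (dt / 1000.) ** (-gamma)                    # power-law fading with time
	persist = norm * fermi * shape * decay
	if dq is not None and numpy.shape(dq) == numpy.shape(x):
		persist = numpy.select([dq > 0], [0.], default=persist)  # zero flagged pixels (assumption)
	return persist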
Example #3
def do_dataset(dataset='ia21h2eaq', fileroot='observations', local='no'):
    '''
	Make html files for a single dataset

	110203	ksl	Added local switch which controls where the
			real working directory is to make testing
			easier
	140307	ksl	Added information about scans and subarray observations
	'''

    record = per_list.read_ordered_list_one(fileroot, dataset)
    if len(record) == 0:
        return 'NOK: make_html failed because it could not find dataset %s' % dataset

    work_dir = per_list.set_path(
        record[0], 'no',
        local)  # This will be the Persist directory for the dataset
    fig_dir = work_dir + '/Figs/'  # This will be the directory where figures are stored

    html_filename = work_dir + dataset + '_persist.html'

    # page=markup.page()
    title = 'Persistence Removal Evaluation for dataset %s' % dataset
    page = html.begin(title)

    # page.init(title='Persistence Removal Evaluation for dataset %s' % dataset)
    # page.h1('Persistence Removal Evaluation for %s' % dataset)

    # page.p('''This page contains images for the evaluation of how well persistence has been removed from an image''')
    page = page + html.paragraph(
        '''This page contains images for the evaluation of how well persistence has been removed from an image'''
    )

    # Look for the history file for this dataset

    history_file = dataset + '.txt'

    if os.path.exists(work_dir + history_file):
        string = '''The history file for the processing of this dataset is '''
        string = string + html.link("here", href=history_file)
        page = page + html.paragraph(string)

        # read history simply returns all of the lines in the history file that begin with !
        # And so any processing of these lines still has to be done
        lines, table1, table2 = read_history(work_dir + history_file)
        for line in lines:
            page = page + html.paragraph(line)
        if len(table1) > 0:
            page = page + html.h2(
                'Earlier exposures that could affect this image')
            page = page + html.table(table1)
        if len(table2) > 0:
            page = page + html.h2(
                'External and total persistence for this image')
            string = '''External persistence is persistence from previous visits; internal persistence
			is persistence induced by exposures in this visit.  Total persistence includes both
			internal and external persistence.  Generally, self-induced or internal persistence is
			only important if dithers larger than the PSF have been used within the visit.'''
            page = page + html.paragraph(string)
            page = page + html.table(table2)

    else:
        page = page + html.paragraph(
            ''' The history file for this dataset appears to be missing.  Check that the file has been processed'''
        )

    page = page + html.hline(size='3', width='100')

    string = '''The next 4-panel image shows the original flt image (upper left), the corrected flt image (upper right),
	the persistence model (lower left) and the stimulus (lower right).  The stimulus is simply the image constructed
	from the maximum value in electrons of any of the images that went into the persistence model.'''

    # Look for the summary image

    xname = dataset + '_subtract.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=600,height=600,alt="Thumbnails")
        page = page + html.image(
            image='Figs/' + xname, width=600, height=600, alt="Thumbnails")
    else:
        # page.p('''The summary image is missing''')
        page = page + html.paragraph('''The summary image is missing''')

    # page.hr(size='3',width='100%')
    page = page + html.hline(size='3', width='100')

    # Now include the evaluation images

    string = '''As a qualitative indicator of how well the persistence correction has worked, some of the regions with
	the highest predicted persistence have been examined. 
	The next two images give an indication of how well the persistence has been subtracted from the images.
	Both images have the original data in red and the persistence-subtracted data in blue.  The first image is
	a plot of flux vs the persistence model, the second is flux as a function of the stimulus. Ideally the blue
	curves would all center around 0. The utility of these plots depends on how isolated the persistence peaks
	are from stars in the image. If these plots are empty, no good regions for evaluating persistence were found.'''

    page = page + html.paragraph(string)

    xname = dataset + '.sum1.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=300,height=300,alt="Thumbnails")
        page = page + html.image(
            'Figs/' + xname, width=300, height=300, alt="Thumbnails")
    else:
        # page.p('''The first evaluation image showing the subtraction is missing''')
        page = page + html.paragraph(
            '''The first evaluation image showing the subtraction is missing'''
        )

    xname = dataset + '.sum2.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=300,height=300,alt="Thumbnails")
        page = page + html.image(
            'Figs/' + xname, width=300, height=300, alt="Thumbnails")
    else:
        # page.p('''The second evaluation image showing the subtraction is missing''')
        page = page + html.paragraph(
            '''The second evaluation image showing the subtraction is missing'''
        )

    # page.hr(size='3',width='100%')
    page = page + html.hline(size=3, width=100)

    # Look for the peaks summary

    string = '''This figure indicates what regions were selected for evaluation. The two panels are
	identical except the regions selected are indicated in the lower panel. '''

    page = page + html.paragraph(string)

    xname = dataset + '_persist.peaks.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=600,height=1000,alt="Thumbnails")
        page = page + html.image(
            'Figs/' + xname, width=900, height=900, alt="Thumbnails")
    else:
        # page.p('''The summary figure for peak identification is missing''')
        page = page + html.paragraph(
            '''The summary figure for peak identification is missing''')

    # Now find all of the individual peak files:

    searchstring = fig_dir + dataset + '.peak.*.1.png'
    print searchstring

    try:
        peaks_file = work_dir + dataset + '_persist.peaks.dat'
        p = open(peaks_file, 'r')
        lines = p.readlines()
        p.close()
    except IOError:
        print 'Warning: %s not found' % peaks_file
        lines = []

    xlines = []
    for one in lines:
        one = one.strip()
        if len(one) > 0 and one[0] != '#':
            xlines.append(one)

    if len(xlines) > 0:
        string = '''The results for individual regions are shown below. The four panels are a subsection of the original flt file, the predicted persistence in that region, the persistence subtracted flt file, and a plot of pixel values as a function of predicted persistence in the region. Green points are the original values; yellow points are the corrected values. The red and blue lines show the mean values in the original and corrected images, respectively.'''
        page = page + html.paragraph(string)
        page = page + html.hline(size='3', width='100')

        for one in xlines:
            word = one.split()
            x = int(word[0])
            y = int(word[1])
            z = eval(word[2])
            zz = eval(word[3])
            # page.p('Persistence at x = %3d, y=%3d' %(x,y))
            page = page + html.paragraph(
                'Persistence at x = %3d, y=%3d is about %6.3f e/s compared to science image flux of %6.3f e/s'
                % (x, y, z, zz))
            xname = '%s.peak.%03d_%03d.1.png' % (dataset, x, y)
            if os.path.exists(fig_dir + xname):
                # page.img(src='Figs/'+xname,width=400,height=400,alt="Thumbnails")
                page = page + html.image(
                    'Figs/' + xname, width=400, height=400, alt="Thumbnails")
            else:
                # page.p('Figure %s not present' % (work_dir+xname))
                page = page + html.paragraph('Figure %s not present' %
                                             (work_dir + xname))
            # page.hr(size='3',width='100%')
            page = page + html.hline(size='3', width='100')
    else:
        string = '''Unfortunately, no good regions for evaluating persistence were found.'''
        page = page + html.paragraph(string)
        page = page + html.hline(size='3', width='100')

    page = page + html.end()

    # Open the html file with the appropriate permissions, and then write it
    g = per_list.open_file(html_filename)
    g.write('%s' % page)
    g.close()

    return 'OK: subtract_html: %s' % html_filename
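The html helper module used above (begin, paragraph, link, h2, table, image, hline, end) is not included in these examples.  A minimal stand-in for the subset of calls the code relies on, assuming each function simply returns an HTML fragment as a string, might look like this sketch:

# Hypothetical minimal html module matching the calls made in do_dataset above

def begin(title):
    return '<html>\n<head><title>%s</title></head>\n<body>\n<h1>%s</h1>\n' % (title, title)

def end():
    return '</body>\n</html>\n'

def paragraph(text):
    return '<p>%s</p>\n' % text

def h2(text):
    return '<h2>%s</h2>\n' % text

def link(text, href=''):
    return '<a href="%s">%s</a>' % (href, text)

def hline(size='3', width='100'):
    return '<hr size="%s" width="%s%%">\n' % (size, width)

def image(image, width=600, height=600, alt=''):
    return '<img src="%s" width="%d" height="%d" alt="%s">\n' % (image, int(width), int(height), alt)

def table(rows):
    # rows is assumed to be a list of rows, each itself a list of cell values
    out = '<table border="1">\n'
    for row in rows:
        out = out + '<tr>' + ''.join('<td>%s</td>' % cell for cell in row) + '</tr>\n'
    return out + '</table>\n'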
Example #4
def doit(filename='foo.fits',mask_file='none',box=3,maxno=25,history_file='history.txt',local='no'):
	'''
	Main routine which handles finding of the peaks and creating plots which indicate
	how well the persistence has been subtracted.  It attempts to locate regions with
	a significant amount of persistence, but to avoid regions where the flux from
	stars in the image under consideration is large.
	
	The variables are as follows:
		filename	The persist model
		mask_file	The original science image which here is used to
				identify regions which are contaminated by stars
		box		The size of the boxcar function used to smooth the persist
				image.  
		maxno		The maximum number of peaks to find
		history_file	The place to append history information
		local		The standard way in which to indicate where output files
				should be stored.
		
	
	The routine reads filename and smooths it with a boxcar function with a kernel of size
	box.  It also reads the mask_file, the original science image. This is used in find_peaks
	to exclude regions of the image that have very high count rates, since it will be impossible
	to see persistence against a bright source.  So that bad pixels are also excluded,
	a large number is added to the mask when a pixel has bad data quality.

	The routine also creates a plot for each of the peaks that indicates where
	the peaks are.  The individual subtractions are not evaluated here, see
	subtract_eval instead.

	111011	ksl	Tried to clean this routine up and make it run a little better
	130204	ksl	Modified the data quality criterion to eliminate the possibility
			that the TDF had been set, which causes all of the pixels to
			be declared bad.
	130913  ksl     Restored the use of DQ flags due to the problem with loss of
			lock, which in certain cases, particularly darks, caused all
			pixels to have bad DQ.


	'''

	history=open(history_file,'a')
	history.write('# Starting peaks on file %s using  %s as mask \n' % (filename,mask_file))

	# determine from the filename where we want
	# to put the outputs

	work_dir=per_list.set_path(filename,'yes',local)  # This will be the Persist directory for the dataset
	fig_dir=work_dir+'/Figs/'



	# Get a name for the root of any output files
	try:
		x=filename.split('/')
		name=x[len(x)-1]
		# print 'name',name
		j=string.rindex(name,'.')
		outroot=name[0:j]
		# print 'OK got',outroot
	except ValueError:
		outroot=filename
		print 'File had no extensions, using entire name'

	# Read the persistence file
	x=per_fits.get_image(filename,1)
	if len(x)==0:
		print 'Error: peaks:  Nothing to do since no data returned for %s' % filename
		history.write('Peaks: Nothing to do since no data returned for %s\n' % filename)
		return 'NOK'

	# Now read the flt file, which will be used to create a mask file in an attempt 
	# to remove from considerations regions of the image where there is persistence which
	# would be overwhelmed by light in the current image.
	# Read both the image and the data quality
	# extension.  The data quality extension is used to help assure that we are tracking
	# persistence from light in earlier exposures.  For these pixels, we set the value
	# in the persistence image to zero, before the persistence image is smoothed.
	# Then smooth the persistence image so that peak finding is easier

	if mask_file!='none':
		mask=per_fits.get_image(mask_file,1,rescale='e/s')
		xmask=smooth(mask,box)
		dq=per_fits.get_image(mask_file,3,rescale='no')
		z=numpy.select([dq>0],[0],default=x)
		z=smooth(z,box)
		history.write('Peaks: Using %s to mask exposure\n' % mask_file)
	else:
		history.write('Peaks: No mask from earlier undithered exposures\n')
		z=smooth(x,box)
		mask='none'

	# After having set everything up call the routine that locates the peaks to use to see
	# how well the subtraction works

	sources=find_peaks(z,mask,maxno=maxno)

	# Write out the results in a text file containing
	# the x and y positions

	outfile=work_dir+outroot+'.peaks.dat'

	# 	print 'Error: Could not open %s' % outfile
	
	g=per_list.open_file(outfile)

	g.write('# peaks in %s\n' % filename)
	for source in sources:
		xflux=xmask[source[0],source[1]]
		g.write('%5d %5d  %6.3f %6.3f\n' % (source[1],source[0],source[2],xflux))
	g.close()

	# Write out a set of ds9 region files of the sources
	make_reg(sources,work_dir+outroot+'.peaks.reg')


	# Plot the results. This creates two images, one with the regions selected for detailed
	# analysis superposed.

	xmed=numpy.median(x)
	pylab.figure(1,(8,12))
	pylab.clf()
	pylab.subplot(211)
	pylab.imshow(x,origin='lower',cmap=pylab.cm.gray,vmin=xmed-0.05,vmax=xmed+0.05)
	plothandle=pylab.subplot(212)
	ysize,xsize=x.shape
	pylab.imshow(z,origin='lower',extent=[0,xsize,0,ysize],cmap=pylab.cm.gray,vmin=xmed-0.05,vmax=xmed+0.05)

	# For reasons I don't quite understand, one needs to add 1 to both axes
	# To get the circles to line up, as if the elements were being counted
	# from one as in IRAF.
	for source in sources:
		circle(source[1]+1,source[0]+1,plothandle)

	# Note that this really needs to be in the Figs subdirectory

	figure_name=fig_dir+outroot+'.peaks.png'
	if os.path.isfile(figure_name):
		os.remove(figure_name)
	pylab.savefig(figure_name)
	os.chmod(figure_name,0770)

	# Generation of this summary plot is now complete.


	# 110325 - This next line was generating errors with the backend I was using.  The desired behavior
	# is that there be no window created when the program is run from the command line.  
	# The error seems to occur # when you try to close the figure without first drawing it. For now 
	# I have deleted the next line.  The behaviour I am currently seeing is that the window
	# appears as if one had issued a draw command, i.e. the window stays up, but the program
	# continues, unlike show()
	# pylab.close('all')

	history.write('# Finished peaks on file %s\n' % filename)

	return  'OK'
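The smooth routine used above is not shown in these examples.  A minimal boxcar-smoothing sketch, assuming it is just a 2-D moving average with a box x box kernel (scipy's uniform_filter does exactly that):

import numpy
from scipy.ndimage import uniform_filter

def smooth_sketch(image, box=3):
	'''Hypothetical boxcar smoothing of a 2-D image with a box x box kernel.'''
	return uniform_filter(numpy.asarray(image, dtype=float), size=box)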
Example #5
def do_dataset(dataset='ib6v19bzq', radius=50, local='no'):
    '''
	Examine how well one has succeeded in subtracting persistence
	from a single dataset, assuming that all the actual subtraction
	has been done and the peaks file is in place.

	radius here is the half-size of the box that is plotted.

	110107	Changed the name of the output figure files so that it 
		would be easier to keep track of the files that had
		been created.  Also removed some text from figure.
	110203	ksl	Added local switch so testing would be easier
	'''

    # Read information about this dataset from the observations.ls file
    # Note that the file name is hardcoded here
    record = per_list.read_ordered_list_one('observations', dataset)

    # Set up the path, and open the history file

    path = per_list.set_path(record[0], 'no', local)
    fig_path = path + '/Figs/'
    history = open(path + dataset + '.txt', 'a')
    history.write('Start subtract_eval for dataset %s\n' % dataset)

    # Get complete names for all of the files that are to be used.
    file_flt, ext, all = per_fits.parse_fitsname(record[0])
    file_persist = path + dataset + '_persist.fits'
    file_cor = path + dataset + '_flt_cor.fits'
    file_stim = path + dataset + '_stim.fits'
    file_xy = path + dataset + '_persist.peaks.dat'

    # Check that all of these files exist

    ready = True

    if os.path.exists(file_flt) == False:
        print 'Error: subtract_eval.do_dataset: %s does not exist' % file_flt
        ready = False

    if os.path.exists(file_persist) == False:
        print 'Error: subtract_eval.do_dataset: %s does not exist' % file_persist
        ready = False

    if os.path.exists(file_cor) == False:
        print 'Error: subtract_eval.do_dataset: %s does not exist' % file_cor
        ready = False

    if os.path.exists(file_stim) == False:
        print 'Error: subtract_eval.do_dataset: %s does not exist' % file_stim
        ready = False

    if os.path.exists(file_xy) == False:
        print 'Error: subtract_eval.do_dataset: %s does not exist' % file_xy
        ready = False

    if ready == False:
        return 'Error: subtract_eval.do_dataset: Some files are missing'

    # At this point we know all of the necessary files exist

    # Since we are ready we should now delete all the figures from previous
    # runs.  Note that this command is dangerous.

    png_files = glob.glob('%s/%s.peak*png' % (fig_path, dataset))
    for one in png_files:
        os.remove(one)

    # Read the xy positions from file (produced by peaks.py)

    xy = read_peaks(file_xy)

    # Read all of the images
    flt = per_fits.get_image_ext(file_flt, 1)
    per = per_fits.get_image_ext(file_persist, 1)
    cor = per_fits.get_image_ext(file_cor, 1)
    stim = per_fits.get_image_ext(file_stim, 1)

    all_orig = []  # This is a place to store histograms of the original data
    all_corr = []  # This is a place to store histograms of the corrected data

    # Set up several arrays that are used in histogram creation (as the x axes)
    # set up the stim array

    # stim_hist=[1.,3.8]
    stim_hist = [1., 4.5]  # 30,000 electrons
    # x=4.
    dx = 0.2
    x = stim_hist[1] + dx  # We treat everything below 30,000 as background
    while x <= 7:
        stim_hist.append(x)
        x = x + dx

    stim_hist = numpy.array(stim_hist)

    stim_hist = 10**stim_hist
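
    # stim_hist now holds logarithmic stimulus bin edges; the first bin (up to ~30,000
    # electrons) is treated as background, and later bins run in 0.2 dex steps up to 1e7 electrons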

    all_sorig = []  # This is a place to store histograms of the original data as a function of stimulus
    all_scorr = []  # This is a place to store histograms of the corrected data as a function of stimulus

    # Set up the per_hist array
    per_hist = []
    qper = 0
    dper = 0.02
    while qper <= 0.3:
        per_hist.append(qper)
        qper = qper + dper
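
    # per_hist holds persistence bin edges from 0 to about 0.3 e/s in 0.02 e/s steps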

    source_no = 0
    for one in xy:  # Main loop for each point
        source_no = source_no + 1
        x = one[0]
        y = one[1]

        # Make the stamps that are needed for each file
        xmin = one[0] - radius
        xmax = one[0] + radius
        ymin = one[1] - radius
        ymax = one[1] + radius

        ysize, xsize = numpy.shape(flt)
        if ymin < 1 or xmin < 1 or xmax > xsize or ymax > ysize:
            continue

        xflt = flt[ymin - 1:ymax, xmin - 1:xmax]
        xper = per[ymin - 1:ymax, xmin - 1:xmax]
        xcor = cor[ymin - 1:ymax, xmin - 1:xmax]
        xstim = stim[ymin - 1:ymax, xmin - 1:xmax]

        # OK at this point we have all the stamps; now flatten them

        xxflt = numpy.ravel(xflt)
        xxcor = numpy.ravel(xcor)
        xxper = numpy.ravel(xper)
        xxstim = numpy.ravel(xstim)

        med_flt = numpy.median(xxflt)
        max_per = numpy.max(xxper)
        zmin = med_flt - 0.05
        zmax = med_flt + 0.1

        fig_root = path + 'Figs/%s.peak.%03d_%03d.' % (dataset, x, y)

        # Create figure containing the original image, the persistence image and the subtracted
        # image surrounding a narrow region.  This is the 4 panel figure that appears in the
        # summary html file for each region

        pylab.figure(11, [8, 8])
        pylab.clf()
        pylab.subplot(221)
        pylab.imshow(xflt,
                     origin='lower',
                     cmap=pylab.cm.gray,
                     vmin=zmin,
                     vmax=zmax)
        pylab.title('Original')
        pylab.subplot(222)
        # pylab.imshow(xper,origin='lower',cmap=pylab.cm.gray,vmin=0.0,vmax=0.1)
        pylab.imshow(xper,
                     origin='lower',
                     cmap=pylab.cm.gray,
                     vmin=-0.05,
                     vmax=0.1)
        pylab.title('Model')
        pylab.subplot(223)
        pylab.imshow(xcor,
                     origin='lower',
                     cmap=pylab.cm.gray,
                     vmin=zmin,
                     vmax=zmax)
        pylab.title('Corrected')

        # Plot the figure that shows the observed rate as a function of estimate persistence

        pylab.subplot(224)
        pylab.plot(xxper, xxflt, '.', color='green')
        pylab.plot(xxper, xxcor, '.', color='yellow')

        # This constructs histograms of the median value of the original and subtracted
        # pixels as a function of the estimated persistence

        orig = []
        corr = []

        ii = 0
        while ii < len(per_hist) - 1:
            value = get_stats(xxper, xxflt, per_hist[ii], per_hist[ii + 1])
            orig.append(value)
            value = get_stats(xxper, xxcor, per_hist[ii], per_hist[ii + 1])
            corr.append(value)
            ii = ii + 1

        # Append the results for this particular point to one for all of the points
        # This is used in the summary slide for the entire field

        all_orig.append(orig)
        all_corr.append(corr)

        # Note that per_hist has one more element than the other arrays so must allow for this
        pylab.plot(per_hist[0:len(per_hist) - 1],
                   orig,
                   ls='steps-post',
                   lw=4,
                   color='red')
        pylab.plot(per_hist[0:len(per_hist) - 1],
                   corr,
                   ls='steps-post',
                   lw=4,
                   color='blue')

        pylab.axis([0, max_per + 0.01, med_flt - 0.2, med_flt + 0.3])
        pylab.xlabel('Est. Persistence (e/s)')
        pylab.ylabel('Flux (e/s)')

        # Finished with this histogram; now write out the figure

        figure_name = '%s%d.png' % (fig_root, 1)
        if os.path.isfile(figure_name):
            os.remove(figure_name)
        pylab.savefig(figure_name)
        os.chmod(figure_name, 0770)

        # Plot the original and subtracted pixels as a function of distance from a center positions
        # Create an array that contains the distance from the center for each pixel

        z = numpy.arange(-radius, radius + 1, 1)
        xx, yy = numpy.meshgrid(z, z)
        zz = xx * xx + yy * yy
        zzz = numpy.sqrt(zz)
        zzzz = numpy.ravel(zzz)

        pylab.figure(13, [6, 6])
        pylab.clf()
        pylab.plot(zzzz, xxflt, 'o')
        pylab.plot(zzzz, xxcor, 'o')

        # This constructs histograms of the median value of the original and subtracted
        # pixels as a function of distance from the source
        # Note that the size here that is plotted is not determined by radius, but is
        # hardcoded to be 20 pixels. This is typically smaller than radius because
        # we want to see how well a single star is subtracted.

        meds = []
        med_corr = []
        rr = []
        r = 0
        dr = 3
        rmax = 20
        while r < rmax:
            value = get_stats(zzzz, xxflt, r, r + dr)
            meds.append(value)
            value = get_stats(zzzz, xxcor, r, r + dr)
            med_corr.append(value)
            rr.append(r + 0.5 * dr)
            r = r + dr

        pylab.plot(rr, meds, ls='steps-mid', lw=3)
        pylab.plot(rr, med_corr, ls='steps-mid', lw=3)

        pylab.axis([0, rmax, med_flt - 0.2, med_flt + 0.3])
        pylab.xlabel('Radius (pixels)')
        pylab.ylabel('Flux (e/s)')

        figure_name = '%s%d.png' % (fig_root, 3)
        if os.path.isfile(figure_name):
            os.remove(figure_name)
        pylab.savefig(figure_name)
        os.chmod(figure_name, 0770)

        # 110622 - Eliminated to fix a problem on linux
        # pylab.close('all')

        # next section gathers information about everything as a function of the stimulus

        i = 0
        sorig = []
        scorr = []
        while i < len(stim_hist) - 1:
            value = get_stats(xxstim, xxflt, stim_hist[i], stim_hist[i + 1])
            sorig.append(value)
            value = get_stats(xxstim, xxcor, stim_hist[i], stim_hist[i + 1])
            scorr.append(value)
            i = i + 1
        all_sorig.append(sorig)
        all_scorr.append(scorr)

        # This ends the main loop for each data point.

    # Now make the first summary figure, which is a plot of flux as a function of the
    # persistence model

    fig_root = path + 'Figs/%s.sum1' % (dataset)
    pylab.figure(14, [6, 6])
    pylab.clf()

    i = 0
    xmax = numpy.max(per_hist)
    ymax = (-1000)
    ymin = 1000
    per_hist = numpy.array(per_hist)
    per_hist = per_hist + 0.5 * dper

    while i < len(all_corr):
        corr = numpy.array(all_corr[i])
        orig = numpy.array(all_orig[i])

        corr = corr - corr[0]
        orig = orig - orig[0]
        k = 0
        while k < len(orig):
            if orig[k] < -900:
                break
            k = k + 1
        k = k - 1
        if k > 0:

            pylab.plot(per_hist[0:k], orig[0:k], 'ro-', lw=2)
            pylab.plot(per_hist[0:k], corr[0:k], 'bo-', lw=2)
            zmin = numpy.min(corr[0:k])
            if zmin < ymin:
                ymin = zmin
            zmax = numpy.max(orig[0:k])
            if zmax > ymax:
                ymax = zmax
        else:
            print 'Error: subtract_eval.do_dataset: there is a problem, because k=0'
        i = i + 1
    pylab.axis([0, xmax + 0.05, ymin - 0.05, ymax + 0.05])
    pylab.xlabel('Est. Persistence (e/s)')
    pylab.ylabel('Flux (e/s)')

    figure_name = fig_root + '.png'
    if os.path.isfile(figure_name):
        os.remove(figure_name)
    pylab.savefig(figure_name)
    os.chmod(figure_name, 0770)

    # 110622 - Eliminated to fix a problem on linux
    # pylab.close(14)

    # Now make the second summary figure showing the flux as a function of the stimulus.
    # Note that because these occurred at different times you will get a range here at
    # any stimulus

    # Construct the x axis for this figure
    i = 0
    xstim_hist = []
    while i < len(stim_hist) - 1:
        xstim_hist.append(0.5 * (stim_hist[i] + stim_hist[i + 1]))
        i = i + 1

    fig_root = path + 'Figs/%s.sum2' % (dataset)
    pylab.figure(15, [6, 6])
    pylab.clf()

    # Now go through each row in the arrays and plot the results
    i = 0
    while i < len(all_corr):
        corr = numpy.array(all_scorr[i])
        orig = numpy.array(all_sorig[i])
        corr = corr - corr[0]
        orig = orig - orig[0]
        k = 0
        while k < len(orig):
            if orig[k] < -900:
                break
            k = k + 1
        k = k - 1
        if k > 0:
            pylab.semilogx(xstim_hist[0:k], orig[0:k], 'ro-', lw=2)
            pylab.semilogx(xstim_hist[0:k], corr[0:k], 'bo-', lw=2)
            zmin = numpy.min(corr[0:k])
            if zmin < ymin:
                ymin = zmin
            zmax = numpy.max(orig[0:k])
            if zmax > ymax:
                ymax = zmax
        i = i + 1

    pylab.xlabel('Stimulus (e)')
    pylab.ylabel('Flux (e/s)')
    pylab.axis([3e4, 1e7, -0.1, 0.3])

    figure_name = fig_root + '.png'
    if os.path.isfile(figure_name):
        os.remove(figure_name)
    pylab.savefig(figure_name)
    os.chmod(figure_name, 0770)

    # 110622 - Eliminated to fix a problem on linux
    # pylab.close(15)

    history.write('End subtract_eval for dataset %s\n' % dataset)
    history.close()
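The get_stats helper called throughout this example is not shown.  Judging from how it is used (binning one flattened array by another, with a later test for values below -900), a minimal sketch might return the median of the y values whose x values fall in [lo, hi), with a sentinel of -1000 for empty bins; the sentinel value and the exact statistic are assumptions:

import numpy

def get_stats_sketch(x, y, lo, hi):
    '''Hypothetical: median of y where lo <= x < hi, or -1000 if no pixels qualify.'''
    selection = y[(x >= lo) & (x < hi)]
    if len(selection) == 0:
        return -1000.
    return numpy.median(selection)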
Example #6
def steer(argv):
	'''
	This is a steering routine for subtract persist so that options can be exercised from the 
	command line.  See the top level documentation for details

	100907	ksl	Added to begin to automate the subtraction process
	101215	ksl	Moved to a separate routine so that one would be able to split various portions
			of persistence subtraction and evaluation of the results into multiple files
	111114	ksl	Added a command.log to keep track of all of the run_persist commands
	140924	ksl	Updated to allow for various model types
	160103	ksl	Began implementation of multiprocessing
	'''


	log('# Start run_persist  %s\n' % date.get_gmt())
	xstart=time.clock()
	cur_time=date.get_gmt()

	i=1
	dataset_list='none'

	model_type=1
	norm=0.3
	alpha=0.174
	gamma=1.0
	e_fermi=90000
	kT=20000
	fileroot='observations'
	words=[]
	mjd_start=0.0    # A small number for mjd
	mjd_stop=1.e6  # A large number for mjd
	dryrun='no'
	clean='no'
	ds9='no'
	local='no'
	pffile='persist.pf'
	lookback_time=16

	switch='single'
	np=1

	while i<len(argv):
		if argv[i]=='-h':
			print __doc__
			return    
		elif argv[i]=='-model':
			i=i+1
			model_type=int(argv[i])
		elif argv[i]=='-n':
			i=i+1
			norm=eval(argv[i])
		elif argv[i]=='-e':
			i=i+1
			e_fermi=eval(argv[i])
		elif argv[i]=='-kT':
			i=i+1
			kT=eval(argv[i])
		elif argv[i]=='-alpha':
			i=i+1
			alpha=eval(argv[i])
		elif argv[i]=='-gamma':
			i=i+1
			gamma=eval(argv[i])
		elif argv[i]=='-obslist':
			i=i+1
			fileroot=eval(argv[i])
		elif argv[i]=='-many':
			i=i+1
			dataset_list=argv[i]
			switch='many'
			print 'OK you want to evaluate a number of datasets in file %s' % dataset_list
		elif argv[i]=='-all':
			switch='all'
			print 'OK you want to evaluate all the records in the obslist'
		elif argv[i] =='-prog_id':
			i=i+1
			prog_id=int(argv[i])
			switch='prog_id'
			print 'OK, you want to evaluate all the records for program %d' % prog_id
		elif argv[i]=='-start':
			i=i+1
			z=argv[i]
			try:
				mjd_start=float(z)
			except ValueError:
				mjd_start=date.iso2mjd(z)
			if switch !='prog_id':
				switch='all'
		elif argv[i]=='-stop':
			i=i+1
			z=argv[i]
			try:
				mjd_stop=float(z)
			except ValueError:
				mjd_stop=date.iso2mjd(z)
			if switch !='prog_id':
				switch='all'
		elif argv[i]=='-dryrun':
			dryrun='yes'
			print 'OK, This will be a dry run!'
		elif argv[i]=='-clean':
			dryrun='yes'
			clean='yes'
			print 'OK. This run will clean out various Persist directories, and revise the .sum file'
		elif argv[i]=='-ds9':
			ds9='yes'
		elif argv[i]=='-local':
			local='yes'
		elif argv[i]=='-pf':
			i=i+1
			pffile=argv[i]
		elif argv[i]=='-lookback':
			i=i+1
			lookback_time=eval(argv[i])
		elif argv[i]=='-np':
			i=i+1
			np=int(argv[i])
		elif argv[i][0]=='-':
			print 'Error: Unknown switch ---  %s' % argv[i]
			return
		else:
			words.append(argv[i])
		i=i+1

	# At this point all of the options have been processed and we can
	# begin the processing of individual datasets

	# Check that the listfile actually exists, and if not exit with a stern warning

	listfile=fileroot+'.ls'
	if os.path.exists(listfile)==False:
		print 'Error: run_persist.steer - No %s file in run directory.  EXITING!!!' % listfile
		return
	
	# At this point, we are able to determine exactly what datasets to process
	log('# Starting at %s\n' % (cur_time),'command.log')
	log('# Starting at %s\n' % (cur_time))
	string=''
	for one in argv:
		string=string+'%s ' % one
	log('Command:  %s\n' % string,'command.log')
	log('Command:  %s\n' % string)


	datasets=[]
	
	if switch=='single': #  Then we are processing a single file
		datasets.append(words[0])
	elif switch=='all': # Then we are working from the obslist
		records=per_list.read_ordered_list_mjd(fileroot,mjd_start,mjd_stop)
		for record in records:
			datasets.append(record[1])
	elif switch=='many':  # Then we are reading a file with rootnames of the files we want to process
		f=open(dataset_list,'r')
		lines=f.readlines()
		f.close()
		for line in lines:
			x=line.strip()
			if len(x)>0 and x[0]!='#':
				xx=x.split()  #  Take the first word to be the dataset name
				datasets.append(xx[0])
	elif switch=='prog_id':
		records=per_list.read_ordered_list_progid(fileroot,prog_id,mjd_start,mjd_stop)
		for record in records:
			datasets.append(record[1])
	else:
		print 'Error: run_persist: Unknown switch %s'% switch

	# Ok, now, unless this is a dryrun we actually process the data
	ntot=len(datasets)
	print 'There are %d datasets to process' % ntot          

	dry=[]
	if dryrun=='yes':
		for one in datasets:
			record=per_list.read_ordered_list_one(fileroot,one)
			dry.append(record)
		per_list.write_ordered_list('dryrun',dry)
		if clean=='yes':
			# xclean=open('CleanFiles','w')
			# os.chmod('CleanFiles',0770)

			xclean=per_list.open_file('CleanFiles')

			for one in dry:
				xxx=per_list.set_path(one[0])
				xclean.write('rm -r -f %s\n' % xxx)
			xclean.close()
		# return
	elif np==1 or len(datasets)==1:
		n=1
		for one in datasets:
			do_dataset(one,model_type,norm,alpha,gamma,e_fermi,kT,fileroot,ds9,local,pffile,lookback_time)
			print '# Completed dataset %d of %d. Elapsed time is %0.1f s' % (n,ntot,time.clock()-xstart)
			n=n+1
	else:
		print 'There will be %d processes running simultaneously' % np
		jobs=[]
		for one in datasets:
			p=multiprocessing.Process(target=do_dataset,args=(one,model_type,norm,alpha,gamma,e_fermi,kT,fileroot,ds9,local,pffile,lookback_time,))
			jobs.append(p)
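
		# Throttle: start at most np jobs at once, then launch another job each time
		# the number still running drops below np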
		i=0
		while i<np and i<len(jobs):
			print '!Starting %s' % datasets[i]
			one=jobs[i]
			one.start()
			i=i+1

		njobs=get_no_jobs(jobs)

		while i<len(jobs):
			time.sleep(2)
			njobs=get_no_jobs(jobs)
			print 'Running %d jobs, including job %d (%s) of %d total' % (njobs,i,datasets[i-1],len(datasets))
			if njobs<np:
				print 'Starting: ',datasets[i]
				one=jobs[i]
				one.start()
				i=i+1

		for one in jobs:
			one.join()
		print 'Completed multiprocessing'




	per_list.fixup_summary_file(datasets)

	print 'xxx',xstart,time.clock()

	
	dtime=time.clock()-xstart
	cur_time=date.get_gmt()
	log('# End  %s  (Elapsed time for %d datasets was %.1f s, or %.1f s per dataset)\n' % (date.get_gmt(),ntot,dtime,dtime/ntot))
	cur_time=date.get_gmt()
	log('# End  %s  (Elapsed time for %d datasets was %.1f s, or %.1f s per dataset)\n' % (date.get_gmt(),ntot,dtime,dtime/ntot),'command.log')

	log('# Finished at %s\n' % (cur_time))
	log('# Finished at %s\n' % (cur_time),'command.log')
	return
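A few hypothetical invocations, using only switches that steer() above actually parses (the run_persist.py script name comes from the docstring and the command.log entries); the dataset name, program ID and MJD values are purely illustrative:

# Process a single dataset with the time-variable fermi model:
#     python run_persist.py -model 2 ia21h2e9q
# Reprocess everything in observations.ls between two MJDs, four processes at a time:
#     python run_persist.py -model 2 -np 4 -start 55100 -stop 55200 -all
# Dry run for one program ID, listing what would be done without processing anything:
#     python run_persist.py -dryrun -prog_id 12345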