Code Example #1
def make_html(coords=[[100, 200], [300, 400]], fileroot='ib2v09kzq'):
    '''
	Make an html file that gathers all of the images that have been made
	'''

    # page=markup.page()

    title = 'Persistence Removal Evaluation for %s' % fileroot
    page = html.begin(title)

    # page.init(title='Persistence Removal Evaluation for %s' % fileroot)
    # page.h1('Persistence Removal Evaluation for %s' % fileroot)
    # page.p('''This page contains images for the evaluation of how well persistence has been removed from an image''')
    page = page + html.paragraph(
        '''This page contains images for the evaluation of how well persistence has been removed from an image'''
    )

    # page.hr(size='3',width='100%')
    page = page + html.hline(size='3', width='100')

    for coord in coords:

        # page.p('Images for positions: %3d  %3d' % (coord[0],coord[1]))
        page = page + html.paragraph('Images for positions: %3d  %3d' %
                                     (coord[0], coord[1]))
        fig1 = 'Figs/Fig_%s_%04d_%04d_1.png' % (fileroot, coord[0], coord[1])
        fig2 = 'Figs/Fig_%s_%04d_%04d_2.png' % (fileroot, coord[0], coord[1])
        fig3 = 'Figs/Fig_%s_%04d_%04d_3.png' % (fileroot, coord[0], coord[1])

        # page.img( src=fig2, width=900, height=300, alt="Thumbnails" )

        page = page + html.image(fig2, width=900, height=300, alt="Thumbnails")

        # page.p('Left: Original, Center: Persistence model, Right: Subtracted')
        page = page + html.paragraph(
            'Left: Original, Center: Persistence model, Right: Subtracted')

        # page.img( src=fig1, width=400, height=400, alt="Thumbnails" )
        page = page + html.image(fig1, width=400, height=400, alt="Thumbnails")
        # page.img( src=fig3, width=400, height=400, alt="Thumbnails" )
        page = page + html.image(fig3, width=400, height=400, alt="Thumbnails")

        # page.p('Left: Original and subtracted data as a function of the estimated persistence, Right: Original and subtracted data as a function of distance from x,y')
        page = page + html.paragraph(
            'Left: Original and subtracted data as a function of the estimated persistence, Right: Original and subtracted data as a function of distance from x,y'
        )
        # page.hr(size='3',width='100%')

        page = page + html.hline(size='3', width='100%')

    # Write the page to a file
    name = xpath + 'Persist_%s.html' % fileroot

    g = per_list.open_file(name)
    # g=open(name,'w')
    # os.chmod(name,0770)

    g.write('%s' % page)
    g.close()
    return fileroot
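
A minimal usage sketch for the routine above, assuming the package's html helper module, per_list, and the module-level output path xpath are available in the calling module (they are not defined in this snippet):

# Hypothetical usage sketch: build the evaluation page for two positions.
# 'html', 'per_list' and the module-level 'xpath' are assumed to come from
# the surrounding package.
coords = [[100, 200], [300, 400]]          # pixel positions to show
root = make_html(coords=coords, fileroot='ib2v09kzq')
print 'Wrote Persist_%s.html' % root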
Code Example #2
def make_html(lines, filename='observations.html'):
    '''

	This routine makes the summary html file.  
	
	110428 	ksl	This routine was written because markup.py did not seem to be able to handle
			a simple table.

	'''

    header = '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
	<html lang="en">
	<head>

	<meta http-equiv="content-type" content="text/html; charset=ISO-8859-1">
	  <title>Summary Evaluation Page of Persistence</title>
	  </head>
	  <body>
	  <p>This page contains links to the individual html files for each
	  dataset</p>
	  <hr size="3" width="100%">
	  <table border="1" cellpadding="2" cellspacing="2" width="100%">
	  <tbody>
	  '''

    string = header

    for line in lines:
        row = '<tr>\n'
        for word in line:
            row = row + '<td> %s </td>\n' % word
        row = row + '</tr>\n'
        string = string + row

    trailer = '''
	    </tbody>
	  </table>
	  <hr size="3" width="100%">
	  </body>
	  </html>
	  '''

    string = string + trailer

    g = per_list.open_file(filename)

    # g=open(filename,'w')
    # os.chmod(filename,0770)

    g.write('%s\n' % string)
    g.close()
    return
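
A hedged usage sketch, assuming per_list is importable; each entry in lines is a sequence of cell values that becomes one table row (the row contents here are purely illustrative):

# Hypothetical usage sketch: each inner list becomes one row of the summary table.
rows = [['ia21h2e9q', 'ia21h2e9q_persist.html', 'OK'],
        ['ia21h2eaq', 'ia21h2eaq_persist.html', 'OK']]
make_html(rows, filename='observations.html')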
Code Example #3
File: peaks.py  Project: kslong/Persistence
def make_reg(sources,regionfile='sources.reg'):
	'''
	Make a simple region file from a set of
	source positions, labelling them by
	source number.

	'''

	# g=open(regionfile,'w')
	# os.chmod(regionfile,0770)
	# g.write(regionheader)

	g=per_list.open_file(regionfile)

	i=1
	for source in sources:
		g.write('circle(%6.2f,%6.2f ,5) # text={%03d}\n' % (source[1]+1,source[0]+1,i))
		i=i+1
	
	g.close()
	return
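
A short usage sketch, assuming per_list is available; note that each source is indexed as (y, x), i.e. source[0] is the row and source[1] is the column, and the positions shown are illustrative:

# Hypothetical usage sketch: write a ds9 region file for two (y, x) positions.
sources = [[512.0, 128.0], [300.5, 700.2]]
make_reg(sources, regionfile='sources.reg')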
Code Example #4
def do_dataset(dataset='ia21h2e9q',model_type=0,norm=0.3,alpha=0.2,gamma=0.8,e_fermi=80000,kT=20000,fileroot='observations',ds9='yes',local='no',parameter_file='persist.pf',lookback_time=16):
	'''
	Create a persistence image for this dataset.  This version works by using the
	persistence model from each of the previous images.  It assumes that the only one which
	matters is the image which created the most persistence according to the model.

	model_type=0 is our original model, which has a modified Fermi distribution controlled by the values of norm, etc.
	model_type=1 is our purely descriptive model defined in terms of amplitudes and power laws
	model_type=2 is the Fermi model that interpolates between curves for different exposure times

	All returns from this program should be:
		OK:  something
		NOK: something

	Outputs:
		If there were IR observations that preceded the observation being analyzed
		then this routines creates various fits files:
			rootname_persist.fits  - The model for the total of internal and external persistence
			rootname_extper.fits   - The model for persistence from earlier visits, aka
			                         external persistence
			rootname_flt_cor.fits  - The corrected flt file
			rootname_stim.fits     - The stimulus that caused the persistence
			rootname_dt.fits       - The time at which the stimulus occurred
		Plots of the various images are also created, as well as a log file for
		the data set
	Notes:
		This is really the main routine of the program.  When run_persist.py calls this module,
		this is the routine that is called.

		In practice, the only difference between model types 1 and 2 is the calibration file that is read in.  The same interpolation
		routine is used for both.  At present the calibration file names for this are hardwired.

	History

	100905	ksl	Added diagnostic files to record the time of the stimulus and the
			value of the stimulus
	101015	ksl	Moved the location of where the output files are stored and 
			added a history file, so we would know what had happened
	110122	ksl	Added a switch to turn off displaying with ds9
	110324	ksl	Changed call to accommodate new persistence model
	110602	ksl	Incorporated the possibility of providing a correction file xynorm to account
			for spatial variations across the detector persistence model
	140606	ksl	Added correction which puts the correct units in the stimulus file.
	140611	ksl	Began to add in new persistence model, initially just by short-circuiting everything
	140803	ksl	Switched to fits version of data files
	141124	ksl	Small change to handle situations where the flat field correction is not found
	'''

	cur_time=date.get_gmt()

	xfile=locate_file(parameter_file)
	if xfile=='':
		print '# Error: Could not locate parameter file %s ' % parameter_file
	else:
		parameter_file=xfile


	if model_type==0:
		print '# Processing dataset %s with fermi model: Norm %4.2f alpha %4.2f gamma %4.2f e_fermi %6.0f kT %6.0f ' % (dataset,norm,alpha,gamma,e_fermi,kT)
	elif model_type==1:
		print '# Processing dataset %s with A gamma model' % dataset
	elif model_type==2:
		print '# Processing dataset %s with a time-variable fermi model' % dataset
	else:
		print '# Error: run_persist: Unknown model type %d' % model_type
		return 'NOK'

	# Read the observations file to get the data set of interest


	delta=lookback_time  # Datasets which occurred more than delta hours earlier are not considered.


	records=per_list.read_ordered_list2(fileroot,dataset,interval=[-delta,0],outroot='none')

	# Check the length of records


	if len(records)==0:
		string = 'NOK: subtract_persist.do_dataset: There are no records associated with dataset %s.  Check name in %s.ls' % (dataset,fileroot)
		sum_string='NOK - No record associated with this dataset'
		per_list.update_summary(dataset,'ERROR',sum_string,append='no')
		return string

	# So now we have the list that we need.

	

	science_record=records[len(records)-1]  # The science record is the last record
	sci_progid=science_record[2]
	words=science_record[3].split('.')
	sci_visit=words[0]
	sci_fil=science_record[10]
	sci_exp=eval(science_record[11])
	sci_obj=science_record[14]
	
	# Create the Persist directory if it does not already exist
	path=per_list.set_path(science_record[0],'yes',local)
	if path.count('NOK'):  # Then we were not able to create a plausible directory to put the data in
		return path

	# Open a history file.  Note that one needs the path before one can do this
	# The reason for calling this via the per_list routine, is that this routine sets the permissions
	# for the file

	history=per_list.open_file(path+science_record[1]+'.txt')


	history.write('START:  Persistence processing of file %s\n\n' % science_record[1])
	history.write('! Processed: %s\n' % date.get_gmt())
	history.write('! ProgramID: %s\n' % sci_progid)
	history.write('! Visit:     %s\n' % sci_visit)
	history.write('! FltFile:   %s\n' % science_record[1])
	history.write('! Filter:    %s\n' % sci_fil)
	history.write('! Exposure:  %6.1f\n' % sci_exp)
	history.write('! Object:    %s\n' % sci_obj)

	history.write('\n! Using Version %s of the persistence S/W' % VERSION)
	history.write('\n! Using a lookback time for observations that might cause persistence of %.1f hours\n' %  lookback_time)

	if model_type==0:
		history.write('\n! Processing  dataset %s with fermi model:  norm %6.2f alpha %6.2f e_fermi %6.0f kT %6.0f\n' % (dataset,norm,alpha,e_fermi,kT)) 
	elif model_type==1:
		history.write('\n! Processing dataset %s with A gamma model\n' % dataset)
	elif model_type==2:
		history.write('\n! Processing dataset %s with time-variable fermi  model\n' % dataset)


	# Check whether there is anything to do

	if len(records)==1:
		string='subtract_persist: No persistence for this dataset.  No earlier observations within %4.1f hours\n' % (delta)
		history.write('%s\n' % string)
		history.write('! Persistence:  None\n')
		string='OK: subtract_persist: -- None'
		print string
		history.close()
		xstring='  %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % (0,0,0,0,0,0)
		per_list.update_summary(dataset,'Persist',xstring,append='no')
		return string



	# Persistence is calculated for the middle of the interval in which the exposure
	# was taking place

	[t1,t2]=get_times(science_record[0])
	tscience=0.5*(t1+t2)  

	# establish which record is the last record that comes from a different visit than the current one

	i=0
	while i<len(records)-1:
		cur_progid=records[i][2]
		words=records[i][3].split('.')
		cur_visit=words[0]
		# print 'OK',cur_progid,cur_visit
		if cur_progid==sci_progid and cur_visit==sci_visit:
			# then we are now into earlier exposures of the same visit
			break
		i=i+1

	# if i = 0, all the exposures being evaluated for persistence create self_persistence
	# if i= len(records), all exposures being evaluated for persistence are from other visits

	last_external=i-1  # This is the last record that created external persistence
	ext_values=[]  # A place to store information about the persistence due to other observers
	ext_persist=[] # This is a place holder for storing the external persistence

	xynorm=read_parameter(parameter_file,'xynorm')
	if xynorm!='':
		xynorm=locate_file(xynorm)
		xcorr=get_image(xynorm,1)
		if len(xcorr)==0:
			history.write('! Error: Could not find correction file %s containing spatial dependence. Continuing anyway\n' % xynorm)
			xynorm=''  # This is an error because we were unable to find the file
		else:
			history.write('! Reference file containing spatial dependence:  %s\n' % xynorm)
	else:
		string='! Processing without spatially dependent correction'
		print string
		history.write('%s\n' % string )




	# This is the beginning of the loop for calculating the persistence model
	i=0
	while i<len(records)-1:
		record=records[i]
		# print 'subtract: %30s %6.1f model_type %d' % (record[0],eval(record[11]),model_type)
		# dt is measured from the end of the stimulus image to the middle of the
		# science image
		[t1,t2]=get_times(record[0])
		dt=(tscience-t2)*86400

		cur_progid=record[2]
		words=record[3].split('.')
		cur_visit=words[0]
		cur_sci_fil=record[10]
		cur_sci_exp=eval(record[11])
		cur_sci_obj=record[14]
		scan=record[4]

		xfile=record[0]
		# Use the ima file, if the file causing persistence is a scan object
		if scan=='scan':
			xfile=xfile.replace('flt','ima')
			print 'Using ima file for ',record[0],xfile,scan


		x=get_image(xfile,1,'e',fileref=science_record[0])  # Convert this to electrons
		if len(x)==0:
			xstring='NOK: Problem with science extension of %s' % record[0]
			history.write('%s\n' % xstring)
			print xstring
			return xstring

		dq=get_image(xfile,3,fileref=science_record[0])     # Get the dq 
		if len(dq)==0:
			xstring = 'NOK: Problem with dq extension of %s' % record[0]
			history.write('%s\n' % xstring)
			print xstring
			# 110926 - ksl - modified to allow this to process the image even if there was no dq array
			# return xstring

		if model_type==0:
			model_persistence=calc_persist(x,dq,dt,norm,alpha,gamma,e_fermi,kT)
		elif model_type==1:
			# print 'Model type is 1'
			xfile=read_parameter(parameter_file,'a_gamma')
			# The next lines are awkward, because the parameter file is read multiple times
			if i==0:
				history.write('! Reference file containing spatially-averaged persistence model: %s' %  xfile)
			model_persistence=make_persistence_image(x,cur_sci_exp,dt,xfile)
		elif model_type==2:
			# print 'Model type is 2'
			xfile=read_parameter(parameter_file,'fermi')
			if i==0:
				history.write('! Reference file containing spatially-averaged persistence model: %s' %  xfile)
			model_persistence=make_persistence_image(x,cur_sci_exp,dt,xfile)
		else:
			print 'Error: subtract_persist: Unknown model type %d' % model_type
			return 'NOK'

		values=how_much(model_persistence)

		if i==0:
			persist=model_persistence
			stimulus=x   # This is an array which contains the maximum counts in a pixel
			xtimes=numpy.ones_like(persist)
		xtimes=xtimes*dt # This is an array containing the delta time at which the stimulus occurred
		else:
			xpersist=model_persistence
			stimulus=numpy.select([xpersist>persist],[x],default=stimulus)
			xtimes=numpy.select([xpersist>persist],[dt],default=xtimes)
			persist=numpy.select([xpersist>persist],[xpersist],default=persist)
		

		# Get some elementary statistics on the stimulus
		xvalues=get_stats(x,70000)

		history.write('\nsubtract_persist: Stimulus by %30s from program %s Visit %s\n' % (record[0],cur_progid,cur_visit))
		history.write('\nsubtract_persist: The filter was %s and exposure was %6.1f for target %s\n' % (cur_sci_fil,cur_sci_exp,cur_sci_obj))
		history.write('subtract_persist: The exposure was %8.0f s earlier than the current exposure\n' % dt)
		history.write('subtract_persist: The median value in the stimulus image was %6.1f and the number of saturated pixels was %d\n' % (xvalues[0],xvalues[1]))
		history.write('subtract_persist:   The maximum value for persistence is %f\n'   % values[4])
		history.write('subtract_persist:    The median value for persistence is %f\n'   % values[5])
		history.write('subtract_persist: 90 percent of persist values less than %f\n'   % values[6]) 
		history.write('subtract_persist: 99 percent of persist values less than %f\n'   % values[7]) 
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.10 e/s\n' % (values[1],values[1]*100./values[0]))
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.03 e/s\n' % (values[2],values[2]*100./values[0]))
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.01 e/s\n' % (values[3],values[3]*100./values[0]))
		string = 'subtract_persist: Finished (%2d of %2d) %s' % (i+1,len(records)-1,record[0])
		#130909 Removed print statement as unnecessary.  The string still goes to the history file
		# print string
		history.write('%s\n' % string)

		# Summarize the Stimulus, printing out the filename, prog_id, visit_name, target, dt and the number of pixels above saturation of 70000
		scan='No'
		if record[4]=='scan':
			scan='Yes'

		stimulus_summary='Stimulus: %40s %10s %10s %20s %8.0f %3d %3s %6s\n' % (record[0],cur_progid,cur_visit,cur_sci_obj,dt,xvalues[1],record[9],scan)
		history.write('! %s\n' % stimulus_summary)

		if i==last_external:
			ext_values=how_much(persist)
			ext_persist=numpy.copy(persist)

		i=i+1
	
	# This is the end of the loop where the persistence model is calculated

	# First report on the external persistence for this file;

	# Now apply the fix to account for spatial variations in persistence
	if xynorm !='' and numpy.shape(xcorr)==numpy.shape(persist):
		persist=persist*xcorr


	if len(ext_values)>0:

		f1=ext_values[1]*100./ext_values[0]
		f2=ext_values[2]*100./ext_values[0]
		f3=ext_values[3]*100./ext_values[0]
		emeasure='%6.2f %6.2f %6.2f' % (f1,f2,f3)

		history.write('\nsubtract_persist: Estimate of persistence from earlier visits\n')
		history.write('subtract_persist:   The maximum value for persistence is %f\n'   % ext_values[4])
		history.write('subtract_persist:    The median value for persistence is %f\n'   % ext_values[5])
		history.write('subtract_persist: 90 percent of persist values less than %f\n'   % ext_values[6]) 
		history.write('subtract_persist: 99 percent of persist values less than %f\n'   % ext_values[7]) 
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.10 e/s\n' % (ext_values[1],f1))
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.03 e/s\n' % (ext_values[2],f2))
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.01 e/s\n' % (ext_values[3],f3))
	else:
		emeasure='%6.2f %6.2f %6.2f' % (0 ,0,0)
		history.write('\nsubtract_persist: This exposure has no persistence from earlier visits.  All persistence is self-induced\n')



	# Now evaluate the total persistence

	values=how_much(persist)


	f1=values[1]*100./values[0]
	f2=values[2]*100./values[0]
	f3=values[3]*100./values[0]
	measure='%6.2f %6.2f %6.2f' % (f1,f2,f3)


	history.write('\nsubtract_persist: Estimate of total persistence for this file\n')
	history.write('subtract_persist:   The maximum value for persistence is %f\n'   % values[4])
	history.write('subtract_persist:    The median value for persistence is %f\n'   % values[5])
	history.write('subtract_persist: 90 percent of persist values less than %f\n'   % values[6]) 
	history.write('subtract_persist: 99 percent of persist values less than %f\n'   % values[7]) 
	history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.10 e/s\n' % (values[1],f1))
	history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.03 e/s\n' % (values[2],f2))
	history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.01 e/s\n' % (values[3],f3))




	history.write('! PersistenceSum: External   %s\n'% emeasure)
	history.write('! PersistenceSum: Total      %s\n'% measure)


	# Now write out all of the new fits files

	# First find out the units of the science image

	units=get_keyword(science_record[0],1,'bunit')
	# print 'test',units
	if units[0]=='COUNTS/S':
		print 'Reducing model for dataset %s to match units %s' % (dataset,units)
		persist/=2.4

	# subtract and write out the corrected image
	science=get_image(science_record[0],1,'no')
	original=numpy.copy(science)
	science=science-persist


	xname=parse_fitsname(science_record[0])

	persist_file=path+dataset+'_persist.fits'
	ext_persist_file=path+dataset+'_extper.fits'

	# Note: Do not use a name with an extension like flt.fits because it will interfere with making lists
	corrected_file=path+dataset+'_flt_cor.fits'
	stimulus_file=path+dataset+'_stim.fits'
	time_file=path+dataset+'_dt.fits'

	rewrite_fits(xname[0],persist_file,1,persist)
	rewrite_fits(xname[0],corrected_file,1,science)
	rewrite_fits(xname[0],stimulus_file,1,stimulus)
	# 140606 - Fix added to put stimulus file in the correct units.
	put_keyword(stimulus_file,1,'bunit','ELECTRONS')
	rewrite_fits(xname[0],time_file,1,xtimes)
	if len(ext_persist)>0:
		rewrite_fits(xname[0],ext_persist_file,1,ext_persist)

	# This completes the section which writes out all of the fits files
		
	# Get statistics on the images and make the 4 panel plot 

	xmed=numpy.median(original)
	zmed=numpy.median(persist)
	zmax=numpy.max(persist)


	pylab.figure(1,[12,12])
	pylab.title(dataset)
	pylab.subplot(221)
	pylab.imshow(original,origin='lower',cmap=pylab.cm.gray,vmin=xmed-0.1,vmax=xmed+0.1)
	pylab.title('Original')

	pylab.subplot(222)
	pylab.imshow(science,origin='lower',cmap=pylab.cm.gray,vmin=xmed-0.1,vmax=xmed+0.1)
	pylab.title('Subtracted')

	pylab.subplot(223)
	pylab.imshow(persist,origin='lower',cmap=pylab.cm.gray,vmin=zmed-0.1,vmax=zmed+0.1)
	pylab.title('Total Persistence')

	if len(ext_persist)>0:
		pylab.subplot(224)
		pylab.imshow(ext_persist,origin='lower',cmap=pylab.cm.gray,vmin=zmed-0.1,vmax=zmed+0.1)
		pylab.title('External Persistence')
	else:
		pylab.subplot(224)
		pylab.imshow(stimulus,origin='lower',cmap=pylab.cm.gray,vmin=0.0,vmax=200000)
		pylab.title('Stimulus')

	fig1=path+'Figs/'+dataset+'_subtract.png'


	if os.path.isfile(fig1):
		os.remove(fig1)
	pylab.savefig(fig1)
	os.chmod(fig1,0770)

	# Eliminated to prevent an error on linux having to do with tkinter
	# pylab.close('all')

	if ds9=='yes':
		LoadFrame(science_record[0],1,0,2,'histequ')
		LoadFrame(persist_file,2,0,2,'histequ')
		LoadFrame(corrected_file,3,0,2,'histequ')
		if len(ext_persist)>0:
			LoadFrame(ext_persist_file,4,0,2,'histequ')
		else:
			LoadFrame(stimulus_file,4,0,1e5,'histequ')

	# Finished plots

	# Finished everything so wrap it up
	history.write('# Finished Persistence processing of file %s\n' % science_record[1])
	history.close()

	# Update the summary file
	string='%20s %20s' % (emeasure,measure)
	per_list.update_summary(dataset,'Persist',string,fileroot,append='no')

	return string
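
A hedged sketch of a typical call, using only the defaults shown in the signature above; it assumes the supporting modules used inside the routine (per_list, per_fits, numpy, pylab, the ds9 LoadFrame helper) are importable and that an observations.ls listing has already been built:

# Hypothetical usage sketch: correct one dataset with the A-gamma model
# (model_type=1), no ds9 display, default parameter file and lookback time.
status = do_dataset(dataset='ia21h2e9q', model_type=1, ds9='no',
                    fileroot='observations', parameter_file='persist.pf',
                    lookback_time=16)
print status   # on success this is the external/total persistence summary string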
Code Example #5
def do_dataset(dataset='ia21h2eaq', fileroot='observations', local='no'):
    '''
	Make html files for a single dataset

	110203	ksl	Added local switch which controls where the
			real working directory is to make testing
			easier
	140307	ksl	Added information about scans and subarray observations
	'''

    record = per_list.read_ordered_list_one(fileroot, dataset)
    if len(record) == 0:
        return 'NOK: make_html failed because it could not find dataset %s' % dataset

    work_dir = per_list.set_path(
        record[0], 'no',
        local)  # This will be the Persist directory for the dataset
    fig_dir = work_dir + '/Figs/'  # This will be the directory where figures are stored

    html_filename = work_dir + dataset + '_persist.html'

    # page=markup.page()
    title = 'Persistence Removal Evaluation for dataset %s' % dataset
    page = html.begin(title)

    # page.init(title='Persistence Removal Evaluation for dataset %s' % dataset)
    # page.h1('Persistence Removal Evaluation for %s' % dataset)

    # page.p('''This page contains images for the evaluation of how well persistence has been removed from an image''')
    page = page + html.paragraph(
        '''This page contains images for the evaluation of how well persistence has been removed from an image'''
    )

    # Look for the history file for this dataset

    history_file = dataset + '.txt'

    if os.path.exists(work_dir + history_file):
        string = '''The history file for the processing of this dataset is '''
        string = string + html.link("here", href=history_file)
        page = page + html.paragraph(string)

        # read history simply returns all of the lines in the history file that begin with !
        # And so any processing of these lines still has to be done
        lines, table1, table2 = read_history(work_dir + history_file)
        for line in lines:
            page = page + html.paragraph(line)
        if len(table1) > 0:
            page = page + html.h2(
                'Earlier exposures that could affect this image')
            page = page + html.table(table1)
        if len(table2) > 0:
            page = page + html.h2(
                'External and total persistence for this image')
            string = '''External persistence is persistence from previous visits; internal persistence
			is persistence induced by exposures in this visit.  Total persistence includes both
			internal and external persistence.  Generally, self-induced or internal persistence is
			only important if dithers larger than the psf have been used within the visit'''
            page = page + html.paragraph(string)
            page = page + html.table(table2)

    else:
        page = page + html.paragraph(
            ''' The history file for this dataset appears to be missing.  Check that the file has been processed'''
        )

    page = page + html.hline(size='3', width='100')

    string = '''The next 4-panel image shows the original flt image (upper left), the corrected flt image (upper right), 
	the persistence model (lower left) and the stimulus (lower right).  The stimulus is simply the image constructed from the
	maximum value in electrons of any of the images that went into the stimulus model'''

    # Look for the summary image

    xname = dataset + '_subtract.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=600,height=600,alt="Thumbnails")
        page = page + html.image(
            image='Figs/' + xname, width=600, height=600, alt="Thumbnails")
    else:
        # page.p('''The summary image is missing''')
        page = page + html.paragraph('''The summary image is missing''')

    # page.hr(size='3',width='100%')
    page = page + html.hline(size='3', width='100')

    # Now include the evaluation images

    string = '''As a qualitative indicator of how well the persistence correction has worked, some of the regions with
	the highest predicted persistence have been examined. 
	The next two images give an indication of how well the persistence has been subtracted from the images.
	Both images have the original data in red and the persistence-subtracted data in blue.  The first image is
	a plot of flux vs the persistence model, the second is flux as a function of the stimulus. Ideally the blue 
	curves would all center around 0. The utility of these plots depends on how isolated the persistence peaks
	are from stars in the image. If these plots are empty, no good regions for evaluating persistence were found.'''

    page = page + html.paragraph(string)

    xname = dataset + '.sum1.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=300,height=300,alt="Thumbnails")
        page = page + html.image(
            'Figs/' + xname, width=300, height=300, alt="Thumbnails")
    else:
        # page.p('''The first evaluation image showing the subtraction is missing''')
        page = page + html.paragraph('''The first evaluation image showing the subtraction is missing''')

    xname = dataset + '.sum2.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=300,height=300,alt="Thumbnails")
        page = page + html.image(
            'Figs/' + xname, width=300, height=300, alt="Thumbnails")
    else:
        # page.p('''The second evaluation image showing the subtraction is missing''')
        page = page + html.paragraph(
            '''The second evaluation image showing the subtraction is missing'''
        )

    # page.hr(size='3',width='100%')
    page = page + html.hline(size='3', width='100')

    # Look for the peaks summary

    string = '''This figure indicates what regions were selected for evaluation. The two panels are
	identical except the regions selected are indicated in the lower panel. '''

    page = page + html.paragraph(string)

    xname = dataset + '_persist.peaks.png'
    if os.path.exists(fig_dir + xname):
        # page.img(src='Figs/'+xname,width=600,height=1000,alt="Thumbnails")
        page = page + html.image(
            'Figs/' + xname, width=900, height=900, alt="Thumbnails")
    else:
        # page.p('''The summary figure for peak identification is missing''')
        page = page + html.paragraph(
            '''The summary figure for peak identification is missing''')

    # Now find all of the individual peak files:

    searchstring = fig_dir + dataset + '.peak.*.1.png'
    print searchstring

    try:
        peaks_file = work_dir + dataset + '_persist.peaks.dat'
        p = open(peaks_file, 'r')
        lines = p.readlines()
        p.close()
    except IOError:
        print 'Warning: %s not found' % peaks_file
        lines = []

    xlines = []
    for one in lines:
        one = one.strip()
        if len(one) > 0 and one[0] != '#':
            xlines.append(one)

    if len(xlines) > 0:
        string = '''The results for individual regions are shown below. The four panels are a subsection of the original flt file, the predicted persistence in that region, the persistence-subtracted flt file, and a plot of pixel values as a function of predicted persistence in the region. Green points are the original values; yellow points are the corrected values. The red and blue lines show the mean values in the original and corrected images, respectively.'''
        page = page + html.paragraph(string)
        page = page + html.hline(size='3', width='100')

        for one in xlines:
            word = one.split()
            x = int(word[0])
            y = int(word[1])
            z = eval(word[2])
            zz = eval(word[3])
            # page.p('Persistence at x = %3d, y=%3d' %(x,y))
            page = page + html.paragraph(
                'Persistence at x = %3d, y=%3d is about %6.3f e/s compared to science image flux of %6.3f e/s'
                % (x, y, z, zz))
            xname = '%s.peak.%03d_%03d.1.png' % (dataset, x, y)
            if os.path.exists(fig_dir + xname):
                # page.img(src='Figs/'+xname,width=400,height=400,alt="Thumbnails")
                page = page + html.image(
                    'Figs/' + xname, width=400, height=400, alt="Thumbnails")
            else:
                # page.p('Figure %s not present' % (work_dir+xname))
                page = page + html.paragraph('Figure %s not present' %
                                             (work_dir + xname))
            # page.hr(size='3',width='100%')
            page = page + html.hline(size='3', width='100')
    else:
        string = '''Unfortunately, no good regions for evaluating persistence were found.'''
        page = page + html.paragraph(string)
        page = page + html.hline(size='3', width='100')

    page = page + html.end()

    # Open the html file with the appropriate permissions, and then write it
    g = per_list.open_file(html_filename)
    g.write('%s' % page)
    g.close()

    return 'OK: subtract_html: %s' % html_filename
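
A short usage sketch for the html-building variant above, assuming per_list, html and read_history are importable and the dataset has already been processed so that its Persist directory and figures exist:

# Hypothetical usage sketch: build the per-dataset evaluation page.
result = do_dataset(dataset='ia21h2eaq', fileroot='observations', local='no')
print result   # 'OK: subtract_html: <html file>' or an 'NOK: ...' message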
Code Example #6
File: peaks.py  Project: kslong/Persistence
def doit(filename='foo.fits',mask_file='none',box=3,maxno=25,history_file='history.txt',local='no'):
	'''
	Main routine which handles finding of the peaks and creating plots which indicate
	how well the persistence has been subtracted.  It attempts to locate regions with
	a significant amount of persistence, while avoiding regions where the flux from
	stars in the image under consideration is large.
	
	The variables are as follows:
		filename	The persist model
		mask_file	The original science image which here is used to
				identify regions which are contaminated by stars
		box		The size of the boxcar function used to smooth the persist
				image.  
		maxno		The maximum number of peaks to find
		history_file	The place to append history information
		local		The standard way in which to indicate where output files
				should be stored.
		
	
	The routine reads the file given by filename and smooths it with a boxcar function with a kernel of size
	box.  It also reads the mask_file, the original science image. This is used in find_peaks
	to exclude regions of the image that have very high count rates, since it will be impossible
	to see persistence against a bright source.  So that bad pixels are also excluded,
	a large number is added to the mask when a pixel has bad data quality.

	The routine also creates a plot for each of the peaks that indicates where
	the peaks are.  The individual subtractions are not evaluated here, see
	subtract_eval instead.

	111011	ksl	Tried to clean this routine up and make it run a little better
	130204	ksl	Modified the data quality criterion to eliminate the possibility
			that the TDF had been set which causes all of the pixels to
			be declared bad.
	130913  ksl     Restored the use of DQ flags since the problem with loss of 
			lock, which in certain cases, particularly darks, caused all
			pixels to have bad DQ.


	'''

	history=open(history_file,'a')
	history.write('# Starting peaks on file %s using  %s as mask \n' % (filename,mask_file))

	# determine from the filename where we want
	# to put the outputs

	work_dir=per_list.set_path(filename,'yes',local)  # This will be the Persist directory for the dataset
	fig_dir=work_dir+'/Figs/'



	# Get a name for the root of any output files
	try:
		x=filename.split('/')
		name=x[len(x)-1]
		# print 'name',name
		j=string.rindex(name,'.')
		outroot=name[0:j]
		# print 'OK got',outroot
	except ValueError:
		outroot=filename
		print 'File had no extensions, using entire name'

	# Read the persistence file
	x=per_fits.get_image(filename,1)
	if len(x)==0:
		print 'Error: peaks:  Nothing to do since no data returned for %s' % filename
		history.write('Peaks: Nothing to do since no data returned for %s\n' % filename)
		return 'NOK'

	# Now read the flt file, which will be used to create a mask file in an attempt 
	# to remove from consideration regions of the image where there is persistence which
	# would be overwhelmed by light in the current image.
	# Read both the image and the data quality
	# extension.  The data quality extension is used to help assure that we are tracking
	# persistence from light in earlier exposures.  For these pixels, we set the value
	# in the persistence image to zero, before the persistence image is smoothed.
	# Then smooth the persistence image so that peak finding is easier

	if mask_file!='none':
		mask=per_fits.get_image(mask_file,1,rescale='e/s')
		xmask=smooth(mask,box)
		dq=per_fits.get_image(mask_file,3,rescale='no')
		z=numpy.select([dq>0],[0],default=x)
		z=smooth(z,box)
		history.write('Peaks: Using %s to mask exposure\n' % mask_file)
	else:
		history.write('Peaks: No mask from earlier undithered exposures\n')
		z=smooth(x,box)
		xmask=z  # With no mask, fall back to the smoothed persistence so xflux below is defined
		mask='none'

	# After having set everything up call the routine that locates the peaks to use to see
	# how well the subtraction works

	sources=find_peaks(z,mask,maxno=maxno)

	# Write out the results in a text file containing
	# the x and y positions

	outfile=work_dir+outroot+'.peaks.dat'

	# 	print 'Error: Could not open %s' % outfile
	
	g=per_list.open_file(outfile)

	g.write('# peaks in %s\n' % filename)
	for source in sources:
		xflux=xmask[source[0],source[1]]
		g.write('%5d %5d  %6.3f %6.3f\n' % (source[1],source[0],source[2],xflux))
	g.close()

	# Write out a set of ds9 region files of the sources
	make_reg(sources,work_dir+outroot+'.peaks.reg')


	# Plot the results. This creates two images, one with the regions selected for detailed 
	# analysis superposed.

	xmed=numpy.median(x)
	pylab.figure(1,(8,12))
	pylab.clf()
	pylab.subplot(211)
	pylab.imshow(x,origin='lower',cmap=pylab.cm.gray,vmin=xmed-0.05,vmax=xmed+0.05)
	plothandle=pylab.subplot(212)
	ysize,xsize=x.shape
	pylab.imshow(z,origin='lower',extent=[0,xsize,0,ysize],cmap=pylab.cm.gray,vmin=xmed-0.05,vmax=xmed+0.05)

	# For reasons I don't quite understand, one needs to add 1 to both axes
	# to get the circles to line up, as if the elements were being counted
	# from one as in IRAF.
	for source in sources:
		circle(source[1]+1,source[0]+1,plothandle)

	# Note that this really needs to be in the Figs subdirectory

	figure_name=fig_dir+outroot+'.peaks.png'
	if os.path.isfile(figure_name):
		os.remove(figure_name)
	pylab.savefig(figure_name)
	os.chmod(figure_name,0770)

	# Generation of this summary plot is now complete.


	# 110325 - This next line was generating errors with the backend I was using.  The desired behavior
	# is that there be no window created when the program is run from the command line.  
	# The error seems to occur when you try to close the figure without first drawing it. For now 
	# I have deleted the next line.  The behaviour I am currently seeing is that the window
	# appears as if one had issued a draw command, i.e. the window stays up, but the program
	# continues unlike show()
	# pylab.close('all')

	history.write('# Finished peaks on file %s\n' % filename)

	return  'OK'
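
A hedged usage sketch for peaks.doit, assuming per_fits, per_list, the smooth/find_peaks helpers and pylab are importable; the file names below are illustrative only, following the rootname_persist.fits / flt convention described in the do_dataset docstring:

# Hypothetical usage sketch: locate evaluation regions in a persistence model,
# masking with the original flt image so bright stars are excluded.
status = doit(filename='ia21h2e9q_persist.fits',
              mask_file='ia21h2e9q_flt.fits',
              box=3, maxno=25, history_file='history.txt', local='no')
print status   # 'OK' or 'NOK'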
Code Example #7
File: run_persist.py  Project: kslong/Persistence
def steer(argv):
	'''
	This is a steering routine for subtract persist so that options can be exercised from the 
	command line.  See the top level documentation for details

	100907	ksl	Added to begin to automate the subtraction process
	101215	ksl	Moved to a separate routine so that one would be able to split various portions
			of persistence subtraction and evaluation of the results into multiple files
	111114	ksl	Added a command.log to keep track of all of the run_persist commands
	140924	ksl	Updated to allow for various model types
	160103	ksl	Begin implementation of multiprocessing
	'''


	log('# Start run_persist  %s\n' % date.get_gmt())
	xstart=time.clock()
	cur_time=date.get_gmt()

	i=1
	dataset_list='none'

	model_type=1
	norm=0.3
	alpha=0.174
	gamma=1.0
	e_fermi=90000
	kT=20000
	fileroot='observations'
	words=[]
	mjd_start=0.0    # A small number for mjd
	mjd_stop=1.e6  # A large number for mjd
	dryrun='no'
	clean='no'
	ds9='no'
	local='no'
	pffile='persist.pf'
	lookback_time=16

	switch='single'
	np=1

	while i<len(argv):
		if argv[i]=='-h':
			print __doc__
			return    
		elif argv[i]=='-model':
			i=i+1
			model_type=int(argv[i])
		elif argv[i]=='-n':
			i=i+1
			norm=eval(argv[i])
		elif argv[i]=='-e':
			i=i+1
			e_fermi=eval(argv[i])
		elif argv[i]=='-kT':
			i=i+1
			kT=eval(argv[i])
		elif argv[i]=='-alpha':
			i=i+1
			alpha=eval(argv[i])
		elif argv[i]=='-gamma':
			i=i+1
			gamma=eval(argv[i])
		elif argv[i]=='-obslist':
			i=i+1
			fileroot=argv[i]
		elif argv[i]=='-many':
			i=i+1
			dataset_list=argv[i]
			switch='many'
			print 'OK you want to evaluate a number of datasets in file %s' % dataset_list
		elif argv[i]=='-all':
			switch='all'
			print 'OK you want to evaluate all the records in the obslist'
		elif argv[i] =='-prog_id':
			i=i+1
			prog_id=int(argv[i])
			switch='prog_id'
			print 'OK, you want to evaluate all the records for program %d' % prog_id
		elif argv[i]=='-start':
			i=i+1
			z=argv[i]
			try:
				mjd_start=float(z)
			except ValueError:
				mjd_start=date.iso2mjd(z)
			if switch !='prog_id':
				switch='all'
		elif argv[i]=='-stop':
			i=i+1
			z=argv[i]
			try:
				mjd_stop=float(z)
			except ValueError:
				mjd_stop=date.iso2mjd(z)
			if switch !='prog_id':
				switch='all'
		elif argv[i]=='-dryrun':
			dryrun='yes'
			print 'OK, This will be a dry run!'
		elif argv[i]=='-clean':
			dryrun='yes'
			clean='yes'
			print 'OK. This run will clean out various Persist directories, and revise the .sum file'
		elif argv[i]=='-ds9':
			ds9='yes'
		elif argv[i]=='-local':
			local='yes'
		elif argv[i]=='-pf':
			i=i+1
			pffile=argv[i]
		elif argv[i]=='-lookback':
			i=i+1
			lookback_time=eval(argv[i])
		elif argv[i]=='-np':
			i=i+1
			np=int(argv[i])
		elif argv[i][0]=='-':
			print 'Error: Unknown switch ---  %s' % argv[i]
			return
		else:
			words.append(argv[i])
		i=i+1

	# At this point all of the options have been processed and we can
	# begin the processing of individual datasets

	# Check that the listfile actually exists, and if not exit with a stern warning

	listfile=fileroot+'.ls'
	if os.path.exists(listfile)==False:
		print 'Error: run_persist.steer - No %s file in run directory.  EXITING!!!' % listfile
		return
	
	# At this point, we are able to determine exactly what datasets to process
	log('# Starting at %s\n' % (cur_time),'command.log')
	log('# Starting at %s\n' % (cur_time))
	string=''
	for one in argv:
		string=string+'%s ' % one
	log('Command:  %s\n' % string,'command.log')
	log('Command:  %s\n' % string)


	datasets=[]
	
	if switch=='single': #  Then we are processing a single file
		datasets.append(words[0])
	elif switch=='all': # Then we are working from the obslist
		records=per_list.read_ordered_list_mjd(fileroot,mjd_start,mjd_stop)
		for record in records:
			datasets.append(record[1])
	elif switch=='many':  # Then we are reading a file with rootnames of the files we want to process
		f=open(dataset_list,'r')
		lines=f.readlines()
		f.close()
		for line in lines:
			x=line.strip()
			if len(x)>0 and x[0]!='#':
				xx=x.split()  #  Take the first word to be the dataset name
				datasets.append(xx[0])
	elif switch=='prog_id':
		records=per_list.read_ordered_list_progid(fileroot,prog_id,mjd_start,mjd_stop)
		for record in records:
			datasets.append(record[1])
	else:
		print 'Error: run_persist: Unknown switch %s'% switch

	# Ok, now, unless this is a dryrun we actually process the data
	ntot=len(datasets)
	print 'There are %d datasets to process' % ntot          

	dry=[]
	if dryrun=='yes':
		for one in datasets:
			record=per_list.read_ordered_list_one(fileroot,one)
			dry.append(record)
		per_list.write_ordered_list('dryrun',dry)
		if clean=='yes':
			# xclean=open('CleanFiles','w')
			# os.chmod('CleanFiles',0770)

			xclean=per_list.open_file('CleanFiles')

			for one in dry:
				xxx=per_list.set_path(one[0])
				xclean.write('rm -r -f %s\n' % xxx)
			xclean.close()
		# return
	elif np==1 or len(datasets)==1:
		n=1
		for one in datasets:
			do_dataset(one,model_type,norm,alpha,gamma,e_fermi,kT,fileroot,ds9,local,pffile,lookback_time)
			print '# Completed dataset %d of %d. Elapsed time is %0.1f s' % (n,ntot,time.clock()-xstart)
			n=n+1
	else:
		print 'There will be %d processes running simultaneously' % np
		jobs=[]
		for one in datasets:
			p=multiprocessing.Process(target=do_dataset,args=(one,model_type,norm,alpha,gamma,e_fermi,kT,fileroot,ds9,local,pffile,lookback_time,))
			jobs.append(p)
		i=0
		while i<np and i<len(jobs):
			print '!Starting %s' % datasets[i]
			one=jobs[i]
			one.start()
			i=i+1

		njobs=get_no_jobs(jobs)

		while i<len(jobs):
			time.sleep(2)
			njobs=get_no_jobs(jobs)
			print 'Running %d jobs, including job %d (%s) of %d total' % (njobs,i,datasets[i-1],len(datasets))
			if njobs<np:
				print 'Starting: ',datasets[i]
				one=jobs[i]
				one.start()
				i=i+1

		# Wait for all of the worker processes, not just the last one created
		for one in jobs:
			one.join()
		print 'Completed multiprocessing'




	per_list.fixup_summary_file(datasets)

	print 'xxx',xstart,time.clock()

	
	dtime=time.clock()-xstart
	cur_time=date.get_gmt()
	log('# End  %s  (Elapsed time for %d datasets was %.1f (or %.1f per dataset)\n' % (date.get_gmt(),ntot,dtime,dtime/ntot))
	cur_time=date.get_gmt()
	log('# End  %s  (Elapsed time for %d datasets was %.1f (or %.1f per dataset)\n' % (date.get_gmt(),ntot,dtime,dtime/ntot),'command.log')

	log('# Finished at %s\n' % (cur_time))
	log('# Finished at %s\n' % (cur_time),'command.log')
	return
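
Finally, a hedged sketch of how steer might be driven; it expects a sys.argv-style list, so an entry point in run_persist.py could pass sys.argv straight through, and the flags shown are the ones parsed above (the dataset name is illustrative):

# Hypothetical usage sketch: steer() parses a sys.argv-style list.
import sys

if __name__ == '__main__':
    steer(sys.argv)

# Equivalent direct call: one dataset with the time-variable fermi model
# and a 24 hour lookback window.
# steer(['run_persist.py', '-model', '2', '-lookback', '24', 'ia21h2e9q'])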