Example #1
0
def do_dataset(dataset='ia21h2e9q',model_type=0,norm=0.3,alpha=0.2,gamma=0.8,e_fermi=80000,kT=20000,fileroot='observations',ds9='yes',local='no',parameter_file='persist.pf',lookback_time=16):
	'''
	Create a persistence image for this dataset.  This version works by evaluating the
	persistence model for each of the previous images.  It assumes that the only one which
	matters is the image which created the most persistence according to the model.

	model_type=0 is our original model, a modified fermi distribution controlled by the values of norm, etc.
	model_type=1 is our purely descriptive model defined in terms of amplitudes and power laws
	model_type=2 is the fermi model that interpolates between curves for different exposure times

	All returns from this program should be:
		OK:  something
		NOK: something

	Outputs:
		If there were IR observations that preceded the observation being analyzed
		then this routine creates various fits files:
			rootname_persist.fits  - The model for the total of internal and external persistence
			rootname_extper.fits   - The model for persistence from earlier visits, aka
			                         external persistence
			rootname_flt_cor.fits  - The corrected flt file
			rootname_stim.fits     - The stimulus that caused the persistence
			rootname_dt.fits       - The time at which the stimulus occurred
		Plots of the various images are also created, as well as a log file for
		the data set
	Notes:
		This is really the main routine of the program.  When run_persist.py calls this module,
		this is the routine that is called.

		In practice, the only difference between model types 1 and 2 is the calibration file that is read in.  The same interpolation
		routine is used for both.  At present the calibration file names for this are hardwired.

	History

	100905	ksl	Added diagnostic files to record the time of the stimulus and the
			value of the stimulus
	101015	ksl	Moved the location of where the output files are stored and 
			added a history file, so we would know what had happened
	110122	ksl	Added a switch to turn off displaying with ds9
	110324	ksl	Changed call to accommodate new persistence model
	110602	ksl	Incorporated the possibility of providing a correction file xynorm to account
			for spatial variations across the detector persistence model
	140606	ksl	Added correction which puts the correct units in the stimulus file.
	140611	ksl	Began to add in new persistence model, initially just by short-circuiting everything
	140803	ksl	Switched to fits version of data files
	141124	ksl	Small change to handle situations where the flat field correction is not found
	'''

	cur_time=date.get_gmt()

	xfile=locate_file(parameter_file)
	if xfile=='':
		print '# Error: Could not locate parameter file %s ' % parameter_file
	else:
		parameter_file=xfile


	if model_type==0:
		print '# Processing dataset %s with fermi model: Norm %4.2f alpha %4.2f gamma %4.2f e_fermi %6.0f kT %6.0f ' % (dataset,norm,alpha,gamma,e_fermi,kT)
	elif model_type==1:
		print '# Processing dataset %s with A gamma model' % dataset
	elif model_type==2:
		print '# Processing dataset %s with a time-variable fermi model' % dataset
	else:
		print '# Error: run_persist: Unknown model type %d' % model_type
		return 'NOK'

	# Read the observations file to get the data set of interest


	delta=lookback_time  # Datasets which occurred more than delta hours earlier are not considered.


	records=per_list.read_ordered_list2(fileroot,dataset,interval=[-delta,0],outroot='none')

	# Check the length of records


	if len(records)==0:
		string = 'NOK: subtract_persist.do_dataset: There are no records associated with dataset %s.  Check name in %s.ls' % (dataset,fileroot)
		sum_string='NOK - No record associated with this dataset'
		per_list.update_summary(dataset,'ERROR',sum_string,append='no')
		return string

	# So now we have the list that we need.

	

	science_record=records[len(records)-1]  # The science record is the last record
	sci_progid=science_record[2]
	words=science_record[3].split('.')
	sci_visit=words[0]
	sci_fil=science_record[10]
	sci_exp=eval(science_record[11])
	sci_obj=science_record[14]
	
	# Create the Persist directory if it does not already exist
	path=per_list.set_path(science_record[0],'yes',local)
	if path.count('NOK'):  # Then we were not able to create a plausible directory to put the data in
		return path

	# Open a history file.  Note that one needs the path before one can do this.
	# The reason for calling this via the per_list routine is that it sets the
	# permissions for the file

	history=per_list.open_file(path+science_record[1]+'.txt')


	history.write('START:  Persistence processing of file %s\n\n' % science_record[1])
	history.write('! Processed: %s\n' % date.get_gmt())
	history.write('! ProgramID: %s\n' % sci_progid)
	history.write('! Visit:     %s\n' % sci_visit)
	history.write('! FltFile:   %s\n' % science_record[1])
	history.write('! Filter:    %s\n' % sci_fil)
	history.write('! Exposure:  %6.1f\n' % sci_exp)
	history.write('! Object:    %s\n' % sci_obj)

	history.write('\n! Using Version %s of the persistence S/W' % VERSION)
	history.write('\n! Using a lookback time for observations that might cause persistence of %.1f hours\n' %  lookback_time)

	if model_type==0:
		history.write('\n! Processing  dataset %s with fermi model:  norm %6.2f alpha %6.2f e_fermi %6.0f kT %6.0f\n' % (dataset,norm,alpha,e_fermi,kT)) 
	elif model_type==1:
		history.write('\n! Processing dataset %s with A gamma model\n' % dataset)
	elif model_type==2:
		history.write('\n! Processing dataset %s with time-variable fermi  model\n' % dataset)


	# Check whether there is anything to do

	if len(records)==1:
		string='subtract_persist: No persistence for this dataset.  No earlier observations within %4.1f hours\n' % (delta)
		history.write('%s\n' % string)
		history.write('! Persistence:  None\n')
		string='OK: subtract_persist: -- None'
		print string
		history.close()
		xstring='  %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % (0,0,0,0,0,0)
		per_list.update_summary(dataset,'Persist',xstring,append='no')
		return string



	# Persistence is calculated for the middle of the interval in which the exposure
	# was taking place

	[t1,t2]=get_times(science_record[0])
	tscience=0.5*(t1+t2)  

	# establish which record is the last record that comes from a different visit than the current one

	i=0
	while i<len(records)-1:
		cur_progid=records[i][2]
		words=records[i][3].split('.')
		cur_visit=words[0]
		# print 'OK',cur_progid,cur_visit
		if cur_progid==sci_progid and cur_visit==sci_visit:
			# then we are now into earlier exposures of the same visit
			break
		i=i+1

	# If i == 0, all the exposures being evaluated for persistence create self-persistence
	# If the loop ran to the end, all the exposures being evaluated for persistence are from other visits

	last_external=i-1  # This is the last record that created external persistence
	ext_values=[]  # A place to store information about the persistence due to other observers
	ext_persist=[] # This is a place holder for storing the external persistence
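	# Optionally rescale the persistence model with a reference image that corrects
	# for spatial variations in persistence across the detector (the 'xynorm' entry
	# in the parameter file); see the 110602 history note above.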

	xynorm=read_parameter(parameter_file,'xynorm')
	if xynorm!='':
		xynorm=locate_file(xynorm)
		xcorr=get_image(xynorm,1)
		if len(xcorr)==0:
			history.write('! Error: Could not find correction file %s containing spatial dependence. Continuing anyway\n' % xynorm)
			xynorm=''  # This is an error because we were unable to find the file
		else:
			history.write('! Reference file containing spatial dependence:  %s\n' % xynorm)
	else:
		string='! Processing without spatially dependent correction'
		print string
		history.write('%s\n' % string )




	# This is the beginning of the loop for calculating the persistence model
	i=0
	while i<len(records)-1:
		record=records[i]
		# print 'subtract: %30s %6.1f model_type %d' % (record[0],eval(record[11]),model_type)
		# dt is measured from the end of the stimulus image to the middle of the
		# science image
		[t1,t2]=get_times(record[0])
		dt=(tscience-t2)*86400
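		# get_times is assumed to return times in days (MJD), so the factor of
		# 86400 converts the interval to seconds; dt is reported in seconds in
		# the history entries written below.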

		cur_progid=record[2]
		words=record[3].split('.')
		cur_visit=words[0]
		cur_sci_fil=record[10]
		cur_sci_exp=eval(record[11])
		cur_sci_obj=record[14]
		scan=record[4]

		xfile=record[0]
		# Use the ima file if the file causing persistence is a scan object
		if scan=='scan':
			xfile=xfile.replace('flt','ima')
			print 'Using ima file for ',record[0],xfile,scan


		x=get_image(xfile,1,'e',fileref=science_record[0])  # Convert this to electrons
		if len(x)==0:
			xstring='NOK: Problem with science extension of %s' % record[0]
			history.write('%s\n' % xstring)
			print xstring
			return xstring

		dq=get_image(xfile,3,fileref=science_record[0])     # Get the dq 
		if len(dq)==0:
			xstring = 'NOK: Problem with dq extension of %s' % record[0]
			history.write('%s\n' % xstring)
			print xstring
			# 110926 - ksl - modified to allow this to process the image even if there was no dq array
			# return xstring
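
		# Choose how to model the persistence decay: model_type 0 uses the modified
		# fermi-distribution model evaluated directly here, while types 1 and 2 use
		# the same interpolation routine (make_persistence_image) with different
		# calibration files named in the parameter file.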

		if model_type==0:
			model_persistence=calc_persist(x,dq,dt,norm,alpha,gamma,e_fermi,kT)
		elif model_type==1:
			# print 'Model type is 1'
			xfile=read_parameter(parameter_file,'a_gamma')
			# The next lines are awkward, because the parameter file is read multiple times
			if i==0:
				history.write('! Reference file containing spatially-averaged persistence model: %s\n' %  xfile)
			model_persistence=make_persistence_image(x,cur_sci_exp,dt,xfile)
		elif model_type==2:
			# print 'Model type is 2'
			xfile=read_parameter(parameter_file,'fermi')
			if i==0:
				history.write('! Reference file containing spatially-averaged persistence model: %s\n' %  xfile)
			model_persistence=make_persistence_image(x,cur_sci_exp,dt,xfile)
		else:
			print 'Error: subtract_persist: Unknown model type %d' % model_type
			return 'NOK'

		values=how_much(model_persistence)

		if i==0:
			persist=model_persistence
			stimulus=x   # This is an array which contains the maximum counts in a pixel
			xtimes=numpy.ones_like(persist)
			xtimes=xtimes*dt # This is an array containing the delta time at which the stimulus occurred
		else:
			xpersist=model_persistence
			stimulus=numpy.select([xpersist>persist],[x],default=stimulus)
			xtimes=numpy.select([xpersist>persist],[dt],default=xtimes)
			persist=numpy.select([xpersist>persist],[xpersist],default=persist)
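		# Note: the numpy.select calls above keep, pixel by pixel, the values from
		# whichever earlier exposure predicts the largest persistence, so stimulus,
		# xtimes and persist always describe the single worst offender for each pixel.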
		

		# Get some elementary statistics on the stimulus
		xvalues=get_stats(x,70000)

		history.write('\nsubtract_persist: Stimulus by %30s from program %s Visit %s\n' % (record[0],cur_progid,cur_visit))
		history.write('\nsubtract_persist: The filter was %s and exposure was %6.1f for target %s\n' % (cur_sci_fil,cur_sci_exp,cur_sci_obj))
		history.write('subtract_persist: The exposure was %8.0f s earlier than the current exposure\n' % dt)
		history.write('subtract_persist: The median value in the stimulus image was %6.1f and the number of saturated pixels was %d\n' % (xvalues[0],xvalues[1]))
		history.write('subtract_persist:   The maximum value for persistence is %f\n'   % values[4])
		history.write('subtract_persist:    The median value for persistence is %f\n'   % values[5])
		history.write('subtract_persist: 90 percent of persist values less than %f\n'   % values[6]) 
		history.write('subtract_persist: 99 percent of persist values less than %f\n'   % values[7]) 
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.10 e/s\n' % (values[1],values[1]*100./values[0]))
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.03 e/s\n' % (values[2],values[2]*100./values[0]))
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.01 e/s\n' % (values[3],values[3]*100./values[0]))
		string = 'subtract_persist: Finished (%2d of %2d) %s' % (i+1,len(records)-1,record[0])
		#130909 Removed print statement as unnecessary.  The string still goes to the history file
		# print string
		history.write('%s\n' % string)

		# Summarize the stimulus, printing out the filename, prog_id, visit_name, target, dt and the number of pixels above the saturation level of 70000
		scan='No'
		if record[4]=='scan':
			scan='Yes'

		stimulus_summary='Stimulus: %40s %10s %10s %20s %8.0f %3d %3s %6s\n' % (record[0],cur_progid,cur_visit,cur_sci_obj,dt,xvalues[1],record[9],scan)
		history.write('! %s\n' % stimulus_summary)

		if i==last_external:
			ext_values=how_much(persist)
			ext_persist=numpy.copy(persist)

		i=i+1
	
	# This is the end of the loop where the persistence model is calculated

	# Apply the fix to account for spatial variations in persistence, and then
	# report first on the external persistence for this file
	if xynorm !='' and numpy.shape(xcorr)==numpy.shape(persist):
		persist=persist*xcorr


	if len(ext_values)>0:

		f1=ext_values[1]*100./ext_values[0]
		f2=ext_values[2]*100./ext_values[0]
		f3=ext_values[3]*100./ext_values[0]
		emeasure='%6.2f %6.2f %6.2f' % (f1,f2,f3)

		history.write('\nsubtract_persist: Estimate of persistence from earlier visits\n')
		history.write('subtract_persist:   The maximum value for persistence is %f\n'   % ext_values[4])
		history.write('subtract_persist:    The median value for persistence is %f\n'   % ext_values[5])
		history.write('subtract_persist: 90 percent of persist values less than %f\n'   % ext_values[6]) 
		history.write('subtract_persist: 99 percent of persist values less than %f\n'   % ext_values[7]) 
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.10 e/s\n' % (ext_values[1],f1))
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.03 e/s\n' % (ext_values[2],f2))
		history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.01 e/s\n' % (ext_values[3],f3))
	else:
		emeasure='%6.2f %6.2f %6.2f' % (0 ,0,0)
		history.write('\nsubtract_persist: This exposure has no persistence from earlier visits.  All persistence is self-induced\n')



	# Now evaluate the total persistence

	values=how_much(persist)


	f1=values[1]*100./values[0]
	f2=values[2]*100./values[0]
	f3=values[3]*100./values[0]
	measure='%6.2f %6.2f %6.2f' % (f1,f2,f3)


	history.write('\nsubtract_persist: Estimate of total persistence for this file\n')
	history.write('subtract_persist:   The maximum value for persistence is %f\n'   % values[4])
	history.write('subtract_persist:    The median value for persistence is %f\n'   % values[5])
	history.write('subtract_persist: 90 percent of persist values less than %f\n'   % values[6]) 
	history.write('subtract_persist: 99 percent of persist values less than %f\n'   % values[7]) 
	history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.10 e/s\n' % (values[1],f1))
	history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.03 e/s\n' % (values[2],f2))
	history.write('subtract_persist: %7d pixels (or %6.3f percent) greater than 0.01 e/s\n' % (values[3],f3))




	history.write('! PersistenceSum: External   %s\n'% emeasure)
	history.write('! PersistenceSum: Total      %s\n'% measure)


	# Now write out all of the new fits files

	# First find out the units of the science image

	units=get_keyword(science_record[0],1,'bunit')
	# print 'test',units
	if units[0]=='COUNTS/S':
		print 'Rescaling model for dataset %s to match the science image units %s ' % (dataset,units)
		persist/=2.4
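		# The hardwired factor of 2.4 presumably converts the model from electrons/s
		# to the COUNTS/S units of the science image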

	# subtract and write out the corrected image
	science=get_image(science_record[0],1,'no')
	original=numpy.copy(science)
	science=science-persist


	xname=parse_fitsname(science_record[0])

	persist_file=path+dataset+'_persist.fits'
	ext_persist_file=path+dataset+'_extper.fits'

	# Note: Do not use a name with an extension like flt.fits because it will interfere with making the list
	corrected_file=path+dataset+'_flt_cor.fits'
	stimulus_file=path+dataset+'_stim.fits'
	time_file=path+dataset+'_dt.fits'

	rewrite_fits(xname[0],persist_file,1,persist)
	rewrite_fits(xname[0],corrected_file,1,science)
	rewrite_fits(xname[0],stimulus_file,1,stimulus)
	# 140606 - Fix added to put stimulus file in the correct units.
	put_keyword(stimulus_file,1,'bunit','ELECTRONS')
	rewrite_fits(xname[0],time_file,1,xtimes)
	if len(ext_persist)>0:
		rewrite_fits(xname[0],ext_persist_file,1,ext_persist)

	# This completes the section which writes out all of the fits files
		
	# Get statistics on the images and make the 4 panel plot 

	xmed=numpy.median(original)
	zmed=numpy.median(persist)
	zmax=numpy.max(persist)


	pylab.figure(1,[12,12])
	pylab.title(dataset)
	pylab.subplot(221)
	pylab.imshow(original,origin='lower',cmap=pylab.cm.gray,vmin=xmed-0.1,vmax=xmed+0.1)
	pylab.title('Original')

	pylab.subplot(222)
	pylab.imshow(science,origin='lower',cmap=pylab.cm.gray,vmin=xmed-0.1,vmax=xmed+0.1)
	pylab.title('Subtracted')

	pylab.subplot(223)
	pylab.imshow(persist,origin='lower',cmap=pylab.cm.gray,vmin=zmed-0.1,vmax=zmed+0.1)
	pylab.title('Total Persistence')

	if len(ext_persist)>0:
		pylab.subplot(224)
		pylab.imshow(ext_persist,origin='lower',cmap=pylab.cm.gray,vmin=zmed-0.1,vmax=zmed+0.1)
		pylab.title('External Persistence')
	else:
		pylab.subplot(224)
		pylab.imshow(stimulus,origin='lower',cmap=pylab.cm.gray,vmin=0.0,vmax=200000)
		pylab.title('Stimulus')

	fig1=path+'Figs/'+dataset+'_subtract.png'


	if os.path.isfile(fig1):
		os.remove(fig1)
	pylab.savefig(fig1)
	os.chmod(fig1,0770)

	# Eliminated to prevent an error on linux having to do with tkinter
	# pylab.close('all')

	if ds9=='yes':
		LoadFrame(science_record[0],1,0,2,'histequ')
		LoadFrame(persist_file,2,0,2,'histequ')
		LoadFrame(corrected_file,3,0,2,'histequ')
		if len(ext_persist)>0:
			LoadFrame(ext_persist_file,4,0,2,'histequ')
		else:
			LoadFrame(stimulus_file,4,0,1e5,'histequ')

	# Finished plots

	# Finished everything so wrap it up
	history.write('# Finished Persistence processing of file %s\n' % science_record[1])
	history.close()

	# Update the summary file
	string='%20s %20s' % (emeasure,measure)
	per_list.update_summary(dataset,'Persist',string,fileroot,append='no')

	return string
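
# Illustrative call (not part of the original module; the dataset name and options
# are assumptions): run the full persistence calculation for a single exposure with
# the original fermi model and a 16 hour lookback, without displaying in ds9.
#
#   result = do_dataset(dataset='ia21h2e9q', model_type=0, fileroot='observations',
#                       ds9='no', lookback_time=16)
#   print result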
Example #2
0
def steer(argv):
    '''
	Steering routine for this module, intended to make it easier
	to inspect the results of persistence subtraction.
	'''

    fileroot = 'observations'
    status = 'Complete'
    prog_id = 0
    mjd_start = 0
    mjd_stop = 0
    proc_start = 0
    proc_stop = 0

    i = 1
    while i < len(argv):
        if argv[i] == '-h':
            print __doc__
            return
        elif argv[i] == '-prog_id':
            i = i + 1
            prog_id = int(argv[i])
        elif argv[i] == '-all':
            status = 'All'
        elif argv[i] == '-status':
            i = i + 1
            status = argv[i]
        elif argv[i] == '-start':
            i = i + 1
            z = argv[i]
            try:
                mjd_start = float(z)
            except ValueError:
                mjd_start = date.iso2mjd(z)
                print 'Start', z, mjd_start
        elif argv[i] == '-stop':
            i = i + 1
            z = argv[i]
            try:
                mjd_stop = float(z)
            except ValueError:
                mjd_stop = date.iso2mjd(z)
                print 'Stop', z, mjd_stop
        elif argv[i] == '-today':
            # today=date.get_gmt('%Y-%m-%d')
            today = date.get_gmt()
            today = date.parse_iso(today)
            proc_start = today - 86400
            proc_stop = today
        else:
            print 'Unknown switch %s' % argv[i]
            return

        i = i + 1

    # parse_iso returns a unix time in seconds since the epoch

    if len(argv) == 1:
        today = date.get_gmt()
        # print 'now',today
        today = date.parse_iso(today)
        proc_start = today - 3600
        proc_stop = today
        print 'Creating summary file for records processed in the last hour'
        print proc_start, proc_stop

    doit(fileroot, status, prog_id, mjd_start, mjd_stop, proc_start, proc_stop)
    return
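
# Illustrative invocations (the argv values are assumptions, not part of the
# original): with no arguments, summarize everything processed in the last hour;
# with -start/-stop, restrict the summary to a range of dates (MJD or ISO).
#
#   steer(['subtract_sum.py'])
#   steer(['subtract_sum.py', '-status', 'Complete', '-start', '2014-01-01', '-stop', '2014-02-01'])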
Example #3
0
def steer(argv):
	'''
	This is a steering routine for persistence subtraction so that options can be exercised from the
	command line.  See the top level documentation for details

	100907	ksl	Added to begin to automate the subtraction process
	101215	ksl	Moved to a separate routine so that one would be able to split various portions
			of persistence subtraction and evaluation of the results into multiple files
	111114	ksl	Added a command.log to keep track of all of the run_persist commands
	140924	ksl	Updated to allow for various model types
	160103	ksl	Began implementation of multiprocessing
	'''


	log('# Start run_persist  %s\n' % date.get_gmt())
	xstart=time.clock()
	cur_time=date.get_gmt()

	i=1
	dataset_list='none'

	model_type=1
	norm=0.3
	alpha=0.174
	gamma=1.0
	e_fermi=90000
	kT=20000
	fileroot='observations'
	words=[]
	mjd_start=0.0    # A small number for mjd
	mjd_stop=1.e6  # A large number for mjd
	dryrun='no'
	clean='no'
	ds9='no'
	local='no'
	pffile='persist.pf'
	lookback_time=16

	switch='single'
	np=1

	while i<len(argv):
		if argv[i]=='-h':
			print __doc__
			return    
		elif argv[i]=='-model':
			i=i+1
			model_type=int(argv[i])
		elif argv[i]=='-n':
			i=i+1
			norm=eval(argv[i])
		elif argv[i]=='-e':
			i=i+1
			e_fermi=eval(argv[i])
		elif argv[i]=='-kT':
			i=i+1
			kT=eval(argv[i])
		elif argv[i]=='-alpha':
			i=i+1
			alpha=eval(argv[i])
		elif argv[i]=='-gamma':
			i=i+1
			gamma=eval(argv[i])
		elif argv[i]=='-obslist':
			i=i+1
			fileroot=argv[i]
		elif argv[i]=='-many':
			i=i+1
			dataset_list=argv[i]
			switch='many'
			print 'OK, you want to evaluate a number of datasets in file %s' % dataset_list
		elif argv[i]=='-all':
			switch='all'
			print 'OK you want to evaluate all the records in the obslist'
		elif argv[i] =='-prog_id':
			i=i+1
			prog_id=int(argv[i])
			switch='prog_id'
			print 'OK, you want to evaluate all the records for program %d' % prog_id
		elif argv[i]=='-start':
			i=i+1
			z=argv[i]
			try:
				mjd_start=float(z)
			except ValueError:
				mjd_start=date.iso2mjd(z)
			if switch !='prog_id':
				switch='all'
		elif argv[i]=='-stop':
			i=i+1
			z=argv[i]
			try:
				mjd_stop=float(z)
			except ValueError:
				mjd_stop=date.iso2mjd(z)
			if switch !='prog_id':
				switch='all'
		elif argv[i]=='-dryrun':
			dryrun='yes'
			print 'OK, This will be a dry run!'
		elif argv[i]=='-clean':
			dryrun='yes'
			clean='yes'
			print 'OK. This run will clean out various Persist directories, and revise the .sum file'
		elif argv[i]=='-ds9':
			ds9='yes'
		elif argv[i]=='-local':
			local='yes'
		elif argv[i]=='-pf':
			i=i+1
			pffile=argv[i]
		elif argv[i]=='-lookback':
			i=i+1
			lookback_time=eval(argv[i])
		elif argv[i]=='-np':
			i=i+1
			np=int(argv[i])
		elif argv[i][0]=='-':
			print 'Error: Unknown switch ---  %s' % argv[i]
			return
		else:
			words.append(argv[i])
		i=i+1

	# At this point all of the options have been processed and we can
	# begin the processing of individual datasets

	# Check that the listfile actually exists, and if not exit with a stern warning

	listfile=fileroot+'.ls'
	if os.path.exists(listfile)==False:
		print 'Error: run_persist.steer - No %s file in run directory.  EXITING!!!' % listfile
		return
	
	# At this point, we are able to determine exactly what datasets to process
	log('# Starting at %s\n' % (cur_time),'command.log')
	log('# Starting at %s\n' % (cur_time))
	string=''
	for one in argv:
		string=string+'%s ' % one
	log('Command:  %s\n' % string,'command.log')
	log('Command:  %s\n' % string)


	datasets=[]
	
	if switch=='single': #  Then we are processing a single file
		datasets.append(words[0])
	elif switch=='all': # Then we are working from the obslist
		records=per_list.read_ordered_list_mjd(fileroot,mjd_start,mjd_stop)
		for record in records:
			datasets.append(record[1])
	elif switch=='many':  # Then we are reading a file with rootnames of the files we want to process
		f=open(dataset_list,'r')
		lines=f.readlines()
		f.close()
		for line in lines:
			x=line.strip()
			if len(x)>0 and x[0]!='#':
				xx=x.split()  #  Take the first word to be the dataset name
				datasets.append(xx[0])
	elif switch=='prog_id':
		records=per_list.read_ordered_list_progid(fileroot,prog_id,mjd_start,mjd_stop)
		for record in records:
			datasets.append(record[1])
	else:
		print 'Error: run_persist: Unknown switch %s'% switch

	# Ok, now, unless this is a dryrun we actually process the data
	ntot=len(datasets)
	print 'There are %d datasets to process' % ntot          

	dry=[]
	if dryrun=='yes':
		for one in datasets:
			record=per_list.read_ordered_list_one(fileroot,one)
			dry.append(record)
		per_list.write_ordered_list('dryrun',dry)
		if clean=='yes':
			# xclean=open('CleanFiles','w')
			# os.chmod('CleanFiles',0770)

			xclean=per_list.open_file('CleanFiles')

			for one in dry:
				xxx=per_list.set_path(one[0])
				xclean.write('rm -r -f %s\n' % xxx)
			xclean.close()
		# return
	elif np==1 or len(datasets)==1:
		n=1
		for one in datasets:
			do_dataset(one,model_type,norm,alpha,gamma,e_fermi,kT,fileroot,ds9,local,pffile,lookback_time)
			print '# Completed dataset %d of %d. Elapsed time is %0.1f s' % (n,ntot,time.clock()-xstart)
			n=n+1
	else:
		print 'There will be %d processes running simultaneously' % np
		jobs=[]
		for one in datasets:
			p=multiprocessing.Process(target=do_dataset,args=(one,model_type,norm,alpha,gamma,e_fermi,kT,fileroot,ds9,local,pffile,lookback_time,))
			jobs.append(p)
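		# Throttle the work: start the first np jobs immediately, then poll every
		# 2 seconds and start another job whenever the number still running
		# (from get_no_jobs) drops below np.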
		i=0
		while i<np and i<len(jobs):
			print '!Starting %s' % datasets[i]
			one=jobs[i]
			one.start()
			i=i+1

		njobs=get_no_jobs(jobs)

		while i<len(jobs):
			time.sleep(2)
			njobs=get_no_jobs(jobs)
			print 'Running %d jobs,including job %d (%s) of %d total' % (njobs,i,datasets[i-1],len(datasets))
			if njobs<np:
				print 'Starting: ',datasets[i]
				one=jobs[i]
				one.start()
				i=i+1

		# Wait for all of the jobs to finish, not just the last one created
		for one in jobs:
			one.join()
		print 'Completed multiprocessing'




	per_list.fixup_summary_file(datasets)

	print 'xxx',xstart,time.clock()

	
	dtime=time.clock()-xstart
	cur_time=date.get_gmt()
	log('# End  %s  (Elapsed time for %d datasets was %.1f s, or %.1f s per dataset)\n' % (date.get_gmt(),ntot,dtime,dtime/ntot))
	cur_time=date.get_gmt()
	log('# End  %s  (Elapsed time for %d datasets was %.1f s, or %.1f s per dataset)\n' % (date.get_gmt(),ntot,dtime,dtime/ntot),'command.log')

	log('# Finished at %s\n' % (cur_time))
	log('# Finished at %s\n' % (cur_time),'command.log')
	return
Example #4
0
def doit(fileroot='observations',
         status='Complete',
         prog_id=0,
         mjd_start=0,
         mjd_stop=0,
         proc_start=0,
         proc_stop=0,
         censor='yes'):
    '''
	This is the main routine for the subtract_asum program

	It calls the routine to read the main parts of the summary file and then reformats the results
	so a table can be made.  It then calls the routine that makes the html file, and another routine
	that attempts to locate examples of the worst persistence, and another to make a figure.

	If censor=='yes', then certain calibration programs and all Tungsten lamp exposures are
	excluded from analysis

	
	'''
    lines = read_sum_file(fileroot, status, prog_id, mjd_start, mjd_stop,
                          proc_start, proc_stop)
    xstart = time.time()
    print 'xstart', xstart
    cur_time = date.get_gmt()

    # Get programs, visits, and locations

    location = []
    for one in lines:
        file = one[12]
        words = file.split('/')
        try:
            x = file[0:file.rindex('/')]
            location.append(x)
        except ValueError:
            print 'Error: no / in %s\n' % file

    x = set(location)
    x = list(x)

    ntot = len(x)
    print 'There are %d directories to process' % ntot

    n = 1
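    # For each Persist directory, write a small shell script ('TarEm') that
    # hard-links the *persist.fits and *cor.fits products into a staging
    # directory, tars that up under long_<prog>/, and then removes the staging
    # copy; the script is run with subprocess.Popen.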
    for one in x:
        print 'Starting %s' % one
        g = open('TarEm', 'w')
        word = one.split('/')
        tar_dir = 'long_%s/%s_%s' % (word[1], word[2], word[3])
        src_dir = one
        string = 'rm -r %s\n' % tar_dir
        string = string + 'mkdir %s\n' % tar_dir
        string = string + 'ln %s/*persist.fits %s\n' % (src_dir, tar_dir)
        string = string + 'ln %s/*cor.fits %s\n' % (src_dir, tar_dir)
        string = string + 'tar czf long_%s/%s.%s.tar.gz %s\n' % (
            word[1], word[2], word[3], tar_dir)
        string = string + 'rm -r %s\n' % tar_dir
        g.write('%s\n' % string)
        g.close()
        proc = subprocess.Popen('source TarEm',
                                shell=True,
                                stdout=subprocess.PIPE)
        x = proc.communicate()[0]
        dt = time.time() - xstart
        print dt
        print '# Completed dataset %d of %d. Elapsed time is %0.1f s (Ave %0.1f)' % (
            n, ntot, dt, dt / n)
        n = n + 1
    print x
Example #5
0
def do_dataset(dataset='ia21h2e9q',model_type=1,norm=0.3,alpha=0.2,gamma=0.8,e_fermi=80000,kT=20000,fileroot='observations',ds9='yes',local='no',pffile='persist.pf',lookback_time=16):
	'''

	Run the persistence 'pipeline' for a single dataset.

	where	dataset is the dataset name
		norm,alpha,gamma, e_fermi, kT define the model
		fileroot is the rootname of the file produced by per_list
		ds9 indicates whether one should try to show results in ds9
		local=='yes' implies that the output files are created directly below
			the place the program is being run, instead of in the
			Visit directories
		pffile is the parameter file which contains the names of files needed
			to create models of persistence
		


	History

	101215	ksl	Began coding to provide a mechanism to control the running of
			persistence subtraction software for larger numbers of datasets
	110103	ksl	Added local switch for testing
	140929	ksl	Added new variable model_type to do_dataset, which is simply passed to subtract_persist.do_dataset.
	'''



	cur_time=date.get_gmt()

	print '# Processing dataset %s at %s' % (dataset,cur_time)


	log('# Starting dataset %s at %s\n' % (dataset,cur_time))

	record=per_list.read_ordered_list_one(fileroot,dataset)
	if len(record)>0:
		log('run_persist: flt file is %s\n' % record[0])
	else:
		log('run_persist: dataset %s not found in %s.ls\n' % (dataset,fileroot))
		log('NOK: Processing aborted for dataset %s\n' % dataset)
		log('# Finished dataset %s at %s\n' % (dataset,cur_time))
		return 'Error: Dataset not found at %s' % cur_time
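
	# The per-dataset pipeline runs in a fixed order: persistence subtraction
	# (subtract_persist.do_dataset), peak identification (peaks.do_dataset),
	# evaluation at those peaks (subtract_eval.do_dataset), an html summary page
	# (subtract_html.do_dataset), and finally an update of the summary file.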

	# Carry out persistence subtraction for this dataset
	string=subtract_persist.do_dataset(dataset,model_type,norm,alpha,gamma,e_fermi,kT,fileroot,ds9,local,pffile,lookback_time)

	log('%s\n' % string)
	if string[0:3]=='NOK':
		sum_string='%s %s' % (cur_time,string)
		log('NOK: Processing aborted for dataset %s\n' %dataset)
		cur_time=date.get_gmt()
		log('# Finished dataset %s at %s\n' % (dataset,cur_time))
		return

	# print string

	# Carry out peak identification for this dataset
	string=peaks.do_dataset(dataset,fileroot,local=local)
	log('%s\n' % string)
	if string[0:2]!='OK':
		log('NOK: Processing aborted for dataset %s\n' % dataset)
		cur_time=date.get_gmt()
		log('# Finished dataset %s at %s\n' % (dataset,cur_time))
		return
	
	# Evaluate the results at the positions identified in peaks

	string=subtract_eval.do_dataset(dataset,local=local)
	log('%s\n' % string)

	# Make an html file for the dataset

	string=subtract_html.do_dataset(dataset,local=local)
	log('%s\n' % string)
	if string[0:2]!='OK':
		log('NOK: Processing aborted for dataset %s\n' %dataset)
		cur_time=date.get_gmt()
		log('# Finished dataset %s at %s\n' % (dataset,cur_time))
		return


	words=string.split()

	# Now update the summary file
	per_list.update_summary(dataset,'Complete_%s'% VERSION,words[2])


	cur_time=date.get_gmt()
	log('# Finished dataset %s at %s\n' % (dataset,cur_time))
	return