Example #1
def doit(fileroot='observations'):
    '''
	Main routine which locates any missing files
	and writes their names to the screen and 
	to a file called 'Missing.txt'

	Note: It is possible this should be included 
	as part of per_list
	'''
    records = per_list.read_ordered_list0(fileroot)
    sums = read_summary_file(fileroot)

    print len(records), len(sums)

    g = open('Missing.txt', 'w')

    i = 0
    nmissing = 0
    while i < len(records):
        record = records[i]
        sum = sums[i]

        # Get the flt file name, eliminating the extension that is part of the name in the .ls file
        flt_name = record[0]
        flt_name = per_fits.parse_fitsname(flt_name)
        flt_name = flt_name[0]

        persist_name = 'None'
        if sum[5].count('Complet'):
            try:
                persist_name = sum[12]
            except IndexError:
                print 'Incomplete obs.sum record: ', sum
        # print flt_name,persist_name

        flt = 'yes'
        if not os.path.exists(flt_name):
            flt = 'no'
        persist = 'yes'
        if persist_name != 'None' and not os.path.exists(persist_name):
            persist = 'no'

        if persist == 'no' or flt == 'no':
            string = '%10s %50s %10s  %60s %10s' % (record[1], flt_name, flt,
                                                    persist_name, persist)
            print string
            g.write('%s\n' % string)
            nmissing = nmissing + 1

        i = i + 1
    g.close()
    print 'The number of missing files was %d.  See Missing.txt' % nmissing
    return
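A minimal usage sketch, assuming (as the routine itself does) that per_list and per_fits are importable and an observations.ls file created by per_list is in the current directory:

doit('observations')  # prints any missing flt/persist files and writes Missing.txt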
Example #2
def check4scan(filename='./Visit43/ic9t43j1q_flt.fits[1]'):
    '''
	Determine whether or not a raw, ima, or flt file is associated with an observation involving  
	a spatial scan.  

	Return:
		scan	if a spatial scan
		stare	if the spt file exists, but it is not a spatial scan
		no_spt	if the spt file does not exist
	
	Notes:

	The routine looks for the spt file corresponding to the dataset and
	parses the header to find out if it is a scanned observation.
	
	130225  Coded and Debugged
	130307  Replaced routine using astropy.fits with iraf because astropy.fits
		was very slow
	'''

    xscan = 'unknown'

    # First strip off the extension, if any
    xname = per_fits.parse_fitsname(filename, 0, 'yes')
    xfile = xname[0]
    xfile = xfile.replace('flt', 'spt')
    xfile = xfile.replace('raw', 'spt')
    xfile = xfile.replace('ima', 'spt')

    # Now we should have the name of the spt file
    if os.path.exists(xfile):
        # Revert to iraf/pyraf for performance reasons
        # xx=per_fits.get_keyword(xfile,0,'SCAN_TYP')
        xx = pyraf.iraf.hselect(xfile + '[0]', 'SCAN_TYP', 'yes', Stdout=1)
        xx = xx[0].split('\t')
    else:
        return 'no_spt'

    if xx[0] == 'N':
        xscan = 'stare'
    else:
        xscan = 'scan'

    return xscan
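A usage sketch, using the routine's own illustrative default path:

xscan = check4scan('./Visit43/ic9t43j1q_flt.fits[1]')
if xscan == 'scan':
    print 'Spatial-scan observation'
elif xscan == 'stare':
    print 'Stare observation'
else:  # 'no_spt'
    print 'No spt file found; scan status unknown'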
Example #3
File: scan.py Project: kslong/Persistence
def is_scan(filename='./Visit43/ic9t43j1q_flt.fits[1]'):
    '''
	Determine whether or not a file is associated with an observation that contains 
	a spatial scan

	Return:
		yes	if a spatial scan
		no	if the spt file exists, but it is not a spatial scan
		noinfo	if the spt file does not exist
	
	Notes:

	The routine looks for the spt file corresponding to the dataset and
	parses the header to find out if it is a scanned observation.
	
	130225  Coded and Debugged
	'''

    # First strip off the extension, if any
    xname = per_fits.parse_fitsname(filename, 0, 'yes')
    xfile = xname[0]
    xfile = xfile.replace('flt', 'spt')
    xfile = xfile.replace('raw', 'spt')
    xfile = xfile.replace('ima', 'spt')
    # Now we should have the name of the file

    # print xfile

    try:
        x = fits.open(xfile)
    except IOError:
        return 'noinfo'

    # print x[0].header

    scan_typ = x[0].header['SCAN_TYP']
    x.close()

    if scan_typ == 'N':
        return 'no'

    return 'yes'
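A shorter variant of the same header test, as a sketch; it assumes astropy.io.fits is importable and uses fits.getval, which reads a single keyword without leaving the file open:

from astropy.io import fits

def is_scan_brief(xfile):
    # Sketch only; same return convention as is_scan above
    try:
        scan_typ = fits.getval(xfile, 'SCAN_TYP', ext=0)
    except IOError:
        return 'noinfo'
    if scan_typ == 'N':
        return 'no'
    return 'yes'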
Example #4
File: scan.py Project: kslong/Persistence
def compare(filename='./Visit43/ic9t43j1q_flt.fits[1]'):
    '''
	Compare information in the flt files and the ima file

	notes:  This is just a test routine
	'''
    xname = per_fits.parse_fitsname(filename, 0, 'yes')
    xfile = xname[0]
    ima_file = xfile
    ima_file = ima_file.replace('flt', 'ima')
    flt_file = xfile
    flt_file = flt_file.replace('ima', 'flt')

    # So now we have the name of the ima and the flt file no matter what

    ima_data = per_fits.get_image(ima_file, 1, 'e', fileref=flt_file)
    flt_data = per_fits.get_image(flt_file, 1, 'e')

    print ima_data.shape, flt_data.shape

    per_fits.rewrite_fits(flt_file, 'x_flt.fits', 1, flt_data)
    per_fits.rewrite_fits(flt_file, 'x_ima.fits', 1, ima_data)
Example #5
File: scan.py Project: kslong/Persistence
def cal_scan(filename='./Visit43/ic9t43j1q_flt.fits[1]'):
    '''
	Recalibrate a wfc3 file, changing calibration switches to ensure
	that only those steps needed for scanned observations are
	carried out
	'''

    # First strip off the extension, if any
    xname = per_fits.parse_fitsname(filename, 0, 'yes')
    xfile = xname[0]
    xfile = xfile.replace('flt', 'ima')
    xfile = xfile.replace('raw', 'ima')
    # Now we should have the name of the file

    try:
        x = fits.open(xfile)
    except IOError:
        print 'Error: Could not open %s' % filename
        return

    exptime = x[1].header['EXPTIME']
    data = x[1].data
    data = data * exptime  # Convert the count-rate image (e/s) back to counts (e)
Example #6
def do_dataset(dataset='ib6v19bzq', radius=50, local='no'):
    '''
	Examine how well one has succeeded in subtracting persistence
	from a single dataset.  This assumes that all the actual subtraction
	has been done and that the peaks file is in place.

	radius here is the half-size of the box that is plotted.

	110107	Changed the name of the output figure files so that it 
		would be easier to keep track of the files that had
		been created.  Also removed some text from figure.
	110203	ksl	Added local switch so testing would be easier
	'''

    # Read information about this dataset from the observations.ls file
    # Note that the file name is hardcoded here
    record = per_list.read_ordered_list_one('observations', dataset)

    # Set up the path, and open the history file

    path = per_list.set_path(record[0], 'no', local)
    fig_path = path + '/Figs/'
    history = open(path + dataset + '.txt', 'a')
    history.write('Start subtract_eval for dataset %s\n' % dataset)

    # Get complete names for all of the files that are to be used.
    file_flt, ext, all = per_fits.parse_fitsname(record[0])
    file_persist = path + dataset + '_persist.fits'
    file_cor = path + dataset + '_flt_cor.fits'
    file_stim = path + dataset + '_stim.fits'
    file_xy = path + dataset + '_persist.peaks.dat'

    # Check that all of these files exist

    ready = True

    for one in (file_flt, file_persist, file_cor, file_stim, file_xy):
        if not os.path.exists(one):
            print 'Error: subtract_eval.do_dataset: %s does not exist' % one
            ready = False

    if not ready:
        return 'Error: subtract_eval.do_dataset: Some files are missing'

    # At this point we know all of the necessary files exist

    # Since we are ready we should now delete all the figures from previous
    # runs.  Note that this command is dangerous.

    png_files = glob.glob('%s/%s.peak*png' % (fig_path, dataset))
    for one in png_files:
        os.remove(one)

    # Read the xy positions from file (produced by peaks.py)

    xy = read_peaks(file_xy)

    # Read all of the images
    flt = per_fits.get_image_ext(file_flt, 1)
    per = per_fits.get_image_ext(file_persist, 1)
    cor = per_fits.get_image_ext(file_cor, 1)
    stim = per_fits.get_image_ext(file_stim, 1)

    all_orig = []  # This is a place to store histograms of the original data
    all_corr = []  # This is a place to store histograms of the corrected data

    # Set up several arrays that are used in histogram creation (as the x axes)
    # set up the stim array

    # stim_hist=[1.,3.8]
    stim_hist = [1., 4.5]  # 30,000 electrons
    # x=4.
    dx = 0.2
    x = stim_hist[1] + dx  # We treat everything below 30,000 as background
    while x <= 7:
        stim_hist.append(x)
        x = x + dx

    stim_hist = numpy.array(stim_hist)

    stim_hist = 10**stim_hist
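    # stim_hist now runs from 10 e through ~3.2e4 e (one wide background bin) and then up to ~8e6 e in 0.2 dex steps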

    all_sorig = []  # Histograms of the original data as a function of stimulus
    all_scorr = []  # Histograms of the corrected data as a function of stimulus

    # Set up the per_hist array
    per_hist = []
    qper = 0
    dper = 0.02
    while qper <= 0.3:
        per_hist.append(qper)
        qper = qper + dper
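    # per_hist now holds 0.02 e/s-wide bin edges covering estimated persistence from 0 to ~0.3 e/s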

    source_no = 0
    for one in xy:  # Main loop for each point
        source_no = source_no + 1
        x = one[0]
        y = one[1]

        # Make the stamps that are needed for each file
        xmin = one[0] - radius
        xmax = one[0] + radius
        ymin = one[1] - radius
        ymax = one[1] + radius

        ysize, xsize = numpy.shape(flt)
        if ymin < 1 or xmin < 1 or xmax > xsize or ymax > ysize:
            continue

        xflt = flt[ymin - 1:ymax, xmin - 1:xmax]
        xper = per[ymin - 1:ymax, xmin - 1:xmax]
        xcor = cor[ymin - 1:ymax, xmin - 1:xmax]
        xstim = stim[ymin - 1:ymax, xmin - 1:xmax]

        # OK at this point we have all the stamps; now flatten them

        xxflt = numpy.ravel(xflt)
        xxcor = numpy.ravel(xcor)
        xxper = numpy.ravel(xper)
        xxstim = numpy.ravel(xstim)

        med_flt = numpy.median(xxflt)
        max_per = numpy.max(xxper)
        zmin = med_flt - 0.05
        zmax = med_flt + 0.1

        fig_root = path + 'Figs/%s.peak.%03d_%03d.' % (dataset, x, y)

        # Create a figure containing the original image, the persistence image and the subtracted
        # image surrounding a narrow region.  This is the 4-panel figure that appears in the
        # summary html file for each region

        pylab.figure(11, [8, 8])
        pylab.clf()
        pylab.subplot(221)
        pylab.imshow(xflt,
                     origin='lower',
                     cmap=pylab.cm.gray,
                     vmin=zmin,
                     vmax=zmax)
        pylab.title('Original')
        pylab.subplot(222)
        # pylab.imshow(xper,origin='lower',cmap=pylab.cm.gray,vmin=0.0,vmax=0.1)
        pylab.imshow(xper,
                     origin='lower',
                     cmap=pylab.cm.gray,
                     vmin=-0.05,
                     vmax=0.1)
        pylab.title('Model')
        pylab.subplot(223)
        pylab.imshow(xcor,
                     origin='lower',
                     cmap=pylab.cm.gray,
                     vmin=zmin,
                     vmax=zmax)
        pylab.title('Corrected')

        # Plot the figure that shows the observed rate as a function of estimated persistence

        pylab.subplot(224)
        pylab.plot(xxper, xxflt, '.', color='green')
        pylab.plot(xxper, xxcor, '.', color='yellow')

        # This constructs histograms of the median value of the original and subtracted
        # pixels as a function of the estimated persistence

        orig = []
        corr = []

        ii = 0
        while ii < len(per_hist) - 1:
            value = get_stats(xxper, xxflt, per_hist[ii], per_hist[ii + 1])
            orig.append(value)
            value = get_stats(xxper, xxcor, per_hist[ii], per_hist[ii + 1])
            corr.append(value)
            ii = ii + 1

        # Append the results for this particular point to one for all of the points
        # This is used in the summary slide for the entire field

        all_orig.append(orig)
        all_corr.append(corr)

        # Note that per_hist has one more element than the other arrays, so we must allow for this
        pylab.plot(per_hist[0:len(per_hist) - 1],
                   orig,
                   ls='steps-post',
                   lw=4,
                   color='red')
        pylab.plot(per_hist[0:len(per_hist) - 1],
                   corr,
                   ls='steps-post',
                   lw=4,
                   color='blue')

        pylab.axis([0, max_per + 0.01, med_flt - 0.2, med_flt + 0.3])
        pylab.xlabel('Est. Persistence (e/s)')
        pylab.ylabel('Flux (e/s)')

        # Finished with this histogram; now write out the figure

        figure_name = '%s%d.png' % (fig_root, 1)
        if os.path.isfile(figure_name):
            os.remove(figure_name)
        pylab.savefig(figure_name)
        os.chmod(figure_name, 0770)

        # Plot the original and subtracted pixels as a function of distance from the center position
        # Create an array that contains the distance from the center for each pixel

        z = numpy.arange(-radius, radius + 1, 1)
        xx, yy = numpy.meshgrid(z, z)
        zz = xx * xx + yy * yy
        zzz = numpy.sqrt(zz)
        zzzz = numpy.ravel(zzz)

        pylab.figure(13, [6, 6])
        pylab.clf()
        pylab.plot(zzzz, xxflt, 'o')
        pylab.plot(zzzz, xxcor, 'o')

        # This constructs histograms of the median value of the original and subtracted
        # pixels as a function of distance from the source.
        # Note that the size that is plotted here is not determined by radius, but is
        # hardcoded to be 20 pixels.  This is typically smaller than radius because
        # we want to see how well a single star is subtracted.

        meds = []
        med_corr = []
        rr = []
        r = 0
        dr = 3
        rmax = 20
        while r < rmax:
            value = get_stats(zzzz, xxflt, r, r + dr)
            meds.append(value)
            value = get_stats(zzzz, xxcor, r, r + dr)
            med_corr.append(value)
            rr.append(r + 0.5 * dr)
            r = r + dr

        pylab.plot(rr, meds, ls='steps-mid', lw=3)
        pylab.plot(rr, med_corr, ls='steps-mid', lw=3)

        pylab.axis([0, rmax, med_flt - 0.2, med_flt + 0.3])
        pylab.xlabel('Radius (pixels)')
        pylab.ylabel('Flux (e/s)')

        figure_name = '%s%d.png' % (fig_root, 3)
        if os.path.isfile(figure_name):
            os.remove(figure_name)
        pylab.savefig(figure_name)
        os.chmod(figure_name, 0770)

        # 110622 - Eliminated to fix a problem on linux
        # pylab.close('all')

        # The next section gathers information about everything as a function of the stimulus

        i = 0
        sorig = []
        scorr = []
        while i < len(stim_hist) - 1:
            value = get_stats(xxstim, xxflt, stim_hist[i], stim_hist[i + 1])
            sorig.append(value)
            value = get_stats(xxstim, xxcor, stim_hist[i], stim_hist[i + 1])
            scorr.append(value)
            i = i + 1
        all_sorig.append(sorig)
        all_scorr.append(scorr)

        # This ends the main loop for each data point.

    # Now make the first summary figure, which is a plot of flux as a function of the
    # estimated persistence

    fig_root = path + 'Figs/%s.sum1' % (dataset)
    pylab.figure(14, [6, 6])
    pylab.clf()

    i = 0
    xmax = numpy.max(per_hist)
    ymax = (-1000)
    ymin = 1000
    per_hist = numpy.array(per_hist)
    per_hist = per_hist + 0.5 * dper

    while i < len(all_corr):
        corr = numpy.array(all_corr[i])
        orig = numpy.array(all_orig[i])

        corr = corr - corr[0]
        orig = orig - orig[0]
        k = 0
        while k < len(orig):
            if orig[k] < -900:
                break
            k = k + 1
        k = k - 1
        if k > 0:

            pylab.plot(per_hist[0:k], orig[0:k], 'ro-', lw=2)
            pylab.plot(per_hist[0:k], corr[0:k], 'bo-', lw=2)
            zmin = numpy.min(corr[0:k])
            if zmin < ymin:
                ymin = zmin
            zmax = numpy.max(orig[0:k])
            if zmax > ymax:
                ymax = zmax
        else:
            print 'Error: subtract_eval.do_dataset: there is a problem, because k=0'
        i = i + 1
    pylab.axis([0, xmax + 0.05, ymin - 0.05, ymax + 0.05])
    pylab.xlabel('Est. Persistence (e/s)')
    pylab.ylabel('Flux (e/s)')

    figure_name = fig_root + '.png'
    if os.path.isfile(figure_name):
        os.remove(figure_name)
    pylab.savefig(figure_name)
    os.chmod(figure_name, 0770)

    # 110622 - Eliminated to fix a problem on linux
    # pylab.close(14)

    # Now make the second summary figure showing the flux as a function of the stimulus.
    # Note that because the exposures occurred at different times you will get a range of
    # values at any given stimulus

    # Construct the x axis for this figure
    i = 0
    xstim_hist = []
    while i < len(stim_hist) - 1:
        xstim_hist.append(0.5 * (stim_hist[i] + stim_hist[i + 1]))
        i = i + 1

    fig_root = path + 'Figs/%s.sum2' % (dataset)
    pylab.figure(15, [6, 6])
    pylab.clf()

    # Now go through each row in the arrays and plot the results
    i = 0
    while i < len(all_corr):
        corr = numpy.array(all_scorr[i])
        orig = numpy.array(all_sorig[i])
        corr = corr - corr[0]
        orig = orig - orig[0]
        k = 0
        while k < len(orig):
            if orig[k] < -900:
                break
            k = k + 1
        k = k - 1
        if k > 0:
            pylab.semilogx(xstim_hist[0:k], orig[0:k], 'ro-', lw=2)
            pylab.semilogx(xstim_hist[0:k], corr[0:k], 'bo-', lw=2)
            zmin = numpy.min(corr[0:k])
            if zmin < ymin:
                ymin = zmin
            zmax = numpy.max(orig[0:k])
            if zmax > ymax:
                ymax = zmax
        i = i + 1

    pylab.xlabel('Stimulus (e)')
    pylab.ylabel('Flux (e/s)')
    pylab.axis([3e4, 1e7, -0.1, 0.3])

    figure_name = fig_root + '.png'
    if os.path.isfile(figure_name):
        os.remove(figure_name)
    pylab.savefig(figure_name)
    os.chmod(figure_name, 0770)

    # 110622 - Eliminated to fix a problem on linux
    # pylab.close(15)

    history.write('End subtract_eval for dataset %s\n' % dataset)
    history.close()
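do_dataset calls two helpers, read_peaks and get_stats, that are not among these examples. Below is a minimal sketch of each, inferred from how they are called above; the peaks file format and the -999 empty-bin sentinel are assumptions:

import numpy

def read_peaks(filename):
    # Hypothetical sketch: read whitespace-separated x y pixel positions,
    # one source per line, from the .peaks.dat file written by peaks.py
    xy = []
    for line in open(filename).readlines():
        words = line.strip().split()
        if len(words) >= 2 and words[0].isdigit():
            xy.append([int(words[0]), int(words[1])])
    return xy

def get_stats(x, y, xmin, xmax):
    # Hypothetical sketch: median of the y values whose matching x value
    # lies in [xmin, xmax).  Returning -999 for an empty bin is consistent
    # with the 'orig[k] < -900' test in do_dataset
    mask = (x >= xmin) & (x < xmax)
    if numpy.count_nonzero(mask) == 0:
        return -999.
    return numpy.median(y[mask])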
Example #7
def get_info(lines, apertures, filetype):
    '''
	Get all of the keyword information for a set of files and return this

	Notes:

	This section of the old make_ordered_list was put into a separate function
	so that it could be run in parallel 

	History

	160118	ksl	Added
	'''

    records = []
    times = []

    if len(lines) == 0:
        print 'There were no %s files in the directory structure' % filetype
        return []
    else:
        print 'There are %d datasets to process' % len(lines)

    i = 0
    for line in lines:

        line = line.strip()
        line = line.split()

        # 100807 Added date of file creation so we could handle non-unique data sets
        # This is the old version using pyraf, which has been put back for performance reasons
        xfile = '%s[1]' % line[0]
        if pyraf.iraf.imaccess(xfile):
            x = pyraf.iraf.hselect(
                xfile,
                '$I,rootname,proposid,linenum, instrume,detector,expstart,date-obs,time-obs,aperture,filter,exptime,crval1,crval2,targname,asn_id,pr_inv_L,date',
                'yes',
                Stdout=1)
            x = x[0].split('\t')
            # Kluge for raw data files which have two ROOTNAME keywords for unknown reasons
            if x[1] == x[2]:
                x.pop(2)

            x[16] = x[16].replace(' ', '-')  # Get rid of spaces in PI names (str.replace returns a new string)
            # Another kludge for raw files.  There is no 'date' field in the first extension, as there is for flt and ima files, but DATE does exist in extension 0
            if filetype == 'raw':
                xname = per_fits.parse_fitsname(xfile, 0, 'yes')
                xx = pyraf.iraf.hselect(xname[2], '$I,DATE', 'yes', Stdout=1)
                xx = xx[0].split('\t')
                x.append(xx[1])
            x[0] = line[0]

            #		# Replaced upcoming lines with iraf/pyraf for performance reasons
            #		xfile=line[0]
            #		if os.path.isfile(xfile) == True:
            #			x=per_fits.get_keyword(xfile,1,'rootname,proposid,linenum, instrume,detector,expstart,date-obs,time-obs,aperture,filter,exptime,crval1,crval2,targname,asn_id,pr_inv_L')
            #			x=[xfile]+x

            #			# for raw files the date is in extension 0, but for the others it is in 1.
            #			if filetype=='raw':
            #				date=per_fits.get_keyword(xfile,0,'date')
            #			else:
            #				date=per_fits.get_keyword(xfile,1,'date')
            #			x.append(date[0])

            scan = check4scan(xfile)

            if x[5] == 'IR':
                x[4] = scan
                j = string.count(x[9], 'SUB')
                if j == 0 or apertures != 'full':
                    records.append(x)
                    times.append(float(x[6]))
        else:
            print 'File %s does not really exist' % xfile
        i = i + 1
        if i % 100 == 1:
            print 'Inspected %6d of %6d datasets --> %6d IR datasets' % (
                i, len(lines), len(records))
    print 'Inspected %6d of %6d datasets --> %6d IR datasets' % (i, len(lines),
                                                                 len(records))
    return times, records
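A hypothetical usage sketch; the listing file name is an assumption, since the caller (the old make_ordered_list) is not shown:

lines = open('flt_files.ls').readlines()  # hypothetical listing, one flt path per line
times, records = get_info(lines, 'full', 'flt')  # 'full' rejects SUB-aperture datasets
print '%d IR datasets kept' % len(records)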