Example #1
0
def flatpipe(flatfiles, out_prefix):
    """
	flatpipe(flatfiles,out_prefix)

	Processes the spectroscopic flats by:
	  coadding
	  determining y-distortion
	  finding science slits and star boxes
	  creating normalized response flat

	Input:
	  flatfiles  - a list of the files to be processed
	  out_prefix - the output prefix of the distortion maps, normalized
	                 flat, and slit descriptors

	Output:
	  pickled description of the slits, star boxes, and distortion
	    coefficients
	  distortion maps
	  normalized flats
	"""

    # pyfits, scipy, and pickle are expected to be imported at module
    #   level (not visible in this chunk).
    from mostools import ycorrect, id_slits, spectools
    from lris.lris_biastrim import redside as biastrim
    from special_functions import lsqfit, genfunc
    from scipy import ndimage, stats

    # Read data from files, biastrim and coadd
    # The coadd is weighted by exposure time ('elaptime'), so flatavg
    #   ends up as a mean count-rate image.
    weight = 0
    for name in flatfiles:
        flattmp = pyfits.open(name)
        flatdata = biastrim(flattmp[0].data.astype(scipy.float32))
        exptime = flattmp[0].header['elaptime']

        if weight == 0:
            # First file initializes the accumulator.
            flatavg = flatdata
        else:
            flatavg += flatdata
        weight += exptime
        del flatdata
    del flattmp
    flatavg /= weight

    # Find the y-correction
    # ytrue/ymap are the forward/backward y-distortion solutions
    #   (coefficient sets) -- see mostools.ycorrect for their exact form.
    ytrue, ymap = ycorrect.ycorrect(flatavg)

    # Generate the distortion maps
    # Evaluate both solutions on the full pixel grid to obtain per-pixel
    #   forward (curved->straight) and backward (straight->curved) maps.
    coords = spectools.array_coords(flatavg.shape)
    x = coords[1].flatten()
    y = coords[0].flatten()
    yforw = genfunc(x, y, ytrue).reshape(coords[0].shape)
    yback = genfunc(x, y, ymap).reshape(coords[0].shape)
    del coords, x, y

    # Create straight image to find slits/normalization
    flat_ycor = spectools.resampley(flatavg, yforw, cval=0.)
    del flatavg

    # Identify slits and star boxes using brighter columns for each
    #   (straightened) row
    # Sorting each row puts the brightest columns on the right; the
    #   slice below selects roughly the 67th-90th percentile columns.
    #   NOTE: Python 2 integer division is relied upon in the slice
    #   bounds (width * 2 / 3 etc.).
    search = scipy.sort(flat_ycor, 1)
    width = search.shape[1]
    slits, starboxes = id_slits.id_slits(search[:,
                                                width * 2 / 3:width * 9 / 10])

    print "Starbox Locations..."
    for i, j in starboxes:
        print "[:,%d:%d]" % (i, j)

    print "Slit boundaries defined by..."
    for i, j in slits:
        print "[:,%d:%d]" % (i, j)

    # Normalize the straightened flatfield
    # For each slit: take the per-column median profile (ignoring zeroed
    #   pixels), smooth it, and divide it out to build a unit-response
    #   flat. Rows outside every slit keep the default value of 1.
    flatmodel = scipy.ones(flat_ycor.shape, scipy.float32)
    for i, j in slits:
        data = flat_ycor[i:j, :].copy()
        # Zeros are resampling fill values, not data; mask them as NaN
        #   so nanmedian ignores them.
        data[data == 0] = scipy.nan
        # NOTE(review): 'slice' shadows the builtin of the same name;
        #   harmless here but worth renaming if this code is touched.
        slice = stats.stats.nanmedian(data, axis=0)
        # Columns that were all-NaN get the global median as a fallback.
        slice[scipy.isnan(slice)] = stats.stats.nanmedian(slice)
        # Smooth the response profile; clamp non-positive values to 1 to
        #   avoid dividing by zero (or flipping the sign) below.
        norm = ndimage.gaussian_filter1d(slice, 23.)
        norm[norm <= 0] = 1.
        # Normalize a 3-row padded window around the slit, clipped to
        #   the array bounds.
        top = i - 3
        bottom = j + 3
        if top < 0:
            top = 0
        if bottom > flatmodel.shape[0]:
            bottom = flatmodel.shape[0]
        flatmodel[top:bottom, :] = flat_ycor[top:bottom, :] / norm
    del flat_ycor

    # Create normalized flatfield by resampling to curved frame
    flatnorm = spectools.resampley(flatmodel, yback, cval=1.)
    del flatmodel

    # Output normalized flat
    # NOTE(review): writeto raises if the file already exists -- TODO
    #   confirm whether reruns are expected to clobber outputs.
    flatname = out_prefix + "_flat.fits"
    pyfits.PrimaryHDU(flatnorm.astype(scipy.float32)).writeto(flatname)

    # Output coefficient/slit definition arrays
    outname = out_prefix + "_yforw.fits"
    pyfits.PrimaryHDU(yforw.astype(scipy.float32)).writeto(outname)
    outname = out_prefix + "_yback.fits"
    pyfits.PrimaryHDU(yback.astype(scipy.float32)).writeto(outname)
    # Slit/starbox definitions and distortion coefficients are pickled
    #   sequentially into one file (text-mode pickle, protocol 0).
    outname = out_prefix + "_ygeom.dat"
    outfile = open(outname, "w")
    pickle.dump(slits, outfile)
    pickle.dump(starboxes, outfile)
    pickle.dump(ytrue, outfile)
    pickle.dump(ymap, outfile)
    outfile.close()

    return yforw.astype(scipy.float32), yback.astype(
        scipy.float32), slits, starboxes, flatnorm.astype(scipy.float32)
Example #2
0
def flatpipe(flatfiles, out_prefix):
    """
	flatpipe(flatfiles,out_prefix)

	Processes the spectroscopic flats by:
	  coadding
	  determining y-distortion
	  finding science slits and star boxes

	Input:
	  flatfiles  - a list of the files to be processed
	  out_prefix - the output prefix of the distortion maps, normalized
			 flat, and slit descriptors

	Output:
	  pickled description of the slits, star boxes, and distortion
	    coefficients
	  distortion maps

	Unlike the red-side variant, this blue-side version processes the
	  two detector halves ('bottom' and 'top') separately and returns
	  dicts keyed by half; no normalized flat is produced.
	"""
    # pyfits, scipy, and pickle are expected at module level.
    from lris.lris_biastrim import blueside as biastrim
    from mostools import ycorrect, id_slits, spectools
    from special_functions import lsqfit, genfunc

    # Read data from files, biastrim and coadd
    # Exposure-time weighted coadd (mean count rate), as on the red side.
    weight = 0
    for name in flatfiles:
        flattmp = pyfits.open(name)
        flatdata = biastrim(flattmp[0].data.copy())
        exptime = flattmp[0].header['elaptime']

        if weight == 0:
            flatavg = flatdata
        else:
            flatavg += flatdata
        weight += exptime
        del flatdata
    del flattmp
    flatavg /= weight

    # The center of the mask...
    # Row index splitting the mosaic into its two halves.
    YMID = 2048

    # Find the y-correction
    # Both halves have the same shape, so one coordinate grid serves
    #   for the distortion-map evaluation of each half.
    coords = spectools.array_coords(flatavg[:YMID].shape)
    x = coords[1].flatten()
    y = coords[0].flatten()
    ytrue = {}
    ymap = {}
    yforw = {}
    yback = {}

    # Bottom
    a, b = ycorrect.ycorrect(flatavg[:YMID])
    yforw['bottom'] = genfunc(x, y, a).reshape(coords[0].shape)
    yback['bottom'] = genfunc(x, y, b).reshape(coords[0].shape)
    ytrue['bottom'] = a
    ymap['bottom'] = b
    # Top
    a, b = ycorrect.ycorrect(flatavg[YMID:])
    yforw['top'] = genfunc(x, y, a).reshape(coords[0].shape)
    yback['top'] = genfunc(x, y, b).reshape(coords[0].shape)
    ytrue['top'] = a
    ymap['top'] = b
    del coords, x, y

    # Create straight image to find slits
    flat_ycor = {}
    flat_ycor['bottom'] = spectools.resampley(flatavg[:YMID],
                                              yforw['bottom'],
                                              cval=0.)
    flat_ycor['top'] = spectools.resampley(flatavg[YMID:],
                                           yforw['top'],
                                           cval=0.)
    del flatavg

    slits = {}
    starboxes = {}
    for i in ['bottom', 'top']:
        # 'add' converts half-local row indices back to full-mosaic
        #   coordinates for the printed diagnostics.
        add = 0
        if i == 'top':
            add = YMID
        # Identify slits and star boxes using brighter columns for each
        #   (straightened) row
        # NOTE: Python 2 integer division in the slice bounds (selects
        #   roughly the 70th-90th percentile columns of each sorted row).
        search = scipy.sort(flat_ycor[i], 1)
        del flat_ycor[i]
        width = search.shape[1]
        slits[i], starboxes[i] = id_slits.id_slits(search[:, width * 7 /
                                                          10:width * 9 / 10])
        print "Starbox Locations for %s:" % i
        for a, b in starboxes[i]:
            print "[:,%d:%d]" % (a + add, b + add)
        print ""
        print "Slit Locations for %s:" % i
        for a, b in slits[i]:
            print "[:,%d:%d]" % (a + add, b + add)
        print ""

        # Output coefficient arrays
        # NOTE(review): writeto raises if the files already exist --
        #   TODO confirm rerun behavior.
        outname = out_prefix + "_yforw_%s.fits" % i
        pyfits.PrimaryHDU(yforw[i]).writeto(outname)
        outname = out_prefix + "_yback_%s.fits" % i
        pyfits.PrimaryHDU(yback[i]).writeto(outname)

    # Output distortion solutions and slit definitions
    # Sequential text-mode pickles, matching the red-side convention.
    outname = out_prefix + "_ygeom.dat"
    outfile = open(outname, "w")
    pickle.dump(slits, outfile)
    pickle.dump(starboxes, outfile)
    pickle.dump(ytrue, outfile)
    pickle.dump(ymap, outfile)
    outfile.close()

    return yforw, yback, slits, starboxes
def lris_pipeline(prefix,dir,scinames,arcname,flatnames,out_prefix,useflat=0,usearc=0,cache=0,offsets=None):
	"""
	lris_pipeline(prefix,dir,scinames,arcname,flatnames,out_prefix,
	              useflat=0,usearc=0,cache=0,offsets=None)

	End-to-end reduction of a red-side LRIS slitmask: flatfielding,
	  y-distortion correction, wavelength calibration, background
	  subtraction, and (optionally) 1d extraction. Outputs are written
	  as FITS images with the given out_prefix.

	Input:
	  prefix, dir - NOTE(review): not referenced in this body; possibly
	                  kept for interface compatibility -- confirm.
	  scinames    - list of science exposure filenames
	  arcname     - arclamp exposure filename (or, with usearc=1, the
	                  basis for locating the pre-made arc)
	  flatnames   - list of flatfield filenames (ignored if useflat=1)
	  out_prefix  - prefix for all output files
	  useflat     - 1 to load pre-made flats via flatload()
	  usearc      - 1 to load a pre-made straightened arc
	  cache       - 1 to cache the big output arrays to disk (low RAM)
	  offsets     - manual inter-exposure y-offsets (overrides the
	                  starbox centroiding)

	Relies on module-level names not visible in this chunk: pyfits,
	  scipy, pickle, np, stats, ndimage, interpolate, ceil, fabs,
	  biastrim, flatload, offset, measure_width, wavematch, doskysub,
	  extract, make_linelist (some may be unused here), spectools, lris.
	"""
	print "Processing mask",out_prefix


	nsci = len(scinames)

	print "Preparing flatfields"
	if useflat==1:
		yforw,yback,slits,starboxes,flatnorm = flatload(out_prefix)
	else:
		yforw,yback,slits,starboxes,flatnorm = flatpipe(flatnames,out_prefix)
	# Detector dimensions are taken from the normalized flat.
	axis1 = flatnorm.shape[0]
	axis2 = flatnorm.shape[1]

	"""
	Read lamps data from the arclamp file; this is unnecessary for the red
	  side unless the line fitting is altered to better calibrate the blue
	  end (ie for 460 dichroic data).
	"""
	print "Preparing arcs for line identification"
	if usearc==1:
		# NOTE(review): arcdata is built from the *raw* arcname here,
		#   then arcname is immediately repointed at the pre-made
		#   straightened arc -- confirm this ordering is intentional.
		arcdata = biastrim(pyfits.open(arcname)[0].data)
		arcname = out_prefix+"_arc.fits"
		arc_tmp = pyfits.open(arcname)
		arc_ycor = arc_tmp[0].data.astype(scipy.float32)
		lamps = arc_tmp[0].header['LAMPS']
		del arc_tmp
	else:
		# Biastrim the raw arc, straighten it with the forward
		#   distortion map, and save it for future usearc=1 runs.
		arc_tmp = pyfits.open(arcname)
		arcdata = arc_tmp[0].data.copy()
		lamps = arc_tmp[0].header['LAMPS']
		del arc_tmp
		arcdata = biastrim(arcdata)
		arc_ycor = spectools.resampley(arcdata,yforw).astype(scipy.float32)
		arcname = out_prefix+"_arc.fits"
		arc_hdu = pyfits.PrimaryHDU(arc_ycor)
		arc_hdu.header.update('LAMPS',lamps)
		arc_hdu.writeto(arcname)
		del arc_hdu

	"""
	Skysubtraction, centering, &c. may work better if there is some slop on
	  the sides of the data (the slit definitions have been created to only
	  include good data, so 'bad' edges are rejected).
	"""
	# 'Wide' starboxes: min/max y-extent of each box in the
	#   *unstraightened* frame, padded by 3 rows and clipped to the CCD.
	wide_stars = []
	for i,j in starboxes:
		mod = scipy.where((yback<j)&(yback>i))
		a = mod[0].min()-3
		b = mod[0].max()+3
		if a<0:
			a = 0
		if b>axis1:
			b = axis1
		wide_stars.append([a,b])


	print "Bias trimming and flatfielding science data"
	scidata = scipy.zeros((nsci,axis1,axis2),'f4')
	center = scipy.zeros((nsci,len(starboxes)),'f4')
	flux = scipy.zeros((nsci),'f4')
	airmass = []
	for i in range(nsci):
		filename = scinames[i]
		scitmp = pyfits.open(filename)

		scidatatmp = scitmp[0].data.copy()
		scidatatmp = biastrim(scidatatmp).astype(scipy.float32)

		"""
		The biastrim routine should take care of bad columns, but this
		  is just in case; we do a simple linear interpolation over
		  bad columns.
		"""
		# Pixels above 56000 ADU are treated as bad; each is replaced
		#   by the mean of its horizontal neighbors (one-sided at the
		#   array edges).
		bad = scipy.where(scidatatmp>56000.)
		nbad = bad[0].size
		for k in range(nbad):
			y = bad[0][k]
			x = bad[1][k]
			if x==0:
				x1 = x+1
				x2 = x+1
			elif x == scidatatmp.shape[1]-1:
				x1 = x-1
				x2 = x-1
			else:
				x1 = x-1
				x2 = x+1
			scidatatmp[y,x] = \
			    (scidatatmp[y,x1]+scidatatmp[y,x2])/2.

		"""
		Apply the flatfield and copy the data into the working array.
		"""
		scidatatmp = scidatatmp/flatnorm
		scidata[i,:,:] = scidatatmp.copy()

		"""
		Copy key header keywords; note that old data might not have
		  MSWAVE or DICHNAME keywords.
		"""
		# NOTE(review): mswave/disperser/dichroic retain the values
		#   from the *last* exposure after this loop -- all exposures
		#   are assumed to share the same setup.
		try:
			mswave = scitmp[0].header['MSWAVE']
		except:
			mswave = 6500.
		disperser = scitmp[0].header['GRANAME']
		airmass.append(scitmp[0].header['AIRMASS'])
		try:
			dichroic = scitmp[0].header['DICHNAME']
		except:
			dichroic = None

		"""
		This should give a reasonable estimate of the sky level; the
		  program does a dumb scaling (to the level of exposure with the
		  highest sky level)
		"""
		# First-quartile pixel value (Python 2 integer division).
		flux[i] = scipy.sort(scipy.ravel(scidatatmp))[scidatatmp.size/4]

		"""
		Centroid stars in starboxes to find shifts between mask
		  exposures.
		"""
		for j in range(len(starboxes)):
			# a,b pad the straightened box; m,n pad the wide
			#   (unstraightened) box; both are clipped to bounds.
			a,b = starboxes[j]
			m,n = wide_stars[j]
			a -= 4
			b += 4
			m -= 2
			n += 2
			if m<0:
				m = 0
			if n>scidatatmp.shape[0]:
				n = scidatatmp.shape[0]
			if a<0:
				a = 0
			if b>yforw.shape[0]:
				b = yforw.shape[0]
			center[i,j] = offset.findoffset(scidatatmp[m:n],yforw[a:b],m)

		del scitmp
		del scidatatmp
	del flatnorm

	"""
	This implements the mechanism for manually entering offsets (if for
	  example we dithered the stars out of the starboxes).
	"""
	if offsets is not None:
		center = scipy.asarray(offsets)
	else:
		# Average the per-starbox centroids for each exposure.
		center = stats.stats.nanmean(center,axis=1)

	"""
	Perform the flux scaling and set the offsets relative to each other.
	"""
	print "Normalizing Fluxes"
	cmax = center.max()
	fmax = flux.max()
	for i in range(center.size):
		center[i] -= cmax
		ratio = fmax/flux[i]
		scidata[i] *= ratio
	# Largest (absolute) inter-exposure shift, in whole pixels; used to
	#   grow the coadded output. ceil/fabs presumably from math -- the
	#   imports are outside this chunk.
	cmax = ceil(fabs(center.min()))


	"""
	Set the output scale (and approximate input scale), as well as blue
	  cutoff limits.
	"""
	# NOTE(review): an unrecognized GRANAME leaves 'scale' unset and
	#   triggers a NameError further down -- consider a default/raise.
	if disperser=="150/7500":
		scale = 4.8
	elif disperser=="300/5000":
		scale = 2.45
	elif disperser=="400/8500":
		scale = 1.85
	elif disperser=="600/5000":
		scale = 1.25
	elif disperser=="600/7500":
		scale = 1.25
	elif disperser=="600/10000":
		scale = 1.25
	elif disperser=="831/8200":
		scale = 0.915
	elif disperser=="900/5500":
		scale = 0.85
	elif disperser=="1200/7500":
		scale = 0.64

	if dichroic=='mirror':
		redcutoff = 4000.
		dich_file = ''
	elif dichroic=='460':
		redcutoff = 4600.  # I haven't checked this...
		dich_file = '460'
	elif dichroic=='500':
		redcutoff = 5000.  # I haven't checked this...
		dich_file = '500'
	elif dichroic=='560':
		redcutoff = 5500.
		dich_file = '560'
	elif dichroic=='680':
		redcutoff = 6700.
		dich_file = '680'
	else:
		redcutoff = 3500.
		dich_file = ''

	"""
	Determine the y-size of the output arrays. We also find an estimate of
	  the mask resolution while looping through. Only every seventh slit
	  is examined to expedite the process.
	"""
	nsize = 0
	csize = 0
	wide_slits = []
	linewidth = []
	for i,j in slits:
		# Coadded size must absorb the largest dither (cmax); both
		#   sizes include a 5-row gap between slits.
		csize += int(j-i+cmax) + 5
		nsize += j-i+5
		mod = scipy.where((yback>i)&(yback<j))
		a = mod[0].min()-4
		b = mod[0].max()+4
		if a<0:
			a = 0
		if b>axis1:
			b = axis1
		wide_slits.append([a,b])
		if len(wide_slits)%7==0:
			# Measure the arc-line FWHM on the slit's central row
			#   (Python 2 integer division).
			linewidth.append(measure_width.measure(arc_ycor[(i+j)/2,:],15))
	# Remove the trailing gap added after the last slit.
	csize -= 5
	nsize -= 5

	linewidth = scipy.median(scipy.asarray(linewidth))

	print "Loading wavelength model"
	lris_path = lris.__path__[0]

	# UVES sky-spectrum model, pickled as a spline representation.
	filename = lris_path+"/data/uves_sky.model"
	infile = open(filename,"r")
	wavecalmodel = pickle.load(infile)
	infile.close()
	wave = scipy.arange(3400.,10400.,0.1)

	"""
	We make the sky spectrum slightly more realistic by taking into account
	  the dichroic cutoff. This mainly helps with matching the 5577 line
	  for the 560 dichroic. It would be nice if the response of the
	  instrument was somewhat well characterized for all grating/dichroic
	  combinations....
	"""
	if dich_file!='':
		filename = lris_path+"/data/dichroics/dichroic_"+dich_file+"_t.dat"
		infile = open(filename,"r")
		#input = sio.read_array(infile)
		input = np.loadtxt(infile)
		infile.close()
		spline = interpolate.splrep(input[:,0],input[:,1],s=0)
		dich = interpolate.splev(wave,spline)
		# Outside the measured range, assume full transmission.
		dich[wave<4500.] = 1.
		dich[wave>8800.] = 1.
		del input,spline
	else:
		dich = scipy.ones(wave.size)

	"""
	Create two sky spectrum spline models. One is a 'fine' model matched
	  to the resolution of the instrumental setup. The other is a widened
	  model for coarse wavelength matching.
	"""
	# The model grid is 0.1A/pix, hence the /0.1 sigma conversions.
	wavemodel = interpolate.splev(wave,wavecalmodel)
	finemodel = ndimage.gaussian_filter1d(wavemodel,linewidth*scale/0.1)
	wavemodel = ndimage.gaussian_filter1d(finemodel,5./0.1)
	finemodel *= dich
	finemodel = interpolate.splrep(wave,finemodel,s=0)
	wavemodel *= dich
	widemodel = interpolate.splrep(wave,wavemodel,s=0)
	goodmodel = finemodel
	del dich,wave,wavemodel

	""" See extract.py; sets default extraction width. """
	extractwidth = 10

	print "Creating output arrays"

	"""
	We choose an output array size that *should* be large enough to contain
	  all of the valid data (given reasonable assumptions about how far
	  the slits are placed from the center of the mask). We could also
	  decrease the needed size by enforcing the blue limit....
	"""
	outlength = int(axis2*1.6)
	# Multiplying by nan marks every pixel as "no data" until filled.
	out = scipy.zeros((nsci,nsize,outlength))*scipy.nan
	out2 = scipy.zeros((2,csize,outlength))*scipy.nan

	"""
	For systems with limited RAM, it might make sense to cache the output
	  arrays to disk. This increases the time it takes to run but may be
	  necessary and also allows the progress of the reduction to be
	  monitored.
	"""
	if cache:
		import os
		print "Caching..."
		strtfile = out_prefix+"_TMPSTRT.fits"
		bgfile = out_prefix+"_TMPBSUB.fits"
		# Remove stale cache files from a previous run, if any.
		try:
			os.remove(strtfile)
		except:
			pass

		outfile = pyfits.PrimaryHDU(out)
		outfile.header.update('CTYPE1','LINEAR')
		outfile.header.update('CRPIX1',1)
		outfile.header.update('CRVAL1',mswave-(0.5*out2.shape[2])*scale)
		outfile.header.update('CD1_1',scale)
		outfile.header.update('CTYPE2','LINEAR')
		outfile.header.update('CRPIX2',1)
		outfile.header.update('CRVAL2',1)
		outfile.header.update('CD2_2',1)
		if nsci>1:
			outfile.header.update('CTYPE3','LINEAR')
			outfile.header.update('CRPIX3',1)
			outfile.header.update('CRVAL3',1)
			outfile.header.update('CD3_3',1)
		outfile.writeto(strtfile)
		# Free the in-memory copy; it will be re-read slit by slit.
		del outfile,out

		try:
			os.remove(bgfile)
		except:
			pass

		outfile = pyfits.PrimaryHDU(out2)
		outfile.header.update('CTYPE1','LINEAR')
		outfile.header.update('CRPIX1',1)
		outfile.header.update('CRVAL1',mswave-(0.5*out2.shape[2])*scale)
		outfile.header.update('CD1_1',scale)
		outfile.header.update('CTYPE2','LINEAR')
		outfile.header.update('CRPIX2',1)
		outfile.header.update('CRVAL2',1)
		outfile.header.update('CD2_2',1)
		outfile.header.update('CTYPE3','LINEAR')
		outfile.header.update('CRPIX3',1)
		outfile.header.update('CRVAL3',1)
		outfile.header.update('CD3_3',1)
		outfile.writeto(bgfile)
		del outfile,out2

	"""
	Loop through all of the slits, determining the wavelength solution and
	  performing the background subtraction. It might be more robust to
	  determine all wavelength solutions, then jointly determine a 'master'
	  solution.... posc stores the current (starting) position of the
	  coadded array, and posn stores the current position of the straight
	  array.
	"""
	posc = 0
	posn = 0
	count = 1

	""" Debugging feature; set to 1 to skip background subtraction """
	lris.lris_red.skysub.RESAMPLE = 0
	""" Extract 1d spectra? """
	do_extract = False

	for k in range(len(slits)):
		i,j = slits[k]
		a,b = wide_slits[k]

		""" Debugging feature; change number to skip initial slits """
		if count<1:
			count += 1
			continue

		print "Working on slit %d (%d to %d)" % (count,i,j)
		# Determine the wavelength solution
		sky2x,sky2y,ccd2wave = wavematch(a,scidata[:,a:b],arc_ycor[i:j],yforw[i:j],widemodel,finemodel,goodmodel,scale,mswave,redcutoff)
		# Resample and background subtract
		print 'Doing background subtraction'
		#scidata[0,a:b] = arcdata[a:b] # This line may be a debugging step that MWA put in.  See what happens with it missing.
		strt,bgsub,varimg = doskysub(i,j-i,outlength,scidata[:,a:b],yback[a:b],sky2x,sky2y,ccd2wave,scale,mswave,center,redcutoff,airmass)

		# Store the resampled 2d spectra
		# NOTE(review): 'file' below shadows the Python 2 builtin.
		h = strt.shape[1]
		if cache:
			file = pyfits.open(strtfile,mode="update")
			out = file[0].data
		out[:,posn:posn+h] = strt.copy()
		if cache:
			file.close()
			del file,out
		posn += h+5

		if lris.lris_red.skysub.RESAMPLE:
			count += 1
			continue

		# Store the resampled, background subtracted 2d spectra
		# Plane 0 holds the sky-subtracted data, plane 1 the variance.
		h = bgsub.shape[0]
		if cache:
			file = pyfits.open(bgfile,mode="update")
			out2 = file[0].data
		out2[0,posc:posc+h] = bgsub.copy()
		out2[1,posc:posc+h] = varimg.copy()
		if cache:
			file.close()
			del file,out2
		posc += h+5


		# Find and extract object traces
		if do_extract:
			print '  Extracting object spectra'
			# Trim to the wavelength range that contains data
			#   (columns whose sum over rows is non-zero).
			tmp = scipy.where(scipy.isnan(bgsub),0.,bgsub)
			filter = tmp.sum(axis=0)
			mod = scipy.where(filter!=0)
			start = mod[0][0]
			end = mod[0][-1]+1
			del tmp
			slit = bgsub[:,start:end]
			spectra = extract(slit,varimg[:,start:end],extractwidth)
			num = 1
			crval = mswave-(0.5*bgsub.shape[1]-start)*scale
			for spec in spectra:
				for item in spec:
					# 4-element items carry trace metadata
					#   (center/width); others are spectra.
					if item.size==4:
						hdu = pyfits.PrimaryHDU()
						hdu.header.update('CENTER',item[2])
						hdu.header.update('WIDTH',item[3])
						hdulist = pyfits.HDUList([hdu])
					else:
						thdu = pyfits.ImageHDU(item)
						thdu.header.update('CRVAL1',crval)
						thdu.header.update('CD1_1',scale)
						thdu.header.update('CRPIX1',1)
						thdu.header.update('CRVAL2',1)
						thdu.header.update('CD2_2',1)
						thdu.header.update('CRPIX2',1)
						thdu.header.update('CTYPE1','LINEAR')
						hdulist.append(thdu)
					outname = out_prefix+"_spec_%02d_%02d.fits" % (count,num)
					hdulist.writeto(outname)
					num += 1

		count += 1


	""" Output 2d spectra """
	if cache:
		file = pyfits.open(bgfile)
		out2 = file[0].data.copy()
		del file
	# Find the populated wavelength range of the coadded frame so the
	#   final outputs can be trimmed to [start:end].
	tmp = out2[0].copy()
	tmp = scipy.where(scipy.isnan(tmp),0,1)
	mod = scipy.where(tmp.sum(axis=0)!=0)
	start = mod[0][0]
	end = mod[0][-1]+1
	del tmp

	outname = out_prefix+"_bgsub.fits"
	outfile = pyfits.PrimaryHDU(out2[0,:,start:end])
	outfile.header.update('CTYPE1','LINEAR')
	outfile.header.update('CRPIX1',1)
	outfile.header.update('CRVAL1',mswave-(0.5*out2.shape[2]-start)*scale)
	outfile.header.update('CD1_1',scale)
	outfile.header.update('CRPIX2',1)
	outfile.header.update('CRVAL2',1)
	outfile.header.update('CD2_2',1)
	outfile.writeto(outname)
	hdr = outfile.header.copy()

	# The variance image reuses the bgsub WCS header.
	outname = out_prefix+"_var.fits"
	outfile = pyfits.PrimaryHDU(out2[1,:,start:end])
	outfile.header=hdr
	outfile.writeto(outname)
	del out2,hdr

	if cache:
		file = pyfits.open(strtfile)
		out = file[0].data.copy()
		del file
	# One straightened (not background-subtracted) frame per exposure.
	for i in range(nsci):
		outname = out_prefix+"_straight_%d.fits" % (i+1)
		outfile = pyfits.PrimaryHDU(out[i,:,start:end])
		outfile.header.update('CTYPE1','LINEAR')
		outfile.header.update('CRPIX1',1)
		outfile.header.update('CRVAL1',mswave-(0.5*out.shape[2]-start)*scale)
		outfile.header.update('CD1_1',scale)
		outfile.header.update('CRPIX2',1)
		outfile.header.update('CRVAL2',1)
		outfile.header.update('CD2_2',1)
		#if nsci>1:
		#	outfile.header.update('CRPIX3',1)
		#	outfile.header.update('CRVAL3',1)
		#	outfile.header.update('CD3_3',1)	
		outfile.writeto(outname)
		del outfile

	del out
def lris_pipeline(prefix,
                  dir,
                  scinames,
                  arcname,
                  flatnames,
                  out_prefix,
                  useflat=0,
                  usearc=0,
                  cache=0,
                  offsets=None,
                  logfile=None):
    # Create a logfile for this session
    if logfile is None:
        logfile = open('%s.log' % out_prefix, 'w')
    else:
        logfile = open(logfile, 'w')
    stime = time.strftime("%d/%m/%y %H:%M:%S")
    logfile.write('%s\n' % stime)

    print "Processing mask", out_prefix
    logfile.write('Processing mask %s\n' % out_prefix)
    """ Prepare image names. """

    nsci = len(scinames)
    YMID = 2048  # offset for the second detector

    print "Preparing flatfields"
    if useflat == 1:
        logfile.write('Using pre-made flats\n')
        yforw, yback, slits, starboxes = flatload(out_prefix)
    else:
        logfile.write('Making new flats\n')
        yforw, yback, slits, starboxes = flatpipe(flatnames, out_prefix)

    print "Preparing arcs for line identification"
    if usearc == 1:
        logfile.write('Using pre-made arcs\n')
        arc_ycor = {}
        for i in ['bottom', 'top']:
            arcname = out_prefix + "_arc_%s.fits" % i
            arc_tmp = pyfits.open(arcname)
            arc_ycor[i] = arc_tmp[0].data.astype(scipy.float32)
            lamps = arc_tmp[0].header['LAMPS']
            filter = arc_tmp[0].header['BLUFILT']
            del arc_tmp
    else:
        logfile.write('Making new arcs\n')
        """ Load arc data from fits file """
        arc_tmp = pyfits.open(arcname)
        arcdata = arc_tmp[0].data.copy()
        """ Determine which lamps were used """
        lamps = arc_tmp[0].header['LAMPS']
        try:
            filter = arc_tmp[0].header['BLUFILT']
        except:
            filter = 'clear'
        del arc_tmp
        """ Process arcs for the top and bottom separately """
        arcdata = biastrim(arcdata)
        arc_ycor = {}
        arc_ycor['bottom'] = spectools.resampley(
            arcdata[:YMID], yforw['bottom']).astype(scipy.float32)
        arcname = out_prefix + "_arc_bottom.fits"
        arc_hdu = pyfits.PrimaryHDU(arc_ycor['bottom'])
        arc_hdu.header.update('LAMPS', lamps)
        arc_hdu.header.update('BLUFILT', filter)
        arc_hdu.writeto(arcname)

        arc_ycor['top'] = spectools.resampley(
            arcdata[YMID:], yforw['top']).astype(scipy.float32)
        arcname = out_prefix + "_arc_top.fits"
        arc_hdu = pyfits.PrimaryHDU(arc_ycor['top'])
        arc_hdu.header.update('LAMPS', lamps)
        arc_hdu.header.update('BLUFILT', filter)
        arc_hdu.writeto(arcname)

        del arc_hdu, arcdata

    axis1 = 4096
    axis2 = 4096
    """
	We create 'wide' starboxes that describe the minimum and maximum
	  y-position of the slit in the *unstraightened* frame.
	"""
    wide_stars = {}
    logfile.write('\n')
    for l in ['bottom', 'top']:
        wide_stars[l] = []
        bmax = arc_ycor[l].shape[0]
        logfile.write('Star boxes for %s\n' % l)
        for i, j in starboxes[l]:
            logfile.write('[:,%d:%d]\n' % (i, j))

            mod = scipy.where((yback[l] < j) & (yback[l] > i))
            a = mod[0].min() - 3  # We include a small amount of
            b = mod[0].max() + 3  #  padding for resampling.
            if a < 0:
                a = 0
            if b > bmax:
                b = bmax
            wide_stars[l].append([a, b])

    print "Bias trimming science data"
    nstars = len(starboxes['bottom']) + len(starboxes['top'])
    scidata = scipy.zeros((nsci, axis1, axis2), 'f4')
    center = scipy.zeros((nsci, nstars), 'f4')
    flux = scipy.zeros((nsci), 'f4')
    for i in range(nsci):
        filename = scinames[i]
        scitmp = pyfits.open(filename)

        scidatatmp = scitmp[0].data.copy()
        scidatatmp = biastrim(scidatatmp).astype(scipy.float32)
        """
		Remove screwed columns (this should already be done by biastrim
		  though...).
		"""
        bad = scipy.where(scidatatmp > 56000.)
        nbad = bad[0].size
        for k in range(nbad):
            y = bad[0][k]
            x = bad[1][k]
            scidatatmp[y,
                       x] = (scidatatmp[y, x - 1] + scidatatmp[y, x + 1]) / 2.
        """
		We don't flatfield blueside data because of ghosts and
		  reflections. Milan Bogosavljevic has data that show that
		  flatfielding is important if using very blue data--the bluest
		  end looks like it has fringes! I should add a flag to allow
		  flatfielding to be turned on....
		"""
        scidata[i, :, :] = scidatatmp.copy()
        """
		The try/except code is because sometimes the data just don't
		  have these keywords (I think this might be correlated to
		  stopping exposures early, though I'm not sure). Plus old data
		  might not have used the dichroic at all....
		"""
        try:
            disperser = scitmp[0].header['GRISNAME']
        except:
            pass
        try:
            dichroic = scitmp[0].header['DICHNAME']
        except:
            dichroic = None
        """
		We use the first quartile for the flux normalization.
		"""
        flux[i] = scipy.sort(scipy.ravel(scidatatmp))[scidatatmp.size / 4]
        """
		The starboxes are used to determine relative y-shifts between
		  mask exposures. If the offsets keyword was used, this will
		  be ignored.
		"""
        for l in ['bottom', 'top']:
            if offsets is not None:
                continue
            for j in range(len(starboxes[l])):
                a, b = starboxes[l][j]
                m, n = wide_stars[l][j]
                a -= 4
                b += 4
                m -= 2
                n += 2
                if a < 0:
                    a = 0
                if l == 'top':
                    m += YMID
                    n += YMID
                center[i, j] = offset.findoffset(scidatatmp[m:n],
                                                 yforw[l][a:b], m)

        del scitmp
        del scidatatmp

    if offsets is not None:
        center = scipy.asarray(offsets)
    else:
        center = stats.stats.nanmean(center, axis=1)
    center[scipy.isnan(center)] = 0.

    print "Normalizing Fluxes"
    cmax = center.max()
    fmax = flux.max()
    logfile.write('\nMask pixel and flux offsets\n')
    logfile.write('-----------------------------\n')
    logfile.write('Mask   Pixels   Flux\n')
    for i in range(center.size):
        center[i] -= cmax
        ratio = fmax / flux[i]
        scidata[i] *= ratio
        logfile.write('%4d   %6.2f   %4.2f\n' % (i, center[i], ratio))
    cmax = ceil(fabs(center.min()))
    logfile.write('\n')

    if disperser == "300/5000":
        scale = 1.41
        mswave = 5135.
    elif disperser == "400/3400":
        scale = 1.05
        mswave = 3990.
    elif disperser == "600/4000":
        scale = 0.63
        mswave = 4590.
    elif disperser == "1200/3400":
        scale = 0.24
        mswave = 3505.

    if dichroic == 'mirror':
        bluecutoff = 0.
        dich_file = ''
    elif dichroic == '460':
        bluecutoff = 4650.
        dich_file = '460'
    elif dichroic == '500':
        bluecutoff = 5100.
        dich_file = '500'
    elif dichroic == '560':
        bluecutoff = 5650.
        dich_file = '560'
    elif dichroic == '680':
        #		bluecutoff = 6800.
        bluecutoff = 5650.
        dich_file = '680'
    else:
        bluecutoff = 8000.
        dich_file = ''
    """
	We create 'wide' slits that describe the minimum and maximum y-position
	  of the slit in the *unstraightened* frame. We also determine the mask
	  resolution using every seventh slit.
	"""
    nsize = 0  # Size of straightened mask
    csize = 0  # Size of coadded mask
    wide_slits = {}
    linewidth = []
    print "Determining mask resolution"
    for l in ['bottom', 'top']:
        wide_slits[l] = []
        bmax = arc_ycor[l].shape[0]
        logfile.write('Slits for %s (%d total)\n' % (l, len(slits[l])))
        for i, j in slits[l]:
            logfile.write('[:,%d:%d]\n' % (i, j))
            csize += int(j - i + cmax) + 5
            nsize += j - i + 5
            mod = scipy.where((yback[l] > i) & (yback[l] < j))
            a = mod[0].min() - 4
            b = mod[0].max() + 4
            if a < 0:
                a = 0
            if b > bmax:
                b = bmax
            wide_slits[l].append([a, b])
            if len(wide_slits[l]) % 7 == 0:
                linewidth.append(
                    measure_width.measure(arc_ycor[l][(i + j) / 2, :], 15))
    csize -= 5
    nsize -= 5
    logfile.write("\n\n")
    logfile.close()

    linewidth = scipy.median(scipy.asarray(linewidth))
    """ We can temporarily delete the top CCD from memory """
    yforw = yforw['bottom']
    yback = yback['bottom']
    arc_ycor = arc_ycor['bottom']
    """
	Create the arclamp model by using only the blue lamps that were turned
	  on. Turning off the red lamps (Ne and Ar) reduces reflections on the
	  blue side, and it is difficult to use these lines for the wavelength
	  solution because 2nd order blue lines show up starting at ~5800.
	"""
    print "Loading wavelength model"
    lris_path = lris.__path__[0]
    lamps = lamps.split(',')
    wave = scipy.arange(2000., 8000., 0.1)
    filenames = []
    if lamps[0] == '1':
        filenames.append(lris_path + "/data/bluearcs/hg.dat")
    if lamps[3] == '1':
        filenames.append(lris_path + "/data/bluearcs/cd.dat")
    if lamps[4] == '1':
        filenames.append(lris_path + "/data/bluearcs/zn.dat")
    fluxlimit = None
    if filter == 'SP580' and bluecutoff > 5650:
        cutoff = 5650.
    else:
        fluxlimit = 150.
        cutoff = bluecutoff
    linefile = out_prefix + "_lines.dat"
    make_linelist(filenames, cutoff, fluxlimit, linefile)
    """
	The relative amplitudes of the lines in the hg, cd, and zn.dat files
	  are more appropriate for the bluer grisms. A separate linelist is
	  used for the 300 grism and assumes all three lamps were on. This is
	  one of the problems with (1) not knowing the throughput for each
	  setup and (2) not having stable lamps.
	"""
    if disperser == "300/5000":
        filename = lris_path + "/data/bluearcs/300_lines.dat"
        arc, lines = make_arc(filename, linewidth * scale, wave)
    else:
        arc, lines = make_arc(linefile, linewidth * scale, wave)
    finemodel = interpolate.splrep(wave, arc, s=0)
    smooth = ndimage.gaussian_filter1d(arc, 9. / 0.1)
    widemodel = interpolate.splrep(wave, smooth, s=0)
    linemodel = interpolate.splrep(wave, lines, s=0)

    filename = lris_path + "/data/uves_sky.model"
    infile = open(filename, "r")
    wavecalmodel = pickle.load(infile)
    infile.close()
    wave = scipy.arange(3400., 10400., 0.1)
    """ We attempt to model the dichroic cutoff for the sky mode. """
    if dichroic == '680' and disperser == "300/5000":
        filename = lris_path + "/data/dichroics/dichroic_680_t.dat"
        infile = open(filename, "r")
        input = sio.read_array(infile)
        infile.close()
        input[:, 1] = 1. - input[:, 1]
        spline = interpolate.splrep(input[:, 0], input[:, 1], s=0)
        dich = interpolate.splev(wave, spline)
        dich[wave < 4500.] = 1.
        dich[wave > 8800.] = 1.
        filename = lris_path + "/data/grisms/grism_300.dat"
        infile = open(filename, "r")
        input = sio.read_array(infile)
        infile.close()
        spline = interpolate.splrep(input[:, 0], input[:, 1], s=0)
        eff = interpolate.splev(wave, spline)
        eff[wave < 5100.] = 1.
        eff[wave > 7200.] = 1.
        dich *= eff
        del input, spline
    else:
        dich = scipy.ones(wave.size)
    wave = scipy.arange(3400., 10400., 0.1)
    wavemodel = interpolate.splev(wave, wavecalmodel)
    goodmodel = ndimage.gaussian_filter1d(wavemodel, linewidth * scale / 0.12)
    goodmodel *= dich
    goodmodel = interpolate.splrep(wave, goodmodel, s=0)

    extra = [linefile, cutoff, 3000]
    extractwidth = 15

    del arc, wave, smooth
    """
	Use the skyarcmatch routine if the 300grism is employed, otherwise
	  just match the arclines.
	"""
    if dichroic == '680' and disperser == "300/5000":
        from lris.lris_blue.skyarcmatch import arcmatch as wavematch
        extra2 = [linefile, 6850, 3500]
    else:
        from lris.lris_blue.arcmatch import arcmatch as wavematch
        extra2 = extra
    """
	This could be improved by making the arrays the (pre-determined) size
	  stipulated by the red and blue cutoffs.
	"""
    print "Creating output arrays"
    outlength = int(axis2 * 1.6)
    out = scipy.zeros((nsci, nsize, outlength), scipy.float32) * scipy.nan
    out2 = scipy.zeros((2, csize, outlength), scipy.float32) * scipy.nan
    """
        For systems with limited RAM, it might make sense to cache the output
          arrays to disk. This increases the time it takes to run but may be
          necessary and also allows the progress of the reduction to be
          monitored.
        """
    if cache:
        import os
        print "Caching..."
        strtfile = out_prefix + "_TMPSTRT.fits"
        bgfile = out_prefix + "_TMPBSUB.fits"
        try:
            os.remove(strtfile)
        except:
            pass

        outfile = pyfits.PrimaryHDU(out)
        outfile.header.update('CTYPE1', 'LINEAR')
        outfile.header.update('CRPIX1', 1)
        outfile.header.update('CRVAL1', mswave - (0.5 * out2.shape[2]) * scale)
        outfile.header.update('CD1_1', scale)
        outfile.header.update('CTYPE2', 'LINEAR')
        outfile.header.update('CRPIX2', 1)
        outfile.header.update('CRVAL2', 1)
        outfile.header.update('CD2_2', 1)
        if nsci > 1:
            outfile.header.update('CTYPE3', 'LINEAR')
            outfile.header.update('CRPIX3', 1)
            outfile.header.update('CRVAL3', 1)
            outfile.header.update('CD3_3', 1)
        outfile.writeto(strtfile)
        del outfile, out

        try:
            os.remove(bgfile)
        except:
            pass

        outfile = pyfits.PrimaryHDU(out2)
        outfile.header.update('CTYPE1', 'LINEAR')
        outfile.header.update('CRPIX1', 1)
        outfile.header.update('CRVAL1', mswave - (0.5 * out2.shape[2]) * scale)
        outfile.header.update('CD1_1', scale)
        outfile.header.update('CTYPE2', 'LINEAR')
        outfile.header.update('CRPIX2', 1)
        outfile.header.update('CRVAL2', 1)
        outfile.header.update('CD2_2', 1)
        outfile.header.update('CTYPE3', 'LINEAR')
        outfile.header.update('CRPIX3', 1)
        outfile.header.update('CRVAL3', 1)
        outfile.header.update('CD3_3', 1)
        outfile.writeto(bgfile)
        del outfile, out2

    logfile = open(logfile.name, 'a')
    logfile.write('Beginning Wavelength Solution and Resampling\n')
    logfile.write('--------------------------------------------\n')
    logfile.close()
    """
        Loop through all of the slits, determining the wavelength solution and
          performing the background subtraction. It might be more robust to
          determine all wavelength solutions, then jointly determine a 'master'
          solution.... posc stores the current (starting) position of the
          coadded array, and posn stores the current position of the straight
          array. off is 0 while looping over the bottom and YMID for the top. n
	  is the number of bottom slits, so that the 1st top slit is n+1.
        """
    nbottom = len(slits['bottom'])
    nslits = nbottom + len(slits['top'])
    posc = 0
    posn = 0
    count = 1
    off = 0
    n = 0
    narrow = slits['bottom']
    wide = wide_slits['bottom']
    """ Debugging feature; set to 1 to skip background subtraction """
    lris.lris_blue.skysub.RESAMPLE = 0
    for k in range(nslits):
        """
		When we have finished all of the bottom slits, switch
		  parameters over to their top values.
		"""
        if k == nbottom:
            arc_ycor = get_arc(out_prefix)
            yforw = get_yforw(out_prefix)
            yback = get_yback(out_prefix)
            n = nbottom
            off = YMID
            narrow = slits['top']
            wide = wide_slits['top']
        i, j = narrow[k - n]
        a, b = wide[k - n]
        """ Debugging feature; change number to skip initial slits """
        if count < 1:
            count += 1
            continue

        print "Working on slit %d (%d to %d)" % (count, i + off, j + off)
        logfile = open(logfile.name, 'a')
        logfile.write("Working on slit %d (%d to %d)\n" %
                      (count, i + off, j + off))
        logfile.close()
        sky2x, sky2y, ccd2wave = wavematch(a, scidata[:, a + off:b + off],
                                           arc_ycor[i:j], yforw[i:j],
                                           widemodel, finemodel, goodmodel,
                                           linemodel, scale, mswave, extra,
                                           logfile)
        logfile = open(logfile.name, 'a')
        logfile.write("\n")
        logfile.close()
        strt, bgsub, varimg = doskysub(i, j - i, outlength,
                                       scidata[:, a + off:b + off], yback[a:b],
                                       sky2x, sky2y, ccd2wave, scale, mswave,
                                       center, extra2)
        """ Store the resampled 2d spectra """
        h = strt.shape[1]
        if cache:
            file = pyfits.open(strtfile, mode="update")
            out = file[0].data
        out[:, posn:posn + h] = strt.copy()
        if cache:
            file.close()
            del file, out
        posn += h + 5

        if lris.lris_blue.skysub.RESAMPLE == 1:
            count += 1
            continue
        """ Store the resampled, background subtracted 2d spectra """
        h = bgsub.shape[0]
        if cache:
            file = pyfits.open(bgfile, mode="update")
            out2 = file[0].data
        out2[0, posc:posc + h] = bgsub.copy()
        out2[1, posc:posc + h] = varimg.copy()
        if cache:
            file.close()
            del file, out2
        posc += h + 5
        """ Find and extract object traces """
        tmp = scipy.where(scipy.isnan(bgsub), 0., bgsub)
        filter = tmp.sum(axis=0)
        mod = scipy.where(filter != 0)
        start = mod[0][0]
        end = mod[0][-1] + 1
        del tmp
        slit = bgsub[:, start:end]
        spectra = extract(slit, varimg[:, start:end], extractwidth)
        num = 1
        crval = mswave - (0.5 * bgsub.shape[1] - start) * scale
        for spec in spectra:
            for item in spec:
                if item.size == 4:
                    hdu = pyfits.PrimaryHDU()
                    hdu.header.update('CENTER', item[2])
                    hdu.header.update('WIDTH', item[3])
                    hdulist = pyfits.HDUList([hdu])
                else:
                    thdu = pyfits.ImageHDU(item)
                    thdu.header.update('CRVAL1', crval)
                    thdu.header.update('CD1_1', scale)
                    thdu.header.update('CRPIX1', 1)
                    thdu.header.update('CRVAL2', 1)
                    thdu.header.update('CD2_2', 1)
                    thdu.header.update('CRPIX2', 1)
                    thdu.header.update('CTYPE1', 'LINEAR')
                    hdulist.append(thdu)
            outname = out_prefix + "_spec_%02d_%02d.fits" % (count, num)
            hdulist.writeto(outname)
            num += 1

        count += 1
    """ Output 2d spectra"""
    if cache:
        file = pyfits.open(bgfile)
        out2 = file[0].data.copy()
        del file
    tmp = out2[0].copy()
    tmp = scipy.where(scipy.isnan(tmp), 0, 1)
    mod = scipy.where(tmp.sum(axis=0) != 0)
    start = mod[0][0]
    end = mod[0][-1] + 1
    del tmp

    outname = out_prefix + "_bgsub.fits"
    outfile = pyfits.PrimaryHDU(out2[0, :, start:end])
    outfile.header.update('CTYPE1', 'LINEAR')
    outfile.header.update('CRPIX1', 1)
    outfile.header.update('CRVAL1',
                          mswave - (0.5 * out2.shape[2] - start) * scale)
    outfile.header.update('CD1_1', scale)
    outfile.header.update('CRPIX2', 1)
    outfile.header.update('CRVAL2', 1)
    outfile.header.update('CD2_2', 1)
    outfile.writeto(outname)
    hdr = outfile.header.copy()

    outname = out_prefix + "_var.fits"
    outfile = pyfits.PrimaryHDU(out2[1, :, start:end])
    outfile.header = hdr
    outfile.writeto(outname)
    del out2, hdr

    if cache:
        file = pyfits.open(strtfile)
        out = file[0].data.copy()
        del file
    outname = out_prefix + "_straight.fits"
    outfile = pyfits.PrimaryHDU(out[:, :, start:end])
    outfile.header.update('CTYPE1', 'LINEAR')
    outfile.header.update('CRPIX1', 1)
    outfile.header.update('CRVAL1',
                          mswave - (0.5 * out.shape[2] - start) * scale)
    outfile.header.update('CD1_1', scale)
    outfile.header.update('CRPIX2', 1)
    outfile.header.update('CRVAL2', 1)
    outfile.header.update('CD2_2', 1)
    if nsci > 1:
        outfile.header.update('CRPIX3', 1)
        outfile.header.update('CRVAL3', 1)
        outfile.header.update('CD3_3', 1)
    outfile.writeto(outname)

    del out, outfile
Example #5
0
def lris_pipeline(prefix,
                  dir,
                  science,
                  arc,
                  flats,
                  out_prefix,
                  useflat=0,
                  usearc=0,
                  cache=0,
                  offsets=None):
    print "Processing mask", out_prefix

    scinums = science.split(",")
    flatnums = flats.split(",")

    for i in range(len(flatnums)):
        flatnums[i] = dir + prefix + flatnums[i] + ".fits"
    scinames = []
    for i in range(len(scinums)):
        name = dir + prefix + scinums[i] + ".fits"
        scinames.append(name)
    arcname = dir + prefix + arc + ".fits"

    nsci = len(scinums)

    print "Preparing flatfields"
    if useflat == 1:
        yforw, yback, slits, starboxes, flatnorm = flatload(out_prefix)
    else:
        yforw, yback, slits, starboxes, flatnorm = flatpipe(
            flatnums, out_prefix)
    axis1 = flatnorm.shape[0]
    axis2 = flatnorm.shape[1]

    print "Preparing arcs for line identification"
    if usearc == 1:
        arcname = out_prefix + "_arc.fits"
        arc_tmp = pyfits.open(arcname)
        arc_ycor = arc_tmp[0].data.astype(scipy.float32)
        lamps = arc_tmp[0].header['LAMPS']
        del arc_tmp
    else:
        arcname = dir + prefix + arc + ".fits"
        arc_tmp = pyfits.open(arcname)
        arcdata = arc_tmp[0].data.copy()
        lamps = arc_tmp[0].header['LAMPS']
        del arc_tmp
        arcdata = biastrim(arcdata)
        arc_ycor = spectools.resampley(arcdata, yforw).astype(scipy.float32)
        arcname = out_prefix + "_arc.fits"
        arc_hdu = pyfits.PrimaryHDU(arc_ycor)
        arc_hdu.header.update('LAMPS', lamps)
        arc_hdu.writeto(arcname)
        del arc_hdu

    wide_stars = []
    for i, j in starboxes:
        mod = scipy.where((yback < j) & (yback > i))
        a = mod[0].min() - 3
        b = mod[0].max() + 3
        if a < 0:
            a = 0
        if b > axis1:
            b = axis1
        wide_stars.append([a, b])

    print "Bias trimming and flatfielding science data"
    scidata = scipy.zeros((nsci, axis1, axis2), 'f4')
    center = scipy.zeros((nsci, len(starboxes)), 'f4')
    flux = scipy.zeros((nsci), 'f4')
    airmass = []
    for i in range(nsci):
        filename = scinames[i]
        scitmp = pyfits.open(filename)

        scidatatmp = scitmp[0].data.copy()
        scidatatmp = biastrim(scidatatmp).astype(scipy.float32)

        #Remove screwed columns (this should already be done though...)
        bad = scipy.where(scidatatmp > 56000.)
        nbad = bad[0].size
        for k in range(nbad):
            y = bad[0][k]
            x = bad[1][k]
            scidatatmp[y,
                       x] = (scidatatmp[y, x - 1] + scidatatmp[y, x + 1]) / 2.
        # Don't flatfield blueside data
        scidatatmp = scidatatmp / flatnorm
        scidata[i, :, :] = scidatatmp.copy()

        try:
            mswave = scitmp[0].header['MSWAVE']
        except:
            mswave = 6500.
        if len(slits) == 1:
            try:
                mswave = scitmp[0].header['WAVELEN']
            except:
                pass
        disperser = scitmp[0].header['GRANAME']
        airmass.append(scitmp[0].header['AIRMASS'])

        # Old data mightn't have a dichroic keyword!
        try:
            dichroic = scitmp[0].header['DICHNAME']
        except:
            dichroic = None

        flux[i] = scipy.sort(scipy.ravel(scidatatmp))[scidatatmp.size / 4]
        for j in range(len(starboxes)):
            a, b = starboxes[j]
            m, n = wide_stars[j]
            a -= 4
            b += 4
            m -= 2
            n += 2
            center[i, j] = offset.findoffset(scidatatmp[m:n], yforw[a:b], m)

        del scitmp
        del scidatatmp
    del flatnorm

    if offsets is not None:
        center = scipy.asarray(offsets)
    else:
        center = stats.stats.nanmean(center, axis=1)

    center[scipy.isnan(center)] = 0.
    print "Normalizing Fluxes"
    cmax = center.max()
    fmax = flux.max()
    for i in range(center.size):
        center[i] -= cmax
        ratio = fmax / flux[i]
        scidata[i] *= ratio
    cmax = ceil(fabs(center.min()))

    if disperser == "150/7500":
        scale = 4.8
    elif disperser == "300/5000":
        scale = 2.45
    elif disperser == "400/8500":
        scale = 1.85
    elif disperser == "600/5000":
        scale = 1.25
    elif disperser == "600/7500":
        scale = 1.25
    elif disperser == "600/10000":
        scale = 1.25
    elif disperser == "831/8200":
        scale = 0.915
    elif disperser == "900/5500":
        scale = 0.85
    elif disperser == "1200/7500":
        scale = 0.64

    if dichroic == 'mirror':
        redcutoff = 4000.
        dich_file = ''
    elif dichroic == '460':
        redcutoff = 4600.
        dich_file = '460'
    elif dichroic == '500':
        redcutoff = 5000.
        dich_file = '500'
    elif dichroic == '560':
        redcutoff = 5500.
        dich_file = '560'
    elif dichroic == '680':
        redcutoff = 6700.
        dich_file = '680'
    else:
        redcutoff = 3500.
        dich_file = ''

    nsize = 0
    csize = 0
    wide_slits = []
    linewidth = []
    slits = [[1150, 1250]]
    for i, j in slits:
        csize += int(j - i + cmax) + 5
        nsize += j - i + 5
        mod = scipy.where((yback > i) & (yback < j))
        a = mod[0].min() - 4
        b = mod[0].max() + 4
        if a < 0:
            a = 0
        if b > axis1:
            b = axis1
        wide_slits.append([a, b])
        if len(wide_slits) % 7 == 0 or len(slits) == 1:
            linewidth.append(
                measure_width.measure(arc_ycor[(i + j) / 2, :], 15))
    csize -= 5
    nsize -= 5

    linewidth = scipy.median(scipy.asarray(linewidth))

    print "Loading wavelength model"
    lris_path = lris_red.__path__[0]

    filename = lris_path + "/uves_sky.model"
    infile = open(filename, "r")
    wavecalmodel = load(infile)
    infile.close()
    wave = scipy.arange(3400., 10400., 0.1)

    if dich_file != '':
        filename = lris_path + "/dichroics/dichroic_" + dich_file + "_t.dat"
        infile = open(filename, "r")
        input = sio.read_array(infile)
        infile.close()
        spline = interpolate.splrep(input[:, 0], input[:, 1])
        dich = interpolate.splev(wave, spline)
        dich[wave < 4500.] = 1.
        dich[wave > 8800.] = 1.
        del input, spline
    else:
        dich = scipy.ones(wave.size)
    wavemodel = interpolate.splev(wave, wavecalmodel)
    finemodel = ndimage.gaussian_filter1d(wavemodel, linewidth * scale / 0.1)
    wavemodel = ndimage.gaussian_filter1d(finemodel, 5. / 0.1)
    finemodel *= dich
    finemodel = interpolate.splrep(wave, finemodel)
    wavemodel *= dich
    widemodel = interpolate.splrep(wave, wavemodel)
    goodmodel = finemodel
    del dich, wave, wavemodel

    extractwidth = 10

    print "Creating output arrays"
    outlength = int(axis2 * 1.6)
    out = scipy.zeros((nsci, nsize, outlength), scipy.float32) * scipy.nan
    out2 = scipy.zeros((2, csize, outlength), scipy.float32) * scipy.nan

    if cache:
        print "Caching..."
        strtfile = out_prefix + "_TMPSTRT.fits"
        bgfile = out_prefix + "_TMPBSUB.fits"
        try:
            os.remove(strtfile)
        except:
            pass

        outfile = pyfits.PrimaryHDU(out)
        outfile.header.update('CTYPE1', 'LINEAR')
        outfile.header.update('CRPIX1', 1)
        outfile.header.update('CRVAL1', mswave - (0.5 * out2.shape[2]) * scale)
        outfile.header.update('CD1_1', scale)
        outfile.header.update('CTYPE2', 'LINEAR')
        outfile.header.update('CRPIX2', 1)
        outfile.header.update('CRVAL2', 1)
        outfile.header.update('CD2_2', 1)
        if nsci > 1:
            outfile.header.update('CTYPE3', 'LINEAR')
            outfile.header.update('CRPIX3', 1)
            outfile.header.update('CRVAL3', 1)
            outfile.header.update('CD3_3', 1)
        outfile.writeto(strtfile)
        del outfile, out

        try:
            os.remove(bgfile)
        except:
            pass

        outfile = pyfits.PrimaryHDU(out2)
        outfile.header.update('CTYPE1', 'LINEAR')
        outfile.header.update('CRPIX1', 1)
        outfile.header.update('CRVAL1', mswave - (0.5 * out2.shape[2]) * scale)
        outfile.header.update('CD1_1', scale)
        outfile.header.update('CTYPE2', 'LINEAR')
        outfile.header.update('CRPIX2', 1)
        outfile.header.update('CRVAL2', 1)
        outfile.header.update('CD2_2', 1)
        outfile.header.update('CTYPE3', 'LINEAR')
        outfile.header.update('CRPIX3', 1)
        outfile.header.update('CRVAL3', 1)
        outfile.header.update('CD3_3', 1)
        outfile.writeto(bgfile)
        del outfile, out2

    posc = 0
    posn = 0
    count = 1
    for k in range(len(slits)):
        i, j = slits[k]
        a, b = wide_slits[k]
        ##
        if count < 1:
            count += 1
            continue
        ##
        print "Working on slit %d (%d to %d)" % (count, i, j)
        sky2x, sky2y, ccd2wave = wavematch(a, scidata[:, a:b], arc_ycor[i:j],
                                           yforw[i:j], widemodel, finemodel,
                                           goodmodel, scale, mswave, redcutoff)

        strt, bgsub, varimg = doskysub(i, j - i, outlength, scidata[:, a:b],
                                       yback[a:b], sky2x, sky2y, ccd2wave,
                                       scale, mswave, center, redcutoff,
                                       airmass)

        h = strt.shape[1]
        if cache:
            file = pyfits.open(strtfile, mode="update")
            out = file[0].data
        out[:, posn:posn + h] = strt.copy()
        if cache:
            file.close()
            del file, out
        posn += h + 5

        ##
        #		lris_red.skysub.RESAMPLE = 1
        #		count += 1
        #		continue
        ##

        h = bgsub.shape[0]
        if cache:
            file = pyfits.open(bgfile, mode="update")
            out2 = file[0].data
        out2[0, posc:posc + h] = bgsub.copy()
        out2[1, posc:posc + h] = varimg.copy()
        if cache:
            file.close()
            del file, out2
        posc += h + 5
        ##
        #		count += 1
        #		continue
        ##
        tmp = scipy.where(scipy.isnan(bgsub), 0., bgsub)
        filter = tmp.sum(axis=0)
        mod = scipy.where(filter != 0)
        start = mod[0][0]
        end = mod[0][-1] + 1
        del tmp
        slit = bgsub[:, start:end]
        spectra = extract(slit, varimg[:, start:end], extractwidth)
        num = 1
        crval = mswave - (0.5 * bgsub.shape[1] - start) * scale
        for spec in spectra:
            for item in spec:
                if item.size == 4:
                    hdu = pyfits.PrimaryHDU()
                    hdu.header.update('CENTER', item[2])
                    hdu.header.update('WIDTH', item[3])
                    hdulist = pyfits.HDUList([hdu])
                else:
                    thdu = pyfits.ImageHDU(item)
                    thdu.header.update('CRVAL1', crval)
                    thdu.header.update('CD1_1', scale)
                    thdu.header.update('CRPIX1', 1)
                    thdu.header.update('CRVAL2', 1)
                    thdu.header.update('CD2_2', 1)
                    thdu.header.update('CRPIX2', 1)
                    thdu.header.update('CTYPE1', 'LINEAR')
                    hdulist.append(thdu)
            outname = out_prefix + "_spec_%02d_%02d.fits" % (count, num)
            hdulist.writeto(outname)
            num += 1

        count += 1

##
#	file = pyfits.open(bgfile)
#	file.writeto(out_prefix+"_save.fits")
#	return
##

    if cache:
        file = pyfits.open(bgfile)
        out2 = file[0].data.copy()
        del file
    tmp = out2[0].copy()
    tmp = scipy.where(scipy.isnan(tmp), 0, 1)
    mod = scipy.where(tmp.sum(axis=0) != 0)
    start = mod[0][0]
    end = mod[0][-1] + 1
    del tmp

    outname = out_prefix + "_bgsub.fits"
    outfile = pyfits.PrimaryHDU(out2[0, :, start:end])
    outfile.header.update('CTYPE1', 'LINEAR')
    outfile.header.update('CRPIX1', 1)
    outfile.header.update('CRVAL1',
                          mswave - (0.5 * out2.shape[2] - start) * scale)
    outfile.header.update('CD1_1', scale)
    outfile.header.update('CRPIX2', 1)
    outfile.header.update('CRVAL2', 1)
    outfile.header.update('CD2_2', 1)
    outfile.writeto(outname)
    hdr = outfile.header.copy()

    outname = out_prefix + "_var.fits"
    outfile = pyfits.PrimaryHDU(out2[1, :, start:end])
    outfile.header = hdr
    outfile.writeto(outname)
    del out2, hdr

    if cache:
        file = pyfits.open(strtfile)
        out = file[0].data.copy()
        del file
    outname = out_prefix + "_straight.fits"
    outfile = pyfits.PrimaryHDU(out[:, :, start:end])
    outfile.header.update('CTYPE1', 'LINEAR')
    outfile.header.update('CRPIX1', 1)
    outfile.header.update('CRVAL1',
                          mswave - (0.5 * out.shape[2] - start) * scale)
    outfile.header.update('CD1_1', scale)
    outfile.header.update('CRPIX2', 1)
    outfile.header.update('CRVAL2', 1)
    outfile.header.update('CD2_2', 1)
    if nsci > 1:
        outfile.header.update('CRPIX3', 1)
        outfile.header.update('CRVAL3', 1)
        outfile.header.update('CD3_3', 1)
    outfile.writeto(outname)

    del out, outfile