Example 1
def fname_to_date_tuple(fname):
    '''Take a filename like m120507_0123 and return the date as a
    (year, month, day) tuple, e.g. ('2012', 'may', 7).'''
    months = {
        "01": "jan",
        "02": "feb",
        "03": "mar",
        "04": "apr",
        "05": "may",
        "06": "jun",
        "07": "jul",
        "08": "aug",
        "09": "sep",
        "10": "oct",
        "11": "nov",
        "12": "dec"
    }

    if len(fname) != 17:
        raise Exception("The file name '%s' is not of correct length. It "
                        "must be of the form mYYmmdd_nnnn.fits" % fname)

    try:
        fdate = fname.split("m")[1][0:6]
        yr, mn, dy = "20" + fdate[0:2], fdate[2:4], int(fdate[4:6])
        month = months[mn]
    except (IndexError, KeyError, ValueError):
        # Re-raise: otherwise yr/month/dy would be undefined at the return.
        warning("Could not parse date out of file name: %s" % (fname))
        raise

    return yr, month, dy
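A minimal usage sketch of the parser above (the 17-character mYYMMDD_nnnn.fits layout comes from the docstring; the call itself is illustrative, not part of the original module):

# Hypothetical call demonstrating the slicing logic:
yr, month, dy = fname_to_date_tuple("m120507_0123.fits")
print((yr, month, dy))  # -> ('2012', 'may', 7)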
Example 2
def readfits(path, use_bpm=False):
    '''Read a fits file from path (preferring a gzipped version if one
    exists) and return a tuple of (header, data). If use_bpm is set, the
    data are returned as a masked array with the bad pixel mask applied.'''

    if os.path.exists(path + ".gz"):
        path = path + ".gz"

    if not os.path.exists(path):
        error("The file at path '%s' does not exist." % path)
        raise Exception("The file at path '%s' does not exist." % path)

    hdulist = pf.open(path)
    header = hdulist[0].header
    data = hdulist[0].data
    datasec = ""
    try:
        datasec = header["DATASEC"]
        warning("%s contains a DATASEC keyword not compatible with the pipeline" % path)
        warning("The content of the keyword will be erased on the reduced data")
        del header["DATASEC"]
    except KeyError:
        pass
    if use_bpm:
        theBPM = badpixelmask()
        data = np.ma.masked_array(data, theBPM, fill_value=0)

    return (header, data)
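A hedged usage sketch for readfits as defined above (assumes pf is pyfits/astropy.io.fits and that badpixelmask() yields a boolean detector-sized mask, as the masked_array call implies):

header, data = readfits("m120507_0123.fits", use_bpm=True)
# With use_bpm=True the data come back as a numpy masked array; realize it
# (bad pixels -> fill_value=0) before handing it to mask-unaware code.
image = data.filled() if hasattr(data, "filled") else data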
Example 3
def readfits(path, use_bpm=False):
    '''Read a fits file from path (preferring a gzipped version if one
    exists) and return a tuple of (header, data). If use_bpm is set, the
    data are returned as a masked array with the bad pixel mask applied.'''

    if os.path.exists(path + ".gz"):
        path = path + ".gz"

    if not os.path.exists(path):
        error("The file at path '%s' does not exist." % path)
        raise Exception("The file at path '%s' does not exist." % path)

    hdulist = pf.open(path)
    header = hdulist[0].header
    data = hdulist[0].data
    datasec = ""
    try:
        datasec = header["DATASEC"]
        warning(
            "%s contains a DATASEC keyword not compatible with the pipeline" %
            path)
        warning(
            "The content of the keyword will be erased on the reduced data")
        del header["DATASEC"]
    except KeyError:
        pass
    if use_bpm:
        theBPM = badpixelmask()
        data = np.ma.masked_array(data, theBPM, fill_value=0)

    return (header, data)
Example 4
def readscitbl(path):

    print path

    hdulist = pf.open(path)
    header = hdulist[0].header
    targs = ssl = msl = asl = None
    try:
        targs = hdulist[1].data
        ssl = hdulist[2].data
        msl = hdulist[3].data
        asl = hdulist[4].data
    except IndexError:
        warning("Improper MOSFIRE FITS File: %s" % path)

    return header, targs, ssl, msl, asl
Example 5
def readscitbl(path):

    print path

    hdulist = pf.open(path)
    header = hdulist[0].header
    targs = ssl = msl = asl = None
    try:
        targs = hdulist[1].data
        ssl = hdulist[2].data
        msl = hdulist[3].data
        asl = hdulist[4].data
    except IndexError:
        warning("Improper MOSFIRE FITS File: %s" % path)

    return header, targs, ssl, msl, asl
Example 6
def polyfit_clip(xs, ys, order, nsig=2.5):

    ff = np.poly1d(np.polyfit(xs, ys, order))
    sd = np.std(ys - ff(xs))
    if sd == 0.0:
        warning('Clipping failed because stddev=0, using unclipped fit.')
        result = ff
    else:
        r = np.abs(ys - ff(xs))
        ok = r < (sd * nsig)
        try:
            # Wrap in poly1d so both branches return the same type.
            result = np.poly1d(np.polyfit(xs[ok], ys[ok], order))
        except Exception:
            warning('Clipping failed, using unclipped fit.')
            result = ff

    return result
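A quick synthetic check of the sigma-clipped fit (illustrative data; assumes the consistent np.poly1d return type used above):

import numpy as np

np.random.seed(0)
xs = np.linspace(0.0, 10.0, 100)
ys = 2.0 * xs + 1.0 + np.random.normal(0.0, 0.1, xs.size)
ys[50] += 50.0                 # one strong outlier, well beyond 2.5 sigma
fit = polyfit_clip(xs, ys, 1)  # outlier is excluded from the final fit
print(fit(5.0))                # ~11.0 rather than being dragged upward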
Example 7
def fname_to_date_tuple(fname):
    '''Take a filename like m120507_0123 and return the date as a
    (year, month, day) tuple, e.g. ('2012', 'may', 7).'''
    months = {"01": "jan", "02": "feb", "03": "mar", "04": "apr", "05": "may",
        "06": "jun", "07": "jul", "08": "aug", "09": "sep", "10": "oct",
        "11": "nov", "12": "dec"}

    if len(fname) != 17:
        raise Exception("The file name '%s' is not of correct length. It "
                "must be of the form mYYmmdd_nnnn.fits" % fname)
    
    try:
        fdate = fname.split("m")[1][0:6]
        yr, mn, dy = "20" + fdate[0:2], fdate[2:4], int(fdate[4:6])
        month = months[mn]
    except (IndexError, KeyError, ValueError):
        # Re-raise: otherwise yr/month/dy would be undefined at the return.
        warning("Could not parse date out of file name: %s" % (fname))
        raise
    
    return yr, month, dy
Example 8
def load_edges(maskname, band, options):
    ''' Load the slit edge functions. Returns (edges, metadata) '''
    # Path handling via options["outdir"] is disabled here; the edge file
    # is read from the current working directory.
    fn = "slit-edges_{0}.npy".format(band)
    try:
        edges = np.load(fn)
    except IOError:
        error("Cannot load slit edges file")
        raise Exception("Cannot load slit edges file")
    edges, meta = edges[0:-1], edges[-1]

    if meta['maskname'] != maskname:
        warning("The requested maskname '%s' does not match the maskname "
                "'%s' in the edge file" % (maskname, meta['maskname']))
        warning("Continuing")

    return edges, meta
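The file layout load_edges expects follows from the code: a saved object array whose last element is a metadata dict with a 'maskname' key, preceded by one record per slit (the callable 'top'/'bottom' fields are implied by the edges[slitno]["top"](1024) usage further down this page). A hedged sketch of writing a compatible file; the per-slit field contents are illustrative assumptions:

import numpy as np

edge_records = [
    # Per-slit record; field names inferred from usage elsewhere on this page.
    {"top": np.poly1d([2000.0]), "bottom": np.poly1d([1950.0])},
]
meta = {"maskname": "mymask"}
np.save("slit-edges_K.npy", np.array(edge_records + [meta], dtype=object))
# Note: with numpy >= 1.16.3, reading this back requires
# np.load(fn, allow_pickle=True); the bare np.load above predates that.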
Example 9
def load_edges(maskname, band, options):
    ''' Load the slit edge functions. Returns (edges, metadata) '''
    # Path handling via options["outdir"] is disabled here; the edge file
    # is read from the current working directory.
    fn = "slit-edges_{0}.npy".format(band)
    try:
        edges = np.load(fn)
    except IOError:
        error("Cannot load slit edges file")
        raise Exception("Cannot load slit edges file")
    edges, meta = edges[0:-1], edges[-1]

    if meta['maskname'] != maskname:
        warning("The requested maskname '%s' does not match the maskname "
                "'%s' in the edge file" % (maskname, meta['maskname']))
        warning("Continuing")

    return edges, meta
Example 10
def load_lambdaslit(fnum, maskname, band, options):
    ''' Load the wavelength coefficient functions '''
    # Path handling via options["outdir"] is disabled here; the lambda
    # solution file is read from the current working directory.
    fn = "lambda_solution_{0}.fits".format(fnum)

    print fn

    ret = readfits(fn, options)
    if ret[0]['filter'] != band:
        error("Band name mismatch")
        raise Exception("band name mismatch")

    if ret[0]['maskname'] != maskname:
        warning("The requested maskname '%s' does not match the maskname "
                "'%s' in the lambda solution file" % (maskname, ret[0]['maskname']))
        warning("Continuing")

    # Return the already-read file rather than reading it a second time.
    return ret
Example 11
def load_lambdaslit(fnum, maskname, band, options):
    ''' Load the wavelength coefficient functions '''
    # Path handling via options["outdir"] is disabled here; the lambda
    # solution file is read from the current working directory.
    fn = "lambda_solution_{0}.fits".format(fnum)

    print fn

    ret = readfits(fn, options)
    if ret[0]['filter'] != band:
        error("Band name mismatch")
        raise Exception("band name mismatch")

    if ret[0]['maskname'] != maskname:
        warning("The requested maskname '%s' does not match the maskname "
                "'%s' in the lambda solution file" % (maskname, ret[0]['maskname']))
        warning("Continuing")

    # Return the already-read file rather than reading it a second time.
    return ret
Example 12
def readmosfits(fname, options, extension=None):
    '''Read a fits file written by MOSFIRE from path and return a tuple of
    (header, data, Barset), where the Barset bundles the Target List,
    Science Slit List (SSL), Mechanical Slit List (MSL), and Alignment
    Slit List (ASL).

    Note: the extension argument is typically not needed; it is only used
    when the detector server does not append the slit extensions.
    '''

    if os.path.isabs(fname): path = fname
    else: path = os.path.join(fname_to_path(fname, options), fname)

    hdulist = pf.open(path)
    header = hdulist[0].header
    data = hdulist[0].data

    theBPM = badpixelmask()
    data = np.ma.masked_array(data, theBPM)

    if extension is not None:
        hdulist = pf.open(extension)

    try:
        header = hdulist[0].header
        datasec = ""
        try:
            datasec = header["DATASEC"]
            warning("%s contains a DATASEC keyword not compatible with the pipeline" % path)
            warning("The content of the keyword will be erased on the reduced data")
            del header["DATASEC"]
        except KeyError:
            pass
        targs = hdulist[1].data
        ssl = hdulist[2].data
        msl = hdulist[3].data
        asl = hdulist[4].data
    except Exception:
        error("Improper MOSFIRE FITS File: %s" % path)
        raise Exception("Improper MOSFIRE FITS File: %s" % path)

    if np.abs(header["REGTMP1"] - 77) > 0.1:
        warning("**************************************")
        warning("The temperature of the detector is %3.3f where it "
                "should be 77.000 deg. Please notify Keck support staff." %
                header["REGTMP1"])

    ssl = ssl[ssl.field("Slit_Number") != ' ']
    msl = msl[msl.field("Slit_Number") != ' ']
    asl = asl[asl.field("Slit_Number") != ' ']

    # ELIMINATE POSITION B of the long2pos slit
    ssl = ssl[ssl.field("Target_Name") != 'posB']
    msl = msl[msl.field("Target_in_Slit") != 'posB']
    asl = asl[asl.field("Target_in_Slit") != 'posBalign']
    targs = targs[targs.field("Target_Name") != 'posB']
    targs = targs[targs.field("Target_Name") != "posBalign"]

    bs = CSU.Barset()
    bs.set_header(header, ssl=ssl, msl=msl, asl=asl, targs=targs)

    return (header, data, bs)
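The slit tables are FITS record arrays, so the posB filtering above is ordinary boolean indexing on a named field (arr.field(name) and arr[name] are equivalent). A self-contained sketch of the same pattern on illustrative data:

import numpy as np

ssl = np.array([("1", "target1"), ("2", "posB"), (" ", "")],
               dtype=[("Slit_Number", "U2"), ("Target_Name", "U10")])
ssl = ssl[ssl["Slit_Number"] != " "]     # drop blank/placeholder rows
ssl = ssl[ssl["Target_Name"] != "posB"]  # drop long2pos position B
print(ssl["Target_Name"])                # -> ['target1']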
Example 13
def readmosfits(fname, options, extension=None):
    '''Read a fits file written by MOSFIRE from path and return a tuple of
    (header, data, Barset), where the Barset bundles the Target List,
    Science Slit List (SSL), Mechanical Slit List (MSL), and Alignment
    Slit List (ASL).

    Note: the extension argument is typically not needed; it is only used
    when the detector server does not append the slit extensions.
    '''

    if os.path.isabs(fname): path = fname
    else: path = os.path.join(fname_to_path(fname, options), fname)

    hdulist = pf.open(path)
    header = hdulist[0].header
    data = hdulist[0].data

    theBPM = badpixelmask()
    data = np.ma.masked_array(data, theBPM)

    if extension is not None:
        hdulist = pf.open(extension)

    try:
        header = hdulist[0].header
        datasec = ""
        try:
            datasec = header["DATASEC"]
            #warning("%s contains a DATASEC keyword not compatible with the pipeline" % path)
            #warning("The content of the keyword will be erased on the reduced data")
            del header["DATASEC"]
        except KeyError:
            pass
        targs = hdulist[1].data
        ssl = hdulist[2].data
        msl = hdulist[3].data
        asl = hdulist[4].data
    except Exception:
        error("Improper MOSFIRE FITS File: %s" % path)
        raise Exception("Improper MOSFIRE FITS File: %s" % path)

    if np.abs(header["REGTMP1"] - 77) > 0.1:
        warning("**************************************")
        warning("The temperature of the detector is %3.3f where it "
                "should be 77.000 deg. Please notify Keck support staff." %
                header["REGTMP1"])

    ssl = ssl[ssl.field("Slit_Number") != ' ']
    msl = msl[msl.field("Slit_Number") != ' ']
    asl = asl[asl.field("Slit_Number") != ' ']

    # ELIMINATE POSITION B of the long2pos slit
    ssl = ssl[ssl.field("Target_Name") != 'posB']
    msl = msl[msl.field("Target_in_Slit") != 'posB']
    asl = asl[asl.field("Target_in_Slit") != 'posBalign']
    targs = targs[targs.field("Target_Name") != 'posB']
    targs = targs[targs.field("Target_Name") != "posBalign"]

    bs = CSU.Barset()
    bs.set_header(header, ssl=ssl, msl=msl, asl=asl, targs=targs)

    return (header, data, bs)
Example 14
def handle_rectification(maskname, in_files, wavename, band_pass, files, options,
        commissioning_shift=3.0, target='default'):
    '''Handle slit rectification and coaddition.

    Args:
        maskname: The mask name string
        in_files: List of stacked spectra in electron per second. Will look
            like ['electrons_Offset_1.5.txt.fits', 'electrons_Offset_-1.5.txt.fits']
        wavename: path (relative or full) to the wavelength stack file, string
        band_pass: Band pass name, string
        files: Path to a mosfire fits file containing the full set of
            FITS extensions for the barset. It can be any file in the list
            of science files.
    Returns:
        None

    Writes files:
        [maskname]_[band]_[object name]_eps.fits --
            The rectified, background subtracted, stacked eps spectrum
        [maskname]_[band]_[object name]_sig.fits --
            Rectified, background subtracted, stacked weight spectrum (STD/itime)
        [maskname]_[band]_[object_name]_itime.fits --
            Rectified, CRR stacked integration time spectrum
        [maskname]_[band]_[object_name]_snrs.fits --
            Rectified signal to noise spectrum
    '''

    global edges, dats, vars, itimes, shifts, lambdas, band, fidl, all_shifts
    band = band_pass

    
    dlambda = Wavelength.grating_results(band)

    hpp = Filters.hpp[band]
    fidl = np.arange(hpp[0], hpp[1], dlambda)

    lambdas = IO.readfits(wavename, options)

    if np.any(lambdas[1].data < 0) or np.any(lambdas[1].data > 29000):
        info("***********WARNING ***********")
        info("The file {0} may not be a wavelength file.".format(wavename))
        info("Check before proceeding.")
        info("***********WARNING ***********")

    edges, meta = IO.load_edges(maskname, band, options)
    shifts = []

    posnames = []
    postoshift = {}
    
    for file in in_files:

        info(":: "+str(file))
        II = IO.read_drpfits(maskname, file, options)

        off = np.array((II[0]["decoff"], II[0]["raoff"]),dtype=np.float64)
        if "yoffset" in II[0]:
            off = -II[0]["yoffset"]
        else:
            # Deal with data taken during commissioning
            if II[0]["frameid"] == 'A': off = 0.0
            else: off = commissioning_shift

        try: off0
        except NameError: off0 = off

        shift = off - off0

        shifts.append(shift)
        posnames.append(II[0]["frameid"])
        postoshift[II[0]['frameid']] = shift
    
        info("Position {0} shift: {1:2.2f} as".format(off, shift))
    # this is to deal with cases in which we want to rectify one single file
    if len(set(posnames)) == 1:
        plans = [['A']]
    else:
        plans = Background.guess_plan_from_positions(set(posnames))

    all_shifts = []
    for plan in plans:
        to_append = []
        for pos in plan:
            to_append.append(postoshift[pos])

        all_shifts.append(to_append)

    # Reverse the elements in all_shifts to deal with an inversion
    all_shifts.reverse()

    theBPM = IO.badpixelmask()

    all_solutions = []
    cntr = 0

    if target == 'default':
        outname = maskname
    else:
        outname = target

    for plan in plans:
        if len(plan) == 1:
            p0 = 'A'
            p1 = 'B'
        else:
            p0 = plan[0].replace("'", "p")
            p1 = plan[1].replace("'", "p")
        suffix = "%s-%s" % (p0,p1)
        info("Handling plan %s" % suffix)
        fname = "bsub_{0}_{1}_{2}.fits".format(outname,band,suffix)
        EPS = IO.read_drpfits(maskname, fname, options)
        EPS[1] = np.ma.masked_array(EPS[1], theBPM, fill_value=0)

        fname = "var_{0}_{1}_{2}.fits".format(outname, band, suffix)
        VAR = IO.read_drpfits(maskname, fname, options)
        VAR[1] = np.ma.masked_array(VAR[1], theBPM, fill_value=np.inf)

        fname = "itime_{0}_{1}_{2}.fits".format(outname, band, suffix)
        ITIME = IO.read_drpfits(maskname, fname, options)
        ITIME[1] = np.ma.masked_array(ITIME[1], theBPM, fill_value=0)


        dats = EPS
        vars = VAR
        itimes = ITIME

        EPS[0]["ORIGFILE"] = fname

        tock = time.time()
        sols = range(len(edges)-1,-1,-1)

        shifts = all_shifts[cntr]
        cntr += 1
        p = Pool()
        solutions = p.map(handle_rectification_helper, sols)
        p.close()

        all_solutions.append(solutions)

    tick = time.time()
    info("-----> Mask took %i. Writing to disk." % (tick-tock))


    output = np.zeros((1, len(fidl)))
    snrs = np.zeros((1, len(fidl)))
    sdout= np.zeros((1, len(fidl)))
    itout= np.zeros((1, len(fidl)))


    # the barset [bs] is used for determining object position
    files = IO.list_file_to_strings(files)
    info("Using "+str(files[0])+" for slit configuration.")
    x, x, bs = IO.readmosfits(files[0], options)
    

    for i_slit in xrange(len(solutions)):
        solution = all_solutions[0][i_slit]
        header = EPS[0].copy()
        obj = header['OBJECT']

        target_name = bs.ssl[-(i_slit+1)]['Target_Name']
        header['OBJECT'] = target_name

        pixel_dist = np.float(bs.ssl[-(i_slit+1)]['Target_to_center_of_slit_distance'])/0.18

        pixel_dist -= solution['offset']

        ll = solution["lambda"]

        header["wat0_001"] = "system=world"
        header["wat1_001"] = "wtype=linear"
        header["wat2_001"] = "wtype=linear"
        header["dispaxis"] = 1
        header["dclog1"] = "Transform"
        header["dc-flag"] = 0
        header["ctype1"] = "AWAV"
        header["cunit1"] = "Angstrom"
        header["crval1"] = ll[0]
        header["crval2"] = -solution["eps_img"].shape[0]/2 - pixel_dist
        header["crpix1"] = 1
        header["crpix2"] = 1
        #remove redundant CDELTi due to wavelength issues with ds9
        #see: https://github.com/Keck-DataReductionPipelines/MosfireDRP/issues/44
        #header["cdelt1"] = 1
        #header["cdelt2"] = 1
        header["cname1"] = "angstrom"
        header["cname2"] = "pixel"
        header["cd1_1"] = ll[1]-ll[0]
        header["cd1_2"] = 0
        header["cd2_1"] = 0
        header["cd2_2"] = 1
        try:
            header["BARYCORR"]= (lambdas[0]['BARYCORR'],lambdas[0].comments['BARYCORR'])
        except KeyError:
            warning( "Barycentric corrections not applied to the wavelength solution")
            pass
        

        S = output.shape

        img = solution["eps_img"]
        std = solution["sd_img"]
        tms = solution["itime_img"]


        for i_solution in xrange(1,len(all_solutions)):
            info("Combining solution %i" %i_solution)
            solution = all_solutions[i_solution][i_slit]
            img += solution["eps_img"]
            std += solution["sd_img"]
            tms += solution["itime_img"]
        #print "adding in quadrature"
        
        output = np.append(output, img, 0)
        output = np.append(output, np.nan*np.zeros((3,S[1])), 0)
        snrs = np.append(snrs, img*tms/std, 0)
        snrs = np.append(snrs, np.nan*np.zeros((3,S[1])), 0)
        sdout = np.append(sdout, std, 0)
        sdout = np.append(sdout, np.nan*np.zeros((3,S[1])), 0)
        itout = np.append(itout, tms, 0)
        itout = np.append(itout, np.nan*np.zeros((3,S[1])), 0)

        header['bunit'] = ('electron/second', 'electron power')
        IO.writefits(img, maskname,
            "{0}_{1}_{2}_eps.fits".format(outname, band, target_name), options,
            overwrite=True, header=header, lossy_compress=False)

        header['bunit'] = ('electron/second', 'sigma/itime')
        IO.writefits(std/tms, maskname,
            "{0}_{1}_{2}_sig.fits".format(outname, band, target_name), options,
            overwrite=True, header=header, lossy_compress=False)

        header['bunit'] = ('second', 'exposure time')
        IO.writefits(tms, maskname,
            "{0}_{1}_{2}_itime.fits".format(outname, band, target_name), options,
            overwrite=True, header=header, lossy_compress=False)

        header['bunit'] = ('', 'SNR')
        IO.writefits(img*tms/std, maskname,
            "{0}_{1}_{2}_snrs.fits".format(outname, band, target_name), options,
            overwrite=True, header=header, lossy_compress=False)

    header = EPS[0].copy()
    header["wat0_001"] = "system=world"
    header["wat1_001"] = "wtype=linear"
    header["wat2_001"] = "wtype=linear"
    header["dispaxis"] = 1
    header["dclog1"] = "Transform"
    header["dc-flag"] = 0
    header["ctype1"] = "AWAV"
    header["cunit1"] = ("Angstrom", 'Start wavelength')
    header["crval1"] = ll[0]
    header["crval2"] = 1
    header["crpix1"] = 1
    header["crpix2"] = 1
    #remove redundant CDELTi due to wavelength issues with ds9
    #see: https://github.com/Keck-DataReductionPipelines/MosfireDRP/issues/44
    #header["cdelt1"] = 1
    #header["cdelt2"] = 1
    header["cname1"] = "angstrom"
    header["cname2"] = "pixel"
    header["cd1_1"] = (ll[1]-ll[0], 'Angstrom/pixel')
    header["cd1_2"] = 0
    header["cd2_1"] = 0
    header["cd2_2"] = 1
    try:
        header["BARYCORR"]= (lambdas[0]['BARYCORR'],lambdas[0].comments['BARYCORR'])
    except KeyError:
        warning( "Barycentric corrections not applied to the wavelength solution")
        pass


    header["bunit"] = "ELECTRONS/SECOND"
    info("############ Final reduced file: {0}_{1}_eps.fits".format(outname,band))
    IO.writefits(output, maskname, "{0}_{1}_eps.fits".format(outname,
        band), options, overwrite=True, header=header,
        lossy_compress=False)

    header["bunit"] = ""
    IO.writefits(snrs, maskname, "{0}_{1}_snrs.fits".format(outname,
        band), options, overwrite=True, header=header,
        lossy_compress=False)

    header["bunit"] = "ELECTRONS/SECOND"
    IO.writefits(sdout/itout, maskname, "{0}_{1}_sig.fits".format(outname,
        band), options, overwrite=True, header=header,
        lossy_compress=False)

    header["bunit"] = "SECOND"
    IO.writefits(itout, maskname, "{0}_{1}_itime.fits".format(outname,
        band), options, overwrite=True, header=header,
        lossy_compress=False)
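The headers written above describe a linear wavelength axis: CRVAL1 holds the first wavelength and CD1_1 the dispersion in Angstrom per pixel. A minimal sketch of rebuilding the wavelength array from one of the output files (the filename is illustrative; uses astropy.io.fits):

import numpy as np
from astropy.io import fits

hdu = fits.open("mymask_K_mytarget_eps.fits")[0]
n = hdu.header["NAXIS1"]
pix = np.arange(1, n + 1)  # FITS pixels are 1-indexed
wave = hdu.header["CRVAL1"] + hdu.header["CD1_1"] * (pix - hdu.header["CRPIX1"])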
Example 15
def background_subtract_helper(slitno):
    '''

    Background subtraction follows the methods outlined by Kelson (2003). Here
    a background is estimated as a function of wavelength using B-splines and
    subtracted off. The assumption is that the background is primarily a
    function of wavelength, and thus by sampling the background across the
    full 2-d spectrum, the background is sampled at a much higher rate than
    the native spectral resolution of MOSFIRE.

    Formally, the assumption that the background is only a function of
    wavelength is incorrect; indeed, a "transmission function" is estimated
    from the 2d spectrum. This gives an estimate of the throughput of the
    slit, which is divided out.

    1. Extract the slit from the 2d image.
    2. Convert the 2d spectrum into a 1d spectrum
    3. Estimate transmission function

    '''

    global header, bs, edges, data, Var, itime, lam, sky_sub_out, sky_model_out, band
    tick = time.time()

    # 1
    top = np.int(edges[slitno]["top"](1024))  
    bottom = np.int(edges[slitno]["bottom"](1024)) 
    info("Background subtracting slit %i [%i,%i]" % (slitno, top, bottom))

    pix = np.arange(2048)
    xroi = slice(0,2048)
    yroi = slice(bottom, top)

    stime = itime[yroi, xroi]
    slit  = data[yroi, xroi] 
    Var[np.logical_not(np.isfinite(Var))] = np.inf

    lslit = lam[1][yroi,xroi]

    # 2
    xx = np.arange(slit.shape[1])
    yy = np.arange(slit.shape[0])

    X,Y = np.meshgrid(xx,yy)

    train_roi = slice(5,-5)
    ls = lslit[train_roi].flatten().filled(0)
    ss = slit[train_roi].flatten()
    ys = Y[train_roi].flatten()

    dl = np.ma.median(np.diff(lslit[lslit.shape[0]/2,:]))
    if dl == 0:
        return {"ok": False}

    sort = np.argsort(ls)
    ls = ls[sort]
    ys = ys[sort]

    hpps = np.array(Filters.hpp[band]) 

    diff = np.append(np.diff(ls), False)

    OK = (diff > 0.001) & (ls > hpps[0]) & (ls < hpps[1]) & (np.isfinite(ls)) \
            & (np.isfinite(ss[sort]))

    if len(np.where(OK)[0]) < 1000:
        warning("Failed on slit "+str(slitno))
        return {"ok": False}

    # 3
    pp = np.poly1d([1.0])
    ss = (slit[train_roi] / pp(Y[train_roi])).flatten()
    ss = ss[sort]

    knotstart = max(hpps[0], min(ls[OK])) + 5
    knotend = min(hpps[1], max(ls[OK])) - 5


    for i in range(3):
        try:
            delta = dl*0.9
            knots = np.arange(knotstart, knotend, delta)
            bspline = II.splrep(ls[OK], ss[OK], k=5, task=-1, t=knots)
        except ValueError as e:
            warning('Failed to fit spline with delta = {:5f}'.format(delta))
            warning(str(e))
            delta = dl*1.4
            info('Trying with delta = {:5f}'.format(delta))
            knots = np.arange(knotstart, knotend, delta)
            try:
                bspline = II.splrep(ls[OK], ss[OK], k=5, task=-1, t=knots)
            except ValueError as e:
                warning("Could not construct spline on slit "+str(slitno))
                warning(str(e))
                return {"ok": False}

        ll = lslit.flatten()
        model = II.splev(ll, bspline)

        oob = np.where((ll < knotstart) | (ll > knotend))
        model[oob] = np.median(ss[~np.isnan(ss)])
        model = model.reshape(slit.shape)

        output = slit - model

        std = np.abs(output)/(np.sqrt(np.abs(model)+1))

        tOK = (std[train_roi] < 10).flatten() & \
                np.isfinite(std[train_roi]).flatten()  
        OK = OK & tOK[sort]

    return {"ok": True, "slitno": slitno, "bottom": bottom, "top": top,
            "output": output, "model": model, "bspline": bspline}
Example 16
def imcombine(files, maskname, options, flat, outname=None, shifts=None,
    extension=None):
    '''
    Combine the frames in the input list into a single stack. The code
    also estimates the read noise as RN/sqrt(numreads) so that the variance
    per frame is equal to (ADU + RN^2), where RN is computed in ADUs.

    Arguments:
        files[]: list of full path to files to combine
        maskname: Name of mask
        options: Options dictionary
        flat[2048x2048]: Flat field (values should all be ~ 1.0)
        outname: If set, will write (see notes below for details)
            eps_[outname].fits: electron/sec file
            itimes_[outname].fits: integration time
            var_[outname].fits: Variance files
        shifts[len(files)]: If set, will "roll" each file by the 
            amount in the shifts vector in pixels. This argument
            is used when telescope tracking is poor. If you need
            to use this, please notify Keck staff about poor 
            telescope tracking.

    Returns 6-element tuple:
        header: The combined header
        electrons [2048x2048]:  e- (in e- units)
        var [2048x2048]: electrons + RN**2 (in e-^2 units)
        bs: The MOSFIRE.Barset instance
        itimes [2048x2048]: itimes (in s units)
        Nframe: The number of frames that contribute to the summed
            arrays above. If Nframe > 5 I use the sigma-clipping
            Cosmic Ray Rejection tool. If Nframe < 5 then I drop
            the max/min elements.

    Notes:

        header -- fits header
        ADUs -- The mean # of ADUs per frame
        var -- the Variance [in adu] per frame. 
        bs -- Barset
        itimes -- The _total_ integration time in second
        Nframe -- The number of frames in a stack.

        
        Thus the number of electron per second is derived as: 
            e-/sec = (ADUs * Gain / Flat) * (Nframe/itimes)

        The total number of electrons is:
            el = ADUs * Gain * Nframe


    '''

    ADUs = np.zeros((len(files), 2048, 2048))
    itimes = np.zeros((len(files), 2048, 2048))
    prevssl = None
    prevmn = None
    patternid = None
    maskname = None

    header = None

    if shifts is None:
        shifts = np.zeros(len(files))

    warnings.filterwarnings('ignore')
    for i in xrange(len(files)):
        fname = files[i]
        thishdr, data, bs = IO.readmosfits(fname, options, extension=extension)
        itimes[i,:,:] = thishdr["truitime"]

        base = os.path.basename(fname).rstrip(".fits")
        fnum = int(base.split("_")[1])
        
        if shifts[i] == 0:
            ADUs[i,:,:] = data.filled(0.0) / flat
        else:
            ADUs[i,:,:] = np.roll(data.filled(0.0) / flat, np.int(shifts[i]), axis=0)

        ''' Construct Header'''
        if header is None:
            header = thishdr

        header["imfno%3.3i" % (fnum)] =  (fname, "img%3.3i file name" % fnum)

        map(lambda x: rem_header_key(header, x), ["CTYPE1", "CTYPE2", "WCSDIM",
            "CD1_1", "CD1_2", "CD2_1", "CD2_2", "LTM1_1", "LTM2_2", "WAT0_001",
            "WAT1_001", "WAT2_001", "CRVAL1", "CRVAL2", "CRPIX1", "CRPIX2",
            "RADECSYS"])

        for card in header.cards:
            if card == '': continue
            key,val,comment = card
            
            if key in thishdr:
                if val != thishdr[key]:
                    newkey = key + ("_img%2.2i" % fnum)
                    try: header[newkey.rstrip()] = (thishdr[key], comment)
                    except Exception: pass

        ''' Now handle error checking'''

        if maskname is not None:
            if thishdr["maskname"] != maskname:
                error("File %s uses mask '%s' but the stack is of '%s'" %
                    (fname, thishdr["maskname"], maskname))
                raise Exception("File %s uses mask '%s' but the stack is of '%s'" %
                    (fname, thishdr["maskname"], maskname))

        maskname = thishdr["maskname"]
            
        if thishdr["aborted"]:
            error("Img '%s' was aborted and should not be used" %
                    fname)
            raise Exception("Img '%s' was aborted and should not be used" %
                    fname)

        if prevssl is not None:
            if len(prevssl) != len(bs.ssl):
                # todo Improve these checks
                error("The stack of input files seems to be of "
                        "different masks")
                raise Exception("The stack of input files seems to be of "
                        "different masks")
        prevssl = bs.ssl

        if patternid is not None:
            if patternid != thishdr["frameid"]:
                error("The stack should be of '%s' frames only, but "
                        "the current image is a '%s' frame." % (patternid, 
                            thishdr["frameid"]))
                raise Exception("The stack should be of '%s' frames only, but "
                        "the current image is a '%s' frame." % (patternid, 
                            thishdr["frameid"]))

        patternid = thishdr["frameid"]


        if maskname is not None:
            if maskname != thishdr["maskname"]:
                error("The stack should be of CSU mask '%s' frames "
                        "only but contains a frame of '%s'." % (maskname,
                        thishdr["maskname"]))
                raise Exception("The stack should be of CSU mask '%s' frames "
                        "only but contains a frame of '%s'." % (maskname,
                        thishdr["maskname"]))

        maskname = thishdr["maskname"]

        if thishdr["BUNIT"] != "ADU per coadd":
            error("The units of '%s' are not in ADU per coadd and "
                    "this violates an assumption of the DRP. Some new code " 
                    "is needed in the DRP to handle the new units of "
                    "'%s'." % (fname, thishdr["BUNIT"]))
            raise Exception("The units of '%s' are not in ADU per coadd and "
                    "this violates an assumption of the DRP. Some new code " 
                    "is needed in the DRP to handle the new units of "
                    "'%s'." % (fname, thishdr["BUNIT"]))

        ''' Error checking is complete'''
        debug("%s %s[%s]/%s: %5.1f s,  Shift: %i px" % (fname, maskname, patternid,
            header['filter'], np.mean(itimes[i]), shifts[i]))

    warnings.filterwarnings('always')

    # the electrons and el_per_sec arrays are:
    #   [2048, 2048, len(files)] and contain values for
    # each individual frame that is being combined.
    # These need to be kept here for CRR reasons.
    electrons = np.array(ADUs) * Detector.gain 
    el_per_sec = electrons / itimes

    output = np.zeros((2048, 2048))
    exptime = np.zeros((2048, 2048))

    numreads = header["READS0"]
    RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain
    RN = Detector.RN / np.sqrt(numreads)

    # Cosmic ray rejection code begins here. This code constructs the
    # electrons and itimes arrays.
    standard = True
    new_from_chuck = False
    # Chuck Steidel has provided a modified version of the CRR procedure. 
    # to enable it, modify the variables above.
    
    if new_from_chuck and not standard:
        if len(files) >= 5:
            print "Sigclip CRR"
            srt = np.argsort(electrons, axis=0, kind='quicksort')
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            el_per_sec = el_per_sec[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            # Construct the mean and standard deviation by dropping the top and bottom two 
            # electron fluxes. This is temporary.
            mean = np.mean(el_per_sec[1:-1,:,:], axis = 0)
            std = np.std(el_per_sec[1:-1,:,:], axis = 0)

            drop = np.where( (el_per_sec > (mean+std*4)) | (el_per_sec < (mean-std*4)) )
            print "dropping: ", len(drop[0])
            electrons[drop] = 0.0
            itimes[drop] = 0.0

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files) 

        else:
            warning( "With less than 5 frames, the pipeline does NOT perform")
            warning( "Cosmic Ray Rejection.")
            # the "if false" line disables cosmic ray rejection"
            if False: 
                for i in xrange(len(files)):
                    el = electrons[i,:,:]
                    it = itimes[i,:,:]
                    el_mf = scipy.signal.medfilt(el, 5)

                    bad = np.abs(el - el_mf) / np.abs(el) > 10.0
                    el[bad] = 0.0
                    it[bad] = 0.0

                    electrons[i,:,:] = el
                    itimes[i,:,:] = it

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files) 

    if standard and not new_from_chuck:
        if len(files) >= 9:
            info("Sigclip CRR")
            srt = np.argsort(electrons, axis=0, kind='quicksort')
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            el_per_sec = el_per_sec[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            # Construct the mean and standard deviation by dropping the top and bottom two 
            # electron fluxes. This is temporary.
            mean = np.mean(el_per_sec[2:-2,:,:], axis = 0)
            std = np.std(el_per_sec[2:-2,:,:], axis = 0)

            drop = np.where( (el_per_sec > (mean+std*4)) | (el_per_sec < (mean-std*4)) )
            info("dropping: "+str(len(drop[0])))
            electrons[drop] = 0.0
            itimes[drop] = 0.0

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files) 

        elif len(files) > 5:
            warning( "WARNING: Drop min/max CRR")
            srt = np.argsort(el_per_sec,axis=0)
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            electrons = np.sum(electrons[1:-1,:,:], axis=0)
            itimes = np.sum(itimes[1:-1,:,:], axis=0)

            Nframe = len(files) - 2

        else:
            warning( "With less than 5 frames, the pipeline does NOT perform")
            warning( "Cosmic Ray Rejection.")
            # the "if false" line disables cosmic ray rejection"
            if False: 
                for i in xrange(len(files)):
                     el = electrons[i,:,:]
                     it = itimes[i,:,:]
                     # calculate the median image
                     el_mf = scipy.signal.medfilt(el, 5)
                     el_mf_large = scipy.signal.medfilt(el_mf, 15)
                     # LR: this is a modified version I was experimenting with. For the version 
                     #     written by Nick, see the new_from_chuck part of this code
                     # sky sub
                     el_sky_sub = el_mf - el_mf_large
                     # add a constant value
                     el_plus_constant = el_sky_sub + 100

                     bad = np.abs(el - el_mf) / np.abs(el_plus_constant) > 50.0
                     el[bad] = 0.0
                     it[bad] = 0.0

                     electrons[i,:,:] = el
                     itimes[i,:,:] = it

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files) 


    ''' Now handle variance '''
    numreads = header["READS0"]
    RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain
    RN = Detector.RN / np.sqrt(numreads)

    var = (electrons + RN**2) 

    ''' Now mask out bad pixels '''
    electrons[data.mask] = np.nan
    var[data.mask] = np.inf

    if "RN" in header:
        error("RN Already populated in header")
        raise Exception("RN Already populated in header")
    header['RN'] = ("%1.3f" , "Read noise in e-")
    header['NUMFRM'] = (Nframe, 'Typical number of frames in stack')


    header['BUNIT'] = 'ELECTRONS/SECOND'
    IO.writefits(np.float32(electrons/itimes), maskname, "eps_%s" % (outname),
                 options, header=header, overwrite=True)

    # Update itimes after division in order to not introduce nans
    itimes[data.mask] = 0.0

    header['BUNIT'] = 'ELECTRONS^2'
    IO.writefits(var, maskname, "var_%s" % (outname),
                 options, header=header, overwrite=True, lossy_compress=True)

    header['BUNIT'] = 'SECOND'
    IO.writefits(np.float32(itimes), maskname, "itimes_%s" % (outname),
                options, header=header, overwrite=True, lossy_compress=True)

    return header, electrons, var, bs, itimes, Nframe
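The docstring's noise model can be checked with one pixel's worth of arithmetic. The gain and read-noise numbers below are illustrative placeholders, not the actual MOSFIRE Detector constants:

import numpy as np

gain, RN_e, numreads = 2.15, 21.0, 16        # e-/ADU, e-, reads: illustrative
adus_per_frame, nframe = 500.0, 9
itime_total = nframe * 120.0                 # seconds

RN = RN_e / np.sqrt(numreads)                # effective read noise in e-
electrons = adus_per_frame * gain * nframe   # total e- in the stack
var = electrons + RN**2                      # e-^2, matching the code above
eps = electrons / itime_total                # e-/s = (ADU*gain)*(Nframe/itime)
print("eps=%.2f e-/s, sigma=%.1f e-" % (eps, np.sqrt(var)))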
Example 17
def background_subtract_helper(slitno):
    '''

    Background subtraction follows the methods outlined by Kelson (2003). Here
    a background is estimated as a function of wavelength using B-splines and
    subtracted off. The assumption is that the background is primarily a
    function of wavelength, and thus by sampling the background across the
    full 2-d spectrum, the background is sampled at a much higher rate than
    the native spectral resolution of MOSFIRE.

    Formally, the assumption that the background is only a function of
    wavelength is incorrect; indeed, a "transmission function" is estimated
    from the 2d spectrum. This gives an estimate of the throughput of the
    slit, which is divided out.

    1. Extract the slit from the 2d image.
    2. Convert the 2d spectrum into a 1d spectrum
    3. Estimate transmission function

    '''

    global header, bs, edges, data, Var, itime, lam, sky_sub_out, sky_model_out, band
    tick = time.time()

    # 1
    top = np.int(edges[slitno]["top"](1024))  
    bottom = np.int(edges[slitno]["bottom"](1024)) 
    info("Background subtracting slit %i [%i,%i]" % (slitno, top, bottom))

    pix = np.arange(2048)
    xroi = slice(0,2048)
    yroi = slice(bottom, top)

    stime = itime[yroi, xroi]
    slit  = data[yroi, xroi] 
    Var[np.logical_not(np.isfinite(Var))] = np.inf

    lslit = lam[1][yroi,xroi]

    # 2
    xx = np.arange(slit.shape[1])
    yy = np.arange(slit.shape[0])

    X,Y = np.meshgrid(xx,yy)

    train_roi = slice(5,-5)
    ls = lslit[train_roi].flatten().filled(0)
    ss = slit[train_roi].flatten()
    ys = Y[train_roi].flatten()

    dl = np.ma.median(np.diff(lslit[lslit.shape[0]/2,:]))
    if dl == 0:
        return {"ok": False}

    sort = np.argsort(ls)
    ls = ls[sort]
    ys = ys[sort]

    hpps = np.array(Filters.hpp[band]) 

    diff = np.append(np.diff(ls), False)

    OK = (diff > 0.001) & (ls > hpps[0]) & (ls < hpps[1]) & (np.isfinite(ls)) \
            & (np.isfinite(ss[sort]))

    if len(np.where(OK)[0]) < 1000:
        warning("Failed on slit "+str(slitno))
        return {"ok": False}

    # 3
    pp = np.poly1d([1.0])
    ss = (slit[train_roi] / pp(Y[train_roi])).flatten()
    ss = ss[sort]

    knotstart = max(hpps[0], min(ls[OK])) + 5
    knotend = min(hpps[1], max(ls[OK])) - 5


    for i in range(3):
        try:
            delta = dl*0.9
            knots = np.arange(knotstart, knotend, delta)
            bspline = II.splrep(ls[OK], ss[OK], k=5, task=-1, t=knots)
        except ValueError:
            delta = dl*1.4
            knots = np.arange(knotstart, knotend, delta)
            try:
                bspline = II.splrep(ls[OK], ss[OK], k=5, task=-1, t=knots)
            except ValueError:
                warning("Could not construct spline on slit "+str(slitno))
                return {"ok": False}

        ll = lslit.flatten()
        model = II.splev(ll, bspline)

        oob = np.where((ll < knotstart) | (ll > knotend))
        model[oob] = np.median(ss)
        model = model.reshape(slit.shape)

        output = slit - model

        std = np.abs(output)/(np.sqrt(np.abs(model)+1))

        tOK = (std[train_roi] < 10).flatten() & \
                np.isfinite(std[train_roi]).flatten()  
        OK = OK & tOK[sort]

    return {"ok": True, "slitno": slitno, "bottom": bottom, "top": top,
            "output": output, "model": model, "bspline": bspline}
Example 18
def imcombine(filelist, out, options, method="average", reject="none",\
              lsigma=3, hsigma=3, mclip=False,\
              nlow=None, nhigh=None):
    '''Combines images in input list with optional rejection algorithms.

    Args:
        filelist: The list of files to imcombine
        out: The full path to the output file
        method: either "average" or "median" combine
        options: Options dictionary
        reject: none, minmax, sigclip
        nlow,nhigh: Parameters for minmax rejection, see iraf docs
        mclip: use median as the function to calculate the baseline values for
               sigclip rejection?
        lsigma, hsigma: low and high sigma rejection thresholds.
    
    Returns:
        None

    Side effects:
        Creates the imcombined file at location `out'
    '''
    assert method in ['average', 'median']
    if os.path.exists(out):
        os.remove(out)

    if reject == 'none':
        info('Combining files using ccdproc.combine task')
        info('  reject=none')
        for file in filelist:
            debug('  Combining: {}'.format(file))
        ccdproc.combine(filelist, out, method=method,\
                        minmax_clip=False,\
                        iraf_minmax_clip=True,\
                        sigma_clip=False,\
                        unit="adu")
        info('  Done.')
    elif reject == 'minmax':
        ## The IRAF imcombine minmax rejection behavior is different than the
        ## ccdproc minmax rejection behavior.  We are using the IRAF like
        ## behavior here.  To support this a pull request for the ccdproc
        ## package has been made:
        ##    https://github.com/astropy/ccdproc/pull/358
        ##
        ## Note that the ccdproc behavior still differs slightly from the
        ## nominal IRAF behavior in that the rejection does not consider whether
        ## any of the rejected pixels have been rejected for other reasons, so
        ## if nhigh=1 and that pixel was masked for some other reason, the
        ## new ccdproc algorithm, will not mask the next highest pixel, it will
        ## still just mask the highest pixel even if it is already masked.
        ##
        ## From IRAF (help imcombine):
        ##  nlow = 1,  nhigh = 1 (minmax)
        ##      The number of  low  and  high  pixels  to  be  rejected  by  the
        ##      "minmax"  algorithm.   These  numbers are converted to fractions
        ##      of the total number of input images so  that  if  no  rejections
        ##      have  taken  place  the  specified number of pixels are rejected
        ##      while if pixels have been rejected by masking, thresholding,  or
        ##      non-overlap,   then   the  fraction  of  the  remaining  pixels,
        ##      truncated to an integer, is used.
        ##

        ## Check that minmax rejection is possible given the number of images
        if nlow is None:
            nlow = 0
        if nhigh is None:
            nhigh = 0
        if nlow + nhigh >= len(filelist):
            warning(
                'nlow + nhigh >= number of input images.  Combining without rejection'
            )
            nlow = 0
            nhigh = 0

        if ccdproc.version.major >= 1 and ccdproc.version.minor >= 1\
           and ccdproc.version.release:
            info('Combining files using ccdproc.combine task')
            info('  reject=clip_extrema')
            info('  nlow={}'.format(nlow))
            info('  nhigh={}'.format(nhigh))
            for file in filelist:
                info('  {}'.format(file))
            ccdproc.combine(filelist, out, method=method,\
                            minmax_clip=False,\
                            clip_extrema=True,\
                            nlow=nlow, nhigh=nhigh,\
                            sigma_clip=False,\
                            unit="adu")
            info('  Done.')
        else:
            ## If ccdproc does not have new rejection algorithm in:
            ## https://github.com/astropy/ccdproc/pull/358
            ## Manually perform rejection using ccdproc.combiner.Combiner object
            info(
                'Combining files using local clip_extrema rejection algorithm')
            info('and the ccdproc.combiner.Combiner object.')
            info('  reject=clip_extrema')
            info('  nlow={}'.format(nlow))
            info('  nhigh={}'.format(nhigh))
            for file in filelist:
                info('  {}'.format(file))
            ccdlist = []
            for file in filelist:
                ccdlist.append(ccdproc.CCDData.read(file, unit='adu', hdu=0))
            c = ccdproc.combiner.Combiner(ccdlist)
            nimages, nx, ny = c.data_arr.mask.shape
            argsorted = np.argsort(c.data_arr.data, axis=0)
            mg = np.mgrid[0:nx, 0:ny]
            # Negative indices mask the nhigh highest values; indices
            # 0..nlow-1 mask the nlow lowest values.
            for i in range(-1 * nhigh, nlow):
                where = (argsorted[i, :, :].ravel(), mg[0].ravel(),
                         mg[1].ravel())
                c.data_arr.mask[where] = True
            if method == 'average':
                result = c.average_combine()
            elif method == 'median':
                result = c.median_combine()
            for key in ccdlist[0].header.keys():
                header_entry = ccdlist[0].header[key]
                if key != 'COMMENT':
                    result.header[key] = (header_entry,
                                          ccdlist[0].header.comments[key])
            hdul = result.to_hdu()
            #             print(hdul)
            #             for hdu in hdul:
            #                 print(type(hdu.data))
            hdul[0].writeto(out)
            #             result.write(out)
            info('  Done.')
    elif reject == 'sigclip':
        info('Combining files using ccdproc.combine task')
        info('  reject=sigclip')
        info('  mclip={}'.format(mclip))
        info('  lsigma={}'.format(lsigma))
        info('  hsigma={}'.format(hsigma))
        baseline_func = {False: np.mean, True: np.median}
        ccdproc.combine(filelist, out, method=method,\
                        minmax_clip=False,\
                        clip_extrema=False,\
                        sigma_clip=True,\
                        sigma_clip_low_thresh=lsigma,\
                        sigma_clip_high_thresh=hsigma,\
                        sigma_clip_func=baseline_func[mclip],\
                        sigma_clip_dev_func=np.std,\
                        )
        info('  Done.')
    else:
        raise NotImplementedError(
            '{} rejection unrecognized by MOSFIRE DRP'.format(reject))
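A hedged usage sketch for this wrapper (file names are illustrative; it relies on the ccdproc package exactly as the function above does):

flat_files = ["m120507_%04d.fits" % n for n in range(100, 110)]  # 10 frames
# IRAF-like minmax rejection, dropping the single highest and lowest pixel:
imcombine(flat_files, "combflat_raw_K.fits", options={},
          method="average", reject="minmax", nlow=1, nhigh=1)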
Example 19
def imcombine(files, maskname, options, flat, outname=None, shifts=None,
    extension=None):
    '''
    Combine the frames in the input list into a single stack. The code
    also estimates the read noise as RN/sqrt(numreads) so that the variance
    per frame is equal to (ADU + RN^2), where RN is computed in ADUs.

    Arguments:
        files[]: list of full path to files to combine
        maskname: Name of mask
        options: Options dictionary
        flat[2048x2048]: Flat field (values should all be ~ 1.0)
        outname: If set, will write (see notes below for details)
            eps_[outname].fits: electron/sec file
            itimes_[outname].fits: integration time
            var_[outname].fits: Variance files
        shifts[len(files)]: If set, will "roll" each file by the 
            amount in the shifts vector in pixels. This argument
            is used when telescope tracking is poor. If you need
            to use this, please notify Keck staff about poor 
            telescope tracking.

    Returns 6-element tuple:
        header: The combined header
        electrons [2048x2048]:  e- (in e- units)
        var [2048x2048]: electrons + RN**2 (in e-^2 units)
        bs: The MOSFIRE.Barset instance
        itimes [2048x2048]: itimes (in s units)
        Nframe: The number of frames that contribute to the summed
            arrays above. If Nframe > 5 I use the sigma-clipping
            Cosmic Ray Rejection tool. If Nframe < 5 then I drop
            the max/min elements.

    Notes:

        header -- fits header
        ADUs -- The mean # of ADUs per frame
        var -- the Variance [in adu] per frame. 
        bs -- Barset
        itimes -- The _total_ integration time in second
        Nframe -- The number of frames in a stack.

        
        Thus the number of electron per second is derived as: 
            e-/sec = (ADUs * Gain / Flat) * (Nframe/itimes)

        The total number of electrons is:
            el = ADUs * Gain * Nframe


    '''

    ADUs = np.zeros((len(files), 2048, 2048))
    itimes = np.zeros((len(files), 2048, 2048))
    prevssl = None
    prevmn = None
    patternid = None
    maskname = None

    header = None

    if shifts is None:
        shifts = np.zeros(len(files))

    warnings.filterwarnings('ignore')
    for i in xrange(len(files)):
        fname = files[i]
        thishdr, data, bs = IO.readmosfits(fname, options, extension=extension)
        itimes[i,:,:] = thishdr["truitime"]

        base = os.path.basename(fname).rstrip(".fits")
        fnum = int(base.split("_")[1])
        
        if shifts[i] == 0:
            ADUs[i,:,:] = data.filled(0.0) / flat
        else:
            ADUs[i,:,:] = np.roll(data.filled(0.0) / flat, np.int(shifts[i]), axis=0)

        ''' Construct Header'''
        if header is None:
            header = thishdr

        header["imfno%3.3i" % (fnum)] =  (fname, "img%3.3i file name" % fnum)

        map(lambda x: rem_header_key(header, x), ["CTYPE1", "CTYPE2", "WCSDIM",
            "CD1_1", "CD1_2", "CD2_1", "CD2_2", "LTM1_1", "LTM2_2", "WAT0_001",
            "WAT1_001", "WAT2_001", "CRVAL1", "CRVAL2", "CRPIX1", "CRPIX2",
            "RADECSYS"])

        for card in header.cards:
            if card == '': continue
            key,val,comment = card
            
            if key in thishdr:
                if val != thishdr[key]:
                    newkey = key + ("_img%2.2i" % fnum)
                    try: header[newkey.rstrip()] = (thishdr[key], comment)
                    except Exception: pass

        ''' Now handle error checking'''

        if maskname is not None:
            if thishdr["maskname"] != maskname:
                error("File %s uses mask '%s' but the stack is of '%s'" %
                    (fname, thishdr["maskname"], maskname))
                raise Exception("File %s uses mask '%s' but the stack is of '%s'" %
                    (fname, thishdr["maskname"], maskname))

        maskname = thishdr["maskname"]
            
        if thishdr["aborted"]:
            error("Img '%s' was aborted and should not be used" %
                    fname)
            raise Exception("Img '%s' was aborted and should not be used" %
                    fname)

        if prevssl is not None:
            if len(prevssl) != len(bs.ssl):
                # todo Improve these checks
                error("The stack of input files seems to be of "
                        "different masks")
                raise Exception("The stack of input files seems to be of "
                        "different masks")
        prevssl = bs.ssl

        if patternid is not None:
            if patternid != thishdr["frameid"]:
                error("The stack should be of '%s' frames only, but "
                        "the current image is a '%s' frame." % (patternid, 
                            thishdr["frameid"]))
                raise Exception("The stack should be of '%s' frames only, but "
                        "the current image is a '%s' frame." % (patternid, 
                            thishdr["frameid"]))

        patternid = thishdr["frameid"]


        if maskname is not None:
            if maskname != thishdr["maskname"]:
                error("The stack should be of CSU mask '%s' frames "
                        "only but contains a frame of '%s'." % (maskname,
                        thishdr["maskname"]))
                raise Exception("The stack should be of CSU mask '%s' frames "
                        "only but contains a frame of '%s'." % (maskname,
                        thishdr["maskname"]))

        maskname = thishdr["maskname"]

        if thishdr["BUNIT"] != "ADU per coadd":
            error("The units of '%s' are not in ADU per coadd and "
                    "this violates an assumption of the DRP. Some new code " 
                    "is needed in the DRP to handle the new units of "
                    "'%s'." % (fname, thishdr["BUNIT"]))
            raise Exception("The units of '%s' are not in ADU per coadd and "
                    "this violates an assumption of the DRP. Some new code " 
                    "is needed in the DRP to handle the new units of "
                    "'%s'." % (fname, thishdr["BUNIT"]))

        ''' Error checking is complete'''
        info("%s %s[%s]/%s: %5.1f s,  Shift: %i px" % (fname, maskname, patternid,
            header['filter'], np.mean(itimes[i]), shifts[i]))

    warnings.filterwarnings('always')

    # the electrons and el_per_sec arrays are:
    #   [2048, 2048, len(files)] and contain values for
    # each individual frame that is being combined.
    # These need to be kept here for CRR reasons.
    electrons = np.array(ADUs) * Detector.gain 
    el_per_sec = electrons / itimes

    output = np.zeros((2048, 2048))
    exptime = np.zeros((2048, 2048))

    numreads = header["READS0"]
    RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain
    RN = Detector.RN / np.sqrt(numreads)

    # Cosmic ray rejection code begins here. This code constructs the
    # electrons and itimes arrays.
    standard = True
    new_from_chuck = False
    # Chuck Steidel has provided a modified version of the CRR procedure. 
    # to enable it, modify the variables above.
    
    if new_from_chuck and not standard:
        if len(files) >= 5:
            print "Sigclip CRR"
            srt = np.argsort(electrons, axis=0, kind='quicksort')
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            el_per_sec = el_per_sec[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            # Construct the mean and standard deviation by dropping the top and bottom two 
            # electron fluxes. This is temporary.
            mean = np.mean(el_per_sec[1:-1,:,:], axis = 0)
            std = np.std(el_per_sec[1:-1,:,:], axis = 0)

            drop = np.where( (el_per_sec > (mean+std*4)) | (el_per_sec < (mean-std*4)) )
            print "dropping: ", len(drop[0])
            electrons[drop] = 0.0
            itimes[drop] = 0.0

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files) 

        elif len(files) > 5:
            print "WARNING: Drop min/max CRR"
            srt = np.argsort(el_per_sec,axis=0)
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            electrons = np.sum(electrons[1:-1,:,:], axis=0)
            itimes = np.sum(itimes[1:-1,:,:], axis=0)

            Nframe = len(files) - 2

        else:
            warning( "With less than 5 frames, the pipeline does NOT perform")
            warning( "Cosmic Ray Rejection.")
            # the "if false" line disables cosmic ray rejection"
            if False: 
                for i in xrange(len(files)):
                    el = electrons[i,:,:]
                    it = itimes[i,:,:]
                    el_mf = scipy.signal.medfilt(el, 5)

                    bad = np.abs(el - el_mf) / np.abs(el) > 10.0
                    el[bad] = 0.0
                    it[bad] = 0.0

                    electrons[i,:,:] = el
                    itimes[i,:,:] = it

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files) 

    if standard and not new_from_chuck:
        if len(files) >= 9:
            info("Sigclip CRR")
            srt = np.argsort(electrons, axis=0, kind='quicksort')
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            el_per_sec = el_per_sec[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            # Construct the mean and standard deviation by dropping the top and bottom two 
            # electron fluxes. This is temporary.
            mean = np.mean(el_per_sec[2:-2,:,:], axis = 0)
            std = np.std(el_per_sec[2:-2,:,:], axis = 0)

            drop = np.where( (el_per_sec > (mean+std*4)) | (el_per_sec < (mean-std*4)) )
            info("dropping: "+str(len(drop[0])))
            electrons[drop] = 0.0
            itimes[drop] = 0.0

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files) 
            


        elif len(files) > 5:
            warning( "WARNING: Drop min/max CRR")
            srt = np.argsort(el_per_sec,axis=0)
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            electrons = np.sum(electrons[1:-1,:,:], axis=0)
            itimes = np.sum(itimes[1:-1,:,:], axis=0)

            Nframe = len(files) - 2

        else:
            warning( "With less than 5 frames, the pipeline does NOT perform")
            warning( "Cosmic Ray Rejection.")
            # the "if false" line disables cosmic ray rejection"
            if False: 
                for i in xrange(len(files)):
                     el = electrons[i,:,:]
                     it = itimes[i,:,:]
                     # calculate the median image
                     el_mf = scipy.signal.medfilt(el, 5)
                     el_mf_large = scipy.signal.medfilt(el_mf, 15)
                     # LR: this is a modified version I was experimenting with. For the version 
                     #     written by Nick, see the new_from_chuck part of this code
                     # sky sub
                     el_sky_sub = el_mf - el_mf_large
                     # add a constant value
                     el_plus_constant = el_sky_sub + 100

                     bad = np.abs(el - el_mf) / np.abs(el_plus_constant) > 50.0
                     el[bad] = 0.0
                     it[bad] = 0.0

                     electrons[i,:,:] = el
                     itimes[i,:,:] = it

            
            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files)


    ''' Now handle variance '''
    numreads = header["READS0"]
    RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain
    RN = Detector.RN / np.sqrt(numreads)

    var = (electrons + RN**2) 

    ''' Now mask out bad pixels '''
    electrons[data.mask] = np.nan
    var[data.mask] = np.inf
    
    # print var[data.mask]   # leftover debug output

    if "RN" in header:
        error("RN Already populated in header")
        raise Exception("RN Already populated in header")
    header['RN'] = ("%1.3f" , "Read noise in e-")
    header['NUMFRM'] = (Nframe, 'Typical number of frames in stack')


    header['BUNIT'] = 'ELECTRONS/SECOND'
    IO.writefits(np.float32(electrons/itimes), maskname, "eps_%s" % (outname),
                 options, header=header, overwrite=True)

    # Update itimes after division in order to not introduce nans
    itimes[data.mask] = 0.0

    header['BUNIT'] = 'ELECTRONS^2'
    IO.writefits(var, maskname, "var_%s" % (outname),
                 options, header=header, overwrite=True, lossy_compress=True)

    header['BUNIT'] = 'SECOND'
    IO.writefits(np.float32(itimes), maskname, "itimes_%s" % (outname),
                options, header=header, overwrite=True, lossy_compress=True)

    return header, electrons, var, bs, itimes, Nframe