Example #1
def readfits(path, use_bpm=False):
    '''Read a fits file from path and return a tuple of (header, data).'''

    if os.path.exists(path + ".gz"):
        path = path + ".gz"

    if not os.path.exists(path):
        error("The file at path '%s' does not exist." % path)
        raise Exception("The file at path '%s' does not exist." % path)

    hdulist = pf.open(path)
    header = hdulist[0].header
    data = hdulist[0].data
    datasec = ""
    try:
        datasec = header["DATASEC"]
        warning("%s contains a DATASEC keyword not compatible with the pipeline" % path)
        warning("The content of the keyword will be erased on the reduced data")
        del header["DATASEC"]
    except:
        pass
    if use_bpm:
        theBPM = badpixelmask()
        data = np.ma.masked_array(data, theBPM, fill_value=0)

    return (header, data)
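
A minimal usage sketch (an illustration, not part of the source): it assumes the module-level imports and helpers this function relies on (pf for pyfits, np for numpy, badpixelmask, error, warning), and the path is illustrative only.

# Hypothetical usage of readfits(); the path is illustrative.
header, data = readfits("/data/mosfire/m120507_0123.fits", use_bpm=True)
maskname = header["MASKNAME"]    # e.g. read a keyword from the primary header
n_masked = data.mask.sum()       # data is a numpy masked array when use_bpm=True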
Example #2
    def scislit_to_csuslit(self, scislit):
        """Convert a science slit number to a mechanical slit list"""
        if (scislit < 1) or (scislit > len(self.ssl) + 1):
            error("The requested slit number (%i) does not exist" % scislit)
            raise Exception("The requested slit number (%i) does not exist" % scislit)

        return self.scislit_to_slit[scislit - 1]
Example #3
def csu_mm_to_pix(x_mm, slitno, Please_Use=False):
    '''Convert a slit's position into a pixel value. This is a linear approximation to a sixth order polynomial fit by ccs.
    Positions are indexed from 1: 1 .. 2048
    '''

    if Please_Use == False:
        error(
            "Use csu_mm_to_pix_poly (a polynomial fit) rather than csu_mm_to_pix (a linear fit)"
        )
        raise Exception(
            "Use csu_mm_to_pix_poly (a polynomial fit) rather than csu_mm_to_pix (a linear fit)"
        )
        return

    # _kfp is keck focal plane
    centerx = 137.400
    x_kfp = (centerx - x_mm) * tempscale
    y_kfp = 5.8 * mm * (numslits / 2. - slitno + 0.35) * tempscale

    path = os.path.join(os.environ["MOSPATH"], "platescale",
                        "linear_pix2mm_120k.db")
    #
    return mosfire_geoxytran(x_kfp,
                             y_kfp,
                             database=path,
                             transform="linear_pix2mm_120k")
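
The Please_Use guard forces callers to opt in explicitly before using this linear approximation. A hypothetical call is sketched below; it also relies on the module-level tempscale, mm and numslits constants, the MOSPATH environment variable, and mosfire_geoxytran.

# Hypothetical call; the millimetre position and slit number are illustrative.
# Without Please_Use=True the function raises instead of converting.
pix = csu_mm_to_pix(137.4, slitno=23, Please_Use=True)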
Example #4
def readfits(path, use_bpm=False):
    '''Read a fits file from path and return a tuple of (header, data).'''

    if os.path.exists(path + ".gz"):
        path = path + ".gz"

    if not os.path.exists(path):
        error("The file at path '%s' does not exist." % path)
        raise Exception("The file at path '%s' does not exist." % path)

    hdulist = pf.open(path)
    header = hdulist[0].header
    data = hdulist[0].data
    datasec = ""
    try:
        datasec = header["DATASEC"]
        debug(
            "%s contains a DATASEC keyword not compatible with the pipeline" %
            path)
        debug("The content of the keyword will be erased on the reduced data")
        del header["DATASEC"]
    except:
        pass
    if use_bpm:
        theBPM = badpixelmask()
        data = np.ma.masked_array(data, theBPM, fill_value=0)

    return (header, data)
Example #5
    def scislit_to_csuslit(self, scislit):
        '''Convert a science slit number to a mechanical slit list'''
        if (scislit < 1) or (scislit > len(self.ssl) + 1):
            error("The requested slit number (%i) does not exist" % scislit)
            raise Exception("The requested slit number (%i) does not exist" %
                            scislit)

        return self.scislit_to_slit[scislit - 1]
Example #6
def fix_long2pos_headers(filelist):
    '''Fixes old long2pos observations which have an incorrect set of keywords'''
    files = list_file_to_strings(filelist)
    # Print the filenames to Standard-out
    info("Fixing long2pos headers for files in " + str(filelist))

    # Iterate through files
    for fname in files:
        if os.path.isabs(fname): path = fname
        else: path = os.path.join(fname_to_path(fname, options), fname)

        hdulist = pf.open(path, mode='update')
        header = hdulist[0].header

        # Determine whether this file really needs to be updated (for example, this prevents a second update of an already updated file)
        if 'long2pos' in header['MASKNAME'] and header[
                'FRAMEID'] == 'object' and (header['PATTERN'] == 'long2pos'
                                            or header['PATTERN'] == 'Stare'):
            info("File " + str(fname) + " will be updated")

            # make a copy of the original file
            newname = path + ".original"
            info("copying ... " + str(path))
            info("into ...... " + str(newname))
            shutil.copyfile(path, newname)
            if not os.path.exists(newname):
                error(
                    "Error in generating original file:  '%s' does not exist (could not be created)."
                    % newname)
                raise Exception(
                    "Error in generating original file:  '%s' does not exist (could not be created)."
                    % newname)

            #updating header
            # assign FRAMEID to narrow slits
            if header['YOFFSET'] == 21 or header['YOFFSET'] == -7:
                header['FRAMEID'] = "B"
            if header['YOFFSET'] == -21 or header['YOFFSET'] == 7:
                header['FRAMEID'] = "A"

            # assign FRAMEID to wide slits
            if header['YOFFSET'] == 14 or header['YOFFSET'] == -14:
                header['FRAMEID'] = "A"

            #reverse sign of offsets for narrow slits
            if header['YOFFSET'] == -21:
                header['YOFFSET'] = 7
            if header['YOFFSET'] == 21:
                header['YOFFSET'] = -7

            #transform Xoffset from pixels to arcseconds
            header['XOFFSET'] = header['XOFFSET'] * 0.18
        else:
            info("File " + str(fname) + " does not need to be updated")
        hdulist.flush()
        hdulist.close()
Example #7
    def science_slit_to_pixel(self, scislit):
        """Convert a science slit number to spatial pixel"""

        if (scislit < 1) or (scislit > len(self.ssl)):
            error("The requested science slit number %i does not exist" % scislit)
            raise Exception("The requested science slit number %i does not exist" % scislit)

        slits = self.scislit_to_csuslit(scislit)
        debug(str(slits))
        return self.csu_slit_to_pixel(np.median(slits))
Example #8
    def csu_slit_to_pixel(self, slit):
        """Convert a CSU slit number to spatial pixel"""
        y0 = 2013

        if (slit < 1) or (slit > 46):
            error("The requested slit number (%i) does not exist" % slit)
            raise Exception("The requested slit number (%i) does not exist" % slit)

        pixel = np.int(y0 - (slit - 1) * 44.22)
        return pixel
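
A worked check of the linear mapping above (a sketch; np.int truncates the result):

# slit  1 -> int(2013 -  0 * 44.22) = 2013
# slit 23 -> int(2013 - 22 * 44.22) = 1040
# slit 46 -> int(2013 - 45 * 44.22) = 23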
Example #9
    def set_header(self, header, ssl=None, msl=None, asl=None, targs=None):
        '''Takes "header", a FITS header dictionary, and converts it to a Barset'''
        self.pos = np.array(IO.parse_header_for_bars(header))
        self.set_pos_pix()

        self.ssl = ssl
        self.msl = msl
        self.asl = asl
        self.targs = targs

        def is_alignment_slit(slit):
            return (np.float(slit["Target_Priority"]) < 0)

        # If len(ssl) == 0 then the header is for a long slit
        if (header['MASKNAME'] == 'long2pos') or (header['MASKNAME'] == 'long2pos_specphot'):
            info("long2pos mode in CSU slit determination")
            self.long2pos_slit = True

        if (len(ssl) == 0):
        
            self.long_slit = True

            start = np.int(msl[0]["Slit_Number"])
            stop = np.int(msl[-1]["Slit_Number"])


            for mech_slit in msl:
                mech_slit["Target_in_Slit"] = "long"

            self.ssl = np.array([("1", "??", "??", "??", "??", "??", "??", msl[0]['Slit_width'],
                (stop-start+1)*7.6, "0", "long", "0")],
                dtype= [ ('Slit_Number', '|S2'), 
                ('Slit_RA_Hours', '|S2'), ('Slit_RA_Minutes', '|S2'), ('Slit_RA_Seconds', '|S5'),
                ('Slit_Dec_Degrees', '|S3'), ('Slit_Dec_Minutes', '|S2'), ('Slit_Dec_Seconds', '|S5'), 
                ('Slit_width', '|S5'), ('Slit_length', '|S5'), ('Target_to_center_of_slit_distance', '|S5'), 
                ('Target_Name', '|S80'), ('Target_Priority', '|S1')])
            self.scislit_to_slit = [ np.arange(start,stop) ]
            ssl = None

        # Create a map between scislit number and mechanical slit
        # recall that slits count from 1
        if ssl is not None:
            prev = self.msl[0]["Target_in_Slit"]

            v = []

            for science_slit in ssl:
                targ = science_slit["Target_Name"]
                v.append([int(x) for x in self.msl.field("Slit_Number")[np.where(self.msl.Target_in_Slit == targ)[0]]])
            self.scislit_to_slit = v

            if (len(self.scislit_to_slit) != len(ssl)) and not (self.long_slit
                    and len(self.scislit_to_slit) == 1):
                error("SSL should match targets in slit")
                raise Exception("SSL should match targets in slit")
Example #10
    def csu_slit_to_pixel(self, slit):
        '''Convert a CSU slit number to spatial pixel'''
        y0 = 2013

        if (slit < 1) or (slit > 46):
            error("The requested slit number (%i) does not exist" % slit)
            raise Exception("The requested slit number (%i) does not exist" %
                            slit)

        pixel = np.int(y0 - (slit - 1) * 44.22)
        return pixel
Example #11
    def csu_slit_center(self, slitno):
        """Returns the mechanical (middle) position of a csu slit in mm"""

        if (slitno < 1) or (slitno > 46):
            error("The requested slit number (%i) does not exist" % slitno)
            raise Exception("The requested slit number (%i) does not exist" % slitno)

        os = self.pos[slitno * 2 - 2]
        es = self.pos[slitno * 2 - 1]

        return (os + es) / 2.0
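
An indexing sketch (an assumption based on the rest of this listing: self.pos holds the 92 bar positions parsed from the header, two bars per slit):

# Hypothetical worked indexing for csu_slit_center():
# slit  1 -> average of pos[0]  and pos[1]
# slit 23 -> average of pos[44] and pos[45]
# slit 46 -> average of pos[90] and pos[91]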
Example #12
    def science_slit_to_pixel(self, scislit):
        '''Convert a science slit number to spatial pixel'''

        if (scislit < 1) or (scislit > len(self.ssl)):
            error("The requested science slit number %i does not exist" \
                    % scislit)
            raise Exception("The requested science slit number %i does not exist" \
                    % scislit)

        slits = self.scislit_to_csuslit(scislit)
        debug(str(slits))
        return self.csu_slit_to_pixel(np.median(slits))
Example #13
    def csu_slit_center(self, slitno):
        '''Returns the mechanical (middle) position of a csu slit in mm'''

        if (slitno < 1) or (slitno > 46):
            error("The requested slit number (%i) does not exist" % slitno)
            raise Exception("The requested slit number (%i) does not exist" %
                            slitno)

        os = self.pos[slitno * 2 - 2]
        es = self.pos[slitno * 2 - 1]

        return (os + es) / 2.
Example #14
def fix_long2pos_headers(filelist):
    '''Fixes old long2pos observations which have an incorrect set of keywords'''
    files = list_file_to_strings(filelist)
    # Print the filenames to Standard-out
    info("Fixing long2pos headers for files in "+str(filelist))

    # Iterate through files
    for fname in files:
        if os.path.isabs(fname): path = fname
        else: path = os.path.join(fname_to_path(fname, options), fname)

        hdulist = pf.open(path, mode='update')
        header = hdulist[0].header

        # Determine whether this file really needs to be updated (for example, this prevents a second update of an already updated file)
        if 'long2pos' in header['MASKNAME'] and header['FRAMEID']=='object' and (header['PATTERN']=='long2pos' or header['PATTERN']=='Stare'):
            info( "File "+str(fname)+" will be updated")

            # make a copy of the original file
            newname = path+".original"
            info("copying ... "+str(path))
            info("into ...... "+str(newname))
            shutil.copyfile(path,newname)
            if not os.path.exists(newname):
                error("Error in generating original file:  '%s' does not exist (could not be created)." % newname)
                raise Exception("Error in generating original file:  '%s' does not exist (could not be created)." % newname)            

            #updating header
            # assign FRAMEID to narrow slits
            if header['YOFFSET']==21 or header['YOFFSET']==-7:
                header['FRAMEID']="B"
            if header['YOFFSET']==-21 or header['YOFFSET']==7:
                header['FRAMEID']="A"

            # assign FRAMEID to wide slits
            if header['YOFFSET']==14 or header['YOFFSET']==-14:
                header['FRAMEID']="A"
                
            #reverse sign of offsets for narrow slits
            if header['YOFFSET']==-21:
                header['YOFFSET']=7
            if header['YOFFSET']==21:
                header['YOFFSET']=-7

            #transform Xoffset from pixels to arcseconds
            header['XOFFSET'] = header['XOFFSET']*0.18
        else:
            info("File "+str(fname)+" does not need to be updated")
        hdulist.flush()
        hdulist.close()
Example #15
def floatcompress(data, ndig=14):
    '''Adapted from Finkbeiner IDL routine floatcompress'''

    t = data.dtype
    if not ((t == 'float32') or (t == 'float64')):
        error("Only works on floating point numbers")
        raise Exception("Only works on floating point numbers")

    wzer = np.where(data == 0)
    data[wzer] = 1.0

    log2 = np.ceil(np.log(np.abs(data)) / np.log(2.0))
    mant = np.round(data / 2.0**(log2 - ndig)) / 2.0**ndig
    out = mant * 2.0**log2

    out[wzer] = 0.0
    return out
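
A minimal usage sketch (assuming numpy as np): rounding the mantissa to ndig bits keeps the values numerically close to the originals while making the array compress much better on disk. Note that the function also overwrites exact zeros in the input array in place.

# Hypothetical usage of floatcompress(); the array is illustrative.
import numpy as np
a = np.random.standard_normal(1000).astype('float32')
c = floatcompress(a, ndig=14)
# Rounding the mantissa to 14 bits keeps the relative error near 2**-14:
assert np.allclose(a, c, rtol=1e-3)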
Example #16
def floatcompress(data, ndig=14):
    '''Adapted from Finkbeiner IDL routine floatcompress'''

    t = data.dtype
    if not ((t == 'float32') or (t == 'float64')):
         error("Only works on floating point numbers")
         raise Exception("Only works on floating point numbers")

    wzer = np.where(data == 0)
    data[wzer] = 1.0

    log2 = np.ceil(np.log(np.abs(data)) / np.log(2.0))
    mant = np.round(data/2.0**(log2 - ndig))/2.0**ndig
    out = mant*2.0**log2

    out[wzer] = 0.0
    return out
Example #17
def csu_mm_to_pix(x_mm, slitno, Please_Use=False):
    """Convert a slit's position into a pixel value. This is a linear approximation to a sixth order polynomial fit by ccs.
    Positions are indexed from 1: 1 .. 2048
    """

    if Please_Use == False:
        error("Use csu_mm_to_pix_poly (a polynomial fit) rather than csu_mm_to_pix (a linear fit)")
        raise Exception("Use csu_mm_to_pix_poly (a polynomial fit) rather than csu_mm_to_pix (a linear fit)")
        return

    # _kfp is keck focal plane
    centerx = 137.400
    x_kfp = (centerx - x_mm) * tempscale
    y_kfp = 5.8 * mm * (numslits / 2.0 - slitno + 0.35) * tempscale

    path = os.path.join(os.environ["MOSPATH"], "platescale", "linear_pix2mm_120k.db")
    #
    return mosfire_geoxytran(x_kfp, y_kfp, database=path, transform="linear_pix2mm_120k")
Example #18
def fname_to_path(fname, options):
    '''Take a filename like m120507_0123, parse date, and return full path'''

    if os.path.isabs(fname): return fname

    yr, month, dy = fname_to_date_tuple(fname)
    path = os.path.join(options["indir"], yr + month + "%2.2i" % dy)
    if not os.path.exists(os.path.join(path, fname)):
        path = os.path.join(options["indir"], yr + month + "%2.2i" % (dy - 1))

        if not os.path.exists(path):
            error("Could not find file '%s' in '%s' out of parsed "
                  "%s, %s, %s" % (fname, options["indir"], yr, month, dy))
            raise Exception("Could not find file '%s' in '%s' out of parsed "
                            "%s, %s, %s" %
                            (fname, options["indir"], yr, month, dy))

    return path
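
A hypothetical call, assuming options["indir"] points at the raw-data root and that fname_to_date_tuple, defined elsewhere in this module, parses the date out of the m-prefixed filename:

# Hypothetical usage; the directory and filename are illustrative only.
options = {"indir": "/scr2/mosfire/raw"}
night_path = fname_to_path("m120507_0123.fits", options)
# night_path is options["indir"] joined with the parsed night directory,
# falling back to the previous night if the file is not found there.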
Example #19
def fit_edge_poly(xposs, xposs_missing, yposs, order):
    '''
    fit_edge_poly fits a polynomial to the measured slit edges.
    This polynomial is used to extract spectra.

    fit_edge_poly first fits a parabola and uses it to fill in the
    missing data points.

    Inputs:
    xposs, yposs [N]: The x and y positions of the slit edge [pix]
    order: the polynomial order
    '''

    # First fit low order polynomial to fill in missing data
    fun = np.poly1d(Fit.polyfit_clip(xposs, yposs, 2))

    xposs = np.append(xposs, xposs_missing)
    yposs = np.append(yposs, fun(xposs_missing))

    # Remove any fits that deviate wildly from the 2nd order polynomial
    ok = np.abs(yposs - fun(xposs)) < 1
    if not ok.any():
            error("Flat is not well illuminated? Cannot find edges")
            raise Exception("Flat is not well illuminated? Cannot find edges")

    # Now refit to user requested order
    fun = np.poly1d(Fit.polyfit_clip(xposs[ok], yposs[ok], order))
    res = fun(xposs[ok]) - yposs[ok]
    sd = np.std(res)
    ok = np.abs(res) < 2*sd


    # Check to see if the slit edge function is sane;
    # if it's not, then we fix it.
    pix = np.arange(2048)
    V = fun(pix)
    if np.abs(V.max() - V.min()) > 10:
        info ("Forcing a horizontal slit edge")
        print "Forcing a horizontal slit edge"
        fun = np.poly1d(np.median(yposs[ok]))


    return (fun, res, sd, ok)
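
A small sketch of how the fitter might be exercised (an illustration only; it assumes numpy as np and the pipeline's Fit.polyfit_clip helper are importable):

# Hypothetical inputs: measured edge rows plus columns where the measurement
# failed and must be filled in from the low-order fit.
import numpy as np
xposs = np.arange(10., 1500., 100.)             # columns with a measured edge [pix]
yposs = 1004. + 0.05 * np.sin(xposs / 500.)     # edge row at each column [pix]
xposs_missing = np.arange(1510., 2000., 100.)   # columns with no measurement
fun, res, sd, ok = fit_edge_poly(xposs, xposs_missing, yposs, order=4)
row_at_center = fun(1024.)                      # evaluate the fitted edge at column 1024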
Example #20
def fit_edge_poly(xposs, xposs_missing, yposs, order):
    '''
    fit_edge_poly fits a polynomial to the measured slit edges.
    This polynomial is used to extract spectra.

    fit_edge_poly first fits a parabola and uses it to fill in the
    missing data points.

    Inputs:
    xposs, yposs [N]: The x and y positions of the slit edge [pix]
    order: the polynomial order
    '''

    # First fit low order polynomial to fill in missing data
    fun = np.poly1d(Fit.polyfit_clip(xposs, yposs, 2))

    xposs = np.append(xposs, xposs_missing)
    yposs = np.append(yposs, fun(xposs_missing))

    # Remove any fits that deviate wildly from the 2nd order polynomial
    ok = np.abs(yposs - fun(xposs)) < 1
    if not ok.any():
            error("Flat is not well illuminated? Cannot find edges")
            raise Exception("Flat is not well illuminated? Cannot find edges")

    # Now refit to user requested order
    fun = np.poly1d(Fit.polyfit_clip(xposs[ok], yposs[ok], order))
    res = fun(xposs[ok]) - yposs[ok]
    sd = np.std(res)
    ok = np.abs(res) < 2*sd


    # Check to see if the slit edge function is sane;
    # if it's not, then we fix it.
    pix = np.arange(2048)
    V = fun(pix)
    if np.abs(V.max() - V.min()) > 10:
        info ("Forcing a horizontal slit edge")
        print "Forcing a horizontal slit edge"
        fun = np.poly1d(np.median(yposs[ok]))


    return (fun, res, sd, ok)
Example #21
def fname_to_path(fname, options):
    '''Take a filename like m120507_0123, parse date, and return full path'''

    if os.path.isabs(fname): return fname

    yr, month, dy = fname_to_date_tuple(fname)
    path = os.path.join(options["indir"], yr + month + "%2.2i" % dy)
    if not os.path.exists(os.path.join(path, fname)):
        path = os.path.join(options["indir"], yr + month + "%2.2i" % (dy-1))

        if not os.path.exists(path):
            error("Could not find file '%s' in '%s' out of parsed "
                "%s, %s, %s" % (fname,
                options["indir"], yr, month, dy))
            raise Exception("Could not find file '%s' in '%s' out of parsed "
                "%s, %s, %s" % (fname,
                options["indir"], yr, month, dy))

    return path
Example #22
def load_edges(maskname, band, options):
    ''' Load the slit edge functions. Returns (edges, metadata) '''
    if False:
        path = os.path.join(options["outdir"], maskname)
        fn = os.path.join(path, "slit-edges_{0}.npy".format(band))

    fn = "slit-edges_{0}.npy".format(band)
    try:
        edges = np.load(fn)
    except:
        error("Cannot load slit edges file")
        raise Exception("Cannot load slit edges file")
    edges,meta = edges[0:-1], edges[-1]

    if meta['maskname'] != maskname:
        warning("The maskname for the edge file '%s' does not match "
                "that in the edge file '%s'" % (maskname, meta['maskname']))
        warning("Continuing")

    return edges, meta
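
A hypothetical usage sketch: the file name follows the slit-edges_{band}.npy convention written by handle_flats later in this listing, and because the if False: block above is skipped, the options argument is not actually touched on this code path; the maskname and band values are illustrative.

# Hypothetical usage of load_edges(); the values are illustrative.
edges, meta = load_edges("some_maskname", "K", {})
for edge in edges:
    top, bottom = edge["top"], edge["bottom"]   # np.poly1d slit-edge functions
    rows_at_center = (bottom(1024), top(1024))  # edge rows at detector column 1024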
Example #23
def load_edges(maskname, band, options):
    ''' Load the slit edge functions. Returns (edges, metadata) '''
    if False:
        path = os.path.join(options["outdir"], maskname)
        fn = os.path.join(path, "slit-edges_{0}.npy".format(band))

    fn = "slit-edges_{0}.npy".format(band)
    try:
        edges = np.load(fn)
    except:
        error("Cannot load slit edges file")
        raise Exception("Cannot load slit edges file")
    edges, meta = edges[0:-1], edges[-1]

    if meta['maskname'] != maskname:
        warning("The maskname for the edge file '%s' does not match "
                "that in the edge file '%s'" % (maskname, meta['maskname']))
        warning("Continuing")

    return edges, meta
Example #24
def xcor(a, b, lags):

    if len(a) != len(b):
        error("cross correlation (xcor) requires a and b "
              "to be of same length")
        raise Exception("cross correlation (xcor) requires a and b "
                        "to be of same length")
    cors = np.zeros(len(lags))

    a_pad = np.zeros(len(a) + len(lags))
    b_pad = np.zeros(len(b) + len(lags))

    st = np.argmin(np.abs(lags))
    a_pad[st:st + len(a)] = a
    b_pad[st:st + len(b)] = b

    for i in range(len(lags)):
        cors[i] = np.correlate(a_pad, np.roll(b_pad, lags[i]), 'valid')

    return cors
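
A minimal sketch of the cross-correlation helper (assuming numpy as np): the returned array peaks at the lag that rolls b back onto a.

# Hypothetical usage: b is a copy of a shifted forward by 3 samples.
import numpy as np
a = np.zeros(100)
a[50] = 1.0
b = np.roll(a, 3)
lags = np.arange(-10, 11)
cors = xcor(a, b, lags)
best_lag = lags[np.argmax(cors)]   # expected to be -3, the roll that realigns b with a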
Example #25
def xcor(a,b,lags):

    if len(a) != len(b):
        error("cross correlation (xcor) requires a and b "
                "to be of same length")
        raise Exception(
                "cross correlation (xcor) requires a and b "
                "to be of same length")
    cors = np.zeros(len(lags))

    a_pad = np.zeros(len(a)+len(lags))
    b_pad = np.zeros(len(b)+len(lags))

    st = np.argmin(np.abs(lags))
    a_pad[st:st+len(a)] = a
    b_pad[st:st+len(b)] = b

    for i in range(len(lags)):
        cors[i] = np.correlate(a_pad, np.roll(b_pad, lags[i]), 'valid')

    return cors
Example #26
def load_lambdaslit(fnum, maskname, band, options):
    ''' Load the wavelength coefficient functions '''
    if False:
        path = os.path.join(options["outdir"], maskname)
        fn = os.path.join(path, "lambda_solution_{0}.fits".format(fnum))

    fn = "lambda_solution_{0}.fits".format(fnum)

    print fn

    ret = readfits(fn, options)
    if ret[0]['filter'] != band:
        error("Band name mismatch")
        raise Exception("band name mismatch")

    if ret[0]['maskname'] != maskname:
        warning("The maskname for the edge file '%s' does not match "
                "that in the edge file '%s'" % (maskname, ret[0]['maskname']))
        warning("Continuing")

    return readfits(fn, options)
Example #27
def parse_header_for_bars(header):
    '''Parse {header} and convert to an array of CSU bar positions in mm. If
    the position is negative it means the bar status is not OK'''

    poss = []
    posfmt = "B%2.2iPOS"
    statfmt = "B%2.2iSTAT"
    for i in range(1, CSU.numbars + 1):
        p = posfmt % i
        s = statfmt % i
        pos = np.float32(header[p])
        if (header[s] != 'OK') and (header[s] != 'SETUP'):
            pos *= -1
        poss.append(pos)

    if len(poss) != CSU.numbars:
        error("Found %i bars instead of %i" % (len(poss), CSU.numbars))
        raise CSU.MismatchError("Found %i bars instead of %i" %
                                (len(poss), CSU.numbars))

    return np.array(poss)
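
A hypothetical sketch of the header layout this expects: one B%02iPOS / B%02iSTAT keyword pair per CSU bar, with CSU.numbars assumed to be 92 as the rest of this listing implies.

# Hypothetical header built as a plain dict; a real FITS header works the same way.
header = {}
for i in range(1, 93):
    header["B%2.2iPOS" % i] = 137.4        # bar position in mm (illustrative)
    header["B%2.2iSTAT" % i] = 'OK'
header["B05STAT"] = 'FAILED'               # a bad bar is flagged by a negative position
poss = parse_header_for_bars(header)
# poss[4] comes back as roughly -137.4, marking bar 5 as not OK.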
Example #28
def load_lambdaslit(fnum, maskname, band, options):
    ''' Load the wavelength coefficient functions '''
    if False:
        path = os.path.join(options["outdir"], maskname)
        fn = os.path.join(path, "lambda_solution_{0}.fits".format(fnum))

    fn = "lambda_solution_{0}.fits".format(fnum)

    print fn

    ret = readfits(fn, options)
    if ret[0]['filter'] != band:
        error ("Band name mismatch")
        raise Exception("band name mismatch")

    if ret[0]['maskname'] != maskname:
        warning("The maskname for the edge file '%s' does not match "
                "that in the edge file '%s'" % (maskname, ret[0]['maskname']))
        warning("Continuing")

    
    return readfits(fn, options)
Example #29
def do_fit(data, residual_fun=residual_single):
    '''do_fit estimates parameters of fit_pair or fit_single.
    
    Use as follows:

    p0 = [0.5, 6, 1.1, 3, 1]
    ys = fit_single(p0, xs)
    lsf = do_fit(ys, residual_single)
    res = np.sum((lsf[0] - p0)**2)

    '''


    xs = np.arange(len(data))

    if residual_fun==residual_single:
        if data[0] > data[-1]:
            p0 = [0.5, len(data)/2., max(data), 0.0, 3.0]
        else:
            p0 = [0.5, len(data)/2., -max(data), 0.0, 3.0]
    elif residual_fun==residual_pair:
        p0 = [0.5, np.argmax(data), max(data), 0.0, 4.0]
    elif residual_fun==residual_disjoint_pair:
        width = 5
        p0 = [0.5, 
                np.argmin(data), 
                -np.ma.median(data[0:3]), 
                -np.ma.median(data[-4:-1]), 
                np.ma.median(data), 
                width]
    else:
        error("residual_fun not specified")
        raise Exception("residual_fun not specified")


    lsf = optimize.leastsq(residual_fun, p0, args=(xs, data), 
            full_output=True)

    return lsf
Example #30
def parse_header_for_bars(header):
    '''Parse {header} and convert to an array of CSU bar positions in mm. If
    the position is negative it means the bar status is not OK'''

    poss = []
    posfmt = "B%2.2iPOS"
    statfmt = "B%2.2iSTAT"
    for i in range(1,CSU.numbars+1):
        p = posfmt % i
        s = statfmt % i
        pos = np.float32(header[p])
        if (header[s] != 'OK') and (header[s] != 'SETUP'):
            pos *= -1
        poss.append(pos)

    if len(poss) != CSU.numbars:
        error("Found %i bars instead of %i" % 
                (len(poss), CSU.numbars))
        raise CSU.MismatchError("Found %i bars instead of %i" % 
                (len(poss), CSU.numbars))
        

    return np.array(poss)
Example #31
def do_fit(data, residual_fun=residual_single):
    '''do_fit estimates parameters of fit_pair or fit_single.
    
    Use as follows:

    p0 = [0.5, 6, 1.1, 3, 1]
    ys = fit_single(p0, xs)
    lsf = do_fit(ys, residual_single)
    res = np.sum((lsf[0] - p0)**2)

    '''

    xs = np.arange(len(data))

    if residual_fun == residual_single:
        if data[0] > data[-1]:
            p0 = [0.5, len(data) / 2., max(data), 0.0, 3.0]
        else:
            p0 = [0.5, len(data) / 2., -max(data), 0.0, 3.0]
    elif residual_fun == residual_pair:
        p0 = [0.5, np.argmax(data), max(data), 0.0, 4.0]
    elif residual_fun == residual_disjoint_pair:
        width = 5
        p0 = [
            0.5,
            np.argmin(data), -np.ma.median(data[0:3]),
            -np.ma.median(data[-4:-1]),
            np.ma.median(data), width
        ]
    else:
        error("residual_fun not specified")
        raise Exception("residual_fun not specified")

    lsf = optimize.leastsq(residual_fun, p0, args=(xs, data), full_output=True)

    return lsf
Example #32
def read_drpfits(maskname, fname, options):
    '''Read a fits file written by the DRP'''

    if os.path.exists(fname): path = fname
    elif os.path.exists(fname + ".gz"): path = fname + ".gz"
    else: path = os.path.join(fname_to_path(fname, options), fname)

    if os.path.exists(path + ".gz"):
        path = path + ".gz"

    if not os.path.exists(path):
        error("The file at path '%s' does not exist." % path)
        raise Exception("The file at path '%s' does not exist." % path)

    hdulist = pf.open(path)
    output = []

    for hdu in hdulist:
        output.append(hdu.header)

        if "DRPVER" in hdu.header:
            itsver = hdu.header["DRPVER"]
            if itsver != MOSFIRE.__version__:
                error("The file requested '%s' uses DRP version %f "
                    "but the current DRP version is %f. There might be an "
                    "incompatibility" % (path, itsver, MOSFIRE.__version__))
                raise Exception("The file requested '%s' uses DRP version %f "
                    "but the current DRP version is %f. There might be an "
                    "incompatibility" % (path, itsver, MOSFIRE.__version__))

        else:
            error("The file requested '%s' does not seem to be "
                    "the result of this DRP. This should never be the "
                    " case.")
            raise Exception("The file requested '%s' does not seem to be "
                    "the result of this DRP. This should never be the "
                    " case.")

        output.append(hdu.data)


    return output
Example #33
def read_drpfits(maskname, fname, options):
    '''Read a fits file written by the DRP'''

    if os.path.exists(fname): path = fname
    elif os.path.exists(fname + ".gz"): path = fname + ".gz"
    else: path = os.path.join(fname_to_path(fname, options), fname)

    if os.path.exists(path + ".gz"):
        path = path + ".gz"

    if not os.path.exists(path):
        error("The file at path '%s' does not exist." % path)
        raise Exception("The file at path '%s' does not exist." % path)

    hdulist = pf.open(path)
    output = []

    for hdu in hdulist:
        output.append(hdu.header)

        if "DRPVER" in hdu.header:
            itsver = hdu.header["DRPVER"]
            if itsver != MOSFIRE.__version__:
                error("The file requested '%s' uses DRP version %f "
                      "but the current DRP version is %f. There might be an "
                      "incompatibility" % (path, itsver, MOSFIRE.__version__))
                raise Exception(
                    "The file requested '%s' uses DRP version %f "
                    "but the current DRP version is %f. There might be an "
                    "incompatibility" % (path, itsver, MOSFIRE.__version__))

        else:
            error("The file requested '%s' does not seem to be "
                  "the result of this DRP. This should never be the "
                  " case.")
            raise Exception("The file requested '%s' does not seem to be "
                            "the result of this DRP. This should never be the "
                            " case.")

        output.append(hdu.data)

    return output
Example #34
def find_longslit_edges(data, header, bs, options, edgeThreshold=450, longslit=None):


    y = 2034
    DY = 44.25


    toc = 0
    ssl = bs.ssl

    slits = []

    top = [0., np.float(Options.npix)]

    start_slit_num = int(bs.msl[0]['Slit_Number'])-1
    if start_slit_num > 0:
        y -= DY * start_slit_num
    # if the mask is a long slit, the default y value will be wrong. Set instead to be the middle
    if bs.long_slit:
        try:
            y=longslit["yrange"][1]
        except:
            error("Longslit reduction mode is specified, but the row position has not been specified. Defaulting to "+str(y))
            print "Longslit reduction mode is specified, but the row position has not been specified. Defaulting to "+str(y)


    # Count and check that the # of objects in the SSL matches that of the MSL
    # This is purely a safety check
    numslits = np.zeros(len(ssl))
    for i in xrange(len(ssl)):
        slit = ssl[i]
        M = np.where(slit["Target_Name"] == bs.msl["Target_in_Slit"])

        numslits[i] = len(M[0])
    numslits = np.array(numslits)
    info("Number of slits allocated for this longslit: "+str(np.sum(numslits)))

    # now begin steps outline above
    results = []
    result = {}

    result["Target_Name"] = ssl[0]["Target_Name"]

    # 1 Defines a polynomial of degree 0, which is a constant, with the value of the top of the slit
    result["top"] = np.poly1d([longslit["yrange"][1]])
    
    topfun = np.poly1d([longslit["yrange"][1]]) # this is a constant function with c = top of the slit
    botfun = np.poly1d([longslit["yrange"][0]]) # this is a constant function with c = bottom of the slit

    # xposs_top = [10, 110, 210, ..., 1810, 1910]
    xposs_top = np.arange(10,2000,100)
    xposs_bot = np.arange(10,2000,100)
    # yposs_top = [1104, 1104, ..., 1104]; the constant polynomial evaluated at the X positions
    yposs_top = topfun(xposs_top)
    yposs_bot = botfun(xposs_bot)

    
    ''' Deal with the current slit '''
    target=0
    hpps = Wavelength.estimate_half_power_points(
                bs.scislit_to_csuslit(target+1)[0], header, bs)

    ok = np.where((xposs_top > hpps[0]) & (xposs_top < hpps[1]))

    xposs_bot = xposs_bot[ok]
    yposs_bot = yposs_bot[ok]
    xposs_top = xposs_top[ok]
    yposs_top = yposs_top[ok]

    if len(xposs_bot) == 0:
        error ("The slit edges specifications appear to be incorrect.")
        raise Exception("The slit edges specifications appear to be incorrect.")

    # bot is the polynomial that defines the shape of the bottom of the slit. In this case, we set it to a constant.
    bot = botfun.c.copy() 
    top = topfun.c.copy()


    #4
    result = {}
    result["Target_Name"] = ssl[target]["Target_Name"]
    result["xposs_top"] = xposs_top
    result["yposs_top"] = yposs_top
    result["xposs_bot"] = xposs_bot
    result["yposs_bot"] = yposs_bot
    result["top"] = np.poly1d(top)
    result["bottom"] = np.poly1d(bot)
    result["hpps"] = hpps
    result["ok"] = ok
    results.append(result)

    results.append({"version": options["version"]})

    return results
Example #35
def handle_flats(flatlist, maskname, band, options, extension=None, edgeThreshold=450, lampOffList=None, longslit=None):
    '''
    handle_flats is the primary entry point to the Flats module.

    handle_flats takes a list of individual exposure FITS files and creates:
    1. A CRR, dark subtracted, pixel-response flat file.
    2. A set of polynomials that mark the edges of a slit

    Inputs:
    flatlist: 
    maskname: The name of a mask
    band: A string indicating the band

    Outputs:

    file {maskname}/flat_2d_{band}.fits -- pixel response flat
    file {maskname}/edges.np
    '''

    tick = time.time()

    # Check
    bpos = np.ones(92) * -1

    #Retrieve the list of files to use for flat creation.
    flatlist = IO.list_file_to_strings(flatlist)
    # Print the filenames to Standard-out
    for flat in flatlist:
        info(str(flat))

    # Determine if the flat-file headers are in agreement
    for fname in flatlist:

        hdr, dat, bs = IO.readmosfits(fname, options, extension=extension)
        try: bs0
        except: bs0 = bs

        if np.any(bs0.pos != bs.pos):
            print "bs0: "+str(bs0.pos)+" bs: "+str(bs.pos)
            error("Barsets do not seem to match")
            raise Exception("Barsets do not seem to match")

        if hdr["filter"] != band:
            error ("Filter name %s does not match header filter name "
                    "%s in file %s" % (band, hdr["filter"], fname))
            raise Exception("Filter name %s does not match header filter name "
                    "%s in file %s" % (band, hdr["filter"], fname))
        for i in xrange(len(bpos)):
            b = hdr["B{0:02d}POS".format(i+1)]
            if bpos[i] == -1:
                bpos[i] = b
            else:
                if bpos[i] != b:
                    error("Bar positions are not all the same in "
                            "this set of flat files")
                    raise Exception("Bar positions are not all the same in "
                            "this set of flat files")
    bs = bs0

    # Imcombine the lamps ON flats
    info("Attempting to combine previous files")
    combine(flatlist, maskname, band, options)

    # Imcombine the lamps OFF flats and subtract the off from the On sets
    if lampOffList != None: 
        #Retrieve the list of files to use for flat creation. 
        lampOffList = IO.list_file_to_strings(lampOffList)
        # Print the filenames to Standard-out
        for flat in lampOffList:
            info(str(flat))
        print "Attempting to combine Lamps off data"
        combine(lampOffList, maskname, band, options, lampsOff=True)
        combine_off_on( maskname, band, options)

    debug("Combined '%s' to '%s'" % (flatlist, maskname))
    info("Combined to '%s'" % (maskname))
    path = "combflat_2d_%s.fits" % band
    (header, data) = IO.readfits(path, use_bpm=True)

    info("Flat written to %s" % path)

    # Edge Trace
    if bs.long_slit:
        info( "Long slit mode recognized")
        info( "Central row position:   "+str(longslit["row_position"]))
        info( "Upper and lower limits: "+str(longslit["yrange"][0])+" "+str(longslit["yrange"][1]))
        results = find_longslit_edges(data,header, bs, options, edgeThreshold=edgeThreshold, longslit=longslit)
    elif bs.long2pos_slit:
        info( "Long2pos mode recognized")
        results = find_long2pos_edges(data,header, bs, options, edgeThreshold=edgeThreshold, longslit=longslit)
    else:
        results = find_and_fit_edges(data, header, bs, options,edgeThreshold=edgeThreshold)
    results[-1]["maskname"] = maskname
    results[-1]["band"] = band
    np.save("slit-edges_{0}".format(band), results)
    save_ds9_edges(results, options)

    # Generate Flat
    out = "pixelflat_2d_%s.fits" % (band)
    if lampOffList != None: 
         make_pixel_flat(data, results, options, out, flatlist, lampsOff=True)
    else:
         make_pixel_flat(data, results, options, out, flatlist, lampsOff=False)

    info( "Pixel flat took {0:6.4} s".format(time.time()-tick))
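
A hypothetical driver-level call (the file names are illustrative; options is assumed to be the usual pipeline options dictionary and, for the steps above, must carry at least "version", "edge-order", "edge-fit-width" and "flat-field-order"):

# Hypothetical call to handle_flats(); names and values are illustrative.
handle_flats("Flat.txt", "some_maskname", "K", options,
             lampOffList="FlatThermal.txt")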
Example #36
def readmosfits(fname, options, extension=None):
    '''Read a fits file written by MOSFIRE from path and return a tuple of 
    (header, data, Target List, Science Slit List (SSL), Mechanical Slit 
    List (MSL), Alignment Slit List (ASL)).
    
    Note: the extension is typically not used; it is only needed if the detector
    server does not append the slit extension.
    '''

    if os.path.isabs(fname): path = fname
    else: path = os.path.join(fname_to_path(fname, options), fname)

    hdulist = pf.open(path)
    header = hdulist[0].header
    data = hdulist[0].data

    theBPM = badpixelmask()
    data = np.ma.masked_array(data, theBPM)

    if extension is not None:
        hdulist = pf.open(extension)

    try:
        header = hdulist[0].header
        datasec = ""
        try:
            datasec = header["DATASEC"]
            warning("%s contains a DATASEC keyword not compatible with the pipeline" % path)
            warning("The content of the keyword will be erased on the reduced data")
            del header["DATASEC"]
        except:
            pass
        targs = hdulist[1].data
        ssl = hdulist[2].data
        msl = hdulist[3].data
        asl = hdulist[4].data
    except:
        error("Improper MOSFIRE FITS File: %s" % path)
        raise Exception("Improper MOSFIRE FITS File: %s" % path)

    if np.abs(header["REGTMP1"] - 77) > 0.1:
        warning("**************************************")
        warning("The temperature of the detector is %3.3f where it "
                "should be 77.000 deg. Please notify Keck support staff." %
                header["REGTMP1"])

    ssl = ssl[ssl.field("Slit_Number") != ' ']
    msl = msl[msl.field("Slit_Number") != ' ']
    asl = asl[asl.field("Slit_Number") != ' ']
        

    # ELIMINATE POSITION B of the long2pos slit
    ssl = ssl[ssl.field("Target_Name") != 'posB']
    msl = msl[msl.field("Target_in_Slit") != 'posB']
    asl = asl[asl.field("Target_in_Slit") != 'posBalign']
    targs = targs[targs.field("Target_Name") !='posB']
    targs = targs[targs.field("Target_Name") != "posBalign"]

    bs = CSU.Barset()
    bs.set_header(header, ssl=ssl, msl=msl, asl=asl, targs=targs)

    return (header, data, bs)
Example #37
def save_ds9_edges(results, options):
    '''
    Create a ds9 file that saves the fit slit edge positions determined
    by find_and_fit_edges
    '''

    ds9 = ''

    W = Options.npix
    delt = Options.npix/30.
    
    S = 1
    for i in range(len(results) - 1):
        res = results[i]

        top = res["top"]
        bottom = res["bottom"]
        for i in np.arange(W/delt):
            x = delt * i
            sx = x + 1
            ex = x + delt + 1

            sy = top(sx) 
            ey = top(ex) 

            smid = (top(sx) - bottom(sx)) / 2. + bottom(sx)
            emid = (top(ex) - bottom(ex)) / 2. + bottom(sx)

            # three quarter point
            stq = (top(sx) - bottom(sx)) * 3./4. + bottom(sx)
            etq = (top(ex) - bottom(ex)) * 3./4. + bottom(sx)
            # one quarter point
            soq = (top(sx) - bottom(sx)) * 1./4. + bottom(sx)
            eoq = (top(ex) - bottom(ex)) * 1./4. + bottom(sx)

            ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0 color=red\n" % (sx, smid, ex, emid)
            ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0 color=red\n" % (sx, stq, ex, etq)
            ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0 color=red\n" % (sx, soq, ex, eoq)


            ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0\n" % (sx, sy, ex, ey)


            if i == W/2:
                    ds9 += " # text={S%2.0i (%s)}" % (S, 
                                    res["Target_Name"])

            ds9 += "\n"

            sy = bottom(sx) + 1
            ey = bottom(ex) + 1
            if i == 10: txt=res["Target_Name"]
            else: txt=""

            ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0 color=blue text={%s}\n" % (sx, sy, ex, ey, txt)

        # Vertical line indicating half power points
        try:
            hpps = res["hpps"]
            sx = hpps[0] ; ex = hpps[0]
            sy = bottom(sx) ; ey = top(sx)
            ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0\n" % (sx, sy, ex, ey)

            sx = hpps[1] ; ex = hpps[1]
            sy = bottom(sx) ; ey = top(sx)
            ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0\n" % (sx, sy, ex, ey)
        except:
            continue
        
    band = results[-1]["band"]
    fn = "slit-edges_%s.reg" % band
    try:
            f = open(fn,'w')
            f.write(ds9)
            f.close()
    except IOError:
            error("IO Error")
            raise
    except:
            raise
Example #38
def make_pixel_flat(data, results, options, outfile, inputs, lampsOff=None):
    '''
    Convert a flat image into a flat field
    '''

    def pixel_min(y): return np.floor(np.min(y))
    def pixel_max(y): return np.ceil(np.max(y))

    def collapse_flat_box(dat):
        '''Collapse data to the spectral axis (0)'''
        v = np.median(dat, axis=0).ravel()

        return v

    flat = np.ones(shape=Detector.npix)

    hdu = pyfits.PrimaryHDU((data/flat).astype(np.float32))
    hdu.header.update("version", __version__, "DRP version")
    i = 0
    for flatname in inputs:
        nm = flatname.split("/")[-1]
        hdu.header.update("infile%2.2i" % i, nm)
        i += 1

    slitno = 0
    for result in results[0:-1]:
        slitno += 1

        hdu.header.update("targ%2.2i" % slitno, result["Target_Name"])

        bf = result["bottom"]
        tf = result["top"]
        try:
            hpps = result["hpps"]
        except:
            error( "No half power points for this slit")
            hpps = [0, Detector.npix[0]]

        xs = np.arange(hpps[0], hpps[1])

        top = pixel_min(tf(xs))
        bottom = pixel_max(bf(xs))

        hdu.header.update("top%2.2i" % slitno, top)
        hdu.header.update("bottom%2.2i" % slitno, bottom)

        info( "%s] Bounding top/bottom: %i/%i" % (result["Target_Name"],
                bottom, top))

        v = collapse_flat_box(data[bottom:top,hpps[0]:hpps[1]])

        x2048 = np.arange(Options.npix)
        v = np.poly1d(np.polyfit(xs,v,
            options['flat-field-order']))(xs).ravel()

        for i in np.arange(bottom-1, top+1):
            flat[i,hpps[0]:hpps[1]] = v

    info( "Producing Pixel Flat...")
    for r in range(len(results)-1):
        theslit = results[r]

        try:
            bf = theslit["bottom"]
            tf = theslit["top"]
        except:
            pdb.set_trace()

        for i in range(hpps[0], hpps[1]):
            top = np.floor(tf(i))
            bottom = np.ceil(bf(i))
            
            data[top:bottom, i] = flat[top:bottom,i]

    hdu.data = (data/flat).astype(np.float32)
    bad = np.abs(hdu.data-1.0) > 0.5
    hdu.data[bad] = 1.0
    hdu.data = hdu.data.filled(1)
    if os.path.exists(outfile):
            os.remove(outfile)
    hdu.writeto(outfile)
Example #39
def bar_to_slit(x):
    '''Convert a bar #(1-92) to a slit(1-46) number'''
    if (x < 1) or (x > numbars):
        error("Not indexing CSU properly")
        raise MismatchError("Not indexing CSU properly")
    return int(x+1)/2
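
Two adjacent bars map to one slit, so the integer division pairs them up (a worked sketch):

# bar  1 -> int(1 + 1) / 2  = 1
# bar  2 -> int(2 + 1) / 2  = 1
# bar 45 -> int(45 + 1) / 2 = 23
# bar 92 -> int(92 + 1) / 2 = 46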
Example #40
def find_and_fit_edges(data, header, bs, options, edgeThreshold=450):
    '''
    Given a flat field image, find_and_fit_edges determines the position
    of all slits.

    The function works by starting with a guess at the location for a slit
    edge in the spatial direction(options["first-slit-edge"]). 
    
    Starting from the guess, find_edge_pair works out in either direction, 
    measuring the position of the (e.g.) bottom of slit 1 and top of slit 2:


    ------ pixel y value = 2048

    Slit 1 data

    ------ (bottom)
    deadband
    ------ (top)

    Slit N pixel data ....

    ------- (bottom) pixel = 0

    --------------------------------> Spectral direction


    1. At the top of the flat, the slit edge is defined to be a pixel value
    2. The code guesses the position of the bottom of the slit, and runs
            find_edge_pair to measure slit edge locations.
    3. A low-order polynomial is fit to the edge locations with
            fit_edge_poly
    4. The top and bottom of the current slit, is stored into the
            result list.
    5. The top of the next slit is stored temporarily for the next
            iteration of the for loop.
    6. At the bottom of the flat, the slit edge is defined to be pixel 4.


    options:
    options["edge-order"] -- The polynomial order used to fit the edge.
    options["edge-fit-width"] -- The length [pixels] of the edge to 
            fit over

    '''

    # TODO: move hardcoded values into Options.py
    # y is the location to start
    y = 2034
    DY = 44.25

    toc = 0
    ssl = bs.ssl

    slits = []

    top = [0., np.float(Options.npix)]

    start_slit_num = int(bs.msl[0]['Slit_Number'])-1
    if start_slit_num > 0:
        y -= DY * start_slit_num

    # Count and check that the # of objects in the SSL matches that of the MSL
    # This is purely a safety check
    numslits = np.zeros(len(ssl))
    for i in xrange(len(ssl)):
        slit = ssl[i]
        M = np.where(slit["Target_Name"] == bs.msl["Target_in_Slit"])

        numslits[i] = len(M[0])
    numslits = np.array(numslits)


    if (np.sum(numslits) != CSU.numslits) and (not bs.long_slit) and (not bs.long2pos_slit) and (not bs.long2pos_slit_specphot):
        error ("The number of allocated CSU slits (%i) does not match "
                " the number of possible slits (%i)." % (np.sum(numslits),
                    CSU.numslits))
        raise Exception("The number of allocated CSU slits (%i) does not match "
                " the number of possible slits (%i)." % (np.sum(numslits),
                    CSU.numslits))

    # if the mask is a long slit, the default y value will be wrong. Set instead to be the middle
    if bs.long_slit:
        y = 1104
        
    # now begin steps outline above
    results = []
    result = {}

    result["Target_Name"] = ssl[0]["Target_Name"]

    # 1
    result["top"] = np.poly1d([y])

    ''' Nomenclature here is confusing:
        
        ----- Edge  -- Top of current slit, bottom of prev slit
        . o ' Data
        ===== Data
        .;.;' Data
        ----- Edge  -- Bottom of current slit, top of next slit
    '''

    topfun = np.poly1d([y])
    xposs_top_this = np.arange(10,2000,100)
    yposs_top_this = topfun(xposs_top_this)

    for target in xrange(len(ssl)):

        y -= DY * numslits[target]
        y = max(y, 1)
        
        info("%2.2i] Finding Slit Edges for %s ending at %4.0i. Slit "
                "composed of %i CSU slits" % ( target,
                    ssl[target]["Target_Name"], y, numslits[target]))

        ''' First deal with the current slit '''
        hpps = Wavelength.estimate_half_power_points(
                bs.scislit_to_csuslit(target+1)[0], header, bs)

        if y == 1:
            xposs_bot = [1024]
            xposs_bot_missing = []
            yposs_bot = [4.25]
            botfun = np.poly1d(yposs_bot)
            ok = np.where((xposs_bot > hpps[0]) & (xposs_bot < hpps[1]))
        else:
            (xposs_top_next, xposs_top_next_missing, yposs_top_next, xposs_bot,
                xposs_bot_missing, yposs_bot, scatter_bot_this) = find_edge_pair(
                    data, y, options["edge-fit-width"],edgeThreshold=edgeThreshold)

            ok = np.where((xposs_bot > hpps[0]) & (xposs_bot < hpps[1]))
            ok2 = np.where((xposs_bot_missing > hpps[0]) & (xposs_bot_missing <
                hpps[1]))
            xposs_bot = xposs_bot[ok]
            xposs_bot_missing = xposs_bot_missing[ok2]
            yposs_bot = yposs_bot[ok]
            if len(xposs_bot) == 0:
                botfun = np.poly1d(y-DY)
            else:
                (botfun, bot_res, botsd, botok) =  fit_edge_poly(xposs_bot,
                         xposs_bot_missing, yposs_bot, options["edge-order"])


        bot = botfun.c.copy() 
        top = topfun.c.copy()

        #4
        result = {}
        result["Target_Name"] = ssl[target]["Target_Name"]
        result["xposs_top"] = xposs_top_this
        result["yposs_top"] = yposs_top_this
        result["xposs_bot"] = xposs_bot
        result["yposs_bot"] = yposs_bot
        result["top"] = np.poly1d(top)
        result["bottom"] = np.poly1d(bot)
        result["hpps"] = hpps
        result["ok"] = ok
        results.append(result)

        #5
        if y == 1:
            break
            

        next = target + 2
        if next > len(ssl): next = len(ssl)
        hpps_next = Wavelength.estimate_half_power_points(
                bs.scislit_to_csuslit(next)[0],
                    header, bs)

        ok = np.where((xposs_top_next > hpps_next[0]) & (xposs_top_next <
            hpps_next[1]))
        ok2 = np.where((xposs_top_next_missing > hpps_next[0]) &
            (xposs_top_next_missing < hpps_next[1]))

        xposs_top_next = xposs_top_next[ok]
        xposs_top_next_missing = xposs_top_next_missing[ok2]
        yposs_top_next = yposs_top_next[ok]

        if len(xposs_top_next) == 0:
            topfun = np.poly1d(y)
        else:
            (topfun, topres, topsd, ok) = fit_edge_poly(xposs_top_next,
                xposs_top_next_missing, yposs_top_next, options["edge-order"])

        xposs_top_this = xposs_top_next
        xposs_top_this_missing = xposs_top_next_missing
        yposs_top_this = yposs_top_next

    results.append({"version": options["version"]})

    return results
Example #41
def readmosfits(fname, options, extension=None):
    '''Read a fits file written by MOSFIRE from path and return a tuple of 
    (header, data, Target List, Science Slit List (SSL), Mechanical Slit 
    List (MSL), Alignment Slit List (ASL)).
    
    Note: the extension is typically not used; it is only needed if the detector
    server does not append the slit extension.
    '''

    if os.path.isabs(fname): path = fname
    else: path = os.path.join(fname_to_path(fname, options), fname)

    hdulist = pf.open(path)
    header = hdulist[0].header
    data = hdulist[0].data

    theBPM = badpixelmask()
    data = np.ma.masked_array(data, theBPM)

    if extension is not None:
        hdulist = pf.open(extension)

    try:
        header = hdulist[0].header
        datasec = ""
        try:
            datasec = header["DATASEC"]
            debug(
                "%s contains a DATASEC keyword not compatible with the pipeline"
                % path)
            debug(
                "The content of the keyword will be erased on the reduced data"
            )
            del header["DATASEC"]
        except:
            pass
        targs = hdulist[1].data
        ssl = hdulist[2].data
        msl = hdulist[3].data
        asl = hdulist[4].data
    except:
        error("Improper MOSFIRE FITS File: %s" % path)
        raise Exception("Improper MOSFIRE FITS File: %s" % path)


#     if np.abs(header["REGTMP1"] - 77) > 0.1:
#         warning("**************************************")
#         warning("The temperature of the detector is %3.3f where it "
#                 "should be 77.000 deg. Please notify Keck support staff." %
#                 header["REGTMP1"])

    ssl = ssl[ssl.field("Slit_Number") != ' ']
    msl = msl[msl.field("Slit_Number") != ' ']
    asl = asl[asl.field("Slit_Number") != ' ']

    # ELIMINATE POSITION B of the long2pos slit
    ssl = ssl[ssl.field("Target_Name") != 'posB']
    msl = msl[msl.field("Target_in_Slit") != 'posB']
    asl = asl[asl.field("Target_in_Slit") != 'posBalign']
    targs = targs[targs.field("Target_Name") != 'posB']
    targs = targs[targs.field("Target_Name") != "posBalign"]

    bs = CSU.Barset()
    bs.set_header(header, ssl=ssl, msl=msl, asl=asl, targs=targs)

    return (header, data, bs)
Example #42
    def set_header(self, header, ssl=None, msl=None, asl=None, targs=None):
        '''Takes "header", a FITS header dictionary, and converts it to a Barset'''
        self.pos = np.array(IO.parse_header_for_bars(header))
        self.set_pos_pix()

        self.ssl = ssl
        self.msl = msl
        self.asl = asl
        self.targs = targs

        def is_alignment_slit(slit):
            return (np.float(slit["Target_Priority"]) < 0)

        # If len(ssl) == 0 then the header is for a long slit
        if (header['MASKNAME'] == 'long2pos'):
            info("long2pos mode in CSU slit determination")
            self.long2pos_slit = True

        if (len(ssl) == 0):

            self.long_slit = True

            start = np.int(msl[0]["Slit_Number"])
            stop = np.int(msl[-1]["Slit_Number"])

            for mech_slit in msl:
                mech_slit["Target_in_Slit"] = "long"

            self.ssl = np.array([
                ("1", "??", "??", "??", "??", "??", "??", msl[0]['Slit_width'],
                 (stop - start + 1) * 7.6, "0", "long", "0")
            ],
                                dtype=[('Slit_Number', '|S2'),
                                       ('Slit_RA_Hours', '|S2'),
                                       ('Slit_RA_Minutes', '|S2'),
                                       ('Slit_RA_Seconds', '|S5'),
                                       ('Slit_Dec_Degrees', '|S3'),
                                       ('Slit_Dec_Minutes', '|S2'),
                                       ('Slit_Dec_Seconds', '|S5'),
                                       ('Slit_width', '|S5'),
                                       ('Slit_length', '|S5'),
                                       ('Target_to_center_of_slit_distance',
                                        '|S5'), ('Target_Name', '|S80'),
                                       ('Target_Priority', '|S1')])
            self.scislit_to_slit = [np.arange(start, stop)]
            ssl = None

        # Create a map between scislit number and mechanical slit
        # recall that slits count from 1
        if ssl is not None:
            prev = self.msl[0]["Target_in_Slit"]

            v = []

            for science_slit in ssl:
                targ = science_slit["Target_Name"]
                v.append([
                    int(x) for x in self.msl.field("Slit_Number")[np.where(
                        self.msl.field("Target_in_Slit").rstrip() == targ)[0]]
                ])
            self.scislit_to_slit = v

            if (len(self.scislit_to_slit) != len(ssl)
                ) and not (self.long_slit and len(self.scislit_to_slit) == 1):
                error("SSL should match targets in slit")
                raise Exception("SSL should match targets in slit")
Example #43
0
def bar_to_slit(x):
    '''Convert a bar #(1-92) to a slit(1-46) number'''
    if (x < 1) or (x > numbars):
        error("Not indexing CSU properly")
        raise MismatchError("Not indexing CSU properly")
    return int(x + 1) / 2
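
# --- Illustration added for clarity (not part of the original example) ---
# bar_to_slit pairs bars into slits: bars 1 and 2 map to slit 1, bars 91 and
# 92 map to slit 46. In Python 2, int(x + 1) / 2 is integer division; the
# explicit equivalent is (x + 1) // 2.
for bar in (1, 2, 3, 4, 91, 92):
    print("bar %2i -> slit %2i" % (bar, (bar + 1) // 2))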
Example #44
0
def save_ds9_edges(results, options):
    '''
    Create a ds9 file that saves the fit slit edge positions determined
    by find_and_fit_edges
    '''

    ds9 = ''

    W = Options.npix
    delt = Options.npix / 30.

    S = 1
    for i in range(len(results) - 1):
        res = results[i]

        top = res["top"]
        bottom = res["bottom"]
        for i in np.arange(W / delt):
            x = delt * i
            sx = x + 1
            ex = x + delt + 1

            sy = top(sx)
            ey = top(ex)

            smid = (top(sx) - bottom(sx)) / 2. + bottom(sx)
            emid = (top(ex) - bottom(ex)) / 2. + bottom(ex)

            # three quarter point
            stq = (top(sx) - bottom(sx)) * 3. / 4. + bottom(sx)
            etq = (top(ex) - bottom(ex)) * 3. / 4. + bottom(ex)
            # one quarter point
            soq = (top(sx) - bottom(sx)) * 1. / 4. + bottom(sx)
            eoq = (top(ex) - bottom(ex)) * 1. / 4. + bottom(ex)

            ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0 color=red\n" % (
                sx, smid, ex, emid)
            ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0 color=red\n" % (
                sx, stq, ex, etq)
            ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0 color=red\n" % (
                sx, soq, ex, eoq)

            ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0\n" % (
                sx, sy, ex, ey)

            if i == W / 2:
                ds9 += " # text={S%2.0i (%s)}" % (S, res["Target_Name"])

            ds9 += "\n"

            sy = bottom(sx) + 1
            ey = bottom(ex) + 1
            if i == 10: txt = res["Target_Name"]
            else: txt = ""

            ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0 color=blue text={%s}\n" % (
                sx, sy, ex, ey, txt)

        # Vertical line indicating half power points
        try:
            hpps = res["hpps"]
            sx = hpps[0]
            ex = hpps[0]
            sy = bottom(sx)
            ey = top(sx)
            ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0\n" % (
                sx, sy, ex, ey)

            sx = hpps[1]
            ex = hpps[1]
            sy = bottom(sx)
            ey = top(sx)
            ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0\n" % (
                sx, sy, ex, ey)
        except:
            continue

    band = results[-1]["band"]
    fn = "slit-edges_%s.reg" % band
    try:
        f = open(fn, 'w')
        f.write(ds9)
        f.close()
    except IOError:
        error("IO Error")
        raise
    except:
        raise
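
# --- Illustration added for clarity (not part of the original example) ---
# A minimal sketch of the ds9 region syntax written above: each edge segment
# becomes one "line(x1, y1, x2, y2) # ..." entry. The constant polynomials
# below are hypothetical stand-ins for the fitted top/bottom edge functions.
import numpy as np

top = np.poly1d([1800.0])     # flat top edge at y = 1800
bottom = np.poly1d([1755.0])  # flat bottom edge at y = 1755

sx, ex = 1.0, 69.3            # one segment, roughly 2048/30 pixels wide
print("line(%f, %f, %f, %f) # color=blue text={toy}" % (
    sx, bottom(sx), ex, bottom(ex)))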
Example #45
0
def imcombine(files, maskname, options, flat, outname=None, shifts=None,
    extension=None):
    '''
    Combines a list of files into a single stacked image. The imcombine code
    also estimates the read noise as RN/sqrt(numreads) so that the variance
    per frame is equal to (ADU + RN^2), where RN is computed in ADUs.

    Arguments:
        files[]: list of full path to files to combine
        maskname: Name of mask
        options: Options dictionary
        flat[2048x2048]: Flat field (values should all be ~ 1.0)
        outname: If set, will write (see notes below for details)
            eps_[outname].fits: electron/sec file
            itimes_[outname].fits: integration time
            var_[outname].fits: Variance files
        shifts[len(files)]: If set, will "roll" each file by the 
            amount in the shifts vector in pixels. This argument
            is used when telescope tracking is poor. If you need
            to use this, please notify Keck staff about poor 
            telescope tracking.

    Returns 6-element tuple:
        header: The combined header
        electrons [2048x2048]:  e- (in e- units)
        var [2048x2048]: electrons + RN**2 (in e-^2 units)
        bs: The MOSFIRE.Barset instance
        itimes [2048x2048]: itimes (in s units)
        Nframe: The number of frames that contribute to the summed
            arrays above. With 9 or more frames the sigma-clipping
            Cosmic Ray Rejection is used; with 6-8 frames the max/min
            elements are dropped; with fewer frames no rejection is applied.

    Notes:

        header -- fits header
        ADUs -- The mean # of ADUs per frame
        var -- the Variance [in adu] per frame. 
        bs -- Barset
        itimes -- The _total_ integration time in seconds
        Nframe -- The number of frames in a stack.

        
        Thus the number of electrons per second is derived as: 
            e-/sec = (ADUs * Gain / Flat) * (Nframe/itimes)

        The total number of electrons is:
            el = ADUs * Gain * Nframe


    '''

    ADUs = np.zeros((len(files), 2048, 2048))
    itimes = np.zeros((len(files), 2048, 2048))
    prevssl = None
    prevmn = None
    patternid = None
    maskname = None

    header = None

    if shifts is None:
        shifts = np.zeros(len(files))

    warnings.filterwarnings('ignore')
    for i in xrange(len(files)):
        fname = files[i]
        thishdr, data, bs = IO.readmosfits(fname, options, extension=extension)
        itimes[i,:,:] = thishdr["truitime"]

        base = os.path.basename(fname).rstrip(".fits")
        fnum = int(base.split("_")[1])
        
        if shifts[i] == 0:
            ADUs[i,:,:] = data.filled(0.0) / flat
        else:
            ADUs[i,:,:] = np.roll(data.filled(0.0) / flat, np.int(shifts[i]), axis=0)

        ''' Construct Header'''
        if header is None:
            header = thishdr

        header["imfno%3.3i" % (fnum)] =  (fname, "img%3.3i file name" % fnum)

        map(lambda x: rem_header_key(header, x), ["CTYPE1", "CTYPE2", "WCSDIM",
            "CD1_1", "CD1_2", "CD2_1", "CD2_2", "LTM1_1", "LTM2_2", "WAT0_001",
            "WAT1_001", "WAT2_001", "CRVAL1", "CRVAL2", "CRPIX1", "CRPIX2",
            "RADECSYS"])

        for card in header.cards:
            if card == '': continue
            key,val,comment = card
            
            if key in thishdr:
                if val != thishdr[key]:
                    newkey = key + ("_img%2.2i" % fnum)
                    try: header[newkey.rstrip()] = (thishdr[key], comment)
                    except: pass

        ''' Now handle error checking'''

        if maskname is not None:
            if thishdr["maskname"] != maskname:
                error("File %s uses mask '%s' but the stack is of '%s'" %
                    (fname, thishdr["maskname"], maskname))
                raise Exception("File %s uses mask '%s' but the stack is of '%s'" %
                    (fname, thishdr["maskname"], maskname))

        maskname = thishdr["maskname"]
            
        if thishdr["aborted"]:
            error("Img '%s' was aborted and should not be used" %
                    fname)
            raise Exception("Img '%s' was aborted and should not be used" %
                    fname)

        if prevssl is not None:
            if len(prevssl) != len(bs.ssl):
                # todo Improve these checks
                error("The stack of input files seems to be of "
                        "different masks")
                raise Exception("The stack of input files seems to be of "
                        "different masks")
        prevssl = bs.ssl

        if patternid is not None:
            if patternid != thishdr["frameid"]:
                error("The stack should be of '%s' frames only, but "
                        "the current image is a '%s' frame." % (patternid, 
                            thishdr["frameid"]))
                raise Exception("The stack should be of '%s' frames only, but "
                        "the current image is a '%s' frame." % (patternid, 
                            thishdr["frameid"]))

        patternid = thishdr["frameid"]


        if maskname is not None:
            if maskname != thishdr["maskname"]:
                error("The stack should be of CSU mask '%s' frames "
                        "only but contains a frame of '%s'." % (maskname,
                        thishdr["maskname"]))
                raise Exception("The stack should be of CSU mask '%s' frames "
                        "only but contains a frame of '%s'." % (maskname,
                        thishdr["maskname"]))

        maskname = thishdr["maskname"]

        if thishdr["BUNIT"] != "ADU per coadd":
            error("The units of '%s' are not in ADU per coadd and "
                    "this violates an assumption of the DRP. Some new code " 
                    "is needed in the DRP to handle the new units of "
                    "'%s'." % (fname, thishdr["BUNIT"]))
            raise Exception("The units of '%s' are not in ADU per coadd and "
                    "this violates an assumption of the DRP. Some new code " 
                    "is needed in the DRP to handle the new units of "
                    "'%s'." % (fname, thishdr["BUNIT"]))

        ''' Error checking is complete'''
        info("%s %s[%s]/%s: %5.1f s,  Shift: %i px" % (fname, maskname, patternid,
            header['filter'], np.mean(itimes[i]), shifts[i]))

    warnings.filterwarnings('always')

    # the electrons and el_per_sec arrays are:
    #   [2048, 2048, len(files)] and contain values for
    # each individual frame that is being combined.
    # These need to be kept here for CRR reasons.
    electrons = np.array(ADUs) * Detector.gain 
    el_per_sec = electrons / itimes

    output = np.zeros((2048, 2048))
    exptime = np.zeros((2048, 2048))

    numreads = header["READS0"]
    RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain
    RN = Detector.RN / np.sqrt(numreads)

    # Cosmic ray rejection code begins here. This code constructs the
    # electrons and itimes arrays.
    standard = True
    new_from_chuck = False
    # Chuck Steidel has provided a modified version of the CRR procedure. 
    # to enable it, modify the variables above.
    
    if new_from_chuck and not standard:
        if len(files) >= 5:
            print "Sigclip CRR"
            srt = np.argsort(electrons, axis=0, kind='quicksort')
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            el_per_sec = el_per_sec[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            # Construct the mean and standard deviation by dropping the top and bottom two 
            # electron fluxes. This is temporary.
            mean = np.mean(el_per_sec[1:-1,:,:], axis = 0)
            std = np.std(el_per_sec[1:-1,:,:], axis = 0)

            drop = np.where( (el_per_sec > (mean+std*4)) | (el_per_sec < (mean-std*4)) )
            print "dropping: ", len(drop[0])
            electrons[drop] = 0.0
            itimes[drop] = 0.0

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files) 

        elif len(files) > 5:
            print "WARNING: Drop min/max CRR"
            srt = np.argsort(el_per_sec,axis=0)
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            electrons = np.sum(electrons[1:-1,:,:], axis=0)
            itimes = np.sum(itimes[1:-1,:,:], axis=0)

            Nframe = len(files) - 2

        else:
            warning( "With less than 5 frames, the pipeline does NOT perform")
            warning( "Cosmic Ray Rejection.")
            # the "if false" line disables cosmic ray rejection"
            if False: 
                for i in xrange(len(files)):
                    el = electrons[i,:,:]
                    it = itimes[i,:,:]
                    el_mf = scipy.signal.medfilt(el, 5)

                    bad = np.abs(el - el_mf) / np.abs(el) > 10.0
                    el[bad] = 0.0
                    it[bad] = 0.0

                    electrons[i,:,:] = el
                    itimes[i,:,:] = it

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files) 

    if standard and not new_from_chuck:
        if len(files) >= 9:
            info("Sigclip CRR")
            srt = np.argsort(electrons, axis=0, kind='quicksort')
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            el_per_sec = el_per_sec[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            # Construct the mean and standard deviation by dropping the top and bottom two 
            # electron fluxes. This is temporary.
            mean = np.mean(el_per_sec[2:-2,:,:], axis = 0)
            std = np.std(el_per_sec[2:-2,:,:], axis = 0)

            drop = np.where( (el_per_sec > (mean+std*4)) | (el_per_sec < (mean-std*4)) )
            info("dropping: "+str(len(drop[0])))
            electrons[drop] = 0.0
            itimes[drop] = 0.0

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files) 
            


        elif len(files) > 5:
            warning( "WARNING: Drop min/max CRR")
            srt = np.argsort(el_per_sec,axis=0)
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            electrons = np.sum(electrons[1:-1,:,:], axis=0)
            itimes = np.sum(itimes[1:-1,:,:], axis=0)

            Nframe = len(files) - 2

        else:
            warning( "With less than 5 frames, the pipeline does NOT perform")
            warning( "Cosmic Ray Rejection.")
            # the "if false" line disables cosmic ray rejection"
            if False: 
                for i in xrange(len(files)):
                     el = electrons[i,:,:]
                     it = itimes[i,:,:]
                     # calculate the median image
                     el_mf = scipy.signal.medfilt(el, 5)
                     el_mf_large = scipy.signal.medfilt(el_mf, 15)
                     # LR: this is a modified version I was experimenting with. For the version 
                     #     written by Nick, see the new_from_chuck part of this code
                     # sky sub
                     el_sky_sub = el_mf - el_mf_large
                     # add a constant value
                     el_plus_constant = el_sky_sub + 100

                     bad = np.abs(el - el_mf) / np.abs(el_plus_constant) > 50.0
                     el[bad] = 0.0
                     it[bad] = 0.0

                     electrons[i,:,:] = el
                     itimes[i,:,:] = it

            
            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files)


    ''' Now handle variance '''
    numreads = header["READS0"]
    RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain
    RN = Detector.RN / np.sqrt(numreads)

    var = (electrons + RN**2) 

    ''' Now mask out bad pixels '''
    electrons[data.mask] = np.nan
    var[data.mask] = np.inf
    
    print var[data.mask]

    if "RN" in header:
        error("RN Already populated in header")
        raise Exception("RN Already populated in header")
    header['RN'] = ("%1.3f" % RN, "Read noise in e-")
    header['NUMFRM'] = (Nframe, 'Typical number of frames in stack')


    header['BUNIT'] = 'ELECTRONS/SECOND'
    IO.writefits(np.float32(electrons/itimes), maskname, "eps_%s" % (outname),
                 options, header=header, overwrite=True)

    # Update itimes after division in order to not introduce nans
    itimes[data.mask] = 0.0

    header['BUNIT'] = 'ELECTRONS^2'
    IO.writefits(var, maskname, "var_%s" % (outname),
                 options, header=header, overwrite=True, lossy_compress=True)

    header['BUNIT'] = 'SECOND'
    IO.writefits(np.float32(itimes), maskname, "itimes_%s" % (outname),
                options, header=header, overwrite=True, lossy_compress=True)

    return header, electrons, var, bs, itimes, Nframe
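
# --- Illustration added for clarity (not part of the original example) ---
# A minimal sketch of the unit bookkeeping described in the docstring above,
# with hypothetical scalar values standing in for the 2048x2048 arrays and
# for Detector.gain / Detector.RN (flat-fielding omitted for brevity):
#   electrons = ADUs * gain * Nframe,  var = electrons + RN**2,
#   e-/s = electrons / itimes_total
import numpy as np

gain = 2.15               # e-/ADU (hypothetical stand-in for Detector.gain)
RN_single = 21.0          # e- per read (hypothetical stand-in for Detector.RN)
numreads = 16
RN = RN_single / np.sqrt(numreads)

ADUs_per_frame = 1000.0   # mean ADUs in one frame
Nframe = 3
itimes_total = Nframe * 120.0   # total integration time in seconds

electrons = ADUs_per_frame * gain * Nframe   # total e-
var = electrons + RN**2                      # e-^2
eps = electrons / itimes_total               # e-/s

print("e- = %.1f   var = %.1f   e-/s = %.3f" % (electrons, var, eps))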
Example #46
0
def find_longslit_edges(data,
                        header,
                        bs,
                        options,
                        edgeThreshold=450,
                        longslit=None):

    y = 2034
    DY = 44.25

    toc = 0
    ssl = bs.ssl

    slits = []

    top = [0., np.float(Options.npix)]

    start_slit_num = int(bs.msl[0]['Slit_Number']) - 1
    if start_slit_num > 0:
        y -= DY * start_slit_num
    # if the mask is a long slit, the default y value will be wrong. Set instead to be the middle
    if bs.long_slit:
        try:
            y = longslit["yrange"][1]
        except:
            error(
                "Longslit reduction mode is specified, but the row position has not been specified. Defaulting to "
                + str(y))
            print "Longslit reduction mode is specified, but the row position has not been specified. Defaulting to " + str(
                y)

    # Count and check that the # of objects in the SSL matches that of the MSL
    # This is purely a safety check
    numslits = np.zeros(len(ssl))
    for i in xrange(len(ssl)):
        slit = ssl[i]
        M = np.where(slit["Target_Name"] == bs.msl["Target_in_Slit"])

        numslits[i] = len(M[0])
    numslits = np.array(numslits)
    info("Number of slits allocated for this longslit: " +
         str(np.sum(numslits)))

    # now begin steps outlined above
    results = []
    result = {}

    result["Target_Name"] = ssl[0]["Target_Name"]

    # 1 Defines a polynomial of degree 0, which is a constant, with the value of the top of the slit
    result["top"] = np.poly1d([longslit["yrange"][1]])

    topfun = np.poly1d([longslit["yrange"][1]
                        ])  # this is a constant function with c=top of the slit
    botfun = np.poly1d([
        longslit["yrange"][0]
    ])  # this is a constant function with c=bottom of the slit

    # xposs_top = [10, 110, 210, ..., 1810, 1910]
    xposs_top = np.arange(10, 2000, 100)
    xposs_bot = np.arange(10, 2000, 100)
    # yposs_top = [1104, 1104, ..., 1104], the constant polynomial evaluated at the X positions
    yposs_top = topfun(xposs_top)
    yposs_bot = botfun(xposs_bot)
    ''' Deal with the current slit '''
    target = 0
    hpps = Wavelength.estimate_half_power_points(
        bs.scislit_to_csuslit(target + 1)[0], header, bs)

    ok = np.where((xposs_top > hpps[0]) & (xposs_top < hpps[1]))

    xposs_bot = xposs_bot[ok]
    yposs_bot = yposs_bot[ok]
    xposs_top = xposs_top[ok]
    yposs_top = yposs_top[ok]

    if len(xposs_bot) == 0:
        error("The slit edges specifications appear to be incorrect.")
        raise Exception(
            "The slit edges specifications appear to be incorrect.")

    # bot is the polynomial that defines the shape of the bottom of the slit. In this case, we set it to a constant.
    bot = botfun.c.copy()
    top = topfun.c.copy()

    #4
    result = {}
    result["Target_Name"] = ssl[target]["Target_Name"]
    result["xposs_top"] = xposs_top
    result["yposs_top"] = yposs_top
    result["xposs_bot"] = xposs_bot
    result["yposs_bot"] = yposs_bot
    result["top"] = np.poly1d(top)
    result["bottom"] = np.poly1d(bot)
    result["hpps"] = hpps
    result["ok"] = ok
    results.append(result)

    results.append({"version": options["version"]})

    return results
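
# --- Illustration added for clarity (not part of the original example) ---
# find_longslit_edges expects a "longslit" dictionary giving the row range of
# the slit on the detector; handle_flats also reads "row_position" from it.
# The numbers below are hypothetical.
longslit = {
    "yrange": [968, 1100],   # [bottom row, top row] of the long slit, pixels
    "row_position": 1034,    # central row of the slit
}
# results = find_longslit_edges(data, header, bs, options,
#                               edgeThreshold=450, longslit=longslit)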
Example #47
0
def make_pixel_flat(data, results, options, outfile, inputs, lampsOff=None):
    '''
    Convert a flat image into a flat field
    '''
    def pixel_min(y):
        return np.floor(np.min(y))

    def pixel_max(y):
        return np.ceil(np.max(y))

    def collapse_flat_box(dat):
        '''Collapse data to the spectral axis (0)'''
        v = np.median(dat, axis=0).ravel()

        return v

    flat = np.ones(shape=Detector.npix)

    hdu = pyfits.PrimaryHDU((data / flat).astype(np.float32))
    hdu.header.update("version", __version__, "DRP version")
    i = 0
    for flatname in inputs:
        nm = flatname.split("/")[-1]
        hdu.header.update("infile%2.2i" % i, nm)
        i += 1

    slitno = 0
    for result in results[0:-1]:
        slitno += 1

        hdu.header.update("targ%2.2i" % slitno, result["Target_Name"])

        bf = result["bottom"]
        tf = result["top"]
        try:
            hpps = result["hpps"]
        except:
            error("No half power points for this slit")
            hpps = [0, Detector.npix[0]]

        xs = np.arange(hpps[0], hpps[1])

        top = pixel_min(tf(xs))
        bottom = pixel_max(bf(xs))

        hdu.header.update("top%2.2i" % slitno, top)
        hdu.header.update("bottom%2.2i" % slitno, bottom)

        info("%s] Bounding top/bottom: %i/%i" %
             (result["Target_Name"], bottom, top))

        v = collapse_flat_box(data[bottom:top, hpps[0]:hpps[1]])

        x2048 = np.arange(Options.npix)
        v = np.poly1d(np.polyfit(xs, v,
                                 options['flat-field-order']))(xs).ravel()

        for i in np.arange(bottom - 1, top + 1):
            flat[i, hpps[0]:hpps[1]] = v

    info("Producing Pixel Flat...")
    for r in range(len(results) - 1):
        theslit = results[r]

        try:
            bf = theslit["bottom"]
            tf = theslit["top"]
        except:
            pdb.set_trace()

        for i in range(hpps[0], hpps[1]):
            top = np.floor(tf(i))
            bottom = np.ceil(bf(i))

            data[top:bottom, i] = flat[top:bottom, i]

    hdu.data = (data / flat).astype(np.float32)
    bad = np.abs(hdu.data - 1.0) > 0.5
    hdu.data[bad] = 1.0
    hdu.data = hdu.data.filled(1)
    if os.path.exists(outfile):
        os.remove(outfile)
    hdu.writeto(outfile)
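
# --- Illustration added for clarity (not part of the original example) ---
# A minimal sketch of the per-slit flat construction used above: median-
# collapse the slit along the spatial axis into a 1-D spectral profile, fit a
# low-order polynomial to the lamp spectrum, and divide it back out so the
# result is ~ 1.0 everywhere. Toy data; the polynomial order here stands in
# for options['flat-field-order'].
import numpy as np

slit = np.random.normal(1000.0, 5.0, size=(40, 200))  # toy slit cutout
xs = np.arange(slit.shape[1])

profile = np.median(slit, axis=0)                   # cf. collapse_flat_box
smooth = np.poly1d(np.polyfit(xs, profile, 7))(xs)  # fitted lamp spectrum
pixel_flat = slit / smooth                          # pixel response, ~ 1.0

print("median of toy pixel flat: %.3f" % np.median(pixel_flat))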
Example #48
0
def handle_background(filelist, wavename, maskname, band_name, options,
                      shifts=None, plan=None, extension=None, target='default'): 
    '''
    Perform difference imaging and subtract residual background.

    The plan looks something like: [['A', 'B']]
    In this case, the number of output files is equal to the length of the list (1).

    If you choose to use an ABA'B' pattern then the plan will be: [["A", "B"], ["A'", "B'"]]
    the background subtraction code will make and handle two files, "A-B" and "A'-B'".
    '''
    
    global header, bs, edges, data, Var, itime, lam, sky_sub_out, sky_model_out, band

    band = band_name

    flatname = "pixelflat_2d_%s.fits" % band_name
    hdr, flat = IO.readfits("pixelflat_2d_%s.fits" % (band_name), options)

    if np.abs(np.median(flat) - 1) > 0.1:
        error("Flat seems poorly behaved.")
        raise Exception("Flat seems poorly behaved.")

    '''
        This next section of the code figures out the observing plan
        and then deals with the bookkeeping of sending the plan
        to the background subtractor.
    '''

    hdrs = []
    epss = {}
    vars = {}
    bss = []
    times = {}
    Nframes = []

    i = 0
    header = pf.Header()
    for i in xrange(len(filelist)):
        fl = filelist[i]
        files = IO.list_file_to_strings(fl)
        info("Combining observation files listed in {}".format(fl))
        if shifts is None: shift = None
        else: shift = shifts[i]
        hdr, electron, var, bs, time, Nframe = imcombine(files, maskname,
            options, flat, outname="%s.fits" % (fl),
            shifts=shift, extension=extension)

        hdrs.append(hdr) 
        header = merge_headers(header, hdr)
        epss[hdr['FRAMEID']] = electron/time
        vars[hdr['FRAMEID']] = var
        times[hdr['FRAMEID']] = time
        bss.append(bs)
        Nframes.append(Nframe)

    positions = {}
    i = 0
    for h in hdrs:
        positions[h['FRAMEID']] = i
        i += 1
    posnames = set(positions.keys())
    if plan is None:
        plan = guess_plan_from_positions(posnames)

    num_outputs = len(plan)


    edges, meta = IO.load_edges(maskname, band, options)
    lam = IO.readfits(wavename, options)

    bs = bss[0]

    for i in xrange(num_outputs):
        posname0 = plan[i][0]
        posname1 = plan[i][1]
        info("Handling %s - %s" % (posname0, posname1))
        data = epss[posname0] - epss[posname1]
        Var = vars[posname0] + vars[posname1]
        itime = np.mean([times[posname0], times[posname1]], axis=0)

        p = Pool()
        solutions = p.map(background_subtract_helper, xrange(len(bs.ssl)))
        p.close()

        write_outputs(solutions, itime, header, maskname, band, plan[i], options, target=target)
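
# --- Illustration added for clarity (not part of the original example) ---
# The "plan" argument pairs dither positions (FRAMEID values) whose stacks
# are differenced; each pair yields one "A-B" style output. A hypothetical
# ABA'B' plan, matching the docstring above:
plan = [["A", "B"], ["A'", "B'"]]
# handle_background(filelist, wavename, maskname, band_name, options, plan=plan)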
Example #49
0
def handle_flats(flatlist,
                 maskname,
                 band,
                 options,
                 extension=None,
                 edgeThreshold=450,
                 lampOffList=None,
                 longslit=None):
    '''
    handle_flats is the primary entry point to the Flats module.

    handle_flats takes a list of individual exposure FITS files and creates:
    1. A CRR, dark subtracted, pixel-response flat file.
    2. A set of polynomials that mark the edges of a slit

    Inputs:
    flatlist: A file listing the flat-field exposures to combine
    maskname: The name of a mask
    band: A string indicating the band

    Outputs:

    file {maskname}/flat_2d_{band}.fits -- pixel response flat
    file {maskname}/edges.np
    '''

    tick = time.time()

    # Check
    bpos = np.ones(92) * -1

    #Retrieve the list of files to use for flat creation.
    flatlist = IO.list_file_to_strings(flatlist)
    # Print the filenames to Standard-out
    for flat in flatlist:
        info(str(flat))

    #Determine if flat files headers are in agreement
    for fname in flatlist:

        hdr, dat, bs = IO.readmosfits(fname, options, extension=extension)
        try:
            bs0
        except:
            bs0 = bs

        if np.any(bs0.pos != bs.pos):
            print "bs0: " + str(bs0.pos) + " bs: " + str(bs.pos)
            error("Barset do not seem to match")
            raise Exception("Barsets do not seem to match")

        if hdr["filter"] != band:
            error("Filter name %s does not match header filter name "
                  "%s in file %s" % (band, hdr["filter"], fname))
            raise Exception("Filter name %s does not match header filter name "
                            "%s in file %s" % (band, hdr["filter"], fname))
        for i in xrange(len(bpos)):
            b = hdr["B{0:02d}POS".format(i + 1)]
            if bpos[i] == -1:
                bpos[i] = b
            else:
                if bpos[i] != b:
                    error("Bar positions are not all the same in "
                          "this set of flat files")
                    raise Exception("Bar positions are not all the same in "
                                    "this set of flat files")
    bs = bs0

    # Imcombine the lamps ON flats
    info("Attempting to combine previous files")
    combine(flatlist, maskname, band, options)

    # Imcombine the lamps OFF flats and subtract the off from the On sets
    if lampOffList != None:
        #Retrieve the list of files to use for flat creation.
        lampOffList = IO.list_file_to_strings(lampOffList)
        # Print the filenames to Standard-out
        for flat in lampOffList:
            info(str(flat))
        print "Attempting to combine Lamps off data"
        combine(lampOffList, maskname, band, options, lampsOff=True)
        combine_off_on(maskname, band, options)

    debug("Combined '%s' to '%s'" % (flatlist, maskname))
    info("Comgined to '%s'" % (maskname))
    path = "combflat_2d_%s.fits" % band
    (header, data) = IO.readfits(path, use_bpm=True)

    info("Flat written to %s" % path)

    # Edge Trace
    if bs.long_slit:
        info("Long slit mode recognized")
        info("Central row position:   " + str(longslit["row_position"]))
        info("Upper and lower limits: " + str(longslit["yrange"][0]) + " " +
             str(longslit["yrange"][1]))
        results = find_longslit_edges(data,
                                      header,
                                      bs,
                                      options,
                                      edgeThreshold=edgeThreshold,
                                      longslit=longslit)
    elif bs.long2pos_slit:
        info("Long2pos mode recognized")
        results = find_long2pos_edges(data,
                                      header,
                                      bs,
                                      options,
                                      edgeThreshold=edgeThreshold,
                                      longslit=longslit)
    else:
        results = find_and_fit_edges(data,
                                     header,
                                     bs,
                                     options,
                                     edgeThreshold=edgeThreshold)
    results[-1]["maskname"] = maskname
    results[-1]["band"] = band
    np.save("slit-edges_{0}".format(band), results)
    save_ds9_edges(results, options)

    # Generate Flat
    out = "pixelflat_2d_%s.fits" % (band)
    if lampOffList != None:
        make_pixel_flat(data, results, options, out, flatlist, lampsOff=True)
    else:
        make_pixel_flat(data, results, options, out, flatlist, lampsOff=False)

    info("Pixel flat took {0:6.4} s".format(time.time() - tick))
Example #50
0
def imcombine(files, maskname, options, flat, outname=None, shifts=None,
    extension=None):
    '''
    Combines a list of files into a single stacked image. The imcombine code
    also estimates the read noise as RN/sqrt(numreads) so that the variance
    per frame is equal to (ADU + RN^2), where RN is computed in ADUs.

    Arguments:
        files[]: list of full path to files to combine
        maskname: Name of mask
        options: Options dictionary
        flat[2048x2048]: Flat field (values should all be ~ 1.0)
        outname: If set, will write (see notes below for details)
            eps_[outname].fits: electron/sec file
            itimes_[outname].fits: integration time
            var_[outname].fits: Variance files
        shifts[len(files)]: If set, will "roll" each file by the 
            amount in the shifts vector in pixels. This argument
            is used when telescope tracking is poor. If you need
            to use this, please notify Keck staff about poor 
            telescope tracking.

    Returns 6-element tuple:
        header: The combined header
        electrons [2048x2048]:  e- (in e- units)
        var [2048x2048]: electrons + RN**2 (in e-^2 units)
        bs: The MOSFIRE.Barset instance
        itimes [2048x2048]: itimes (in s units)
        Nframe: The number of frames that contribute to the summed
            arrays above. With 9 or more frames the sigma-clipping
            Cosmic Ray Rejection is used; with 6-8 frames the max/min
            elements are dropped; with fewer frames no rejection is applied.

    Notes:

        header -- fits header
        ADUs -- The mean # of ADUs per frame
        var -- the Variance [in adu] per frame. 
        bs -- Barset
        itimes -- The _total_ integration time in seconds
        Nframe -- The number of frames in a stack.

        
        Thus the number of electrons per second is derived as: 
            e-/sec = (ADUs * Gain / Flat) * (Nframe/itimes)

        The total number of electrons is:
            el = ADUs * Gain * Nframe


    '''

    ADUs = np.zeros((len(files), 2048, 2048))
    itimes = np.zeros((len(files), 2048, 2048))
    prevssl = None
    prevmn = None
    patternid = None
    maskname = None

    header = None

    if shifts is None:
        shifts = np.zeros(len(files))

    warnings.filterwarnings('ignore')
    for i in xrange(len(files)):
        fname = files[i]
        thishdr, data, bs = IO.readmosfits(fname, options, extension=extension)
        itimes[i,:,:] = thishdr["truitime"]

        base = os.path.basename(fname).rstrip(".fits")
        fnum = int(base.split("_")[1])
        
        if shifts[i] == 0:
            ADUs[i,:,:] = data.filled(0.0) / flat
        else:
            ADUs[i,:,:] = np.roll(data.filled(0.0) / flat, np.int(shifts[i]), axis=0)

        ''' Construct Header'''
        if header is None:
            header = thishdr

        header["imfno%3.3i" % (fnum)] =  (fname, "img%3.3i file name" % fnum)

        map(lambda x: rem_header_key(header, x), ["CTYPE1", "CTYPE2", "WCSDIM",
            "CD1_1", "CD1_2", "CD2_1", "CD2_2", "LTM1_1", "LTM2_2", "WAT0_001",
            "WAT1_001", "WAT2_001", "CRVAL1", "CRVAL2", "CRPIX1", "CRPIX2",
            "RADECSYS"])

        for card in header.cards:
            if card == '': continue
            key,val,comment = card
            
            if key in thishdr:
                if val != thishdr[key]:
                    newkey = key + ("_img%2.2i" % fnum)
                    try: header[newkey.rstrip()] = (thishdr[key], comment)
                    except: pass

        ''' Now handle error checking'''

        if maskname is not None:
            if thishdr["maskname"] != maskname:
                error("File %s uses mask '%s' but the stack is of '%s'" %
                    (fname, thishdr["maskname"], maskname))
                raise Exception("File %s uses mask '%s' but the stack is of '%s'" %
                    (fname, thishdr["maskname"], maskname))

        maskname = thishdr["maskname"]
            
        if thishdr["aborted"]:
            error("Img '%s' was aborted and should not be used" %
                    fname)
            raise Exception("Img '%s' was aborted and should not be used" %
                    fname)

        if prevssl is not None:
            if len(prevssl) != len(bs.ssl):
                # todo Improve these checks
                error("The stack of input files seems to be of "
                        "different masks")
                raise Exception("The stack of input files seems to be of "
                        "different masks")
        prevssl = bs.ssl

        if patternid is not None:
            if patternid != thishdr["frameid"]:
                error("The stack should be of '%s' frames only, but "
                        "the current image is a '%s' frame." % (patternid, 
                            thishdr["frameid"]))
                raise Exception("The stack should be of '%s' frames only, but "
                        "the current image is a '%s' frame." % (patternid, 
                            thishdr["frameid"]))

        patternid = thishdr["frameid"]


        if maskname is not None:
            if maskname != thishdr["maskname"]:
                error("The stack should be of CSU mask '%s' frames "
                        "only but contains a frame of '%s'." % (maskname,
                        thishdr["maskname"]))
                raise Exception("The stack should be of CSU mask '%s' frames "
                        "only but contains a frame of '%s'." % (maskname,
                        thishdr["maskname"]))

        maskname = thishdr["maskname"]

        if thishdr["BUNIT"] != "ADU per coadd":
            error("The units of '%s' are not in ADU per coadd and "
                    "this violates an assumption of the DRP. Some new code " 
                    "is needed in the DRP to handle the new units of "
                    "'%s'." % (fname, thishdr["BUNIT"]))
            raise Exception("The units of '%s' are not in ADU per coadd and "
                    "this violates an assumption of the DRP. Some new code " 
                    "is needed in the DRP to handle the new units of "
                    "'%s'." % (fname, thishdr["BUNIT"]))

        ''' Error checking is complete'''
        debug("%s %s[%s]/%s: %5.1f s,  Shift: %i px" % (fname, maskname, patternid,
            header['filter'], np.mean(itimes[i]), shifts[i]))

    warnings.filterwarnings('always')

    # the electrons and el_per_sec arrays are:
    #   [2048, 2048, len(files)] and contain values for
    # each individual frame that is being combined.
    # These need to be kept here for CRR reasons.
    electrons = np.array(ADUs) * Detector.gain 
    el_per_sec = electrons / itimes

    output = np.zeros((2048, 2048))
    exptime = np.zeros((2048, 2048))

    numreads = header["READS0"]
    RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain
    RN = Detector.RN / np.sqrt(numreads)

    # Cosmic ray rejection code begins here. This code constructs the
    # electrons and itimes arrays.
    standard = True
    new_from_chuck = False
    # Chuck Steidel has provided a modified version of the CRR procedure. 
    # to enable it, modify the variables above.
    
    if new_from_chuck and not standard:
        if len(files) >= 5:
            print "Sigclip CRR"
            srt = np.argsort(electrons, axis=0, kind='quicksort')
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            el_per_sec = el_per_sec[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            # Construct the mean and standard deviation by dropping the top and bottom two 
            # electron fluxes. This is temporary.
            mean = np.mean(el_per_sec[1:-1,:,:], axis = 0)
            std = np.std(el_per_sec[1:-1,:,:], axis = 0)

            drop = np.where( (el_per_sec > (mean+std*4)) | (el_per_sec < (mean-std*4)) )
            print "dropping: ", len(drop[0])
            electrons[drop] = 0.0
            itimes[drop] = 0.0

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files) 

        else:
            warning( "With less than 5 frames, the pipeline does NOT perform")
            warning( "Cosmic Ray Rejection.")
            # the "if false" line disables cosmic ray rejection"
            if False: 
                for i in xrange(len(files)):
                    el = electrons[i,:,:]
                    it = itimes[i,:,:]
                    el_mf = scipy.signal.medfilt(el, 5)

                    bad = np.abs(el - el_mf) / np.abs(el) > 10.0
                    el[bad] = 0.0
                    it[bad] = 0.0

                    electrons[i,:,:] = el
                    itimes[i,:,:] = it

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files) 

    if standard and not new_from_chuck:
        if len(files) >= 9:
            info("Sigclip CRR")
            srt = np.argsort(electrons, axis=0, kind='quicksort')
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            el_per_sec = el_per_sec[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            # Construct the mean and standard deviation by dropping the top and bottom two 
            # electron fluxes. This is temporary.
            mean = np.mean(el_per_sec[2:-2,:,:], axis = 0)
            std = np.std(el_per_sec[2:-2,:,:], axis = 0)

            drop = np.where( (el_per_sec > (mean+std*4)) | (el_per_sec < (mean-std*4)) )
            info("dropping: "+str(len(drop[0])))
            electrons[drop] = 0.0
            itimes[drop] = 0.0

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files) 

        elif len(files) > 5:
            warning( "WARNING: Drop min/max CRR")
            srt = np.argsort(el_per_sec,axis=0)
            shp = el_per_sec.shape
            sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]

            electrons = electrons[srt, sti[1], sti[2]]
            itimes = itimes[srt, sti[1], sti[2]]

            electrons = np.sum(electrons[1:-1,:,:], axis=0)
            itimes = np.sum(itimes[1:-1,:,:], axis=0)

            Nframe = len(files) - 2

        else:
            warning( "With less than 5 frames, the pipeline does NOT perform")
            warning( "Cosmic Ray Rejection.")
            # the "if false" line disables cosmic ray rejection"
            if False: 
                for i in xrange(len(files)):
                     el = electrons[i,:,:]
                     it = itimes[i,:,:]
                     # calculate the median image
                     el_mf = scipy.signal.medfilt(el, 5)
                     el_mf_large = scipy.signal.medfilt(el_mf, 15)
                     # LR: this is a modified version I was experimenting with. For the version 
                     #     written by Nick, see the new_from_chuck part of this code
                     # sky sub
                     el_sky_sub = el_mf - el_mf_large
                     # add a constant value
                     el_plus_constant = el_sky_sub + 100

                     bad = np.abs(el - el_mf) / np.abs(el_plus_constant) > 50.0
                     el[bad] = 0.0
                     it[bad] = 0.0

                     electrons[i,:,:] = el
                     itimes[i,:,:] = it

            electrons = np.sum(electrons, axis=0)
            itimes = np.sum(itimes, axis=0)
            Nframe = len(files) 


    ''' Now handle variance '''
    numreads = header["READS0"]
    RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain
    RN = Detector.RN / np.sqrt(numreads)

    var = (electrons + RN**2) 

    ''' Now mask out bad pixels '''
    electrons[data.mask] = np.nan
    var[data.mask] = np.inf

    if "RN" in header:
        error("RN Already populated in header")
        raise Exception("RN Already populated in header")
    header['RN'] = ("%1.3f" % RN, "Read noise in e-")
    header['NUMFRM'] = (Nframe, 'Typical number of frames in stack')


    header['BUNIT'] = 'ELECTRONS/SECOND'
    IO.writefits(np.float32(electrons/itimes), maskname, "eps_%s" % (outname),
                 options, header=header, overwrite=True)

    # Update itimes after division in order to not introduce nans
    itimes[data.mask] = 0.0

    header['BUNIT'] = 'ELECTRONS^2'
    IO.writefits(var, maskname, "var_%s" % (outname),
                 options, header=header, overwrite=True, lossy_compress=True)

    header['BUNIT'] = 'SECOND'
    IO.writefits(np.float32(itimes), maskname, "itimes_%s" % (outname),
                options, header=header, overwrite=True, lossy_compress=True)

    return header, electrons, var, bs, itimes, Nframe
Example #51
0
def find_and_fit_edges(data, header, bs, options, edgeThreshold=450):
    '''
    Given a flat field image, find_and_fit_edges determines the position
    of all slits.

    The function works by starting with a guess at the location for a slit
    edge in the spatial direction (options["first-slit-edge"]).
    
    Starting from the guess, find_edge_pair works out in either direction, 
    measuring the position of the (e.g.) bottom of slit 1 and top of slit 2:


    ------ pixel y value = 2048

    Slit 1 data

    ------ (bottom)
    deadband
    ------ (top)

    Slit N pixel data ....

    ------- (bottom) pixel = 0

    --------------------------------> Spectral direction


    1. At the top of the flat, the slit edge is defined to be at a fixed pixel row (y = 2034)
    2. The code guesses the position of the bottom of the slit, and runs
            find_edge_pair to measure slit edge locations.
    3. A low-order polynomial is fit to the edge locations with
            fit_edge_poly
    4. The top and bottom of the current slit are stored in the
            result list.
    5. The top of the next slit is stored temporarily for the next
            iteration of the for loop.
    6. At the bottom of the flat, the slit edge is defined to be pixel 4.


    options:
    options["edge-order"] -- The order of the polynomial [pixels] edge.
    options["edge-fit-width"] -- The length [pixels] of the edge to 
            fit over

    '''

    # TODO: move hardcoded values into Options.py
    # y is the location to start
    y = 2034
    DY = 44.25

    toc = 0
    ssl = bs.ssl

    slits = []

    top = [0., np.float(Options.npix)]

    start_slit_num = int(bs.msl[0]['Slit_Number']) - 1
    if start_slit_num > 0:
        y -= DY * start_slit_num

    # Count and check that the # of objects in the SSL matches that of the MSL
    # This is purely a safety check
    numslits = np.zeros(len(ssl))
    for i in xrange(len(ssl)):
        slit = ssl[i]
        M = np.where(slit["Target_Name"] == bs.msl["Target_in_Slit"])

        numslits[i] = len(M[0])
    numslits = np.array(numslits)

    if (np.sum(numslits) != CSU.numslits) and (not bs.long_slit) and (
            not bs.long2pos_slit) and (not bs.long2pos_slit_specphot):
        error("The number of allocated CSU slits (%i) does not match "
              " the number of possible slits (%i)." %
              (np.sum(numslits), CSU.numslits))
        raise Exception(
            "The number of allocated CSU slits (%i) does not match "
            " the number of possible slits (%i)." %
            (np.sum(numslits), CSU.numslits))

    # if the mask is a long slit, the default y value will be wrong. Set instead to be the middle
    if bs.long_slit:
        y = 1104

    # now begin steps outlined above
    results = []
    result = {}

    result["Target_Name"] = ssl[0]["Target_Name"]

    # 1
    result["top"] = np.poly1d([y])
    ''' Nomenclature here is confusing:
        
        ----- Edge  -- Top of current slit, bottom of prev slit
        . o ' Data
        ===== Data
        .;.;' Data
        ----- Edge  -- Bottom of current slit, top of next slit
    '''

    topfun = np.poly1d([y])
    xposs_top_this = np.arange(10, 2000, 100)
    yposs_top_this = topfun(xposs_top_this)

    for target in xrange(len(ssl)):

        y -= DY * numslits[target]
        y = max(y, 1)

        info("%2.2i] Finding Slit Edges for %s ending at %4.0i. Slit "
             "composed of %i CSU slits" %
             (target, ssl[target]["Target_Name"], y, numslits[target]))
        ''' First deal with the current slit '''
        hpps = Wavelength.estimate_half_power_points(
            bs.scislit_to_csuslit(target + 1)[0], header, bs)

        if y == 1:
            xposs_bot = [1024]
            xposs_bot_missing = []
            yposs_bot = [4.25]
            botfun = np.poly1d(yposs_bot)
            ok = np.where((xposs_bot > hpps[0]) & (xposs_bot < hpps[1]))
        else:
            (xposs_top_next, xposs_top_next_missing, yposs_top_next, xposs_bot,
             xposs_bot_missing, yposs_bot,
             scatter_bot_this) = find_edge_pair(data,
                                                y,
                                                options["edge-fit-width"],
                                                edgeThreshold=edgeThreshold)

            ok = np.where((xposs_bot > hpps[0]) & (xposs_bot < hpps[1]))
            ok2 = np.where((xposs_bot_missing > hpps[0])
                           & (xposs_bot_missing < hpps[1]))
            xposs_bot = xposs_bot[ok]
            xposs_bot_missing = xposs_bot_missing[ok2]
            yposs_bot = yposs_bot[ok]
            if len(xposs_bot) == 0:
                botfun = np.poly1d(y - DY)
            else:
                (botfun, bot_res, botsd,
                 botok) = fit_edge_poly(xposs_bot, xposs_bot_missing,
                                        yposs_bot, options["edge-order"])

        bot = botfun.c.copy()
        top = topfun.c.copy()

        #4
        result = {}
        result["Target_Name"] = ssl[target]["Target_Name"]
        result["xposs_top"] = xposs_top_this
        result["yposs_top"] = yposs_top_this
        result["xposs_bot"] = xposs_bot
        result["yposs_bot"] = yposs_bot
        result["top"] = np.poly1d(top)
        result["bottom"] = np.poly1d(bot)
        result["hpps"] = hpps
        result["ok"] = ok
        results.append(result)

        #5
        if y == 1:
            break

        next = target + 2
        if next > len(ssl): next = len(ssl)
        hpps_next = Wavelength.estimate_half_power_points(
            bs.scislit_to_csuslit(next)[0], header, bs)

        ok = np.where((xposs_top_next > hpps_next[0])
                      & (xposs_top_next < hpps_next[1]))
        ok2 = np.where((xposs_top_next_missing > hpps_next[0])
                       & (xposs_top_next_missing < hpps_next[1]))

        xposs_top_next = xposs_top_next[ok]
        xposs_top_next_missing = xposs_top_next_missing[ok2]
        yposs_top_next = yposs_top_next[ok]

        if len(xposs_top_next) == 0:
            topfun = np.poly1d(y)
        else:
            (topfun, topres, topsd,
             ok) = fit_edge_poly(xposs_top_next, xposs_top_next_missing,
                                 yposs_top_next, options["edge-order"])

        xposs_top_this = xposs_top_next
        xposs_top_this_missing = xposs_top_next_missing
        yposs_top_this = yposs_top_next

    results.append({"version": options["version"]})

    return results
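
# --- Illustration added for clarity (not part of the original example) ---
# A minimal sketch of the edge-search bookkeeping above: the search row starts
# near the detector top (y = 2034) and steps down by DY = 44.25 pixels for
# every CSU slit that a science slit occupies. The slit allocation below is
# hypothetical.
y, DY = 2034.0, 44.25
numslits = [3, 2, 4]   # CSU slits per science slit (toy values)
for target, n in enumerate(numslits):
    y -= DY * n
    y = max(y, 1)
    print("science slit %i: bottom edge searched near row %4.0f" % (target, y))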
Example #52
0
def handle_background(filelist, wavename, maskname, band_name, options, shifts=None, plan=None, extension=None, target='default'): 
    '''
    Perform difference imaging and subtract residual background.

    The plan looks something like: [['A', 'B']]
    In this case, the number of output files is equal to the length of the list (1).

    If you choose to use an ABA'B' pattern then the plan will be: [["A", "B"], ["A'", "B'"]]
    the background subtraction code will make and handle two files, "A-B" and "A'-B'".
    '''
    
    global header, bs, edges, data, Var, itime, lam, sky_sub_out, sky_model_out, band

    band = band_name

    flatname = "pixelflat_2d_%s.fits" % band_name
    hdr, flat = IO.readfits("pixelflat_2d_%s.fits" % (band_name), options)

    if np.abs(np.median(flat) - 1) > 0.1:
        error("Flat seems poorly behaved.")
        raise Exception("Flat seems poorly behaved.")

    '''
        This next section of the code figures out the observing plan
        and then deals with the bookkeeping of sending the plan
        to the background subtractor.
    '''

    hdrs = []
    epss = {}
    vars = {}
    bss = []
    times = {}
    Nframes = []

    i = 0
    header = pf.Header()
    for i in xrange(len(filelist)):
        fl = filelist[i]
        files = IO.list_file_to_strings(fl)
        info("Combining")
        if shifts is None: shift = None
        else: shift = shifts[i]
        hdr, electron, var, bs, time, Nframe = imcombine(files, maskname,
            options, flat, outname="%s.fits" % (fl),
            shifts=shift, extension=extension)

        hdrs.append(hdr) 
        header = merge_headers(header, hdr)
        epss[hdr['FRAMEID']] = electron/time
        vars[hdr['FRAMEID']] = var
        times[hdr['FRAMEID']] = time
        bss.append(bs)
        Nframes.append(Nframe)

    positions = {}
    i = 0
    for h in hdrs:
        positions[h['FRAMEID']] = i
        i += 1
    posnames = set(positions.keys())
    if plan is None:
        plan = guess_plan_from_positions(posnames)

    num_outputs = len(plan)


    edges, meta = IO.load_edges(maskname, band, options)
    lam = IO.readfits(wavename, options)

    bs = bss[0]

    for i in xrange(num_outputs):
        posname0 = plan[i][0]
        posname1 = plan[i][1]
        info("Handling %s - %s" % (posname0, posname1))
        data = epss[posname0] - epss[posname1]
        Var = vars[posname0] + vars[posname1]
        itime = np.mean([times[posname0], times[posname1]], axis=0)

        p = Pool()
        solutions = p.map(background_subtract_helper, xrange(len(bs.ssl)))
        p.close()

        write_outputs(solutions, itime, header, maskname, band, plan[i], options, target=target)