Example #1
def generate_mangle_polyfile(args=None):
    """
    Command line call to generate a Mangle polygon file in vertices format

    Mangle reference: http://space.mit.edu/~molly/mangle/

    Parameters
    ----------
    args : list (optional)
        list of arguments to parse. If None, grab
        from command line

    """

    parser = argparse.ArgumentParser(description="""Generate a polygon file
                                     suitable for use in the Mangle mask
                                     software in vertices format. A line
                                     contains the four corners of an IFU in ra,
                        dec. You can pass this to suitable Mangle
                        commands, like poly2poly, with the -iv4d
                        input-type flag.""")

    parser.add_argument("shot_file",
                        help="""An ascii file containing the
                        header 'SHOTID RACEN DECCEN PARANGLE FPLANE' and
                        appropriate entries. Coordinates should be given in
                        degrees.""")

    parser.add_argument("out_file",
                        help="""File name for the Mangle compatible polygon
                        file""")

    parser.add_argument("rot_offset",
                        help="Rotation difference to add to PARANGLE",
                        default=0.0,
                        type=float)

    opts = parser.parse_args(args=args)

    tables = []

    try:
        table_shots = Table.read(opts.shot_file, format='ascii')
    except IOError as e:
        print("Problem opening input file {:s}".format(opts.shot_file))
        raise e

    fplane_name_last = ""
    for row in table_shots:

        if row['FPLANE'] != fplane_name_last or not fplane:
            fplane = FPlane(row['FPLANE'])
            fplane_name_last = row['FPLANE']

        # Carry out required changes to astrometry
        rot = 360.0 - (row['PARANGLE'] + 90.0 + opts.rot_offset)
        tp = TangentPlane(row['RACEN'], row['DECCEN'], rot)

        table = generate_ifu_corner_ra_decs(tp, fplane)
        print(row['SHOTID'])
        table['shotid'] = row['SHOTID']
        tables.append(table)

    table_out = vstack(tables)
    table_out.write(opts.out_file,
                    format='ascii.commented_header',
                    comment='#')
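
A minimal usage sketch (the file names are hypothetical; the shot file must
contain the SHOTID RACEN DECCEN PARANGLE FPLANE columns described above)::

    generate_mangle_polyfile(["shots.txt", "ifu_corners.vert", "0.0"])
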
def generate_sencube_hdf(datevshot,
                         ra,
                         dec,
                         pa,
                         fplane_output_dir,
                         nx,
                         ny,
                         nz,
                         ifusize,
                         skip_ifus=[
                             "000", "600", "555", "601", "602", "603", "604",
                             "610", "611", "612", "613", "614", "615", "616"
                         ],
                         hdf_filename=None):
    """
    Generate an empty real or mock sensitivity HDF5 container,
    with the proper astrometry in the cubes. Real containers
    are of the SensitivityCubeHDF5Container class and are
    written to a file. The mock containers are of the
    HDF5MockContainer class and do not have a real HDF5
    file - useful for simulations.

    Parameters
    ----------
    datevshot : str
        the date and shot in YYYYMMDDvSSS
        format, used to get the correct
        focal plane file
    ra, dec, pa : float
        the astrometry of the shot
    fplane_output_dir : str
        directory to output fplane files to
    nx, ny, nz : int
        dimensions of each cube in pixels,
        along x, y and wavelength
    ifusize : float
        size of x,y of IFU in arcsec
    skip_ifus : list (optional)
        the IFUSLOTS to skip
    hdf_filename : str (optional)
        if passed, generate a real
        SensitivityCubeHDF5Container
        with this filename. If None,
        generate a mock container.

    Returns
    -------
    hdfcont : SensitivityCubeHDF5Container or HDF5MockContainer
       a real or mock sensitivity cube container depending on
       the ``hdf_filename`` parameter
    """
    if hdf_filename:
        hdfcont = SensitivityCubeHDF5Container(hdf_filename, mode="w")
    else:
        hdfcont = HDF5MockContainer()

    # Generate the shot astrometry
    rot = 360.0 - (pa + 90.)
    tp = TangentPlane(ra, dec, rot)

    date = datevshot[:8]
    fplane_bn = "{:s}_fplane.txt".format(date)
    fplane_fn = join(fplane_output_dir, fplane_bn)

    if not isfile(fplane_fn):
        get_fplane(fplane_fn, datestr=str(date))

    fplane = FPlane(fplane_fn)

    for ifuslot, ifu in iteritems(fplane.difus_ifuslot):

        if ifuslot in skip_ifus:
            continue

        ifuslot_str = "ifuslot_" + ifuslot
        # Note x, y swapped in focal fplane
        ra_ifu, dec_ifu = tp.xy2raDec(ifu.y, ifu.x)
        scube = create_sensitivity_cube_from_astrom(ra_ifu.item(),
                                                    dec_ifu.item(), pa, nx, ny,
                                                    nz, ifusize)
        hdfcont.add_sensitivity_cube(datevshot, ifuslot_str, scube)

    return hdfcont
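
A usage sketch generating a mock container (the datevshot, coordinates and
cube dimensions are illustrative values, not taken from a real shot)::

    hdf_mock = generate_sencube_hdf("20190209v027", 150.0, 2.0, 254.6,
                                    "./fplanes", nx=31, ny=31, nz=1036,
                                    ifusize=62.0)
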
def generate_ifu_mask(output_fn, survey_hdf, badshots_file, ramin, ramax, decmin, decmax, specific_shot=None,
                      xsize=25.0, ysize=25.0, other_cuts={}, specific_field=None):
    """
    Generate a mask of IFU corners from the survey HDF 

    Parameters
    ----------
    output_fn : str
        a file to output the ra/dec corners to
    survey_hdf : str
        path to the survey HDF
    badshots_file : str
        path to the bad shots file
    ramin, ramax, decmin, decmax : float
        restrict the mask to a subregion
    specific_shot : str (Optional)
        overrides the ra and dec range
        and instead outputs a mask
        only for the given shotid
    xsize, ysize : float
        half of the size of the IFU in x and y
        in arcseconds. Vertices are produced
        at +/-xsize and +/-ysize. Optional,
        default xsize=ysize=25.0
    other_cuts : dict
        dictionary mapping a shot property to a
        2 element list of the minimum and maximum
        allowed values
    specific_field : str (Optional)
        if passed, only include shots from
        the given survey field
    """
    # Read in the survey file
    survey_hdf = tb.open_file(survey_hdf)
       
    # Read bad shots file
    table_bad_shots = Table.read(badshots_file, format="ascii", names = ["shotid"])
    bad_shots = array(table_bad_shots["shotid"])

    if specific_shot is not None:
        survey_ttable = survey_hdf.root.Survey.read_where('shotid == specific_shot')
    elif specific_field is not None:
        survey_ttable = survey_hdf.root.Survey.read_where('field == specific_field')
    else:   
        query = '(ra < ramax) & (ra > ramin) & (dec < decmax) & (dec > decmin)'

        for param, lims in iteritems(other_cuts):
            query += ' & ({:s} > {:f}) & ({:s} < {:f})'.format(param, lims[0], param, lims[1])

        print(query) 
        survey_ttable = survey_hdf.root.Survey.read_where(query)

    # Loop over the datevshots and see if there are bad amps
    polys = []
    shids = []
    for line in survey_ttable:
        print(line)
        # skip bad shots
        if line["shotid"] in bad_shots:
            continue

        date = line["date"]

        rot = 360.0 - (line["pa"] + 90.)
        tp = TangentPlane(line["ra"], line["dec"], rot)
        fplane_fn = "fplanes/{:d}_fplane.txt".format(date)
   
        if not isfile(fplane_fn):
            get_fplane(fplane_fn, datestr=str(date))

        fplane = FPlane(fplane_fn)

        rect = [[-1.0*xsize, -1.0*xsize, xsize, xsize],
                [-1.0*ysize, ysize, ysize, -1.0*ysize]]
    
        for ifu in fplane.ifus: 
            x = array(rect[0]) + ifu.y  
            y = array(rect[1]) + ifu.x
            ra, dec = tp.xy2raDec(x, y)
            polys.append([ra[0], dec[0], ra[1], dec[1], 
                          ra[2], dec[2], ra[3], dec[3]])  
            shids.append(line["shotid"])
 
    # Should now have a list of polygons to output
    with open(output_fn, "w") as fp:
        for poly, shid in zip(polys, shids):
            fp.write("{:7.6f} {:7.6f} {:7.6f} {:7.6f} {:7.6f} {:7.6f} {:7.6f} {:7.6f} {:d}\n".format(*poly, shid))
def generate_bad_amp_mask(output_fn, survey_hdf, badamps_file, badshots_file, 
                          ramin, ramax, decmin, decmax, specific_shot=None):

    """
    Generate a Mangle-compatible list of ra/dec pairs corresponding
    to the corners of the bad amplifiers on the sky. The amplifiers
    are split into squares and rectangles to better follow their 
    shape.

    Parameters
    ----------
    output_fn : str
        a file to output the ra/dec corners to
    survey_hdf : str
        path to the survey HDF
    badamps_file : str
        path to the bad amps file
    ramin, ramax, decmin, decmax : float
        restrict the mask to a subregion
    specific_shot : str (Optional)
        overrides the ra and dec range
        and instead outputs a bad amp
        mask only for the given shotid

    """

    # Read in the survey file
    survey_hdf = tb.open_file(survey_hdf)
   
    if specific_shot is not None:
        # 20190209027
        survey_ttable = survey_hdf.root.Survey.read_where('shotid == specific_shot')
    else:    
        survey_ttable = survey_hdf.root.Survey.read_where('(ra < ramax) & (ra > ramin) & (dec < decmax) & (dec > decmin)')

    # Read in the bad amps
    table_bad_amps = Table.read(badamps_file, format="ascii", names = ["IFUSLOT", "AMP", "multiframe", 
                                                                       "start", "end"])
    # Read bad shots file
    table_bad_shots = Table.read(badshots_file, format="ascii", names = ["shotid"])
    bad_shots = array(table_bad_shots["shotid"])

    # Loop over the datevshots and see if there are bad amps
    polys = []
    amps = []
    for line in survey_ttable:

        date = line["date"]
        fplane_fn = "fplanes/{:d}_fplane.txt".format(date)

        if not isfile(fplane_fn):
            get_fplane(fplane_fn, datestr=str(date))

        fplane = FPlane(fplane_fn)
 
        if line["shotid"] in bad_shots:
            # if shot bad mask all IFUS
            print("Masking whole bad shot found {:d}".format(line["shotid"]))
            bad_amps_here = Table()
            bad_amps_here["IFUSLOT"] = [int(x) for x in fplane.ifuslots]
            bad_amps_here["AMP"] = "AA"
        else:
            # otherwise just mask bad amps
            sel = (table_bad_amps["start"] <= date) & (table_bad_amps["end"] >= date) 
            bad_amps_here = table_bad_amps[sel]
    
        # If any, grab the focal plane and generate 
        # a tangent plane for the astrometry
        if len(bad_amps_here) > 0:
           print("{:d} has bad amps. Adding to mask".format(line["shotid"]))
    
           rot = 360.0 - (line["pa"] + 90.)
           tp = TangentPlane(line["ra"], line["dec"], rot)
                  
           for bad_amp in bad_amps_here:
               try:
                   ifu = fplane.by_ifuslot("{:03d}".format(bad_amp["IFUSLOT"]))
               except NoIFUError:
                   print("Warning. IFU {:d} not found for dateobs {:d}".format(bad_amp["IFUSLOT"], line["shotid"]))
                   continue
    
               if bad_amp["AMP"] == "AA":
                   # whole IFU with a little extra border
                   rects_to_mask = [[[-30.0, -30.0, 30.0, 30.0],
                                     [-30.0, 30.0, 30.0, -30.0]]]
               else:
                   # Check if the amps in this IFU are swapped around
                   ampkey = "{:03d}{:s}".format(bad_amp["IFUSLOT"], bad_amp["AMP"])
                   if ampkey in swapped_around_amps:
                       amp = swapped_around_amps[ampkey]
                   else:
                       amp = bad_amp["AMP"]
                    
                   # coordinates of amplifier for default dither and IFU cen
                   rects_to_mask = amp_corners[amp]
    
               for rect in rects_to_mask:
                   # Flip is correct
                   x = array(rect[0]) + ifu.y  
                   y = array(rect[1]) + ifu.x
                   ra, dec = tp.xy2raDec(x, y)
                   polys.append([ra[0], dec[0], ra[1], dec[1], 
                                 ra[2], dec[2], ra[3], dec[3]])
                  
                   amps.append(bad_amp["AMP"])
    
    # Should now have a list of polygons to output
    with open(output_fn, "w") as fp:
        for poly, amp in zip(polys, amps):
            fp.write("{:7.6f} {:7.6f} {:7.6f} {:7.6f} {:7.6f} {:7.6f} {:7.6f} {:7.6f} {:s}\n".format(*poly, amp))
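
A usage sketch (all paths and the coordinate range are hypothetical)::

    generate_bad_amp_mask("badamp_mask.txt", "survey.h5", "badamps.list",
                          "badshots.list", 149.0, 151.0, 1.0, 3.0)
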
def generate_bad_amp_mask_fits(output_fn, survey_hdf, badamps_fits, ramin, ramax, decmin, 
                               decmax, specific_shot = None):
    """
    Generate a Mangle-compatible list of ra/dec pairs corresponding
    to the corners of the bad amplifiers on the sky. The amplifiers
    are split into squares and rectangles to better follow their 
    shape.

    Parameters
    ----------
    output_fn : str
        a file to output the ra/dec corners to
    survey_hdf : str
        path to the survey HDF
    badamps_fits : str
        path to the bad amps FITS file
    ramin, ramax, decmin, decmax : float
        restrict the mask to a subregion
    specific_shot : str (Optional)
        overrides the ra and dec range
        and instead outputs a bad amp
        mask only for the given shotid
    """
    # Read in the survey file
    survey_hdf = tb.open_file(survey_hdf)
   
    if specific_shot is not None:
        # 20190209027
        survey_ttable = survey_hdf.root.Survey.read_where('shotid == specific_shot')
    else:    
        survey_ttable = survey_hdf.root.Survey.read_where('(ra < ramax) & (ra > ramin) & (dec < decmax) & (dec > decmin)')

    # Read in the bad amps
    table_bad_amps = Table.read(badamps_fits)
    table_bad_amps = table_bad_amps[table_bad_amps["flag"] == 0]
    pattern = re.compile("multi_[0-9]{3}_([0-9]{3})_[0-9]{3}_([RLU]{2})") 
    table_bad_amps["AMP"] = [pattern.findall(x)[0][1] for x in table_bad_amps["multiframe"]]
    table_bad_amps["IFUSLOT"] = [pattern.findall(x)[0][0] for x in table_bad_amps["multiframe"]]

    # Loop over the datevshots and see if there are bad amps
    polys = []
    amps = []
    for line in survey_ttable:
        bad_amps_here = table_bad_amps[table_bad_amps["shotid"] == line["shotid"]]

        # If any, grab the focal plane and generate 
        # a tangent plane for the astrometry
        if len(bad_amps_here) > 0:
            #print("{:d} has bad amps. Adding to mask".format(line["shotid"]))
    
            date = line["date"]
            fplane_fn = "fplanes/{:d}_fplane.txt".format(date)

            if not isfile(fplane_fn):
                get_fplane(fplane_fn, datestr=str(date))

            fplane = FPlane(fplane_fn)
    

            rot = 360.0 - (line["pa"] + 90.)
            tp = TangentPlane(line["ra"], line["dec"], rot)
                   
            for bad_amp in bad_amps_here:
                try:
                    ifu = fplane.by_ifuslot("{:s}".format(bad_amp["IFUSLOT"]))
                except NoIFUError:
                    print("Warning. IFU {:s} not found for dateobs {:d}".format(bad_amp["IFUSLOT"], line["shotid"]))
                    continue
    
                # Check if the amps in this IFU are swapped around
                ampkey = "{:s}{:s}".format(bad_amp["IFUSLOT"], bad_amp["AMP"])
                if ampkey in swapped_around_amps:
                    amp = swapped_around_amps[ampkey]
                else:
                    amp = bad_amp["AMP"]
                 
                # coordinates of amplifier for default dither and IFU cen
                rects_to_mask = amp_corners[amp]
    
                for rect in rects_to_mask:
                    # Flip is correct
                    x = array(rect[0]) + ifu.y  
                    y = array(rect[1]) + ifu.x
                    ra, dec = tp.xy2raDec(x, y)
                    polys.append([ra[0], dec[0], ra[1], dec[1], 
                                  ra[2], dec[2], ra[3], dec[3]])
                   
                    amps.append(bad_amp["AMP"])
    
    # Should now have a list of polygons to output
    with open(output_fn, "w") as fp:
        for poly, amp in zip(polys, amps):
            fp.write("{:7.6f} {:7.6f} {:7.6f} {:7.6f} {:7.6f} {:7.6f} {:7.6f} {:7.6f} {:s}\n".format(*poly, amp))
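
A sketch of the multiframe parsing used above (the multiframe string is an
illustrative example, not real data)::

    import re

    pattern = re.compile("multi_[0-9]{3}_([0-9]{3})_[0-9]{3}_([RLU]{2})")
    ifuslot, amp = pattern.findall("multi_301_015_038_LL")[0]
    # ifuslot -> "015", amp -> "LL"
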
Example #6
class ShotSensitivity(object):
    """
    Generate completeness estimates for a shot
    on the fly, using the Extract class. This
    is written to be as backward compatible as
    possible with the user interface of
    hdf5_sensitivity_cubes:SensitivityCubeHDF5Container

    A lot of this is adapted from
    `hetdex_tools/get_spec.py`, `hetdex_api/extract.py`
    and scripts written by Erin Mentuch Cooper.

    The aperture correction code was adapted
    from 'Remedy' by Greg Zeimann
    (grzeimann/Remedy).
   
    Parameters
    ----------
    datevshot : str
        the name of the shot in the form
        YYYYMMDDvSSS
    release : str (Optional)
        The name of the release e.g. hdr2.1
        defaults to latest
    flim_model : str
        The flux limit model to convert
        the noise to completeness, defaults
        to the latest (which might not be
        compatible with the release, see README)
    rad : float
        A radius in arcseconds to grab fibers
        from when computing the flux limit, 
        default 3.5
    ffsky : boolean
        Full frame sky subtraction (default: False)
    wavenpix : int
        Number of wave pixels either side of the
        pixel the source is on to add in
        quadrature, or sets the size of tophat 
        convolution when producing data cubes as 
        2*wavenpix + 1 (default 3)
    d25scale : float
        Sets the multiplier for the galaxy masks
        applied (default 3.0)
    sclean_bad : bool
        Replace bad data using the sclean
        tool (see hetdex_api.extract:Extract)
    verbose : bool
        Print information about the flux limit model
        to the screen
    """
    def __init__(self,
                 datevshot,
                 release=None,
                 flim_model=None,
                 rad=3.5,
                 ffsky=False,
                 wavenpix=3,
                 d25scale=3.0,
                 verbose=False,
                 sclean_bad=True,
                 log_level="WARNING"):

        self.conf = HDRconfig()
        self.extractor = Extract()
        self.shotid = int(datevshot.replace("v", ""))
        self.date = datevshot[:8]
        self.rad = rad
        self.ffsky = ffsky
        self.wavenpix = wavenpix
        self.sclean_bad = sclean_bad

        logger = logging.getLogger(name="ShotSensitivity")
        logger.setLevel(log_level)

        if verbose:
            # verbose is deprecated in favour of log_level; map it to
            # DEBUG-level logging so it keeps working
            logger.warning(
                "Using verbose is deprecated, set log_level instead")
            logger.setLevel("DEBUG")

        logger.info("shotid: {:d}".format(self.shotid))

        if not release:
            self.release = self.conf.LATEST_HDR_NAME
        else:
            self.release = release

        logger.info("Data release: {:s}".format(self.release))
        self.survey = Survey(survey=self.release)

        # Set up flux limit model
        self.f50_from_noise, self.sinterp, interp_sigmas \
                                       = return_flux_limit_model(flim_model,
                                                                 cache_sim_interp=False,
                                                                 verbose=verbose)

        # Generate astrometry for this shot
        survey_sel = (self.survey.shotid == self.shotid)
        self.shot_pa = self.survey.pa[survey_sel][0]
        self.shot_ra = self.survey.ra[survey_sel][0]
        self.shot_dec = self.survey.dec[survey_sel][0]
        rot = 360.0 - (self.shot_pa + 90.)
        self.tp = TangentPlane(self.shot_ra, self.shot_dec, rot)

        #Set up masking
        logger.info("Using d25scale {:f}".format(d25scale))
        self.setup_mask(d25scale)

        # Set up spectral extraction
        if self.release == "hdr1":
            fwhm = self.survey.fwhm_moffat[survey_sel][0]
        else:
            fwhm = self.survey.fwhm_virus[survey_sel][0]

        logger.info("Using Moffat PSF with FWHM {:f}".format(fwhm))
        self.moffat = self.extractor.moffat_psf(fwhm, 3. * rad, 0.25)
        self.extractor.load_shot(self.shotid, fibers=True, survey=self.release)

        # Set up the focal plane astrometry
        fplane_table = self.extractor.shoth5.root.Astrometry.fplane

        # Bit of a hack to avoid changing pyhetdex
        with NamedTemporaryFile(mode='w') as tpf:
            for row in fplane_table.iterrows():
                tpf.write(
                    "{:03d} {:8.5f} {:8.5f} {:03d} {:03d} {:03d} {:8.5f} {:8.5f}\n"
                    .format(row['ifuslot'], row['fpx'], row['fpy'],
                            row['specid'], row['specslot'], row['ifuid'],
                            row['ifurot'], row['platesc']))
            tpf.seek(0)
            self.fplane = FPlane(tpf.name)

    def setup_mask(self, d25scale):
        """
        Set up the masking, to speed up checking
        if sources are in the mask later. This
        is run at initialisation, so you only
        need to run it again to change `d25scale`

        Parameters
        ----------
        d25scale : float
            Sets the multiplier for the galaxy
            masks applied (the class constructor
            default is 3.0)
        """

        logger = logging.getLogger(name="ShotSensitivity")

        # see if this is a bad shot
        #print("Bad shot from ", self.conf.badshot)
        badshot = loadtxt(self.conf.badshot, dtype=int)
        badtpshots = loadtxt(self.conf.lowtpshots, dtype=int)
        if (self.shotid in badshot) or (self.shotid in badtpshots):
            logger.warning("Shot is in the bad shot list. Making mask zero everywhere")
            self.badshot = True
        else:
            self.badshot = False

        # set up bad amps
        logger.info("Bad amps from {:s}".format(self.conf.badamp))
        self.bad_amps = Table.read(self.conf.badamp)
        sel_shot = (self.bad_amps["shotid"] == self.shotid)
        self.bad_amps = self.bad_amps[sel_shot]

        # set up galaxy mask
        logger.info("Galaxy mask from {:s}".format(self.conf.rc3cat))
        galaxy_cat = Table.read(self.conf.rc3cat, format='ascii')
        gal_coords = SkyCoord(galaxy_cat['Coords'], frame='icrs')
        shot_coords = SkyCoord(ra=self.shot_ra, dec=self.shot_dec, unit="deg")
        sel_reg = where(shot_coords.separation(gal_coords) < 1. * u.deg)[0]

        self.gal_regions = []
        if len(sel_reg) > 0:
            for idx in sel_reg:
                self.gal_regions.append(
                    create_gal_ellipse(galaxy_cat,
                                       row_index=idx,
                                       d25scale=d25scale))

        # set up meteor mask
        # check if there are any meteors in the shot:
        logger.info("Meteors from {:s}".format(self.conf.meteor))
        self.met_tab = Table.read(self.conf.meteor, format="ascii")
        self.met_tab = self.met_tab[self.shotid == self.met_tab["shotid"]]

    def extract_ifu_sensitivity_cube(self,
                                     ifuslot,
                                     nx=31,
                                     ny=31,
                                     ifusize=62,
                                     generate_sigma_array=True,
                                     cache_sim_interp=True):
        """       
        Extract the sensitivity cube
        from IFU `ifuslot`

        Parameters
        ----------
        ifuslot : string
            the IFU slot to extract
        nx, ny : int
            the dimensions in pixels 
            of the cube (default 31,31)
        ifusize : float
            the length of the side of
            the cube in arcseconds,
            default is 62 arcseconds
        generate_sigma_array : bool
            fill the 3D array of noise values;
            this makes the call much slower, so
            set it to False if you only need
            the cube for its astrometry
            (default: True)
        cache_sim_interp : bool
            cache the simulation interpolator
            to speed up execution (default: True)

        Returns
        -------
        scube : hetdex_api.flux_limits.sensitivity_cube:SensitivityCube
            the sensitivity cube
 
        """

        waves = self.extractor.get_wave()
        wrange = [waves[0], waves[-1]]
        nz = len(waves)

        pa = self.shot_pa
        ifu = self.fplane.difus_ifuslot[ifuslot.replace("ifuslot_", "")]
        ra_ifu, dec_ifu = self.tp.xy2raDec(ifu.y, ifu.x)

        scube = create_sensitivity_cube_from_astrom(
            float(ra_ifu),
            float(dec_ifu),
            pa,
            nx,
            ny,
            nz,
            ifusize,
            wrange=wrange,
            cache_sim_interp=cache_sim_interp)

        if generate_sigma_array:

            ix, iy = meshgrid(arange(0, nx, 1.0), arange(0, ny, 1.0))

            all_ra, all_dec, junk = scube.wcs.all_pix2world(
                ix.ravel(), iy.ravel(), [500.], 0)
            noises, norm, mask = self.get_f50(all_ra,
                                              all_dec,
                                              None,
                                              1.0,
                                              direct_sigmas=True)
            sigmas = noises.ravel(order="F").reshape(nz, ny, nx)

            mask = logical_not(mask.reshape(ny, nx))
            mask3d = repeat(mask[newaxis, :, :], sigmas.shape[0], axis=0)
            scube.sigmas = MaskedArray(sigmas, mask=mask3d, fill_value=999.0)

        return scube
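
    # Example call (illustrative: ``shot_sens`` is assumed to be a
    # ShotSensitivity instance and "ifuslot_047" an IFU present in this
    # shot's focal plane):
    #
    # >>> scube = shot_sens.extract_ifu_sensitivity_cube(
    # ...     "ifuslot_047", generate_sigma_array=False)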

    def get_f50(self,
                ra,
                dec,
                wave,
                sncut,
                direct_sigmas=False,
                nmax=5000,
                return_amp=False,
                linewidth=None):
        """
        Return the flux at 50% completeness for the
        input positions. Most of this is cut and
        pasted from `hetdex_tools/get_spec.py` and
        the old sensitivity_cube:SensitivityCube
        class.

        This method splits the input up into chunks
        of at most `nmax` sources to save memory

        Parameters
        ----------
        ra, dec : array 
            right ascension and dec in degrees
        wave : array 
            wavelength in Angstroms. If None,
            then return flux limits for
            all wave bins.
        sncut : float
            cut in detection significance 
            that defines this catalogue
        direct_sigmas : bool
            return the noise values directly
            without passing them through
            the noise to 50% completeness 
            flux (default = False)
        nmax : int
            maximum number of sources to
            consider at once; larger inputs
            are split into chunks and looped over
        return_amp : bool
            if True return amplifier information
            for the closest fiber to each source
            (default = False)
        linewidth : array
            optionally pass the linewidth of
            the source (in AA) to activate the linewidth
            dependent part of the completeness
            model (default = None).

        Returns
        -------
        f50s : array
            flux at 50% completeness. Positions
            outside the data are given 999. If None
            was passed for wave this is
            a 2D array over the positions and
            all wavelengths
        norm : array
            the aperture corrections
        mask : array
            Only returned if `wave=None`. This
            mask is True where the ra/dec
            positions passed are in good
            regions of data
        amp : array
            Only returned if `return_amp=True`;
            an array of amplifier information
            for the closest fiber to each source
        """
        wave_passed = wave is not None

        try:
            nsrc = len(ra)

            if wave_passed:
                # Trim stuff very far away
                gal_coords = SkyCoord(ra=ra, dec=dec, unit="deg")
                shot_coords = SkyCoord(ra=self.shot_ra,
                                       dec=self.shot_dec,
                                       unit="deg")

                sel = array(shot_coords.separation(gal_coords) < 2.0 * u.deg)

                ra_sel = array(ra)[sel]
                dec_sel = array(dec)[sel]
                wave_sel = array(wave)[sel]
                nsel = len(ra_sel)
            else:
                # If not passing wave always loop
                # over all ra/dec in range
                ra_sel = ra
                dec_sel = dec
                wave_sel = None
                nsel = len(ra)

            nsplit = int(ceil(float(nsel) / float(nmax)))

        except TypeError as e:

            # If the user does not pass arrays
            nsplit = 1
            nsrc = 1
            nsel = 1
            sel = True
            ra_sel = array([ra])
            dec_sel = array([dec])
            wave_sel = array([wave])

        # Array to store output actually in the shot
        f50s_sel = []
        mask_sel = []
        amp_sel = []
        norm_sel = []

        wave_rect = self.extractor.get_wave()
        pixsize_aa = wave_rect[1] - wave_rect[0]

        # This will give 999 once the noise is scaled suitably
        badval = 999 * 1e17 / pixsize_aa

        # Arrays to store full output
        f50s = badval * ones(nsrc)
        mask = ones(nsrc)
        norm = ones(nsrc)
        amp = array(["notinshot"] * nsrc)

        if nsel > 0:
            for i in range(nsplit):

                tra = ra_sel[i * nmax:(i + 1) * nmax]
                tdec = dec_sel[i * nmax:(i + 1) * nmax]

                if wave_passed:
                    twave = wave_sel[i * nmax:(i + 1) * nmax]
                    if not self.badshot:
                        tf50s, tamp, tnorm = self._get_f50_worker(
                            tra,
                            tdec,
                            twave,
                            sncut,
                            direct_sigmas=direct_sigmas,
                            linewidth=linewidth)
                    else:
                        tamp = ["bad"] * len(tra)
                        tf50s = [badval] * len(tra)
                        tnorm = [1.0] * len(tra)
                else:
                    # if bad shot then the mask is all set to zero
                    tf50s, tmask, tamp, tnorm = \
                                      self._get_f50_worker(tra, tdec, None, sncut,
                                                          direct_sigmas = direct_sigmas,
                                                          linewidth = linewidth)

                    mask_sel.extend(tmask)

                f50s_sel.extend(tf50s)
                amp_sel.extend(tamp)
                norm_sel.extend(tnorm)

        if return_amp:
            if wave_passed:

                # copy to output
                f50s[sel] = f50s_sel
                amp[sel] = amp_sel
                norm[sel] = norm_sel

                return f50s, norm, amp
            else:
                return array(f50s_sel), array(norm_sel), array(
                    mask_sel), array(amp_sel)
        else:
            if wave_passed:
                f50s[sel] = f50s_sel
                norm[sel] = norm_sel

                return f50s, norm
            else:
                return array(f50s_sel), array(norm_sel), array(mask_sel)
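
    # Return-shape sketch (illustrative; ``shot_sens`` is a ShotSensitivity
    # instance and 4.8 an assumed S/N cut):
    #
    # >>> f50, norm = shot_sens.get_f50(ras, decs, waves, 4.8)
    # >>> sigmas, norm, mask = shot_sens.get_f50(ras, decs, None, 4.8,
    # ...                                        direct_sigmas=True)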

    def _get_f50_worker(self,
                        ra,
                        dec,
                        wave,
                        sncut,
                        direct_sigmas=False,
                        linewidth=None):
        """
        Return the flux at 50% completeness for the
        input positions. Most of this is cut and
        pasted from `hetdex_tools/get_spec.py` and
        the old sensitivity_cube:SensitivityCube
        class.

        Parameters
        ----------
        ra, dec : array 
            right ascension and dec in degrees
        wave : array 
            wavelength in Angstroms. If None,
            then return flux limits for
            all wave bins.
        sncut : float
            cut in detection significance 
            that defines this catalogue
        direct_sigmas : bool
            return the noise values directly
            without passing them through
            the noise to 50% completeness 
            flux
        linewidth : array
            optionally pass the linewidth of
            the source (in AA) to activate the linewidth
            dependent part of the completeness
            model (default = None).


        Returns
        -------
        f50s : array
            flux at 50% completeness. Positions
            outside the data are given 999. If None
            was passed for wave this is
            a 2D array over the positions and
            all wavelengths
        norm_all : array
            the aperture corrections
        mask : array
            Only returned if `wave=None`. This
            mask is True where the ra/dec
            positions passed are in good
            regions of data
        amp : array
            an array of amplifier information
            for the closest fiber to each source
        """

        logger = logging.getLogger(name="ShotSensitivity")

        try:
            [x for x in ra]
        except TypeError:
            ra = array([ra])
            dec = array([dec])
            wave = array([wave])

        coords = SkyCoord(ra=ra, dec=dec, unit="deg")
        wave_rect = self.extractor.get_wave()
        pixsize_aa = wave_rect[1] - wave_rect[0]

        # This will give 999 once the noise is scaled suitably
        badval = 999 * 1e17 / pixsize_aa

        # Size of window in wave elements
        filter_len = 2 * self.wavenpix + 1

        if wave is not None:
            wave_passed = True
        else:
            wave_passed = False
            convolution_filter = ones(filter_len)
            mask = True * ones(len(coords), dtype=int)

        noise = []

        info_results = self.extractor.get_fiberinfo_for_coords(
            coords,
            radius=self.rad,
            ffsky=self.ffsky,
            return_fiber_info=True,
            fiber_lower_limit=2,
            verbose=False)

        id_, aseps, aifux, aifuy, axc, ayc, ara, adec, adata, aerror, afmask, afiberid, \
                    amultiframe = info_results

        I = None
        fac = None
        norm_all = []
        amp = []
        nan_fib_mask = []

        for i, c in enumerate(coords):

            sel = (id_ == i)

            if wave is not None:
                logger.debug("Running on source {:f} {:f} {:f}".format(
                    ra[i], dec[i], wave[i]))
            else:
                logger.debug("Running on position {:f} {:f}".format(
                    ra[i], dec[i]))

            logger.debug("Found {:d} fibers".format(sum(sel)))

            if sum(sel) > 0:

                # fiber properties
                xc = axc[sel][0]
                yc = ayc[sel][0]
                ifux = aifux[sel]
                ifuy = aifuy[sel]
                data = adata[sel]
                error = aerror[sel]
                fmask = afmask[sel]
                fiberid = afiberid[sel]
                multiframe = amultiframe[sel]
                seps = aseps[sel]

                # Flag the zero elements as bad
                fmask[(abs(data) < 1e-30) | (abs(error) < 1e-30)] = False

                iclosest = argmin(seps)

                amp.append(fiberid[iclosest])

                if len(self.bad_amps) > 0:
                    amp_flag = amp_flag_from_fiberid(fiberid[iclosest],
                                                     self.bad_amps)
                else:
                    amp_flag = True

                # XXX Could be faster - reloads the file every run
                meteor_flag = meteor_flag_from_coords(c, self.shotid)

                if not (amp_flag and meteor_flag):
                    logger.debug("The data here are bad, position is masked")
                    if wave_passed:
                        noise.append(badval)
                        norm_all.append(1.0)
                        # value doesn't matter, the position is already
                        # masked via the amp/meteor flag
                        nan_fib_mask.append(True)
                        continue
                    else:
                        mask[i] = False

                weights, I, fac = self.extractor.build_weights(
                    xc,
                    yc,
                    ifux,
                    ifuy,
                    self.moffat,
                    I=I,
                    fac=fac,
                    return_I_fac=True)

                # (See Greg Zeimann's Remedy code)
                # normalized in the fiber direction
                norm = sum(weights, axis=0)
                weights = weights / norm

                result = self.extractor.get_spectrum(
                    data,
                    error,
                    fmask,
                    weights,
                    remove_low_weights=False,
                    sclean_bad=self.sclean_bad,
                    return_scleaned_mask=True)

                spectrum_aper, spectrum_aper_error, scleaned = [
                    res for res in result
                ]

                if wave_passed:

                    index = where(wave_rect >= wave[i])[0][0]
                    ilo = index - self.wavenpix
                    ihi = index + self.wavenpix + 1

                    # If lower index less than zero, truncate
                    if ilo < 0:
                        ilo = 0

                    if ihi < 0:
                        ihi = 0

                    # Output lots of information for very detailed debugging
                    if logger.getEffectiveLevel() == logging.DEBUG:
                        logger.debug("Table of fibers:")
                        logger.debug(
                            "# fiberid    wave_index ifux ifuy  weight     noise"
                        )
                        for fibidx, fid in enumerate(fiberid):
                            for wi, (tw, tnoise) in enumerate(
                                    zip((weights * norm)[fibidx, ilo:ihi],
                                        error[fibidx, ilo:ihi]), ilo):
                                logger.debug(
                                    "{:s} {:d} {:f} {:f} {:f} {:f}".format(
                                        fid, wi, ifux[fibidx], ifuy[fibidx],
                                        tw, tnoise))

                    # Mask source if bad values within the central 3 wavebins
                    nan_fib = bad_central_mask(weights * norm,
                                               logical_not(fmask), index)
                    nan_fib_mask.append(nan_fib)

                    # Account for NaN and masked spectral bins
                    bad = isnan(spectrum_aper_error[ilo:ihi])
                    goodfrac = 1.0 - sum(bad) / len(bad)

                    if all(isnan(spectrum_aper_error[ilo:ihi])):
                        sum_sq = badval
                    else:
                        sum_sq = \
                            sqrt(nansum(square(spectrum_aper_error[ilo:ihi])/goodfrac))

                    norm_all.append(mean(norm[ilo:ihi]))
                    noise.append(sum_sq)
                else:
                    logger.debug(
                        "Convolving with window to get flux limits versus wave"
                    )

                    # Use astropy convolution so NaNs are ignored
                    convolved_variance = convolve(square(spectrum_aper_error),
                                                  convolution_filter,
                                                  normalize_kernel=False)
                    std = sqrt(convolved_variance)

                    # Also need to convolve aperture corrections to get
                    # a total apcor across the wavelength window
                    convolved_norm = convolve(norm,
                                              convolution_filter,
                                              normalize_kernel=True)

                    # To get the mean, account for the edges
                    # in the convolution
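                    # e.g. with wavenpix=3 (filter_len=7) the first
                    # spectral bin only overlaps 4 in-range bins, so it
                    # is rescaled by 7/4, the next by 7/5, and so on,
                    # mirrored at the red end of the spectrum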
                    for iend in range(self.wavenpix):
                        fac = filter_len / (filter_len + iend - self.wavenpix)
                        convolved_norm[iend] *= fac
                        convolved_norm[-iend - 1] *= fac

                    # Mask wavelengths with too many bad pixels
                    # equivalent to nan_fib in the wave != None mode
                    wunorm = weights * norm
                    for index in range(len(convolved_variance)):
                        if not bad_central_mask(wunorm, logical_not(fmask),
                                                index):
                            std[index] = badval

                    noise.append(std)
                    norm_all.append(convolved_norm)

            else:
                if wave_passed:
                    noise.append(badval)
                    norm_all.append(1.0)
                    amp.append("000")
                    nan_fib_mask.append(True)
                else:
                    noise.append(badval * ones(len(wave_rect)))
                    norm_all.append(ones(len(wave_rect)))
                    amp.append("000")
                    mask[i] = False

        # Apply the galaxy mask
        gal_mask = ones(len(coords), dtype=int)
        for gal_region in self.gal_regions:
            dummy_wcs = create_dummy_wcs(gal_region.center,
                                         imsize=2 * gal_region.height)
            # zero if near galaxy
            gal_mask = gal_mask & invert(gal_region.contains(
                coords, dummy_wcs))

        noise = array(noise)
        snoise = pixsize_aa * 1e-17 * noise

        if wave_passed:

            bad = (gal_mask <
                   0.5) | (snoise > 998) | isnan(snoise) | invert(nan_fib_mask)

            normnoise = snoise / norm_all

            if not direct_sigmas:
                normnoise = self.f50_from_noise(normnoise,
                                                wave,
                                                sncut,
                                                linewidth=linewidth)

            normnoise[bad] = 999.

            return normnoise, amp, norm_all

        else:
            mask[gal_mask < 0.5] = False

            if self.badshot:
                mask[:] = False

            bad = (snoise > 998) | logical_not(isfinite(snoise))
            normnoise = snoise / norm_all

            if not direct_sigmas:
                normnoise = self.f50_from_noise(normnoise,
                                                wave,
                                                sncut,
                                                linewidth=linewidth)

            normnoise[bad] = 999

            return normnoise, mask, amp, norm_all

    def return_completeness(self,
                            flux,
                            ra,
                            dec,
                            lambda_,
                            sncut,
                            f50s=None,
                            linewidth=None):
        """
        Return completeness at a 3D position as an array. 
        If for whatever reason the completeness is NaN, it's
        replaced by 0.0. This is cut and paste from
        sensitivity_cube:SensitivityCube

        Parameters
        ----------
        flux : array
            fluxes of objects
        ra, dec : array
            right ascension and dec in degrees
        lambda_ : array
            wavelength in Angstrom
        sncut : float
            the detection significance (S/N) cut
            applied to the data
        f50s : array (optional)
            optional array of precomputed
            50% completeness fluxes. Otherwise
            the method will compute them itself
            from the ra/dec/linewidth (default: None)
        linewidth : array (optional)
            optionally pass the linewidth of
            the source (in AA) to activate the
            linewidth dependent part of the
            completeness model. Only used when
            you don't pass the f50s
            (default: None)

        Return
        ------
        fracdet : array
            fraction detected 

        Raises
        ------
        WavelengthException :
            Annoys user if they pass
            wavelength outside of
            VIRUS range
        """

        logger = logging.getLogger(name="ShotSensitivity")

        try:
            if lambda_[0] < 3000.0 or lambda_[0] > 6000.0:

                raise WavelengthException("""Odd wavelength value. Are you
                                             sure it's in Angstrom?""")
        except TypeError as e:
            if lambda_ < 3000.0 or lambda_ > 6000.0:

                raise WavelengthException("""Odd wavelength value. Are you
                                             sure it's in Angstrom?""")

        if f50s is None:
            f50s, norm = self.get_f50(ra,
                                      dec,
                                      lambda_,
                                      sncut,
                                      linewidth=linewidth)

        try:
            # to stop bad values breaking interpolation
            bad = (f50s > 998)
            f50s[bad] = 1e-16
            fracdet = self.sinterp(flux, f50s, lambda_, sncut)
            #print(min(flux), max(flux), min(f50s), max(f50s))

            # check to see if we're passing multiple fluxes
            # for one f50 value
            if any(bad):
                logger.debug("There are bad values here to mask")
                if len(f50s) == 1:
                    logger.debug("Just one ra/dec/wave passed.")
                    fracdet[:] = 0.0
                    f50s[:] = 999.0
                else:
                    fracdet[bad] = 0.0
                    f50s[bad] = 999.0

        except IndexError as e:
            print("Interpolation failed!")
            print(min(flux), max(flux), min(f50s), max(f50s))
            print(min(lambda_), max(lambda_))
            raise e

        try:
            fracdet[isnan(fracdet)] = 0.0
        except TypeError:
            if isnan(fracdet):
                fracdet = 0.0

        return fracdet

    def close(self):
        """ 
        Close the Extractor object 
        (especially if it has a Shot HDF
        file open)
        
        """
        self.extractor.close()

    def __enter__(self):
        """ Added to support using the `with` statement """
        return self

    def __exit__(self, type_, value, traceback):
        """ Support tidying up after using the `with` statement """
        self.close()
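
A usage sketch of the class (the datevshot, position, wavelength and the S/N
cut of 4.8 are illustrative values; ``array`` is numpy's, as used throughout
the code above)::

    with ShotSensitivity("20190209v027") as shot_sens:
        f50, norm = shot_sens.get_f50([150.025], [2.087], [4500.0], 4.8)
        compl = shot_sens.return_completeness(array([4e-17]), [150.025],
                                              [2.087], [4500.0], 4.8)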