Example #1
    def update_hdr_config(self):
        self.survey = self.survey_widget.value.lower()
        self.hetdex_api_config = HDRconfig(survey=self.survey)

        self.FibIndex = FiberIndex(self.survey)
        self.ampflag_table = Table.read(self.hetdex_api_config.badamp)

        # update survey class and shot list
        self.survey_class = Survey(self.survey)
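A hedged sketch of how a callback like this is typically wired to an ipywidgets control; the Dropdown options and the observe() hookup are illustrative assumptions, not part of the original snippet.

import ipywidgets as widgets

    def __init__(self):
        # hypothetical widget owned by the same class as update_hdr_config
        self.survey_widget = widgets.Dropdown(options=["HDR1", "HDR2.1"],
                                              value="HDR2.1")
        # re-run update_hdr_config whenever the selected survey changes
        self.survey_widget.observe(lambda change: self.update_hdr_config(),
                                   names="value")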
Example #2
def mock_hdf_cube(datadir):
    """ Generate a mock HDF cube for a given shot """
    datevobs = "20190201v019"
    survey_obj = Survey("hdr2.1")
    shot = survey_obj[survey_obj.datevobs == datevobs]
    sencube_hdf = generate_sencube_hdf(datevobs, shot.ra[0], 
                                       shot.dec[0], shot.pa[0], datadir, 
                                       31, 31, 1036, 62.0)

    return sencube_hdf
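The datadir argument suggests this helper is meant as a test fixture; a minimal pytest wiring sketch, where the fixture names and the tmp_path_factory usage are assumptions for illustration.

import pytest

@pytest.fixture(scope="session")
def datadir(tmp_path_factory):
    # temporary directory to hold the generated sensitivity cube
    return str(tmp_path_factory.mktemp("sencube"))

@pytest.fixture
def sencube(datadir):
    # build the mock HDF cube for a test
    return mock_hdf_cube(datadir)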
Example #3
def main(argv=None):

    parser = get_parser()
    args = parser.parse_args(argv)
    args.log = setup_logging()

    if args.infile:

        args.log.info("Loading External File")

        table_in = Table.read(args.infile, format="ascii")
        args.ID = table_in["ID"]
        args.ra = table_in["ra"]
        args.dec = table_in["dec"]
    else:
        if args.ID is None:
            args.ID = "DEX" + str(args.ra).zfill(4) + "_"
            +str(args.dec).zfill(4)

        args.log.info("Extracting for ID: %s" % args.ID)

    args.coords = SkyCoord(args.ra * u.deg, args.dec * u.deg)

    args.survey = Survey("hdr1")

    args.matched_sources = {}
    shots_of_interest = []

    count = 0

    # this radius applies to the initial shot search and requires a large
    # aperture for the wide FOV of VIRUS

    max_sep = 11.0 * u.arcminute

    args.log.info("Finding shots of interest")

    for i, coord in enumerate(args.survey.coords):
        dist = args.coords.separation(coord)
        sep_constraint = dist < max_sep
        shotid = args.survey.shotid[i]
        idx = np.where(sep_constraint)[0]
        if np.size(idx) > 0:
            args.matched_sources[shotid] = idx
            count += np.size(idx)
            shots_of_interest.append(shotid)

    args.log.info("Number of shots of interest: %i" % len(shots_of_interest))
    args.log.info("Saved shot list to file " + str(args.outfile))
    np.savetxt("shotlist", shots_of_interest, fmt="%i")
Example #4
import sys

from hetdex_api.survey import Survey
from hetdex_api.mask import *
from hetdex_api.flux_limits.hdf5_sensitivity_cubes import (SensitivityCubeHDF5Container, return_sensitivity_hdf_path)
from hetdex_api.detections import Detections

date = sys.argv[1]
obs = sys.argv[2]

try:
    wave_slice = float(sys.argv[3])
except (IndexError, ValueError):
    wave_slice = 4540.
    
datevobs = str(date) + 'v' + str(obs).zfill(3)
shotid = int(str(date) + str(obs).zfill(3))

survey = Survey()

shot_coords = survey.coords[survey.shotid == shotid][0]

seeing = survey.fwhm_virus[survey.shotid == shotid][0]
tp = survey.response_4540[survey.shotid == shotid][0]

hdf_filename_hdr2pt1, mask_fn = return_sensitivity_hdf_path(datevobs, release="hdr2.1", return_mask_fn=True)

hdfcont_hdr2 = SensitivityCubeHDF5Container(filename=hdf_filename_hdr2pt1, aper_corr=1.0,
                                            flim_model="hdr2pt1",  mask_filename=mask_fn)

#overplot detections
#detects = Detections(curated_version='2.1.3')
#sel_shot = detects.shotid == shotid
#detects_shot = detects[sel_shot]
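A hedged sketch of a common next step with the opened container: looping over its per-IFU sensitivity cubes. The itercubes() call follows the hetdex_api flux_limits API as documented; treat it as an assumption here.

for ifu_name, scube in hdfcont_hdr2.itercubes():
    # each scube holds the flux-limit data for one IFU of this shot
    print(ifu_name)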
Example #5
def main(argv=None):
    """ Main Function """
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(description="""Create Region Files.""",
                               add_help=True)

    parser.add_argument(
        "-s",
        "--shotid",
        help="""Shot identifier, an integer""",
        type=int,
        default=None,
    )

    parser.add_argument(
        "-d",
        "--date",
        help="""Date, e.g., 20170321, YYYYMMDD""",
        type=str,
        default=None,
    )

    parser.add_argument(
        "-o",
        "--observation",
        help='''Observation number, "00000007" or "7"''',
        type=str,
        default=None,
    )

    parser.add_argument(
        "-f",
        "--field",
        help="""Options=""",
        type=str,
        default=None,
    )

    args = parser.parse_args(argv)
    args.log = setup_logging()

    if args.field is not None:

        S = Survey()
        survey_table = S.return_astropy_table()
        sel_field = survey_table["field"] == args.field
        ifuregions = []

        for row in survey_table[sel_field]:
            args.log.info("Working on " + str(row["shotid"]))
            ifuregions.extend(get_regions_from_flim(row["shotid"]))

        outname = args.field

    elif args.shotid:
        ifuregions = get_regions_from_flim(args.shotid)
        outname = str(args.shotid)

    elif args.date is not None and args.observation is not None:
        shotid = int(str(args.date) + str(args.observation).zfill(3))
        ifuregions = get_regions_from_flim(shotid)
        outname = str(shotid)

    else:
        parser.error("Provide --field, --shotid, or both --date and --observation")

    region_file = outname + '.reg'
    write_ds9(ifuregions, region_file)
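A minimal invocation sketch for this entry point; the shot identifier is invented.

if __name__ == "__main__":
    # write ds9 regions for a single shot, producing 20190201019.reg
    main(["-s", "20190201019"])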
Example #6
    def __init__(
        self,
        survey=LATEST_HDR_NAME,
        catalog_type="lines",
        curated_version=None,
        loadtable=True,
    ):
        """
        Initialize the detection catalog class for a given data release

        Input
        -----
        survey : string
            Data release you would like to load, e.g., 'hdr2', 'HDR1'.
            This is case insensitive.
        catalog_type : string
            Catalog to load up. Either 'lines', 'continuum' or 'broad'.
            Default is 'lines'.
        loadtable : bool
           Boolean flag to load all detection table info upon initialization.
           For example, if you just want to grab a spectrum this isn't needed.
        
        """
        survey_options = ["hdr1", "hdr2", "hdr2.1"]
        catalog_type_options = ["lines", "continuum", "broad"]

        if survey.lower() not in survey_options:
            print("survey not in survey options")
            print(survey_options)
            return None

        if catalog_type.lower() not in catalog_type_options:
            print("catalog_type not in catalog_type options")
            print(catalog_type_options)
            return None

        # store to class
        if curated_version is not None:
            self.version = curated_version
            self.loadtable = False
            self.survey = "hdr" + curated_version[0:3]
        else:
            self.version = None
            self.survey = survey
            self.loadtable = loadtable

        global config
        config = HDRconfig(survey=self.survey)

        if catalog_type == "lines":
            self.filename = config.detecth5
        elif catalog_type == "continuum":
            self.filename = config.contsourceh5
        elif catalog_type == "broad":
            try:
                self.filename = config.detectbroadh5
            except AttributeError:
                print("Could not locate broad line catalog")

        self.hdfile = tb.open_file(self.filename, mode="r")

        if self.version is not None:

            try:
                catfile = op.join(
                    config.detect_dir, "catalogs", "detect_hdr" + self.version + ".fits"
                )
                det_table = Table.read(catfile)

                for col in det_table.colnames:
                    if isinstance(det_table[col][0], str):
                        setattr(self, col, np.array(det_table[col]).astype(str))
                    else:
                        setattr(self, col, np.array(det_table[col]))

                self.vis_class = -1 * np.ones(np.size(self.detectid))

            except Exception:
                print("Could not open curated catalog version: " + self.version)
                return None

        elif self.loadtable:
            colnames = self.hdfile.root.Detections.colnames
            for name in colnames:
                if isinstance(
                    getattr(self.hdfile.root.Detections.cols, name)[0], np.bytes_
                ):
                    setattr(
                        self,
                        name,
                        getattr(self.hdfile.root.Detections.cols, name)[:].astype(str),
                    )
                else:
                    setattr(
                        self, name, getattr(self.hdfile.root.Detections.cols, name)[:]
                    )
            if self.survey == "hdr2.1":
                # Fix fluxes and continuum values for aperture corrections  
                wave = self.hdfile.root.Detections.cols.wave[:]
                apcor = self.hdfile.root.Spectra.cols.apcor[:]
                wave_spec = self.hdfile.root.Spectra.cols.wave1d[:]

                apcor_array = np.ones_like(wave)
                for idx in np.arange(0, np.size(wave)):
                    sel_apcor = np.where(wave_spec[idx, :] > wave[idx])[0][0]
                    apcor_array[idx] = apcor[idx, sel_apcor]

                self.flux /= apcor_array
                self.flux_err /= apcor_array
                self.continuum /= apcor_array
                self.continuum_err /= apcor_array
                self.apcor = apcor_array

            # add in the elixer probabilties and associated info:
            if self.survey == "hdr1" and catalog_type == "lines":

                self.hdfile_elix = tb.open_file(config.elixerh5, mode="r")
                colnames2 = self.hdfile_elix.root.Classifications.colnames
                for name2 in colnames2:
                    if name2 == "detectid":
                        setattr(
                            self,
                            "detectid_elix",
                            self.hdfile_elix.root.Classifications.cols.detectid[:],
                        )
                    else:
                        if isinstance(
                            getattr(self.hdfile_elix.root.Classifications.cols, name2)[
                                0
                            ],
                            np.bytes_,
                        ):
                            setattr(
                                self,
                                name2,
                                getattr(
                                    self.hdfile_elix.root.Classifications.cols, name2
                                )[:].astype(str),
                            )
                        else:
                            setattr(
                                self,
                                name2,
                                getattr(
                                    self.hdfile_elix.root.Classifications.cols, name2
                                )[:],
                            )
            else:

                # add elixer info if node exists
                try:
                    colnames = self.hdfile.root.Elixer.colnames
                    for name in colnames:
                        if name == "detectid":
                            continue
                        if isinstance(
                            getattr(self.hdfile.root.Elixer.cols, name)[0], np.bytes_
                        ):
                            setattr(
                                self,
                                name,
                                getattr(self.hdfile.root.Elixer.cols, name)[:].astype(
                                    str
                                ),
                            )
                        else:
                            setattr(
                                self,
                                name,
                                getattr(self.hdfile.root.Elixer.cols, name)[:],
                            )
                    self.gmag = self.mag_sdss_g
                    self.gmag_err = self.mag_sdss_g
                except tb.NoSuchNodeError:
                    print("No Elixer table found")

            # also assign a field and some QA identifiers
            self.field = np.chararray(np.size(self.detectid), 12, unicode=True)
            self.fwhm = np.zeros(np.size(self.detectid))
            if self.survey == "hdr1":
                self.fluxlimit_4550 = np.zeros(np.size(self.detectid))
            else:
                self.fluxlimit_4540 = np.zeros(np.size(self.detectid))

            self.throughput = np.zeros(np.size(self.detectid))
            self.n_ifu = np.zeros(np.size(self.detectid), dtype=int)

            S = Survey(self.survey)

            for index, shot in enumerate(S.shotid):
                ix = np.where(self.shotid == shot)
                self.field[ix] = S.field[index].astype(str)
                # NOTE: python2 to python3 strings now unicode
                if self.survey == "hdr1":
                    self.fwhm[ix] = S.fwhm_moffat[index]
                    self.fluxlimit_4550[ix] = S.fluxlimit_4550[index]
                else:
                    self.fwhm[ix] = S.fwhm_virus[index]
                try:
                    self.fluxlimit_4540[ix] = S.fluxlimit_4540[index]
                except AttributeError:
                    pass
                self.throughput[ix] = S.response_4540[index]
                self.n_ifu[ix] = S.n_ifu[index]

                # assign a vis_class field for future classification
                # -2 = ignore (bad detectid, shot)
                # -1 = no assignment
                # 0 = artifact
                # 1 = OII emitter
                # 2 = LAE emitter
                # 3 = star
                # 4 = nearby galaxies (HBeta, OIII usually)
                # 5 = other line
            # close the survey HDF5 file
            S.close()

            self.vis_class = -1 * np.ones(np.size(self.detectid))

            if self.survey == "hdr1":
                self.add_hetdex_gmag(loadpickle=True, picklefile=config.gmags)

            if self.survey == "hdr1":
                if PYTHON_MAJOR_VERSION < 3:
                    self.plae_poii_hetdex_gmag = np.array(
                        pickle.load(open(config.plae_poii_hetdex_gmag, "rb"))
                    )
                else:
                    self.plae_poii_hetdex_gmag = np.array(
                        pickle.load(
                            open(config.plae_poii_hetdex_gmag, "rb"), encoding="bytes"
                        )
                    )

        else:
            # just get coordinates and detectid
            self.detectid = self.hdfile.root.Detections.cols.detectid[:]
            self.ra = self.hdfile.root.Detections.cols.ra[:]
            self.dec = self.hdfile.root.Detections.cols.dec[:]

        # set the SkyCoords
        self.coords = SkyCoord(self.ra * u.degree, self.dec * u.degree, frame="icrs")
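A minimal usage sketch for the class initialized above, using only attributes the code itself builds; the survey string is an example.

from hetdex_api.detections import Detections

dets = Detections(survey="hdr2.1", catalog_type="lines")

# coords is pre-built as a SkyCoord array, and per-shot QA columns
# (throughput, fwhm) are filled in during initialization
sel = (dets.throughput > 0.08) & (dets.fwhm < 2.5)
print(dets.detectid[sel][:10])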
Example #7
def generate_sources_to_simulate(args=None):
    """
    Generate source simulation inputs
    spread evenly over the sensitivity cube
    pixels, from an input sensitivity cube. Not
    suitable as a random catalogue, as not uniform
    in redshift - good for probing the detection
    efficiency etc. over redshift though

    """

    parser = ArgumentParser(
        description=
        "Generate inputs for source simulations, uniform in x,y,z datacube coords"
    )
    parser.add_argument("--nsplit-start",
                        default=0,
                        type=int,
                        help="What number to start the file split labeling at")
    parser.add_argument("--ifusize",
                        help="Size of IFU, in arcsec",
                        default=52.)
    parser.add_argument("--nperifu",
                        help="Number of sources to add per IFU",
                        default=1000,
                        type=int)
    parser.add_argument("--flimcut",
                        help="Don't simulate source above this",
                        type=float,
                        default=1e-15)
    parser.add_argument("--fix-flux",
                        help="If not None, set all sources to this flux value",
                        default=None,
                        type=float)
    parser.add_argument("--frac-range",
                        default=[0.2, 2.0],
                        nargs=2,
                        type=float,
                        help="The range in flux as a fraction of flim")
    parser.add_argument("--nsplit",
                        default=1,
                        help="Number of jobs per line in .run file")
    parser.add_argument("--nmax",
                        default=1000,
                        help="Maximum number of sources in one sim",
                        type=int)
    parser.add_argument(
        "filelist",
        help="Ascii file with list of sensitivity HDF5 files or date shots")
    parser.add_argument("outdir", help="Directory for output files")
    opts = parser.parse_args(args=args)

    survey_obj = None
    tables = []
    with open(opts.filelist, "r") as fp:
        for line in fp:
            input_ = line.strip()
            if ".h5" in input_:
                print("Assuming {:s} is an HDF5 file".format(input_))
                field = input_.replace("_sensitivity_cube.h5", "")
                sencube_hdf = SensitivityCubeHDF5Container(input_)
                add_flim = True
            else:
                print("Assuming {:s} is date shot".format(input_))
                field = input_
                add_flim = False

                if survey_obj is None:
                    survey_obj = Survey("hdr2")

                shot = survey_obj[survey_obj.datevobs == field]
                sencube_hdf = generate_sencube_hdf(shot.datevobs[0],
                                                   shot.ra[0], shot.dec[0],
                                                   shot.pa[0], opts.outdir, 31,
                                                   31, 1036, opts.ifusize)

            # build one table of simulated sources per input line
            ttable = rdz_flux_from_hdf_cubes(sencube_hdf,
                                             add_flim=add_flim,
                                             minfrac=opts.frac_range[0],
                                             maxfrac=opts.frac_range[1],
                                             nperifu=opts.nperifu)
            ttable["field"] = field
            tables.append(ttable)

    table = vstack(tables)
    table["id"] = range(len(table))

    if opts.fix_flux:
        table["flux"] = opts.fix_flux

    if add_flim:
        table = table[table["flim"] < opts.flimcut]
        print("After flimcut left with {:d} sources!".format(len(table)))

    #table.write("full_input_{:d}.txt".format(opts.nsplit_start), format="ascii.commented_header")

    # Split into IFUS and fields
    unique_ifus = unique(table["ifuslot"])
    unique_fields = unique(table["field"])

    # Second part of the filename
    fn2s = []
    split_into_ifus(table,
                    unique_fields,
                    unique_ifus,
                    opts.outdir,
                    NMAX=opts.nmax,
                    nsplit_start=opts.nsplit_start,
                    nsplit=opts.nsplit)
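A hedged invocation sketch for this generator; the file names are invented.

# shotlist.txt lists one date-shot (e.g. 20190201v019) or .h5 path per line;
# outputs go to the sim_inputs directory
generate_sources_to_simulate(["--nperifu", "500", "shotlist.txt", "sim_inputs"])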
Example #8
def get_spectra(
    coords,
    ID=None,
    rad=3.5,
    multiprocess=True,
    shotid=None,
    survey=LATEST_HDR_NAME,
    tpmin=0.08,
    keep_bad_shots=False,
    ffsky=False,
    fiberweights=False,
    return_fiber_info=False,
    loglevel='INFO',
):
    """
    Function to retrieve PSF-weighted, ADR and aperture corrected
    spectral extractions of HETDEX fibers. It will search all shots
    within a specific HETDEX Data Release and return a table of
    spectra, one row per extraction per shot; a spectrum is only
    extracted where more than 7 fibers are found in the aperture.

    Parameters
    ----------
    coords
        list of astropy SkyCoord coordinates
    ID
        list of ID names (must be same length as coords). Will
        generate a running index if no ID is given
    rad
        radius of circular aperture to be extracted in arcsec.
        Default is 3.5
    multiprocess: bool
        boolean flag to use multiprocessing. This will greatly
        speed up the operation as it will extract on 32 shots at
        a time. But only use this when on a compute node. Use
        idev, a jupyter notebook, or submit the job as a single
        python slurm job. Default is True
    shotid: int
        list of integer shotids to do extractions on. By default
        it will search the whole survey except for shots located
        in the bad.shotlist file
    survey: str
        Survey you want to access. Note that HDR1 extractions
        are much slower compared to HDR2.
    tpmin: float
        Include only shots above tpmin. Default is 0.08
    ffsky: bool
        Use the full frame 2D sky subtraction model. Default is
        to use the local sky subtracted, flux calibrated fibers.
    fiberweights: bool
        Boolean flag to include fiber_weights tuple in source
        dictionary. This is used in Elixer, but is slow
        when used on large source lists.
    return_fiber_info: bool
        returns the fiber_info and weights of the fibers used
        in the extraction
    keep_bad_shots: bool
        Set this to True if you want to include fibers from bad
        shots. This is dangerous as it can include fibers with
        bad astrometry or bad calibration. Default is False.
    loglevel: str
        Level to set logging. Options are ERROR, WARNING, INFO,
        DEBUG. Defaults to INFO

    Returns
    -------
    sources
        an astropy table object of source spectra for all input
        coords/ID that have spectra in the survey shots. There
        is one row per source ID/shotid observation.
    """

    args = types.SimpleNamespace()

    args.multiprocess = multiprocess
    args.coords = coords
    args.rad = rad * u.arcsec
    args.survey = survey

    args.ffsky = ffsky
    args.fiberweights = fiberweights
    args.return_fiber_info = return_fiber_info

    args.keep_bad_shots = keep_bad_shots

    S = Survey(survey)

    if args.keep_bad_shots:
        ind_good_shots = np.ones_like(S.shotid, dtype=bool)
    else:
        ind_good_shots = S.remove_shots()

    if tpmin:
        ind_tp = S.response_4540 > tpmin
        args.survey_class = S[ind_good_shots * ind_tp]
    else:
        args.survey_class = S[ind_good_shots]

    if shotid is not None:
        try:
            if np.size(shotid) == 1:
                sel_shot = args.survey_class.shotid == int(shotid)
                # shut off multiprocess flag if it's just one shot
                args.multiprocess = False
            else:
                sel_shot = np.zeros(np.size(args.survey_class.shotid),
                                    dtype=bool)

                for shot_i in shotid:

                    sel_i = args.survey_class.shotid == int(shot_i)
                    sel_shot = np.logical_or(sel_shot, sel_i)

        except Exception:
            sel_shot = args.survey_class.datevobs == str(shotid)

        args.survey_class = args.survey_class[sel_shot]
    else:
        pass
        # sel_shot = args.survey_class.shotid > 20171200000
        # args.survey_class = args.survey_class[sel_shot]

    args.log = setup_logging()

    if loglevel == 'INFO':
        args.log.setLevel(logging.INFO)
    elif loglevel == 'ERROR':
        args.log.setLevel(logging.ERROR)
    elif loglevel == 'WARNING':
        args.log.setLevel(logging.WARNING)
    elif loglevel == 'DEBUG':
        args.log.setLevel(logging.DEBUG)
    else:
        args.log.warning('Unknown loglevel, using INFO')
        args.log.setLevel(logging.INFO)

    if ID is None:
        try:
            nobj = len(args.coords)
            if nobj > 1:
                args.ID = np.arange(1, nobj + 1)
            else:
                args.ID = 1
        except Exception:
            args.ID = 1
    else:
        args.ID = ID

    Source_dict = get_spectra_dictionary(args)

    args.survey_class.close()

    output = return_astropy_table(Source_dict,
                                  fiberweights=args.fiberweights,
                                  return_fiber_info=args.return_fiber_info)

    args.log.info("Retrieved " + str(np.size(output)) + " spectra.")

    return output
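A minimal usage sketch following the docstring above; the coordinates are invented.

import astropy.units as u
from astropy.coordinates import SkyCoord

coords = SkyCoord([150.025, 150.210] * u.deg, [2.087, 2.134] * u.deg)

# laptop-friendly call: multiprocessing off, per the docstring advice
spectra = get_spectra(coords, rad=3.5, multiprocess=False)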
Example #9
def main(argv=None):
    """ Main Function """

    parser = get_parser()
    args = parser.parse_args(argv)
    args.log = setup_logging()

    if args.pickle:
        args.fits = False

    if args.merge:

        if args.fits:
            master_table = Table()
            files = glob.glob(op.join(args.mergepath, "*.fits"))
            args.log.info("Merging all fits files in " + args.mergepath)

            for file in files:
                file_table = Table.read(file)
                if np.size(file_table) > 0:
                    master_table = vstack([master_table, file_table])
            outfile = args.outfile + ".fits"
            master_table.write(outfile, format="fits", overwrite=True)

        else:
            all_source_dict = {}
            files = glob.glob(op.join(args.mergepath, "*.pkl"))
            args.log.info("Merging all pickle files in " + args.mergepath)
            for file in files:
                file_dict = pickle.load(open(file, "rb"))
                if len(file_dict) > 0:
                    all_source_dict = merge(all_source_dict, file_dict)

            outfile = args.outfile + ".pkl"
            pickle.dump(all_source_dict, open(outfile, "wb"))

        args.log.info("Saved output file to " + outfile)
        sys.exit("Exiting")

    if args.infile:

        args.log.info("Loading External File")

        table_in = None
        try:
            table_in = Table.read(args.infile, format="ascii")
            if table_in.colnames == ["col1", "col2", "col3"]:
                table_in["col1"].name = "ID"
                table_in["col2"].name = "ra"
                table_in["col3"].name = "dec"
            elif np.size(table_in.colnames) != 3:
                args.log.info("Input file not in three column format")
        except Exception:
            try:
                table_in = Table.read(args.infile, format="fits")
            except Exception:
                pass

        if table_in is None:
            if op.exists(args.infile):
                args.log.warning("Could not open input file")
            else:
                args.log.warning("Input file not found")
            sys.exit("Exiting")
        try:
            args.ID = table_in["ID"]
        except KeyError:
            args.ID = table_in["id"]

        try:
            args.ra = table_in["ra"]
            args.dec = table_in["dec"]
        except KeyError:
            args.ra = table_in["RA"]
            args.dec = table_in["DEC"]

    else:
        if args.ID is None:
            if np.size(args.ra) > 1:
                args.ID = np.arange(1, np.size(args.ra) + 1)
            else:
                args.ID = 1

        args.log.info("Extracting for ID: %s" % args.ID)

    # generate astropy coordinates object for searching

    if re.search(":", str(args.ra)):
        args.coords = SkyCoord(args.ra, args.dec, unit=(u.hourangle, u.deg))
    else:
        args.coords = SkyCoord(args.ra, args.dec, unit=u.deg)

    S = Survey(args.survey)

    if args.keep_bad_shots:
        ind_good_shots = np.ones_like(S.shotid, dtype=bool)
    else:
        ind_good_shots = S.remove_shots()

    if args.tpmin:
        ind_tp = S.response_4540 > args.tpmin
        args.survey_class = S[ind_good_shots * ind_tp]
    else:
        args.survey_class = S[ind_good_shots]

    # if args.shotid exists, only select those shots

    if args.shotid:
        try:
            sel_shot = args.survey_class.shotid == int(args.shotid)
        except Exception:
            sel_shot = args.survey_class.datevobs == str(args.shotid)

        args.survey_class = args.survey_class[sel_shot]


    # main function to retrieve spectra dictionary
    Source_dict = get_spectra_dictionary(args)

    args.survey_class.close()

    if args.pickle:
        outfile = args.outfile + ".pkl"
        pickle.dump(Source_dict, open(outfile, "wb"))

    if args.single:
        # loop over every ID/observation combo:
        fluxden_u = 1e-17 * u.erg * u.s**(-1) * u.cm**(-2) * u.AA**(-1)
        for ID in Source_dict.keys():
            for shotid in Source_dict[ID].keys():

                wave_rect = 2.0 * np.arange(1036) + 3470.0
                spec = Source_dict[ID][shotid][0]
                spec_err = Source_dict[ID][shotid][1]
                weights = Source_dict[ID][shotid][2]

                sel = np.isfinite(spec)
                if np.sum(sel) > 0:
                    output = Table()

                    output.add_column(
                        Column(wave_rect, name="wavelength", unit=u.AA))
                    output.add_column(Column(spec, name="spec",
                                             unit=fluxden_u))
                    output.add_column(
                        Column(spec_err, name="spec_err", unit=fluxden_u))
                    output.add_column(Column(weights, name="weights"))

                    output.write("spec_" + str(ID) + "_" + str(shotid) +
                                 ".tab",
                                 format="ascii")

    if args.fits:
        output = return_astropy_table(Source_dict,
                                      fiberweights=args.fiberweights)
        if args.fiberweights:
            # cannot save fiberweights to a fits file
            output.remove_column('fiber_weights')
        output.write(args.outfile + ".fits", format="fits", overwrite=True)
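A hedged sketch of the two-step pattern this script supports: extract per-chunk files, then merge them. The flag names are inferred from the args attributes used above, since get_parser() is not shown; treat them as assumptions.

# step 1: per-chunk extraction, writing chunk01.fits (flag names assumed)
main(["--infile", "targets_01.txt", "--outfile", "chunk01"])

# step 2: stack every *.fits under the merge path into merged.fits
main(["--merge", "--mergepath", ".", "--outfile", "merged"])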
Example #10
    def __init__(self,
                 datevshot,
                 release=None,
                 flim_model=None,
                 rad=3.5,
                 ffsky=False,
                 wavenpix=3,
                 d25scale=3.0,
                 verbose=False,
                 sclean_bad=True,
                 log_level="WARNING"):

        self.conf = HDRconfig()
        self.extractor = Extract()
        self.shotid = int(datevshot.replace("v", ""))
        self.date = datevshot[:8]
        self.rad = rad
        self.ffsky = ffsky
        self.wavenpix = wavenpix
        self.sclean_bad = sclean_bad

        logger = logging.getLogger(name="ShotSensitivity")
        logger.setLevel(log_level)

        if verbose:
            # emit, rather than raise, the deprecation notice (assumes the
            # module imports the standard `warnings` package)
            warnings.warn("Using verbose is deprecated, set log_level instead",
                          DeprecationWarning)
            logger.setLevel("DEBUG")

        logger.info("shotid: {:d}".format(self.shotid))

        if not release:
            self.release = self.conf.LATEST_HDR_NAME
        else:
            self.release = release

        logger.info("Data release: {:s}".format(self.release))
        self.survey = Survey(survey=self.release)

        # Set up flux limit model
        self.f50_from_noise, self.sinterp, interp_sigmas \
                                       = return_flux_limit_model(flim_model,
                                                                 cache_sim_interp=False,
                                                                 verbose=verbose)

        # Generate astrometry for this shot
        survey_sel = (self.survey.shotid == self.shotid)
        self.shot_pa = self.survey.pa[survey_sel][0]
        self.shot_ra = self.survey.ra[survey_sel][0]
        self.shot_dec = self.survey.dec[survey_sel][0]
        rot = 360.0 - (self.shot_pa + 90.)
        self.tp = TangentPlane(self.shot_ra, self.shot_dec, rot)

        # Set up masking
        logger.info("Using d25scale {:f}".format(d25scale))
        self.setup_mask(d25scale)

        # Set up spectral extraction
        if release == "hdr1":
            fwhm = self.survey.fwhm_moffat[survey_sel][0]
        else:
            fwhm = self.survey.fwhm_virus[survey_sel][0]

        logger.info("Using Moffat PSF with FWHM {:f}".format(fwhm))
        self.moffat = self.extractor.moffat_psf(fwhm, 3. * rad, 0.25)
        self.extractor.load_shot(self.shotid, fibers=True, survey=self.release)

        # Set up the focal plane astrometry
        fplane_table = self.extractor.shoth5.root.Astrometry.fplane

        # Bit of a hack to avoid changing pyhetdex
        with NamedTemporaryFile(mode='w') as tpf:
            for row in fplane_table.iterrows():
                tpf.write(
                    "{:03d} {:8.5f} {:8.5f} {:03d} {:03d} {:03d} {:8.5f} {:8.5f}\n"
                    .format(row['ifuslot'], row['fpx'], row['fpy'],
                            row['specid'], row['specslot'], row['ifuid'],
                            row['ifurot'], row['platesc']))
            tpf.seek(0)
            self.fplane = FPlane(tpf.name)
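A hedged construction sketch for the class above; the datevshot string is invented, and only the constructor shown in the snippet is used.

# build the per-shot sensitivity model for one (invented) shot
shot_sens = ShotSensitivity("20190201v019", release="hdr2.1", rad=3.5)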