Example #1
    def __init__(self, logger=None):
        #initial values are meant to be near the truth
        #and are expected to come from, say, some initial "best" fit

        self.log = logger
        if self.log is None:
            self.log = setup_logging()
            self.log.setLevel(logging.INFO)

        self.initial_mu = None
        self.initial_sigma = None
        self.initial_A = None  #note: set to a negative value if this is an absorption line
        self.initial_y = None
        self.initial_peak = None

        self.max_sigma = 20.0
        self.range_mu = 5.0
        self.max_A_mult = 2.0
        self.max_y_mult = 2.0
        self.min_y = -10.0

        self.data_x = None
        self.data_y = None
        self.err_x = None
        self.err_y = None

        #just for reference ... MCMC itself does not need to know about this
        #the caller DOES though and needs to adjust the line_flux accordingly
        #self.dx = None #original bin width IF NOT part of the data_y already

        #this is mostly a guess ... no great way to automate, but this is pretty quick
        #and since the initials are from a scipy curve fit, we stabilize pretty fast
        self.burn_in = 100
        self.main_run = 1000
        self.walkers = 100

        self.sampler = None  #mcmc sampler
        self.samples = None  #resulting samples

        #####################
        # Outputs
        #####################
        #3-tuples [0] = fit, [1] = fit +16%,  [2] = fit - 16%
        self.mcmc_mu = None
        self.mcmc_sigma = None
        self.mcmc_A = None  #note: typically this is summed over the HETDEX 2AA bins, so if using the area as an
        # integrated line flux you need to divide by 2AA and scale appropriately (e.g. by 1e-17)
        self.mcmc_y = None

        #not tuple, just single floats
        self.mcmc_snr = None
        self.mcmc_snr_err = 0
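
The comments above note that mcmc_A is an area summed over the HETDEX 2 AA bins, so the caller has to convert it before quoting a line flux. A minimal caller-side sketch of that conversion, assuming fit is a finished instance of this class and that mcmc_A[0] holds the best-fit area in the units of data_y:

# hypothetical caller-side conversion; not part of the class itself
bin_width = 2.0     # AA, the HETDEX bin width mentioned in the comment above
flux_scale = 1e-17  # assumed flux scale of the input spectrum (erg/s/cm^2/AA)

area = fit.mcmc_A[0]                           # best-fit area (see 3-tuple note above)
line_flux = area / bin_width * flux_scale      # integrated line flux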
Example #2
def main(argv=None):

    parser = get_parser()
    args = parser.parse_args(argv)
    args.log = setup_logging()

    if args.infile:

        args.log.info("Loading External File")

        table_in = Table.read(args.infile, format="ascii")
        args.ID = table_in["ID"]
        args.ra = table_in["ra"]
        args.dec = table_in["dec"]
    else:
        if args.ID is None:
            args.ID = "DEX" + str(args.ra).zfill(4) + "_"
            +str(args.dec).zfill(4)

        args.log.info("Extracting for ID: %s" % args.ID)

    args.coords = SkyCoord(args.ra * u.deg, args.dec * u.deg)

    args.survey = Survey("hdr1")

    args.matched_sources = {}
    shots_of_interest = []

    count = 0

    # this radius applies to the initial shot search and requires a large
    # aperture for the wide FOV of VIRUS

    max_sep = 11.0 * u.arcminute

    args.log.info("Finding shots of interest")

    for i, coord in enumerate(args.survey.coords):
        dist = args.coords.separation(coord)
        sep_constraint = dist < max_sep
        shotid = args.survey.shotid[i]
        idx = np.where(sep_constraint)[0]
        if np.size(idx) > 0:
            args.matched_sources[shotid] = idx
            count += np.size(idx)
            shots_of_interest.append(shotid)

    args.log.info("Number of shots of interest: %i" % len(shots_of_interest))
    args.log.info("Saved shot list to file " + str(args.outfile))
    np.savetxt("shotlist", shots_of_interest, fmt="%i")
Example #3
    def __init__(self, wave=None):
        """
        Initialize Extract class

        Parameters
        ----------
        wave: numpy 1d array
            wavelength array of the calfib extension for HDF5 files; does not
            need to be set unless required by the development team
        """
        if wave is not None:
            self.wave = wave
        else:
            self.wave = self.get_wave()
        self.get_ADR()
        self.log = setup_logging("Extract")
Example #4
def main(argv=None):
    """ Main Function """
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(description="""Create Region Files.""",
                               add_help=True)

    parser.add_argument(
        "-s",
        "--shotid",
        help="""Shot identifier, an integer""",
        type=int,
        default=None,
    )

    parser.add_argument(
        "-d",
        "--date",
        help="""Date, e.g., 20170321, YYYYMMDD""",
        type=str,
        default=None,
    )

    parser.add_argument(
        "-o",
        "--observation",
        help='''Observation number, "00000007" or "7"''',
        type=str,
        default=None,
    )

    parser.add_argument(
        "-f",
        "--field",
        help="""Options=""",
        type=str,
        default=None,
    )

    args = parser.parse_args(argv)
    args.log = setup_logging()

    if args.field is not None:

        S = Survey()
        survey_table = S.return_astropy_table()
        sel_field = survey_table["field"] == args.field
        ifuregions = []

        for row in survey_table[sel_field]:
            args.log.info("Working on " + str(row["shotid"]))
            ifuregions.extend(get_regions_from_flim(row["shotid"]))

        outname = args.field

    elif args.shotid:
        ifuregions = get_regions_from_flim(args.shotid)
        outname = str(args.shotid)

    elif args.date is not None and args.observation is not None:
        shotid = int(str(args.date) + str(args.observation).zfill(3))
        ifuregions = get_regions_from_flim(shotid)
        outname = str(shotid)

    region_file = outname + '.reg'
    write_ds9(ifuregions, region_file)
Example #5
def main(argv=None):
    """ Main Function """
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(
        description="""Create HDF5 Astrometry file.""", add_help=True
    )

    parser.add_argument(
        "-sdir",
        "--shotdir",
        help="""Directory for shot H5 files to ingest""",
        type=str,
        default="/data/05350/ecooper/hdr2.1/reduction/data",
    )

    parser.add_argument(
        "-sl",
        "--shotlist",
        help="""Text file of DATE OBS list""",
        type=str,
        default="hdr2.1.shotlist",
    )

    parser.add_argument(
        "-of",
        "--outfilename",
        type=str,
        help="""Relative or absolute path for output HDF5
                        file.""",
        default=None,
    )

    parser.add_argument("-survey", "--survey", type=str, default="hdr2.1")

    args = parser.parse_args(argv)
    args.log = setup_logging()

    fileh = tb.open_file(
        args.outfilename, mode="w", title=args.survey.upper() + " Fiber Index file "
    )

    shotlist = Table.read(
        args.shotlist, format="ascii.no_header", names=["date", "obs"]
    )

    tableFibers = fileh.create_table(
        fileh.root,
        "FiberIndex",
        VIRUSFiberIndex,
        "Survey Fiber Coord Info",
        expectedrows=140369264,
    )

    # set up HEALPix options
    Nside = 2 ** 15
    pixrad_arcsec = hp.max_pixrad(Nside, degrees=True) * 3600  # pixel radius in arcsec, for reference

    config = HDRconfig(survey=args.survey)

    badshot = np.loadtxt(config.badshot, dtype=int)

    for shotrow in shotlist:
        datevshot = str(shotrow["date"]) + "v" + str(shotrow["obs"]).zfill(3)
        shotid = int(str(shotrow["date"]) + str(shotrow["obs"]).zfill(3))
        
        date = shotrow["date"]
        
        try:
            args.log.info("Ingesting %s" % datevshot)
            file_obs = tb.open_file(op.join(args.shotdir, datevshot + ".h5"), "r")
            tableFibers_i = file_obs.root.Data.FiberIndex

            for row_i in tableFibers_i:

                row_main = tableFibers.row

                for col in tableFibers_i.colnames:
                    row_main[col] = row_i[col]

                fiberid = row_i["fiber_id"]

                try:
                    row_main["healpix"] = hp.ang2pix(
                        Nside, row_i["ra"], row_i["dec"], lonlat=True)
                except:
                    row_main["healpix"] = 0

                row_main["shotid"] = shotid
                row_main["date"] = date
                row_main["datevobs"] = datevshot
                
                row_main["specid"] = fiberid[20:23]
                row_main["ifuslot"] = fiberid[24:27]
                row_main["ifuid"] = fiberid[28:31]
                row_main["amp"] = fiberid[32:34]
                row_main.append()

            file_obs.close()

        except:            
            if shotid in badshot:
                pass
            else:
                args.log.error("could not ingest %s" % datevshot)

    tableFibers.cols.healpix.create_csindex()
    tableFibers.cols.ra.create_csindex()
    tableFibers.flush()
    fileh.close()
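
The healpix column written above uses Nside = 2**15 with lonlat ordering, so a consumer of the finished file can index into it directly. A minimal sketch, with a hypothetical output filename and target position:

import healpy as hp
import tables as tb

Nside = 2 ** 15
ra, dec = 150.025, 2.213                        # hypothetical target position in degrees
hpix = hp.ang2pix(Nside, ra, dec, lonlat=True)  # same pixelization as the ingest above

fileh = tb.open_file("fiber_index.h5", "r")     # hypothetical output file
fibers = fileh.root.FiberIndex.read_where("healpix == hpix")
fileh.close()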
Example #6
def main(argv=None):
    ''' Main Function '''
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(description="""Create HDF5 Astrometry file.""",
                               add_help=True)

    parser.add_argument("-d", "--date",
                        help='''Date, e.g., 20170321, YYYYMMDD''',
                        type=str, default=None)

    parser.add_argument("-o", "--observation",
                        help='''Observation number, "00000007" or "7"''',
                        type=str, default=None)

    parser.add_argument("-r", "--rootdir",
                        help='''Root Directory for Shifts''',
                        type=str, default='/scratch/00115/gebhardt/vdrp/shifts')

    parser.add_argument('-of', '--outfilename', type=str,
                        help='''Relative or absolute path for output HDF5
                        file.''', default=None)

    parser.add_argument('-a', '--append',
                        help='''Appending to existing file.''',
                        action="count", default=0)

    parser.add_argument("-tp", "--tpdir",
                        help='''Directory for Throughput Info''',
                        type=str,
                        default='/scratch/00115/gebhardt/detect')

    parser.add_argument("-detdir", "--detectdir",
                        help='''Directory for Detect Info''',
                        type=str,
                        default='/scratch/03946/hetdex/detect')
    
    parser.add_argument("-survey", "--survey",
                        help="""{hdr1, hdr2, hdr2.1, hdr3}""",
                        type=str, default="hdr3")
    
    
    args = parser.parse_args(argv)
    args.log = setup_logging()

    # Creates a new file if the "--append" option is not set or the file                                              
    # does not already exist.
    does_exist = False
    if op.exists(args.outfilename) and args.append:
        args.log.info('Appending astrometry to %s' % args.outfilename)
        fileh = tb.open_file(args.outfilename, 'a')
        does_exist = True
        try:
            fileh.remove_node(fileh.root.Astrometry, recursive=True)
        except:
            pass
    else:
        args.log.info('Creating new file for astrometry %s' % args.outfilename)
        fileh = tb.open_file(args.outfilename, 'w')
        
    groupAstrometry = fileh.create_group(fileh.root, 'Astrometry', 'Astrometry Info')
    groupCoadd = fileh.create_group(groupAstrometry, 'CoaddImages', 'Coadd Images')
    groupDithall = fileh.create_group(groupAstrometry, 'Dithall', 'Fiber Astrometry Info')
    groupOffsets = fileh.create_group(groupAstrometry, 'PositionOffsets', 
                                      'Offset in star matches')
    groupMatches = fileh.create_group(groupAstrometry, 'CatalogMatches', 'Match Catalog Info')

    tableQA = fileh.create_table(groupAstrometry, 'QA', QualityAssessment, 
                             'Quality Assessment')
    tableNV = fileh.create_table(groupAstrometry,'NominalVals', NominalVals,
                                 'Nominal Values')

    datevshot = str(args.date) + 'v' + str(args.observation).zfill(3)
    shotid = int(str(args.date) + str(args.observation).zfill(3))

    #check if shotid is in badlist

    config = HDRconfig(args.survey)
    badshots = np.loadtxt(config.badshot, dtype=int)

    badshotflag = False

    if shotid in badshots:
        badshotflag = True
    
    # store shuffle.cfg and DATEvOBS.log files

    fileshuffle = op.join(args.rootdir, str(args.date) + 'v' + str(args.observation).zfill(3),
                          'shuffle.cfg')
    try:
        f = open(fileshuffle, 'r')
        shuffle = fileh.create_array(groupAstrometry, 'ShuffleCfg', f.read().encode())
        shuffle.set_attr('filename','shuffle.cfg')
        f.close()
    except:
        args.log.warning('Could not include %s' % fileshuffle)

    # store fplane table

    filefplane = op.join(args.tpdir, str(args.date) + "v" + str(args.observation).zfill(3),
                         'coords','fplane.txt')    
    try:
        f = ascii.read(filefplane, names=['ifuslot', 'fpx', 'fpy', 'specid',
                                          'specslot', 'ifuid', 'ifurot', 'platesc'])
        fplanetable = fileh.create_table(groupAstrometry, 'fplane', f.as_array())
        fplanetable.set_attr('filename', 'fplane.txt')
    except:
        args.log.warning('Could not include %s' % filefplane)

    # store catalogs of stars used in astrometric fit

    file_stars = op.join(args.tpdir, str(args.date) + 'v' + str(args.observation).zfill(3), 
                         str(args.date) + 'v' + str(args.observation).zfill(3) + '.ifu')    
    try:
        f_stars = ascii.read(file_stars, names=['ignore', 'star_ID', 'ra_cat', 'dec_cat',
                                                'u', 'g', 'r', 'i', 'z'])
        starstable = fileh.create_table(groupAstrometry, 'StarCatalog', f_stars.as_array())
        starstable.set_attr('filename', 'DATEvOBS.ifu')
        if any(f_stars['z'] > 0):
            starstable.set_attr('catalog', 'SDSS')
        else:
            starstable.set_attr('catalog', 'GAIA')
    except:
        args.log.warning('Could not include %s' % file_stars)
    
    pngfiles = glob.glob(op.join(args.rootdir, str(args.date) + 'v' + str(args.observation).zfill(3), '*.png'))
    pngnum = 1

    for pngfile in pngfiles:
        plt_image = plt.imread(pngfile)
        pngim = fileh.create_array(groupCoadd, 'png_exp' + str(pngnum).zfill(2), plt_image)
        pngim.attrs['CLASS'] = 'IMAGE'
        pngim.attrs['filename'] = pngfile
        pngnum += 1

    fileallmch = op.join(args.tpdir, str(args.date) + 'v' + str(args.observation).zfill(3),
                         'coords', 'all.mch')
    try:
        allmch = ascii.read(fileallmch)
    except:
        args.log.warning('Could not include %s' % fileallmch)

    filenorm = op.join(args.detectdir, 'norm.all')

    try:
        norm = Table.read(filenorm, format='ascii.no_header')
    except:
        args.log.warning('Could not include %s' % filenorm)

    # index over dithers to gather dither-specific info
    for idx, expn in enumerate(['exp01', 'exp02', 'exp03']):

        radecfile = op.join(args.rootdir, str(args.date) + 'v' + str(args.observation).zfill(3), 
                             'radec2_' + expn + '.dat')
        rowNV = tableNV.row
        try:
            radec = ascii.read(radecfile)
            rowNV['expnum'] = int(expn[3:5])
            rowNV['ra'] = radec['col1']
            rowNV['dec'] = radec['col2']
            rowNV['parangle'] = radec['col3']
        except:
            args.log.warning('Could not include %s' % radecfile)

        try:
            sel_datevobs = norm['col1'] == str(args.date) + 'v' + str(args.observation).zfill(3)
            if idx == 0:
                rowNV['relflux_virus'] = norm['col2'][sel_datevobs]
            elif idx == 1:
                rowNV['relflux_virus'] = norm['col3'][sel_datevobs]
            elif idx == 2:
                rowNV['relflux_virus'] = norm['col4'][sel_datevobs]
        except Exception:
            args.log.warning('Could not include norm.all')
        
        try:
            rowNV['x_dither_pos'] = allmch['col3'][idx]
            rowNV['y_dither_pos'] = allmch['col4'][idx]
            rowNV['als_filename'] = allmch['col1'][idx]
        except:
            args.log.warning('Could not include %s' % fileallmch)
        
        rowNV.append()

        fitsfile = op.join(args.rootdir, str(args.date) + 'v' + str(args.observation).zfill(3),
                           str(args.date) + 'v' + str(args.observation).zfill(3)
                           + 'fp_' + expn + '.fits')

        if op.exists(fitsfile):
            
            F = fits.open(fitsfile)
            fitsim = fileh.create_array(groupCoadd, expn, F[0].data)
            fitsim.attrs['CLASS'] = 'IMAGE'
            fitsim.attrs['IMAGE_MINMAXRANGE'] = (-1.5, 100)
            fitsim.attrs['HEADER'] = F[0].header
            fitsim.attrs['filename'] = 'DATEvOBSfp_exp??.fits'
            F.close()


        matchpdf = op.join(args.rootdir, str(args.date) + 'v' + str(args.observation).zfill(3),
                           'match_' + expn + '.pdf')
        matchpng = 'match_pngs/match_'+ str(args.date) + 'v' + str(args.observation).zfill(3) + '_' + expn + '.png'
        
        if op.exists(matchpdf):
            os.system('convert ' + matchpdf + ' ' + matchpng)  
            plt_matchim = plt.imread(matchpng)
            matchim = fileh.create_array(groupCoadd, 'match_' + expn, plt_matchim)
            matchim.attrs['CLASS'] = 'IMAGE'
            matchim.attrs['filename'] = matchpdf
        else:
            args.log.warning('Could not include %s' % matchpdf)

        # populate offset info for catalog matches
        file_getoff = op.join(args.rootdir, str(args.date) + 'v' + str(args.observation).zfill(3),
                              'getoff_' + expn + '.out')

        try:
            f_getoff = ascii.read(file_getoff, names=['xoffset', 'yoffset', 'ra_dex',
                                                      'dec_dex', 'ra_cat', 'dec_cat',
                                                      'ifuslot'])
            getoffinfo = fileh.create_table(groupOffsets, expn, f_getoff.as_array())
            getoffinfo.set_attr('filename', 'getoff_exp??.out')
        except:
            args.log.warning('Could not include %s' % file_getoff)
            
        # populate fiber astrometry data
        file_dith = op.join(args.rootdir, str(args.date) + 'v' + str(args.observation).zfill(3),
                            'dith_' + expn + '.all')    
        try:
            f_dith = ascii.read(file_dith)
            dithtab = fileh.create_table(groupDithall, expn, Dithall)
            
            for f_dith_row in f_dith:
                dithrow = dithtab.row
                
                dithrow['ra'] = f_dith_row['ra']
                dithrow['dec'] = f_dith_row['dec']
                dithrow['ifuslot'] = f_dith_row['ifuslot']
                dithrow['XS'] = f_dith_row['XS']
                dithrow['YS'] = f_dith_row['YS']
                dithrow['xfplane'] = f_dith_row['xfplane']
                dithrow['yfplane'] = f_dith_row['yfplane']
                dithrow['multifits'] = f_dith_row['multifits']
                dithrow['timestamp'] = f_dith_row['timestamp']
                dithrow['exposure'] = f_dith_row['exposure']
                dithrow.append()
           
            dithtab.set_attr('filename', file_dith)
            dithtab.flush()
        except:
            args.log.warning('Could not include %s' % file_dith)
            
        # populate median and rms in offsets for quality assessment purposes

        file_getoff2 = op.join(args.rootdir, str(args.date) + 'v'
                               + str(args.observation).zfill(3), 'getoff2_' + expn + '.out')
        try:
            f_getoff2 = ascii.read(file_getoff2)
            row = tableQA.row
            row['expnum'] = int(expn[3:5])
            row['xoffset'] = f_getoff2['col1']
            row['yoffset'] = f_getoff2['col2']
            row['xrms'] = f_getoff2['col3']
            row['yrms'] = f_getoff2['col4']
            row['nstars'] = f_getoff2['col5']
            row.append()

        except:
            args.log.warning('Could not include %s' % file_getoff2)
        
        
        file_xy = op.join(args.rootdir, str(args.date) + 'v'
                          + str(args.observation).zfill(3), 'xy_' + expn + '.dat')
        try:
            xy_table = ascii.read(file_xy)
            tableXY = fileh.create_table(groupMatches, expn, xy_table.as_array())
            tableXY.set_attr('filename','xy_exp??.dat')
        except:
            args.log.warning('Could not include %s' % file_xy)
            
    tableQA.set_attr('filename', 'getoff2_exp??.out')
    tableNV.set_attr('dither_file', 'all.mch')
    tableNV.set_attr('norm_file', 'norm.all')
    tableNV.set_attr('radec_file', 'radec2_exp??.dat')
    tableQA.flush()
    tableNV.flush()

    tableQA = fileh.root.Astrometry.QA
    tableNV = fileh.root.Astrometry.NominalVals

    try:
        radecfinalfile = op.join(args.tpdir,
                                 str(args.date) + 'v' + str(args.observation).zfill(3),
                                 'coords', 'radec2.dat')
        radectab = ascii.read(radecfinalfile, names=['ra','dec','pa'])

    except:

        if badshotflag:
            args.log.warning('Could not open %s' % radecfinalfile)
        else:
            args.log.error('Could not open %s' % radecfinalfile)
        
    shottable = fileh.root.Shot

    for shot in shottable:
        if op.exists(radecfinalfile):
            shot['ra'] = radectab['ra'][0]
            shot['dec'] = radectab['dec'][0]
            shot['pa'] = radectab['pa'][0]
        else:
            if badshotflag:
                args.log.warning('Could not open %s' % radecfinalfile)
            else:
                args.log.error('Could not open %s' % radecfinalfile)

        try:
            shot['xoffset'] = tableQA.cols.xoffset[:]
            shot['yoffset'] = tableQA.cols.yoffset[:]
            shot['xrms'] = tableQA.cols.xrms[:]
            shot['yrms'] = tableQA.cols.yrms[:]
            shot['nstars_fit'] = tableQA.cols.nstars[:]
        except:
            if badshotflag:
                args.log.warning('Could not include astrometry shot info for %s' % datevshot)
            else:
                args.log.error('Could not include astrometry shot info for %s' % datevshot)
        try:
            shot['xditherpos'] = tableNV.cols.x_dither_pos[:]
            shot['yditherpos'] = tableNV.cols.y_dither_pos[:]
        except:
            args.log.warning('Could not include astrometry shot info for %s' % datevshot)
        try:
            shot['relflux_virus'] = tableNV.cols.relflux_virus[:]
        except:
            if badshotflag:
                args.log.warning('Could not include relflux_virus info for %s' % datevshot)
            else:
                args.log.error('Could not include relflux_virus info for %s' % datevshot)
                
        shot.update()

    fileh.close()
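
The DATEvOBS string and the integer shotid are rebuilt from args.date and args.observation many times above; a small sketch of a hypothetical helper that captures the repeated pattern:

def datevobs_and_shotid(date, observation):
    """Hypothetical helper: build 'YYYYMMDDvOOO' and the integer shotid."""
    obs = str(observation).zfill(3)
    return str(date) + "v" + obs, int(str(date) + obs)

# e.g. datevobs_and_shotid(20170321, "7") -> ("20170321v007", 20170321007)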
Example #7
def main(argv=None):
    """ Main Function """

    parser = get_parser()
    args = parser.parse_args(argv)
    args.log = setup_logging()

    args.log.info(args)

    class FiberImage2D(tb.IsDescription):
        detectid = tb.Int64Col(pos=0)
        im_wave = tb.Float32Col(args.width, pos=1)
        im_sum = tb.Float32Col((args.height, args.width), pos=2)
        im_array = tb.Float32Col((4, args.height, args.width), pos=3)

    if args.merge:
        fileh = tb.open_file("merged_im2D.h5", "w")

        fibim2D_table = fileh.create_table(fileh.root,
                                           "FiberImages",
                                           FiberImage2D,
                                           "Fiber Cutout Images",
                                           expectedrows=1000000)
        phot_table = fileh.create_table(fileh.root,
                                        "PhotImages",
                                        PhotImage,
                                        "Photometric Images",
                                        expectedrows=1000000)
        spec_table = fileh.create_table(fileh.root,
                                        "Spec1D",
                                        Spec1D,
                                        "Aperture Summed Spectrum",
                                        expectedrows=1000000)

        files = sorted(glob.glob("im2D*.h5"))

        for file in files:
            args.log.info('Ingesting %s' % file)
            fileh_i = tb.open_file(file, "r")
            fibim2D_table_i = fileh_i.root.FiberImages.read()
            phot_table_i = fileh_i.root.PhotImages.read()
            spec_table_i = fileh_i.root.Spec1D.read()

            fibim2D_table.append(fibim2D_table_i)
            phot_table.append(phot_table_i)
            spec_table.append(spec_table_i)

            fileh_i.close()

        fibim2D_table.flush()
        phot_table.flush()
        spec_table.flush()

        fibim2D_table.cols.detectid.create_csindex()
        phot_table.cols.detectid.create_csindex()
        spec_table.cols.detectid.create_csindex()

        fibim2D_table.flush()
        phot_table.flush()
        spec_table.flush()

        fileh.close()
        sys.exit("Merged h5 files in current directory. Exiting")

    shotid_i = args.shotid

    detects = Detections(args.survey, loadtable=False)

    if args.infile:

        try:
            catalog = Table.read(args.infile, format="ascii")
        except:
            catalog = Table.read(args.infile)

        selcat = catalog["shotid"] == args.shotid

        detectlist = np.array(catalog["detectid"][selcat])

    elif args.dets:
        if op.exists(args.dets):
            try:
                catalog = Table.read(args.dets, format="ascii")
                selcat = catalog["shotid"] == int(shotid_i)
                detectlist = np.array(catalog["detectid"][selcat])
            except:
                detectlist = np.loadtxt(args.dets, dtype=int)
        else:
            args.log.warning('No dets for ' + str(shotid_i))
            sys.exit()

    if len(detectlist) == 0:
        sys.exit()

    # open up catalog library from elixer
    catlib = catalogs.CatalogLibrary()

    args.log.info("Opening shot: " + str(shotid_i))

    fibers = Fibers(args.shotid, survey=args.survey)

    if args.h5file:

        fileh = tb.open_file("im2D_" + str(args.shotid) + ".h5", "w")

        fibim2D_table = fileh.create_table(fileh.root, "FiberImages",
                                           FiberImage2D, "Fiber Cutout Images")

        phot_table = fileh.create_table(fileh.root, "PhotImages", PhotImage,
                                        "Photometric Images")
        spec_table = fileh.create_table(fileh.root, "Spec1D", Spec1D,
                                        "Aperture Summed Spectrum")

        for detectid_i in detectlist:

            # add data to HDF5 file
            row = fibim2D_table.row
            row["detectid"] = detectid_i
            sel = detects.detectid == detectid_i

            try:
                row["im_wave"] = get_2Dimage_wave(detectid_i,
                                                  detects,
                                                  fibers,
                                                  width=args.width,
                                                  height=args.height)
            except:
                args.log.error("Could not get wave array for %s" % detectid_i)

            try:
                row["im_sum"] = get_2Dimage(detectid_i,
                                            detects,
                                            fibers,
                                            width=args.width,
                                            height=args.height)
            except:
                args.log.error("Could not get Fiber sum for %s" % detectid_i)
            try:
                im_arr, fiber_table = get_2Dimage_array(detectid_i,
                                                        detects,
                                                        fibers,
                                                        width=args.width,
                                                        height=args.height)

                row["im_array"] = im_arr
            except:
                args.log.error("Could not get 4 Fiber info for %s" %
                               detectid_i)

            row.append()

            row_spec = spec_table.row
            spec_tab = detects.get_spectrum(detectid_i)
            row_spec["detectid"] = detectid_i
            row_spec["spec1D"] = spec_tab["spec1d"]
            row_spec["spec1D_err"] = spec_tab["spec1d_err"]
            row_spec.append()

            row_phot = phot_table.row

            # add in phot image, need RA/DEC from catalog
            # sel_det = detects.detectid == detectid_i
            # coord = detects.coords[sel_det]

            det_row = detects.hdfile.root.Detections.read_where(
                'detectid == detectid_i')

            coord = SkyCoord(ra=det_row["ra"] * u.deg,
                             dec=det_row["dec"] * u.deg)

            row_phot["detectid"] = detectid_i

            # ignore the Fall data for now
            if coord.dec.value > 10:
                try:
                    cutout = catlib.get_cutouts(
                        position=coord,
                        radius=5,
                        aperture=None,
                        dynamic=False,
                        filter="r",
                        first=True,
                    )[0]
                    if cutout["instrument"] == "HSC":
                        # get shape to ensure slicing on cropped images
                        phot = np.shape(cutout["cutout"].data)

                        row_phot["im_phot"] = cutout["cutout"].data
                        header = cutout["cutout"].wcs.to_header()
                        row_phot["im_phot_hdr"] = header.tostring()

                except:
                    pass
                    #args.log.warning("No imaging available for source")
            else:
                pass

            row_phot.append()

        spec_table.flush()
        phot_table.flush()
        fibim2D_table.flush()
        fileh.close()

    else:
        for detectid_i in detectlist:

            save_2Dimage(
                detectid_i,
                detects,
                fibers,
                width=args.width,
                height=args.height,
                path=args.path,
            )

    if args.ra and args.dec:
        # NOTE this has not been updated yet.. will build functionality soon
        obj_coords = SkyCoord(args.ra * u.deg, args.dec * u.deg, frame="icrs")
        idx = fibers.query_region_idx(obj_coords, radius=(args.rad / 3600.0))

        output = Table()
        output["ra"] = fibers.coords.ra[idx] * u.deg
        output["dec"] = fibers.coords.dec[idx] * u.deg
        filenames = []

        fileidx = 101
        for i in idx:
            filename = "tmp" + str(fileidx) + ".dat"
            filenames.append(filename)
            save_rsp_spectrum(
                fibers,
                i,
                file=filename,
            )
            fileidx += 1

        output["filename"] = np.array(filenames)
        ascii.write(output, "fib_coords.dat", overwrite=True)

    fibers.close()
    detects.close()
    tb.file._open_files.close_all()
Example #8
def main(argv=None):
    ''' Main Function '''
    # Call initial parser from init_utils                                                          
    parser = ap.ArgumentParser(description="""Create HDF5 file.""",
                               add_help=True)

    parser.add_argument("-d", "--date",
                        help='''Date, e.g., 20170321, YYYYMMDD''',
                        type=str, default=None)

    parser.add_argument("-o", "--observation",
                        help='''Observation number, "00000007" or "7"''',
                        type=str, default=None)

    parser.add_argument("-r", "--rootdir",
                        help='''Root Directory for Reductions''',
                        type=str, default='/data/00115/gebhardt/calfits/')

    parser.add_argument('-of', '--outfilename', type=str,
                        help='''Relative or absolute path for output HDF5                          
                        file.''', default=None)

    parser.add_argument("-survey", "--survey", help='''{hdr1, hdr2, hdr2.1}''',
                        type=str, default='hdr2.1')

    args = parser.parse_args(argv)
    args.log = setup_logging()

    calfiles = get_cal_files(args)

    datestr = '%sv%03d' % (args.date, int(args.observation))

    shotid = int(str(args.date) + str(args.observation).zfill(3))

    #check if shotid is in badlist
    config = HDRconfig(args.survey)
    badshots = np.loadtxt(config.badshot, dtype=int)
    
    badshotflag = False
    
    if shotid in badshots:
        badshotflag = True
    
    if len(calfiles) == 0:
        if badshotflag:
            args.log.warning("No calfits file to append for %s" % datestr)
        else:
            args.log.error("No calfits file to append for %s" % datestr)

        sys.exit('Exiting cal append script for %s' % datestr)

    if op.exists(args.outfilename):
        fileh = tb.open_file(args.outfilename, 'a')
    else:
        args.log.error('Problem opening : ' + args.outfilename)
        sys.exit('Exiting Script')
    
    args.log.info('Appending calibrated fiber arrays to ' + args.outfilename)

    fibtable = fileh.root.Data.Fibers
    
    for calfile in calfiles:

        multi  = calfile[49:60]
        try:
            cal_table = get_cal_table(calfile)
        except:
            args.log.error('Could not ingest calfile: %s' % calfile)
            continue
            
        args.log.info('Working on IFU ' + multi )
        for amp_i in ['LL','LU','RL','RU']:
            
            multiframe_i = 'multi_'+ multi + '_' + amp_i

            for fibrow in fibtable.where('multiframe == multiframe_i'):
                
                idx = (cal_table['expnum'] == fibrow['expnum']) * (cal_table['multiframe'] == fibrow['multiframe'].decode()) * (cal_table['fibidx'] == fibrow['fibidx'])

                if np.sum(idx) >= 1:
                    fibrow['calfib']  = cal_table['calfib'][idx]
                    fibrow['calfibe'] = cal_table['calfibe'][idx]
                    fibrow['calfib_counts'] = cal_table['calfib_counts'][idx]
                    fibrow['calfibe_counts'] = cal_table['calfibe_counts'][idx]
                    fibrow['spec_fullsky_sub'] = cal_table['spec_fullsky_sub'][idx]
                    fibrow.update()
                #else:
                   # args.log.warning("No fiber match for %s" % fibrow['fiber_id'])
                    
    args.log.info('Flushing and closing H5 file')
    fibtable.flush()
    fileh.close()
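
The ingest loop above relies on PyTables picking up multiframe_i from the local namespace inside the where() condition, and on updating matched rows in place. A minimal standalone sketch of that pattern, with hypothetical file and column names:

import tables as tb

fileh = tb.open_file("example_shot.h5", "a")   # hypothetical file
fibtable = fileh.root.Data.Fibers              # hypothetical table

multiframe_i = "multi_301_015_038_LL"          # picked up as a condition variable by where()
for fibrow in fibtable.where('multiframe == multiframe_i'):
    fibrow['calfib'] = fibrow['calfib'] * 1.0  # edit the current row in place
    fibrow.update()                            # write the modified row back

fibtable.flush()
fileh.close()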
Example #9
def main(argv=None):
    """ Main Function """
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(description="""Create HDF5 Survey file.""",
                               add_help=True)

    parser.add_argument(
        "-r",
        "--rootdir",
        help="""Root Directory for Reductions""",
        type=str,
        default="/work/03946/hetdex/maverick/red1/reductions/",
    )

    parser.add_argument(
        "-sdir",
        "--shotdir",
        help="""Directory for shot H5 files to ingest""",
        type=str,
        default="/scratch/03946/hetdex/hdr3/reduction/data",
    )

    parser.add_argument(
        "-sl",
        "--shotlist",
        help="""Text file of DATE OBS list""",
        type=str,
        default="dex.hdr2.shotlist",
    )

    parser.add_argument(
        "-ad",
        "--astrometry_dir",
        help="""Directory for Shifts""",
        type=str,
        default="/data/00115/gebhardt/vdrp/shifts/",
    )

    parser.add_argument(
        "-of",
        "--outfilename",
        type=str,
        help="""Relative or absolute path for output HDF5 file.""",
        default=None,
    )

    parser.add_argument(
        "-flim",
        "--flim",
        help="""Path to flim look up table""",
        type=str,
        default="/data/05350/ecooper/hdr2.1/survey/average_one_sigma.txt",
    )

    parser.add_argument("-survey", "--survey", type=str, default="hdr2.1")

    args = parser.parse_args(argv)

    print(args)

    args.log = setup_logging()

    fileh = tb.open_file(args.outfilename,
                         mode="w",
                         title=args.survey.upper() + " Survey file")

    shotlist = Table.read(args.shotlist,
                          format="ascii.no_header",
                          names=["date", "obs"])

    survey = Table()

    for shotrow in shotlist:
        datevshot = str(shotrow["date"]) + "v" + str(shotrow["obs"]).zfill(3)

        try:
            args.log.info('Ingesting ' + datevshot)
            file_obs = tb.open_file(op.join(args.shotdir, datevshot + ".h5"),
                                    "r")

            shottable = Table(file_obs.root.Shot.read())

            # updating field in survey file
            shottable['field'] = define_field(str(shottable['objid'][0]))

            survey = vstack([survey, shottable])
            file_obs.close()
        except Exception:
            args.log.error("Could not ingest %s" % datevshot)

    tableMain = fileh.create_table(fileh.root, "Survey", obj=survey.as_array())

    tableMain.flush()
    fileh.close()
Example #10
def main(argv=None):
    """ Main Function """
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(description="""Create HDF5 file.""",
                               add_help=True)

    parser.add_argument("-m",
                        "--month",
                        help="""Month to run: 201901""",
                        type=str,
                        default=None)

    parser.add_argument(
        "-d",
        "--date",
        help="""Date, e.g., 20170321, YYYYMMDD""",
        type=str,
        default=None,
    )

    parser.add_argument(
        "-o",
        "--observation",
        help='''Observation number, "00000007" or "7"''',
        type=str,
        default=None,
    )

    parser.add_argument(
        "-of",
        "--outfilename",
        type=str,
        help="""Relative or absolute path for output HDF5
                        file.""",
        default=None,
    )

    parser.add_argument(
        "-a",
        "--append",
        help="""Appending to existing detection HDF5 file.""",
        action="count",
        default=0,
    )

    parser.add_argument(
        "-dp",
        "--detect_path",
        help="""Path to detections""",
        type=str,
        default="/data/00115/gebhardt/alldet/output",
    )

    parser.add_argument(
        "-ifu",
        "--ifu",
        help="""IFU to ingest""",
        type=str,
        default=None,
    )

    parser.add_argument(
        "-md",
        "--mergedir",
        help="""Merge all HDF5 files in the defined merge 
                        directory. Can append to existing file using --append option""",
        type=str,
        default=os.getcwd(),
    )

    parser.add_argument(
        "--merge",
        "-merge",
        help="""Boolean trigger to merge all 2*.fits files in cwd""",
        default=False,
        required=False,
        action="store_true",
    )

    parser.add_argument(
        "--mergemonth",
        "-mm",
        help=""" Boolean trigger to merge all detect_month*.h5 files""",
        default=False,
        required=False,
        action="store_true",
    )

    parser.add_argument(
        "--broad",
        "-broad",
        help=""" Boolean trigger to select broad sources""",
        default=False,
        required=False,
        action="store_true",
    )

    args = parser.parse_args(argv)
    args.log = setup_logging()

    #check if shotid is in badlist
    #    config = HDRconfig(args.survey)
    #    badshots = np.loadtxt(config.badshot, dtype=int)

    if args.outfilename:
        outfilename = args.outfilename
    elif args.month and args.merge:
        outfilename = 'detect_month_' + str(args.month) + '.h5'
    else:
        outfilename = 'detect_' + str(args.date) + str(
            args.observation).zfill(3) + '.h5'

    # Creates a new file if the "--append" option is not set or the file
    # does not already exist.

    if args.append:
        fileh = tb.open_file(outfilename, "a",
                             "HDR2.1 Detections Database")
        detectidx = np.max(fileh.root.Detections.cols.detectid) + 1
    else:

        if args.broad:
            fileh = tb.open_file(outfilename, "w",
                                 "HDR2.1 Broad Detections Database")
            index_buff = 2160000000


        # elif args.continuum:
        #     fileh = tb.open_file(outfilename, "w", "HDR2.1 Continuum Source Database")
        #     index_buff = 2190000000
        else:
            fileh = tb.open_file(outfilename, "w",
                                 "HDR2.1 Detections Database")
            index_buff = 2100000000

        detectidx = index_buff

    if args.merge:

        tableMain = fileh.create_table(
            fileh.root,
            "Detections",
            Detections,
            "HETDEX Line Detection Catalog",
            expectedrows=1000000,
        )
        tableFibers = fileh.create_table(
            fileh.root,
            "Fibers",
            Fibers,
            "Fiber info for each detection",
            expectedrows=15000000,
        )
        tableSpectra = fileh.create_table(
            fileh.root,
            "Spectra",
            Spectra,
            "1D Spectra for each Line Detection",
            expectedrows=1000000,
        )

        if args.month:
            files = sorted(
                glob.glob(
                    op.join(args.mergedir,
                            "detect_" + str(args.month) + '*.h5')))
        elif args.mergemonth:
            files = sorted(
                glob.glob(op.join(args.mergedir, "detect_month*.h5")))
        else:
            files = sorted(glob.glob(op.join(args.mergedir, "detect_2*.h5")))

        detectid_max = 0

        for file in files:

            args.log.info("Appending detect H5 file: %s" % file)

            fileh_i = tb.open_file(file, "r")

            tableMain_i = fileh_i.root.Detections.read()

            if np.size(tableMain_i) == 0:
                args.log.error('No detections for %s' % file)
                continue

            tableFibers_i = fileh_i.root.Fibers.read()
            tableSpectra_i = fileh_i.root.Spectra.read()

            tableMain_i["detectid"] += detectid_max
            tableFibers_i["detectid"] += detectid_max
            tableSpectra_i["detectid"] += detectid_max

            # the offset for the next file is recomputed after appending so
            # that detectids remain unique across the merged files

            tableMain.append(tableMain_i)
            tableFibers.append(tableFibers_i)
            tableSpectra.append(tableSpectra_i)

            detectid_max = np.max(tableMain.cols.detectid[:]) - index_buff + 1

            fileh_i.close()
            tableFibers.flush()  # just to be safe
            tableSpectra.flush()
            tableMain.flush()

        if args.month:
            ifufiles = sorted(
                glob.glob(
                    op.join(args.mergedir,
                            "ifustat_" + str(args.month) + "*.tab")))
        elif args.mergemonth:
            ifufiles = sorted(
                glob.glob(op.join(args.mergedir, "ifustats_month*.tab")))
        else:
            ifufiles = sorted(
                glob.glob(op.join(args.mergedir, "ifustat_2*.tab")))

        ifu_tab = Table()

        for ifufile in ifufiles:
            ifu_i = Table.read(ifufile, format='ascii')
            if np.size(ifu_i) > 0:
                ifu_tab = vstack([ifu_tab, ifu_i])
            else:
                args.log.warning('IFU stats file is empty: ' + ifufile)

        if args.month:
            ifu_tab.write('ifustats_month_' + str(args.month) + '.tab',
                          format='ascii')
        else:
            ifu_tab.write('ifustats_merged.tab', format='ascii')

    else:

        if args.append:
            tableMain = fileh.root.Detections
            tableSpectra = fileh.root.Spectra
            tableFibers = fileh.root.Fibers
        else:
            tableMain = fileh.create_table(fileh.root, "Detections",
                                           Detections,
                                           "HETDEX Line Detection Catalog")
            tableFibers = fileh.create_table(fileh.root, "Fibers", Fibers,
                                             "Fiber info for each detection")
            tableSpectra = fileh.create_table(
                fileh.root, "Spectra", Spectra,
                "1D Spectra for each Line Detection")

        amp_stats = Table.read(
            '/data/05350/ecooper/hdr2.1/survey/amp_flag.fits')

        colnames = [
            'wave', 'wave_err', 'flux', 'flux_err', 'linewidth',
            'linewidth_err', 'continuum', 'continuum_err', 'sn', 'sn_err',
            'chi2', 'chi2_err', 'ra', 'dec', 'datevshot', 'noise_ratio',
            'linewidth_fix', 'chi2_fix', 'chi2fib', 'src_index', 'multiname',
            'exp', 'xifu', 'yifu', 'xraw', 'yraw', 'weight'
        ]

        if args.date and args.observation:
            mcres_str = str(args.date) + "v" + str(
                args.observation).zfill(3) + "*mc"
            shotid = int(str(args.date) + str(args.observation).zfill(3))
            amp_stats = amp_stats[amp_stats['shotid'] == shotid]
        elif args.month:
            mcres_str = str(args.month) + "*mc"
            amp_stats['month'] = (amp_stats['shotid'] / 100000).astype(int)
            amp_stats = amp_stats[amp_stats['month'] == int(args.month)]
        elif args.ifu:
            mcres_str = "*" + args.ifu + ".mc"
        else:
            args.log.warning(
                'Please provide a date (YYYYMMDD) + observation or a month (YYYYMM)')
            sys.exit()

        catfiles = sorted(glob.glob(op.join(args.detect_path, mcres_str)))

        det_cols = fileh.root.Detections.colnames

        amplist = []
        ndet = []
        ndet_sel = []

        for catfile in catfiles:

            amp_i = catfile[-27:-3]

            if args.ifu:
                # Fudge to add in V038 for 201701 to 20180915 only
                date_i = int(amp_i[0:8])
                if date_i > 20180915:
                    break

            amplist.append(amp_i)

            args.log.info('Ingesting Amp: ' + amp_i)

            ndet_file = sum(1 for line in open(catfile))
            ndet.append(ndet_file)

            if ndet_file == 0:
                ndet_sel.append(0)
                continue

            try:
                detectcatall = Table.read(catfile,
                                          format='ascii.no_header',
                                          names=colnames)
            except:
                ndet_sel.append(0)
                args.log.warning('Could not ingest ' + catfile)
                continue

            if args.broad:
                selSN = (detectcatall['sn'] > 5)
                selLW = (detectcatall['linewidth'] > 5)
                selchi2 = (detectcatall['chi2'] > 1.6)
                selcont = (detectcatall['continuum'] >=
                           -3) * (detectcatall['continuum'] <= 8)
                selwave = (detectcatall['wave'] > 3510) * (detectcatall['wave']
                                                           < 5480)
                selchi2fib = (detectcatall['chi2fib'] < 5)
                selcat = selSN * selLW * selcont * selwave * selchi2fib
            else:
                selSN = (detectcatall['sn'] > 4.5)
                selLW = (detectcatall['linewidth'] > 1.7)
                #                selchi2 = (detectcatall['chi2'] <= 5)
                #                selcont = (detectcatall['continuum'] >= -3) * (detectcatall['continuum'] <= 20)
                #                selwave = (detectcatall['wave'] > 3510) * (detectcatall['wave'] < 5490)
                selchi2fib = (detectcatall['chi2fib'] < 5)
                selcat = selSN * selLW * selchi2fib

            detectcat = detectcatall[selcat]

            nsel_file = np.sum(selcat)

            try:
                specfile = op.join(args.detect_path, amp_i + ".spec")
                spectable = Table.read(specfile, format="ascii.no_header")
            except:
                args.log.warning('Could not ingest ' + specfile)
                ndet_sel.append(0)
                continue

            try:
                filefiberinfo = op.join(args.detect_path, amp_i + ".list")
                fibertable = Table.read(filefiberinfo,
                                        format="ascii.no_header")
            except:
                args.log.warning('Could not ingest ' + filefiberinfo)
                ndet_sel.append(0)
                continue

            ndet_sel.append(nsel_file)

            for row in detectcat:

                inputid_i = amp_i + '_' + str(row['src_index']).zfill(3)

                rowMain = tableMain.row

                rowMain['detectid'] = detectidx
                if args.date and args.observation:
                    rowMain['shotid'] = int(
                        str(args.date) + str(args.observation).zfill(3))
                    rowMain['date'] = int(args.date)
                    rowMain['obsid'] = int(args.observation)
                else:
                    rowMain['date'] = int(amp_i[0:8])
                    rowMain['obsid'] = int(amp_i[9:12])
                    rowMain['shotid'] = int(amp_i[0:8] + amp_i[9:12])

                # check if amp is in bad amp list
                multiframe = row['multiname'][0:20]

                if multiframe in [
                        'multi_051_105_051_RL', 'multi_051_105_051_RU'
                ]:
                    if (row['wave'] > 3540) and (row['wave'] < 3560):
                        continue

                if multiframe in ['multi_032_094_028_RU']:
                    if (row['wave'] > 3530) and (row['wave'] < 3545):
                        continue

                selamp = (amp_stats['shotid'] == rowMain['shotid']) * (
                    amp_stats['multiframe'] == multiframe)
                ampflag = amp_stats['flag'][selamp]

                if np.size(ampflag) == 0:
                    args.log.error('No ampflag for ' + str(rowMain['shotid']) +
                                   ' ' + multiframe)

                if ampflag == False:
                    continue

                # check if Karl stored the same fiber as me:
                fiber_id_Karl = str(rowMain["shotid"]) + "_" + str(row["exp"][4:5]) \
                                + "_" + multiframe + "_" \
                                + str(int(row['multiname'][21:24])).zfill(3)
                karl_weight = row['weight']

                rowMain['inputid'] = inputid_i

                for col in colnames:
                    try:
                        rowMain[col] = row[col]
                    except:
                        pass

                rowMain['detectname'] = get_detectname(row['ra'], row['dec'])

                selspec = spectable['col11'] == row['src_index']

                rowspectra = tableSpectra.row

                rowspectra["detectid"] = detectidx

                dataspec = spectable[selspec]

                rowspectra["spec1d"] = dataspec["col2"] / dataspec["col9"]
                rowspectra["spec1d_err"] = dataspec["col3"] / dataspec["col9"]
                rowspectra["wave1d"] = dataspec["col1"]
                rowspectra["spec1d_nc"] = dataspec["col2"]
                rowspectra["spec1d_nc_err"] = dataspec["col3"]
                rowspectra["counts1d"] = dataspec["col4"]
                rowspectra["counts1d_err"] = dataspec["col5"]
                rowspectra["apsum_counts"] = dataspec["col6"]
                rowspectra["apsum_counts_err"] = dataspec["col7"]
                rowspectra["apcor"] = dataspec["col9"]
                rowspectra["flag_pix"] = dataspec["col10"]

                #rowspectra.append()

                # add fiber info for each detection


                selfiber = fibertable['col16'] == row['src_index']

                datafiber = fibertable[selfiber]

                # check to see if any of the 5 highest weight fibers fall on a bad amplifier

                mf_array = []
                weight_array = []
                for ifiber in np.arange(np.size(datafiber)):
                    multiname = datafiber["col5"][ifiber]

                    mf_array.append(multiname[0:20])
                    weight_array.append(datafiber["col14"][ifiber])

                isort = np.flipud(np.argsort(weight_array))

                sort_mf = np.array(mf_array)[isort]

                for multiframe in sort_mf[0:5]:

                    if args.date and args.observation:
                        ampflag = amp_stats['flag'][amp_stats['multiframe'] ==
                                                    multiframe][0]

                    elif args.month:
                        selamp = (amp_stats['shotid'] == rowMain['shotid']) * (
                            amp_stats['multiframe'] == multiframe)
                        ampflag = amp_stats['flag'][selamp]

                    if np.size(ampflag) == 0:
                        args.log.error('No ampflag for ' +
                                       str(rowMain['shotid']) + ' ' +
                                       multiframe)

                    if ampflag == False:
                        break

                # skip appending source to Fibers and Spectra table
                if ampflag == False:
                    continue

                for ifiber in np.arange(np.size(datafiber)):
                    rowfiber = tableFibers.row
                    rowfiber["detectid"] = detectidx
                    rowfiber["ra"] = datafiber["col1"][ifiber]
                    rowfiber["dec"] = datafiber["col2"][ifiber]
                    rowfiber["x_ifu"] = datafiber["col3"][ifiber]
                    rowfiber["y_ifu"] = datafiber["col4"][ifiber]
                    rowfiber["expnum"] = int(
                        str(datafiber["col6"][ifiber])[3:5])
                    multiname = datafiber["col5"][ifiber]
                    multiframe = multiname[0:20]
                    fiber_id_i = (str(rowMain["shotid"]) + "_" +
                                  str(int(rowfiber["expnum"])) + "_" +
                                  multiframe + "_" +
                                  str(int(multiname[21:24])).zfill(3))
                    rowfiber["fiber_id"] = fiber_id_i
                    rowfiber["multiframe"] = multiframe
                    rowfiber["specid"] = multiframe[6:9]
                    rowfiber["ifuslot"] = multiframe[10:13]
                    rowfiber["ifuid"] = multiframe[14:17]
                    rowfiber["amp"] = multiframe[18:20]
                    rowfiber["fibnum"] = int(multiname[21:24])
                    rowfiber["distance"] = datafiber["col7"][ifiber]
                    rowfiber["wavein"] = datafiber["col8"][ifiber]
                    rowfiber["timestamp"] = datafiber["col9"][ifiber]
                    rowfiber["date"] = datafiber["col10"][ifiber]
                    rowfiber["obsid"] = str(datafiber["col11"][ifiber])[0:3]
                    rowfiber["x_raw"] = datafiber["col12"][ifiber]
                    rowfiber["y_raw"] = datafiber["col13"][ifiber]
                    rowfiber["flag"] = datafiber["col15"][ifiber]
                    rowfiber["weight"] = datafiber["col14"][ifiber]

                    rowfiber.append()

                # Now append brightest fiber info to Detections table:
                ifiber = np.argmax(datafiber["col14"])
                multiname = datafiber["col5"][ifiber]
                multiframe = multiname[0:20]
                rowMain["expnum"] = int(str(datafiber["col6"][ifiber])[3:5])
                fiber_id_i = (str(rowMain["shotid"]) + "_" +
                              str(rowMain["expnum"]) + "_" + multiframe + "_" +
                              str(int(multiname[21:24])).zfill(3))

                if fiber_id_i != fiber_id_Karl:
                    weight = datafiber["col14"][ifiber]
                    weightdif = np.abs(weight - karl_weight)
                    if weightdif > 0.001:
                        args.log.error("Karl's FiberID does not match: " +
                                       inputid_i)

                rowMain["fiber_id"] = fiber_id_i
                rowMain["multiframe"] = multiframe
                rowMain["specid"] = multiframe[6:9]
                rowMain["ifuslot"] = multiframe[10:13]
                rowMain["ifuid"] = multiframe[14:17]
                rowMain["amp"] = multiframe[18:20]
                rowMain["fibnum"] = int(multiname[21:24])
                rowMain["x_raw"] = datafiber["col12"][ifiber]
                rowMain["y_raw"] = datafiber["col13"][ifiber]
                rowMain["x_ifu"] = datafiber["col3"][ifiber]
                rowMain["y_ifu"] = datafiber["col4"][ifiber]
                rowMain["weight"] = datafiber["col14"][ifiber]

                rowMain.append()
                rowspectra.append()

                detectidx += 1

        tableMain.flush()
        tableSpectra.flush()
        tableFibers.flush()

        ifu_stat_tab = Table([amplist, ndet, ndet_sel],
                             names=['ampid', 'ndet', 'ndetsel'])

        if args.month:
            ifutabname = 'ifustat_' + str(args.month) + '.tab'
        else:
            ifutabname = 'ifustat_' + str(args.date) + str(
                args.observation).zfill(3) + '.tab'

        ifu_stat_tab.write(ifutabname, format='ascii', overwrite=True)

    # create completely sorted index on the detectid
    # to make queries against that column much faster
    if args.append:
        args.log.info("Reindexing the detectid column")
        tableMain.cols.detectid.reindex()
        tableFibers.cols.detectid.reindex()
        tableSpectra.cols.detectid.reindex()
        tableFibers.flush()  # just to be safe
        tableSpectra.flush()
        tableMain.flush()
    else:
        tableMain.cols.detectid.create_csindex()
        tableFibers.cols.detectid.create_csindex()
        tableSpectra.cols.detectid.create_csindex()
        tableFibers.flush()  # just to be safe
        tableSpectra.flush()
        tableMain.flush()
    args.log.info("File finished: %s" % outfilename)
    fileh.close()
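The completely sorted indexes created above are what make later detectid lookups fast. A minimal sketch of such a query, assuming a finished output file named cont_20200101007.h5 and an illustrative detectid value (both are placeholders, not values from a real run):

import tables as tb

# Open the finished continuum-source file read-only; the path is a
# hypothetical example.
fileh = tb.open_file("cont_20200101007.h5", "r")

# read_where() can use the completely sorted index (CSI) on detectid,
# so this lookup avoids a full table scan.
rows = fileh.root.Detections.read_where("detectid == 3090000001")
print(rows["ra"], rows["dec"])

fileh.close()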
Пример #11
0
def main(argv=None):
    ''' Main Function '''
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(description="""Create HDF5 Astrometry file.""",
                               add_help=True)

    parser.add_argument("-d",
                        "--date",
                        help='''Date, e.g., 20170321, YYYYMMDD''',
                        type=str,
                        default=None)

    parser.add_argument("-o",
                        "--observation",
                        help='''Observation number, "00000007" or "7"''',
                        type=str,
                        default=None)

    parser.add_argument("-r",
                        "--rootdir",
                        help='''Root Directory for Shifts''',
                        type=str,
                        default='/data/00115/gebhardt/skysub/skys/')

    parser.add_argument('-of',
                        '--outfilename',
                        type=str,
                        help='''Relative or absolute path for output HDF5
                        file.''',
                        default=None)

    parser.add_argument('-a',
                        '--append',
                        help='''Appending to existing file.''',
                        action="count",
                        default=0)

    parser.add_argument("-survey",
                        "--survey",
                        help="""{hdr1, hdr2, hdr2.1}""",
                        type=str,
                        default="hdr2.1")

    args = parser.parse_args(argv)
    args.log = setup_logging()

    if op.exists(args.outfilename) and args.append:
        args.log.info('Appending fullskymodel to %s' % args.outfilename)
        fileh = tb.open_file(args.outfilename, 'a')

        try:
            fileh.remove_node(fileh.root.FullSkyModel, recursive=True)
        except:
            pass
    else:
        args.log.info('Creating new file for FullSkyModel %s' %
                      args.outfilename)
        fileh = tb.open_file(args.outfilename, 'w')

    groupFullSkyModel = fileh.create_group(fileh.root, 'FullSkyModel',
                                           'FullSkyModel')

    datevshot = str(args.date) + 'v' + str(args.observation).zfill(3)
    shotid = int(str(args.date) + str(args.observation).zfill(3))

    #check if shotid is in badlist

    config = HDRconfig(args.survey)

    badshots = np.loadtxt(config.badshot, dtype=int)

    badshotflag = False

    if shotid in badshots:
        badshotflag = True

    # store the per-exposure full sky model files

    for expn in ['exp01', 'exp02', 'exp03']:

        skyfile = op.join(
            args.rootdir, 'd' + str(args.date) + 's' +
            str(args.observation).zfill(3) + expn + 'sky.dat')
        try:
            sky_array = np.loadtxt(skyfile)
            if np.size(sky_array) > 0:
                fileh.create_array(groupFullSkyModel, expn, sky_array)
            else:
                if badshotflag:
                    args.log.warning('File empty %s' % skyfile)
                else:
                    args.log.error('File empty %s' % skyfile)
        except:
            if badshotflag:
                args.log.warning('Could not include %s' % skyfile)
            else:
                args.log.error('Could not include %s' % skyfile)

    fileh.close()
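For reference, a minimal sketch of reading back the per-exposure sky arrays written above; the filename is an assumed example:

import tables as tb

fileh = tb.open_file("20200101v007.h5", "r")

# Each exposure with a non-empty sky.dat file is stored as an array node
# under /FullSkyModel (exp01, exp02, exp03).
for node in fileh.root.FullSkyModel:
    sky = node.read()
    print(node.name, sky.shape)

fileh.close()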
Пример #12
0
def main(argv=None):
    """ Main Function """
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(description="""Create HDF5 file.""",
                               add_help=True)

    parser.add_argument("-m",
                        "--month",
                        help="""Month to run: 201901""",
                        type=str,
                        default=None)

    parser.add_argument(
        "-d",
        "--date",
        help="""Date, e.g., 20170321, YYYYMMDD""",
        type=str,
        default=None,
    )

    parser.add_argument(
        "-o",
        "--observation",
        help='''Observation number, "00000007" or "7"''',
        type=str,
        default=None,
    )

    parser.add_argument(
        "-cs",
        "--contsource",
        help="""Path to Karl's rext catalog""",
        type=str,
        default='/scratch/00115/gebhardt/cs/rcs0',
    )

    parser.add_argument(
        "-dp",
        "--detect_path",
        help="""Path to detections""",
        type=str,
        default="/scratch/00115/gebhardt/cs/spec",
    )

    parser.add_argument(
        "-of",
        "--outfilename",
        type=str,
        help="""Relative or absolute path for output HDF5
        file.""",
        default=None,
    )

    parser.add_argument(
        "-a",
        "--append",
        help="""Appending to existing detection HDF5 file.""",
        action="count",
        default=0,
    )

    parser.add_argument(
        "-sl",
        "--shotlist",
        help="""Text file of DATE OBS list""",
        type=str,
        default="/scratch/03946/hetdex/hdr3/survey/hdr3.shotlist",
    )

    parser.add_argument(
        "--merge",
        "-merge",
        help="""Boolean trigger to merge all cont_2*.h5 files in cwd""",
        default=False,
        required=False,
        action="store_true",
    )

    parser.add_argument(
        "-md",
        "--mergedir",
        help="""Merge all HDF5 files in the defined merge
        directory. Can append to existing file using --append option""",
        type=str,
        default=os.getcwd(),
    )

    args = parser.parse_args(argv)
    args.log = setup_logging()

    index_buff = 3090000000
    detectidx = index_buff

    if args.merge:
        n_size = 300000

        fileh = tb.open_file(args.outfilename, "w",
                             "HDR3 Continuum Source Database")

        tableMain = fileh.create_table(
            fileh.root,
            "Detections",
            Detections,
            "HETDEX Continuum Source Catalog",
            expectedrows=n_size,
        )
        tableFibers = fileh.create_table(
            fileh.root,
            "Fibers",
            Fibers,
            "Fiber info for each source",
            expectedrows=n_size,
        )
        tableSpectra = fileh.create_table(
            fileh.root,
            "Spectra",
            Spectra,
            "1D Spectra for each Line Detection",
            expectedrows=15 * n_size,
        )
        files = sorted(glob.glob(op.join(args.mergedir, "cont_2*.h5")))

        detectid_max = 0

        for file in files:

            args.log.info("Appending detect H5 file: %s" % file)

            fileh_i = tb.open_file(file, "r")

            tableMain_i = fileh_i.root.Detections.read()

            if np.size(tableMain_i) == 0:
                args.log.error('No detections for %s' % file)
                continue

            tableFibers_i = fileh_i.root.Fibers.read()
            tableSpectra_i = fileh_i.root.Spectra.read()
            tableMain_i["detectid"] += detectid_max
            tableFibers_i["detectid"] += detectid_max
            tableSpectra_i["detectid"] += detectid_max

            # after first table be sure to add one to the index

            detectid_max = 1

            tableMain.append(tableMain_i)
            tableFibers.append(tableFibers_i)
            tableSpectra.append(tableSpectra_i)

            detectid_max = np.max(tableMain.cols.detectid[:]) - index_buff + 1

            fileh_i.close()
            tableFibers.flush()  # just to be safe
            tableSpectra.flush()
            tableMain.flush()

        tableMain.cols.shotid.create_csindex()
        tableMain.cols.detectid.create_csindex()
        tableFibers.cols.detectid.create_csindex()
        tableSpectra.cols.detectid.create_csindex()
        tableFibers.flush()  # just to be safe
        tableSpectra.flush()
        tableMain.flush()
        args.log.info("File finished: %s" % args.outfilename)
        sys.exit()
    # open up datevobs tarball with ingestion data

    datevobs = str(args.date) + 'v' + str(args.observation).zfill(3)

    spectarfile = op.join(args.detect_path, "{}cs.tar".format(datevobs))

    if not op.exists(spectarfile):
        args.log.error("Could not find {}".format(spectarfile))
        sys.exit()

    if args.outfilename:
        outfilename = args.outfilename
    elif args.month and args.merge:
        outfilename = 'cont_month_' + str(args.month) + '.h5'
    else:
        outfilename = 'cont_' + str(args.date) + str(
            args.observation).zfill(3) + '.h5'

    fileh = tb.open_file(outfilename, "w", "HDR3 Continuum Source Database")

    # open up datevobs tarball with ingestion data

    spectar = tarfile.open(spectarfile)

    if not os.path.exists('./spec'):
        os.makedirs('./spec')

    spectar.extractall('./spec')
    spectar.close()

    detectcat = Table.read('./spec/{}.rcs'.format(datevobs),
                           format="ascii.no_header")
    detectcat.remove_columns([
        "col1",
        "col4",
        "col5",
        "col6",
        "col9",
        "col10",
        "col11",
        "col12",
        "col13",
        "col14",
    ])
    detectcat["col2"].name = "ra"
    detectcat["col3"].name = "dec"
    detectcat["col7"].name = "obnum"
    detectcat["col8"].name = "datevshot"

    tableMain = fileh.create_table(
        fileh.root,
        "Detections",
        Detections,
        "HETDEX Continuum Source Catalog",
        expectedrows=np.size(detectcat),
    )
    tableFibers = fileh.create_table(
        fileh.root,
        "Fibers",
        Fibers,
        "Fiber info for each source",
        expectedrows=np.size(detectcat),
    )
    tableSpectra = fileh.create_table(
        fileh.root,
        "Spectra",
        Spectra,
        "1D Spectra for each Line Detection",
        expectedrows=15 * np.size(detectcat),
    )

    shotid = []
    date = []
    obsid = []
    inputid = []
    detectid = []

    detectid_i = detectidx

    for row in detectcat:
        p = re.compile("v")
        shotid_i = int(p.sub("", row["datevshot"]))
        inputid_i = str(row["datevshot"]) + "_" + str(row["obnum"])

        detectid.append(detectid_i)
        inputid.append(inputid_i)
        date.append(int(str(shotid_i)[0:8]))
        obsid.append(int(str(shotid_i)[8:11]))
        shotid.append(shotid_i)
        detectid_i += 1

    detectcat["detectid"] = detectid
    detectcat["inputid"] = inputid
    detectcat["date"] = date
    detectcat["obsid"] = obsid
    detectcat["detectid"] = detectid
    detectcat["shotid"] = shotid

    det_cols = fileh.root.Detections.colnames

    shottab = Table.read(args.shotlist, format='ascii.no_header')
    shotlist = []
    for row in shottab:
        shotlist.append(int(str(row['col1']) + str(row['col2']).zfill(3)))

    for row in detectcat:

        if row['shotid'] not in shotlist:
            continue

        rowMain = tableMain.row

        for col in det_cols:
            try:
                rowMain[col] = row[col]
            except:
                rowMain[col] = 0.0

        rowMain.append()

    tableMain.flush()

    for row in tableMain:
        try:
            inputid_i = row["inputid"].decode()
            specfile = "./spec/{}.spec".format(inputid_i)
            dataspec = Table(
                np.loadtxt(specfile),
                names=[
                    "wave1d",
                    "spec1d_nc",
                    "spec1d_nc_err",
                    "counts1d",
                    "counts1d_err",
                    "apsum_counts",
                    "apsum_counts_err",
                    "dummy",
                    "apcor",
                    "flag_pix",
                    "obnum",
                    "spec1d_nc_ffsky",
                ],
            )

            rowspectra = tableSpectra.row
            rowspectra["detectid"] = row["detectid"]
            rowspectra["spec1d"] = dataspec["spec1d_nc"] / dataspec["apcor"]
            rowspectra[
                "spec1d_err"] = dataspec["spec1d_nc_err"] / dataspec["apcor"]
            rowspectra["spec1d_ffsky"] = dataspec[
                "spec1d_nc_ffsky"] / dataspec["apcor"]
            rowspectra["wave1d"] = dataspec["wave1d"]
            rowspectra["spec1d_nc"] = dataspec["spec1d_nc"]
            rowspectra["spec1d_nc_err"] = dataspec["spec1d_nc_err"]
            rowspectra["counts1d"] = dataspec["counts1d"]
            rowspectra["counts1d_err"] = dataspec["counts1d_err"]
            rowspectra["apsum_counts"] = dataspec["apsum_counts"]
            rowspectra["apsum_counts_err"] = dataspec["apsum_counts_err"]
            rowspectra["apcor"] = dataspec["apcor"]
            rowspectra["flag_pix"] = dataspec["flag_pix"]
            rowspectra.append()
        except Exception:
            args.log.error("Could not ingest %s" % specfile)

    tableSpectra.flush()

    # add fiber info for each detection

    for row in tableMain:
        inputid_i = row["inputid"].decode()

        filefiberinfo = "./spec/{}.list".format(inputid_i)

        try:
            datafiber = Table.read(filefiberinfo, format="ascii.no_header")

            for ifiber in np.arange(np.size(datafiber)):
                rowfiber = tableFibers.row
                rowfiber["detectid"] = row["detectid"]
                rowfiber["ra"] = datafiber["col1"][ifiber]
                rowfiber["dec"] = datafiber["col2"][ifiber]
                rowfiber["x_ifu"] = datafiber["col3"][ifiber]
                rowfiber["y_ifu"] = datafiber["col4"][ifiber]
                rowfiber["expnum"] = str(datafiber["col6"][ifiber])[3:5]
                multiname = datafiber["col5"][ifiber]
                multiframe = multiname[0:20]
                fiber_id_i = (str(row["shotid"]) + "_" +
                              str(int(rowfiber["expnum"])) + "_" + multiframe +
                              "_" + str(int(multiname[21:24])).zfill(3))
                rowfiber["fiber_id"] = fiber_id_i
                rowfiber["multiframe"] = multiframe
                rowfiber["specid"] = multiframe[6:9]
                rowfiber["ifuslot"] = multiframe[10:13]
                rowfiber["ifuid"] = multiframe[14:17]
                rowfiber["amp"] = multiframe[18:20]
                rowfiber["fibnum"] = int(multiname[21:24])
                rowfiber["distance"] = datafiber["col7"][ifiber]
                rowfiber["wavein"] = datafiber["col8"][ifiber]
                rowfiber["timestamp"] = datafiber["col9"][ifiber]
                rowfiber["date"] = datafiber["col10"][ifiber]
                rowfiber["obsid"] = str(datafiber["col11"][ifiber])[0:3]
                rowfiber["x_raw"] = datafiber["col12"][ifiber]
                rowfiber["y_raw"] = datafiber["col13"][ifiber]
                rowfiber["flag"] = datafiber["col15"][ifiber]
                rowfiber["weight"] = datafiber["col14"][ifiber]
                rowfiber.append()

            # Now append brightest fiber info to Detections table:
            ifiber = 0  # np.argmax(datafiber["col14"])
            multiname = datafiber["col5"][ifiber]
            multiframe = multiname[0:20]
            row["expnum"] = int(str(datafiber["col6"][ifiber])[3:5])
            fiber_id_i = (str(row["shotid"]) + "_" + str(row["expnum"]) + "_" +
                          multiframe + "_" +
                          str(int(multiname[21:24])).zfill(3))
            row["fiber_id"] = fiber_id_i
            row["multiframe"] = multiframe
            row["specid"] = multiframe[6:9]
            row["ifuslot"] = multiframe[10:13]
            row["ifuid"] = multiframe[14:17]
            row["amp"] = multiframe[18:20]
            row["fibnum"] = int(multiname[21:24])
            row["x_raw"] = datafiber["col12"][ifiber]
            row["y_raw"] = datafiber["col13"][ifiber]
            row["x_ifu"] = datafiber["col3"][ifiber]
            row["y_ifu"] = datafiber["col4"][ifiber]
            row["expnum"] = str(datafiber["col6"][ifiber])[3:5]
            row["weight"] = datafiber["col14"][ifiber]
            row.update()

        except Exception:
            args.log.error("Could not ingest %s" % filefiberinfo)

        tableFibers.flush()

    tableMain.cols.detectid.create_csindex()
    tableFibers.cols.detectid.create_csindex()
    tableSpectra.cols.detectid.create_csindex()
    tableFibers.flush()  # just to be safe
    tableSpectra.flush()
    tableMain.flush()
    args.log.info("File finished: {}".format(outfilename))
    fileh.close()

    # remove untarred files

    tarfiles = glob.glob('./spec/{}*'.format(datevobs))

    for f in tarfiles:
        try:
            os.remove(f)
        except OSError as e:
            print("Error: %s : %s" % (f, e.strerror))
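As a rough usage sketch of the main() defined above, the script is first run per shot and the per-shot files are then combined with --merge; the dates and output names below are placeholders, not values from a real run:

# Build one HDF5 per shot; the default output name becomes cont_20200101007.h5.
main(argv=["-d", "20200101", "-o", "7"])

# Merge every cont_2*.h5 file in the working directory into one database.
# Note that the merge branch ends with sys.exit().
main(argv=["--merge", "-of", "cont_merged.h5"])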
Пример #13
0
def main(argv=None):
    ''' Main Function '''
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(description="""Create HDF5 file.""",
                               add_help=True)

    parser.add_argument("-df",
                        "--detectfile",
                        help='''Provide HDF5 of detections''',
                        type=str,
                        default=config.detecth5)

    parser.add_argument(
        "-dets",
        "--dets",
        help='''List of detections in form DATEvSHOT_inputID''',
        type=str,
        default=None)

    parser.add_argument('-of',
                        '--outfilename',
                        type=str,
                        help='''Relative or absolute path for output HDF5
                        file.''',
                        default=None)

    parser.add_argument('-a',
                        '--append',
                        help='''Appending to existing detection HDF5 file.''',
                        action="count",
                        default=0)

    parser.add_argument("-ep",
                        "--elixer_path",
                        help='''Path to elixer output''',
                        type=str,
                        default='/scratch/03261/polonius/')

    parser.add_argument(
        "-cat",
        "--elixer_cat",
        help='''Path to elixer catalog''',
        type=str,
        default='/work/03261/polonius/stampede2/erin/simple_cat.txt')

    args = parser.parse_args(argv)
    args.log = setup_logging()

    # open elixer catalog file to be ingested

    if op.exists(args.elixer_cat):
        colnames = [
            'detectid', 'ra', 'dec', 'z_prelim', 'ew_obs', 'ew_rest',
            'plae_poii_hetdex', 'plae_poii_aperture', 'aperture_mag',
            'aperture_filter', 'plae_poii_cat', 'cat_filter', 'dist_match',
            'mag_match', 'ra_match', 'dec_match'
        ]
        elixer_table = ascii.read(args.elixer_cat, names=colnames, comment="#")
        #delimiter="\t", guess=False, format='basic')
    else:
        print('Could not open %s' % args.elixer_cat)

    filedet = tb.open_file(config.detecth5, 'r')
    detect_list = filedet.root.Detections.cols.detectid[:]
    inputid = filedet.root.Detections.cols.inputid[:]
    filedet.close()

    # open elixer HDF5 file, if append option is given this will be a group added
    # to an existing exlier HDF5 file
    if args.append:
        try:
            fileh = tb.open_file(args.outfilename, 'a')
        except:
            args.log.warning('Could not open %s to append.', args.outfilename)

    else:
        try:
            fileh = tb.open_file(args.outfilename, 'w')
            groupElix = fileh.create_group(fileh.root, 'Elixer',
                                           "ELiXer Summaries")
            fileh.create_table(fileh.root, 'Classifications', Classifications)
        except:
            args.log.warning('Could not open %s.', args.outfilename)

    tableMain = fileh.root.Classifications

    # set array of columns to store

    colkeep = [
        'ra', 'dec', 'z_prelim', 'plae_poii_hetdex', 'plae_poii_aperture',
        'aperture_mag', 'aperture_filter', 'plae_poii_cat', 'cat_filter',
        'dist_match', 'mag_match', 'ra_match', 'dec_match'
    ]

    for index, detect_i in enumerate(detect_list):
        row = tableMain.row
        row['detectid'] = detect_i

        idx = np.where(elixer_table['detectid'] == detect_i)

        if np.size(idx) > 0:
            if np.size(idx) > 1:
                print(detect_i, inputid[index])

            for colname_i in colkeep:
                try:
                    if colname_i == 'aperture_filter' or colname_i == 'cat_filter':
                        row[colname_i] = str(
                            elixer_table[colname_i]
                            [idx])[-elixer_table[colname_i][idx].size:]
                    else:
                        row[colname_i] = elixer_table[colname_i][idx]
                except Exception as e:
                    args.log.warning('Could not ingest col(%s) detectid(%d)' %
                                     (colname_i, detect_i),
                                     exc_info=True)
                    #args.log.warning('Could not ingest %s' % detect_i, exc_info=True)
        else:
            print('Could not ingest %s' % detect_i)

        row.append()

    tableMain.cols.detectid.create_csindex()
    tableMain.flush()
    fileh.close()
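A minimal sketch of querying the resulting classification table, assuming an output file named elixer.h5 and an illustrative detectid (both placeholders):

import tables as tb

fileh = tb.open_file("elixer.h5", "r")
cls = fileh.root.Classifications

# detectid has a completely sorted index, so this is an indexed lookup.
match = cls.read_where("detectid == 2100000001")
if len(match) > 0:
    print(match["plae_poii_hetdex"][0], match["aperture_mag"][0])

fileh.close()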
Пример #14
0
def main(argv=None):
    """ Main Function """
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(description="""Create HDF5 Astrometry file.""",
                               add_help=True)

    parser.add_argument(
        "-sdir",
        "--shotdir",
        help="""Directory for shot H5 files to ingest""",
        type=str,
        default="/scratch/03946/hetdex/hdr3/reduction/data",
    )

    parser.add_argument(
        "-sl",
        "--shotlist",
        help="""Text file of DATE OBS list""",
        type=str,
        default="hdr3.shotlist",
    )

    parser.add_argument(
        "-of",
        "--outfilename",
        type=str,
        help="""Relative or absolute path for output HDF5
                        file.""",
        default=None,
    )

    parser.add_argument("-survey", "--survey", type=str, default="hdr3")

    parser.add_argument(
        "-m",
        "--month",
        type=int,
        default=None,
        help="""Create FiberIndex for a single month""",
    )

    parser.add_argument(
        "--merge",
        "-merge",
        help="""Boolean trigger to merge all mfi*h5 files in cwd""",
        default=False,
        required=False,
        action="store_true",
    )

    args = parser.parse_args(argv)
    args.log = setup_logging()

    fileh = tb.open_file(args.outfilename,
                         mode="w",
                         title=args.survey.upper() + " Fiber Index file ")

    shotlist = Table.read(args.shotlist,
                          format="ascii.no_header",
                          names=["date", "obs"])

    tableFibers = fileh.create_table(
        fileh.root,
        "FiberIndex",
        VIRUSFiberIndex,
        "Survey Fiber Coord Info",
        expectedrows=300000000,
    )

    if args.merge:
        files = glob.glob("mfi*h5")
        for file in files:
            args.log.info("Appending detect H5 file: %s" % file)
            fileh_i = tb.open_file(file, "r")
            tableFibers_i = fileh_i.root.FiberIndex.read()
            tableFibers.append(tableFibers_i)

        tableFibers.cols.healpix.create_csindex()
        tableFibers.cols.ra.create_csindex()
        tableFibers.cols.shotid.create_csindex()
        tableFibers.flush()
        fileh.close()
        args.log.info("Completed {}".format(args.outfilename))
        sys.exit()

    # set up HEALPIX options
    Nside = 2**15
    hp.max_pixrad(Nside, degrees=True) * 3600  # in unit of arcsec

    config = HDRconfig(survey=args.survey)

    badshot = np.loadtxt(config.badshot, dtype=int)

    if args.month is not None:
        args.log.info("Working on month {}".format(args.month))
        # if working on a single month downselect
        shotlist["month"] = np.array(shotlist["date"] / 100, dtype=int)
        sel_month = shotlist["month"] == args.month
        shotlist = shotlist[sel_month]

    for shotrow in shotlist:
        datevshot = str(shotrow["date"]) + "v" + str(shotrow["obs"]).zfill(3)
        shotid = int(str(shotrow["date"]) + str(shotrow["obs"]).zfill(3))

        date = shotrow["date"]

        try:
            args.log.info("Ingesting %s" % datevshot)
            file_obs = tb.open_file(op.join(args.shotdir, datevshot + ".h5"),
                                    "r")
            tableFibers_i = file_obs.root.Data.FiberIndex

            for row_i in tableFibers_i:

                row_main = tableFibers.row

                for col in tableFibers_i.colnames:
                    row_main[col] = row_i[col]

                fiberid = row_i["fiber_id"]

                try:
                    row_main["healpix"] = hp.ang2pix(Nside,
                                                     row_i["ra"],
                                                     row_i["dec"],
                                                     lonlat=True)
                except:
                    row_main["healpix"] = 0

                row_main["shotid"] = shotid
                row_main["date"] = date
                row_main["datevobs"] = datevshot

                row_main["specid"] = fiberid[20:23]
                row_main["ifuslot"] = fiberid[24:27]
                row_main["ifuid"] = fiberid[28:31]
                row_main["amp"] = fiberid[32:34]
                row_main.append()

            file_obs.close()

        except:
            if shotid in badshot:
                pass
            else:
                args.log.error("could not ingest %s" % datevshot)

    tableFibers.cols.healpix.create_csindex()
    tableFibers.cols.ra.create_csindex()
    tableFibers.cols.shotid.create_csindex()
    tableFibers.flush()
    fileh.close()
    args.log.info("Completed {}".format(args.outfilename))
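Because every fiber gets a HEALPix index above (Nside = 2**15, lonlat ordering), a coarse positional query can go through that indexed column. A minimal sketch, with the filename and coordinates as placeholder assumptions; a real cone search would also check the neighbouring pixels:

import tables as tb
import healpy as hp

Nside = 2**15
ra, dec = 150.025, 2.087  # example position in degrees

fileh = tb.open_file("fiber_index.h5", "r")
pix = hp.ang2pix(Nside, ra, dec, lonlat=True)

# healpix has a completely sorted index, so this avoids a full table scan.
fibers = fileh.root.FiberIndex.read_where("healpix == pix")
print(len(fibers), "fibers in pixel", pix)

fileh.close()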
Пример #15
0
def main(argv=None):
    """ Main Function """
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(description="""Create HDF5 file.""",
                               add_help=True)

    parser.add_argument(
        "-cs",
        "--contsource",
        help="""Path to Karl's rext catalog""",
        type=str,
        default=None,
    )

    parser.add_argument(
        "-dp",
        "--detect_path",
        help="""Path to detections""",
        type=str,
        default="/data/00115/gebhardt/alldet/output",
    )

    parser.add_argument(
        "-of",
        "--outfilename",
        type=str,
        help="""Relative or absolute path for output HDF5
        file.""",
        default=None,
    )

    parser.add_argument(
        "-a",
        "--append",
        help="""Appending to existing detection HDF5 file.""",
        action="count",
        default=0,
    )

    parser.add_argument(
        "-sl",
        "--shotlist",
        help="""Text file of DATE OBS list""",
        type=str,
        default="/scratch/03946/hetdex/hdr3/survey/hdr3.shotlist",
    )

    args = parser.parse_args(argv)
    args.log = setup_logging()

    outfilename = args.outfilename

    fileh = tb.open_file(outfilename, "w", "HDR3 Continuum Source Database")
    index_buff = 3090000000
    detectidx = index_buff

    detectcat = Table.read(args.contsource, format="ascii.no_header")
    detectcat.remove_columns([
        "col1",
        "col4",
        "col5",
        "col6",
        "col9",
        "col10",
        "col11",
        "col12",
        "col13",
        "col14",
    ])
    detectcat["col2"].name = "ra"
    detectcat["col3"].name = "dec"
    detectcat["col7"].name = "obnum"
    detectcat["col8"].name = "datevshot"

    tableMain = fileh.create_table(
        fileh.root,
        "Detections",
        Detections,
        "HETDEX Continuum Source Catalog",
        expectedrows=np.size(detectcat),
    )
    tableFibers = fileh.create_table(
        fileh.root,
        "Fibers",
        Fibers,
        "Fiber info for each source",
        expectedrows=np.size(detectcat),
    )
    tableSpectra = fileh.create_table(
        fileh.root,
        "Spectra",
        Spectra,
        "1D Spectra for each Line Detection",
        expectedrows=15 * np.size(detectcat),
    )

    shotid = []
    date = []
    obsid = []
    inputid = []
    detectid = []

    detectid_i = detectidx

    for row in detectcat:
        p = re.compile("v")
        shotid_i = int(p.sub("", row["datevshot"]))
        inputid_i = str(row["datevshot"]) + "_" + str(row["obnum"])

        detectid.append(detectid_i)
        inputid.append(inputid_i)
        date.append(int(str(shotid_i)[0:8]))
        obsid.append(int(str(shotid_i)[8:11]))
        shotid.append(shotid_i)
        detectid_i += 1

    detectcat["detectid"] = detectid
    detectcat["inputid"] = inputid
    detectcat["date"] = date
    detectcat["obsid"] = obsid
    detectcat["detectid"] = detectid
    detectcat["shotid"] = shotid

    det_cols = fileh.root.Detections.colnames

    shottab = Table.read(args.shotlist, format='ascii.no_header')
    shotlist = []
    for row in shottab:
        shotlist.append(int(str(row['col1']) + str(row['col2']).zfill(3)))

    for row in detectcat:

        if row['shotid'] not in shotlist:
            continue

        rowMain = tableMain.row

        for col in det_cols:
            try:
                rowMain[col] = row[col]
            except:
                rowMain[col] = 0.0

        rowMain.append()

    tableMain.flush()

    # add spectra for each detectid in the detections table

    for row in tableMain:
        try:

            inputid_i = row["inputid"].decode()
            specfile = op.join(args.detect_path, inputid_i + ".spec")
            dataspec = Table(
                np.loadtxt(specfile),
                names=[
                    "wave1d",
                    "spec1d_nc",
                    "spec1d_nc_err",
                    "counts1d",
                    "counts1d_err",
                    "apsum_counts",
                    "apsum_counts_err",
                    "dummy",
                    "apcor",
                    "flag_pix",
                    "obnum",
                    "spec1d_nc_ffsky",
                ],
            )

            rowspectra = tableSpectra.row
            rowspectra["detectid"] = row["detectid"]
            rowspectra["spec1d"] = dataspec["spec1d_nc"] / dataspec["apcor"]
            rowspectra[
                "spec1d_err"] = dataspec["spec1d_nc_err"] / dataspec["apcor"]
            rowspectra["spec1d_ffsky"] = dataspec[
                "spec1d_nc_ffsky"] / dataspec["apcor"]
            rowspectra["wave1d"] = dataspec["wave1d"]
            rowspectra["spec1d_nc"] = dataspec["spec1d_nc"]
            rowspectra["spec1d_nc_err"] = dataspec["spec1d_nc_err"]
            rowspectra["counts1d"] = dataspec["counts1d"]
            rowspectra["counts1d_err"] = dataspec["counts1d_err"]
            rowspectra["apsum_counts"] = dataspec["apsum_counts"]
            rowspectra["apsum_counts_err"] = dataspec["apsum_counts_err"]
            rowspectra["apcor"] = dataspec["apcor"]
            rowspectra["flag_pix"] = dataspec["flag_pix"]
            rowspectra.append()
        except Exception:
            args.log.error("Could not ingest %s" % specfile)

    tableSpectra.flush()

    # add fiber info for each detection

    for row in tableMain:
        inputid_i = row["inputid"].decode()

        filefiberinfo = op.join(args.detect_path, inputid_i + ".list")

        try:
            datafiber = Table.read(filefiberinfo, format="ascii.no_header")

            for ifiber in np.arange(np.size(datafiber)):
                rowfiber = tableFibers.row
                rowfiber["detectid"] = row["detectid"]
                rowfiber["ra"] = datafiber["col1"][ifiber]
                rowfiber["dec"] = datafiber["col2"][ifiber]
                rowfiber["x_ifu"] = datafiber["col3"][ifiber]
                rowfiber["y_ifu"] = datafiber["col4"][ifiber]
                rowfiber["expnum"] = str(datafiber["col6"][ifiber])[3:5]
                multiname = datafiber["col5"][ifiber]
                multiframe = multiname[0:20]
                fiber_id_i = (str(row["shotid"]) + "_" +
                              str(int(rowfiber["expnum"])) + "_" + multiframe +
                              "_" + str(int(multiname[21:24])).zfill(3))
                rowfiber["fiber_id"] = fiber_id_i
                rowfiber["multiframe"] = multiframe
                rowfiber["specid"] = multiframe[6:9]
                rowfiber["ifuslot"] = multiframe[10:13]
                rowfiber["ifuid"] = multiframe[14:17]
                rowfiber["amp"] = multiframe[18:20]
                rowfiber["fibnum"] = int(multiname[21:24])
                rowfiber["distance"] = datafiber["col7"][ifiber]
                rowfiber["wavein"] = datafiber["col8"][ifiber]
                rowfiber["timestamp"] = datafiber["col9"][ifiber]
                rowfiber["date"] = datafiber["col10"][ifiber]
                rowfiber["obsid"] = str(datafiber["col11"][ifiber])[0:3]
                rowfiber["x_raw"] = datafiber["col12"][ifiber]
                rowfiber["y_raw"] = datafiber["col13"][ifiber]
                rowfiber["flag"] = datafiber["col15"][ifiber]
                rowfiber["weight"] = datafiber["col14"][ifiber]
                rowfiber.append()

            # Now append brightest fiber info to Detections table:
            ifiber = 0  # np.argmax(datafiber["col14"])
            multiname = datafiber["col5"][ifiber]
            multiframe = multiname[0:20]
            row["expnum"] = int(str(datafiber["col6"][ifiber])[3:5])
            fiber_id_i = (str(row["shotid"]) + "_" + str(row["expnum"]) + "_" +
                          multiframe + "_" +
                          str(int(multiname[21:24])).zfill(3))
            row["fiber_id"] = fiber_id_i
            row["multiframe"] = multiframe
            row["specid"] = multiframe[6:9]
            row["ifuslot"] = multiframe[10:13]
            row["ifuid"] = multiframe[14:17]
            row["amp"] = multiframe[18:20]
            row["fibnum"] = int(multiname[21:24])
            row["x_raw"] = datafiber["col12"][ifiber]
            row["y_raw"] = datafiber["col13"][ifiber]
            row["x_ifu"] = datafiber["col3"][ifiber]
            row["y_ifu"] = datafiber["col4"][ifiber]
            row["expnum"] = str(datafiber["col6"][ifiber])[3:5]
            row["weight"] = datafiber["col14"][ifiber]
            row.update()

        except Exception:
            args.log.error("Could not ingest %s" % filefiberinfo)

        tableFibers.flush()

    tableMain.cols.detectid.create_csindex()
    tableFibers.cols.detectid.create_csindex()
    tableSpectra.cols.detectid.create_csindex()
    tableFibers.flush()  # just to be safe
    tableSpectra.flush()
    tableMain.flush()
    args.log.info("File finished: %s" % args.outfilename)
    fileh.close()
Пример #16
0
def main(argv=None):
    """ Main Function """

    parser = get_parser()
    args = parser.parse_args(argv)
    args.log = setup_logging()

    if args.pickle:
        args.fits = False

    if args.merge:

        if args.fits:
            master_table = Table()
            files = glob.glob(op.join(args.mergepath, "*.fits"))
            args.log.info("Merging all fits files in " + args.mergepath)

            for file in files:
                file_table = Table.read(open(file, "rb"))
                if np.size(file_table) > 0:
                    master_table = vstack([master_table, file_table])
            outfile = args.outfile + ".fits"
            master_table.write(outfile, format="fits", overwrite=True)

        else:
            all_source_dict = {}
            files = glob.glob(op.join(args.mergepath, "*.pkl"))
            args.log.info("Merging all pickle files in " + args.mergepath)
            for file in files:
                file_dict = pickle.load(open(file, "rb"))
                if len(file_dict) > 0:
                    all_source_dict = merge(all_source_dict, file_dict)

            outfile = args.outfile + ".pkl"
            pickle.dump(all_source_dict, open(outfile, "wb"))

        args.log.info("Saved output file to " + outfile)
        sys.exit("Exiting")

    if args.infile:

        args.log.info("Loading External File")

        try:
            try:
                table_in = Table.read(args.infile, format="ascii")
                if table_in.colnames == ["col1", "col2", "col3"]:
                    table_in["col1"].name = "ID"
                    table_in["col2"].name = "ra"
                    table_in["col3"].name = "dec"
                elif np.size(table_in.colnames) != 3:
                    args.log.info("Input file not in three column format")
            except Exception:
                pass
            try:
                table_in = Table.read(args.infile, format="fits")
            except Exception:
                pass
        except Exception:
            if op.exists(args.infile):
                args.log.warning("Could not open input file")
                sys.exit("Exiting")
            else:
                args.log.warning("Input file not found")
                sys.exit("Exiting")
        try:
            args.ID = table_in["ID"]
        except:
            args.ID = table_in["id"]

        try:
            args.ra = table_in["ra"]
            args.dec = table_in["dec"]
        except:
            args.ra = table_in["RA"]
            args.dec = table_in["DEC"]

    else:
        if args.ID is None:
            if np.size(args.ra) > 1:
                args.ID = np.arange(1, np.size(args.ra) + 1)
            else:
                args.ID = 1

        args.log.info("Extracting for ID: %s" % args.ID)

    # generate astropy coordinates object for searching

    if re.search(":", str(args.ra)):
        args.coords = SkyCoord(args.ra, args.dec, unit=(u.hourangle, u.deg))
    else:
        args.coords = SkyCoord(args.ra, args.dec, unit=u.deg)

    S = Survey(args.survey)

    if args.keep_bad_shots:
        ind_good_shots = np.ones_like(S.shotid, dtype=bool)
    else:
        ind_good_shots = S.remove_shots()

    if args.tpmin:
        ind_tp = S.response_4540 > args.tpmin
        args.survey_class = S[ind_good_shots * ind_tp]
    else:
        args.survey_class = S[ind_good_shots]

    # if args.shotid exists, only select those shots

    if args.shotid:
        try:
            sel_shot = args.survey_class.shotid == int(args.shotid)
        except Exception:
            sel_shot = args.survey_class.datevobs == str(args.shotid)

        args.survey_class = args.survey_class[sel_shot]

    else:
        pass

    # main function to retrieve spectra dictionary
    Source_dict = get_spectra_dictionary(args)

    args.survey_class.close()

    if args.pickle:
        outfile = args.outfile + ".pkl"
        pickle.dump(Source_dict, open(outfile, "wb"))

    if args.single:
        # loop over every ID/observation combo:
        fluxden_u = 1e-17 * u.erg * u.s**(-1) * u.cm**(-2) * u.AA**(-1)
        for ID in Source_dict.keys():
            for shotid in Source_dict[ID].keys():

                wave_rect = 2.0 * np.arange(1036) + 3470.0
                spec = Source_dict[ID][shotid][0]
                spec_err = Source_dict[ID][shotid][1]
                weights = Source_dict[ID][shotid][2]

                sel = np.isfinite(spec)
                if np.sum(sel) > 0:
                    output = Table()

                    output.add_column(
                        Column(wave_rect, name="wavelength", unit=u.AA))
                    output.add_column(Column(spec, name="spec",
                                             unit=fluxden_u))
                    output.add_column(
                        Column(spec_err, name="spec_err", unit=fluxden_u))
                    output.add_column(Column(weights, name="weights"))

                    output.write("spec_" + str(ID) + "_" + str(shotid) +
                                 ".tab",
                                 format="ascii")

    if args.fits:
        output = return_astropy_table(Source_dict,
                                      fiberweights=args.fiberweights)
        if args.fiberweights:
            # cannot save fiberweights to a fits file
            output.remove_column('fiber_weights')
        output.write(args.outfile + ".fits", format="fits", overwrite=True)
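For reference, a minimal sketch of reading back one of the per-ID, per-shot ASCII spectra written by the --single branch above; the ID and shotid in the filename are placeholders:

from astropy.table import Table

spec = Table.read("spec_1_20200101007.tab", format="ascii")
print(spec.colnames)  # ['wavelength', 'spec', 'spec_err', 'weights']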
Пример #17
0
def main(argv=None):
    """ Main Function """
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(
        description="""Append HDF5 Calibration info table.""", add_help=True)

    parser.add_argument(
        "-d",
        "--date",
        help="""Date, e.g., 20170321, YYYYMMDD""",
        type=str,
        default=None,
    )

    parser.add_argument(
        "-o",
        "--observation",
        help='''Observation number, "00000007" or "7"''',
        type=str,
        default=None,
    )

    parser.add_argument(
        "-tp",
        "--tpdir",
        help="""Directory for Throughput Info""",
        type=str,
        default="/scratch/00115/gebhardt/detect",
    )

    parser.add_argument(
        "-detdir",
        "--detectdir",
        help="""Directory for Detect Info""",
        type=str,
        default="/scratch/03946/hetdex/detect",
    )

    parser.add_argument(
        "-of",
        "--outfilename",
        type=str,
        help="""Relative or absolute path for output HDF5 file.""",
        default=None,
    )

    parser.add_argument(
        "-a",
        "--append",
        help="""Appending to existing shot HDF5 file.""",
        action="count",
        default=0,
    )

    parser.add_argument("-survey",
                        "--survey",
                        help="""{hdr1, hdr2, hdr2.1, hdr3}""",
                        type=str,
                        default="hdr3")

    args = parser.parse_args(argv)
    args.log = setup_logging()

    # Creates a new file if the "--append" option is not set or the file
    # does not already exist.

    does_exist = False
    if op.exists(args.outfilename) and args.append:
        fileh = tb.open_file(args.outfilename, "a")
        args.log.info("Appending calibration info to %s" % args.outfilename)
        does_exist = True
    else:
        fileh = tb.open_file(args.outfilename, "w")
        args.log.info("Writing calibration info to %s" % args.outfilename)

    shotid = int(str(args.date) + str(args.observation).zfill(3))

    #check if shotid is in badlist

    config = HDRconfig(args.survey)
    badshots = np.loadtxt(config.badshot, dtype=int)

    badshotflag = False

    if shotid in badshots:
        badshotflag = True

    try:
        fileh.remove_node(fileh.root.Calibration, recursive=True)
    except:
        args.log.info("Creating new Calibration group")

    group = fileh.create_group(fileh.root, "Calibration",
                               "HETDEX Calibration Info")

    groupThroughput = fileh.create_group(group, "Throughput",
                                         "Throughput Curve")

    datevshot = str(args.date) + "v" + str(args.observation.zfill(3))

    tpfile = op.join(args.detectdir, "tp", datevshot + "sedtp_f.dat")

    try:
        tp_data = ascii.read(
            tpfile,
            names=[
                "wavelength",
                "throughput",
                "tp_low",
                "tp_high",
                "rat_poly",
                "tp_gband",
            ],
        )
        tp_4540 = tp_data["throughput"][np.where(
            tp_data["wavelength"] == 4540.0)][0]

        tp_array = fileh.create_table(groupThroughput, "throughput",
                                      tp_data.as_array())
        tp_array.set_attr("filename", tpfile)
    except:
        args.log.warning("Could not include %s" % tpfile)

    tppngfile = op.join(
        args.tpdir,
        str(args.date) + "v" + str(args.observation.zfill(3)),
        "res",
        str(args.date) + "v" + str(args.observation.zfill(3)) + "sedtpa.png",
    )

    try:
        pngimarr = plt.imread(tppngfile)
        pngim = fileh.create_array(groupThroughput, "tp_png", pngimarr)
        pngim.attrs["CLASS"] = "IMAGE"
    except:
        args.log.warning("Could not include %s" % tppngfile)

    # add virus FWHM and response_4540 to the Shot table

    shottable = fileh.root.Shot
    fwhm_file = op.join(args.detectdir, "fwhm.all")

    try:

        fwhm_tab = Table.read(fwhm_file, format='ascii.no_header')
        sel_datevobs = fwhm_tab['col1'] == str(args.date) + "v" + str(
            args.observation.zfill(3))

        for row in shottable:
            row["fwhm_virus"] = float(fwhm_tab['col2'][sel_datevobs])
            row["fwhm_virus_err"] = float(fwhm_tab['col3'][sel_datevobs])
            row["nstars_fit_fwhm"] = int(fwhm_tab['col4'][sel_datevobs])
            row["response_4540"] = tp_4540
            row.update()

    except:
        if badshotflag:
            args.log.warning(
                "Could not include cal info in shot table for %s" % datevshot)
        else:
            args.log.error("Could not include cal info in shot table for %s" %
                           datevshot)

    fileh.close()
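A minimal sketch of reading back the throughput curve stored above, assuming a shot file named 20200101v007.h5 (the filename is a placeholder):

import tables as tb

fileh = tb.open_file("20200101v007.h5", "r")

# The curve is stored as a table at /Calibration/Throughput/throughput.
tp = fileh.root.Calibration.Throughput.throughput.read()
print(tp["wavelength"][:5], tp["throughput"][:5])

fileh.close()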
Пример #18
0
def get_spectra(
    coords,
    ID=None,
    rad=3.5,
    multiprocess=True,
    shotid=None,
    survey=LATEST_HDR_NAME,
    tpmin=0.08,
    keep_bad_shots=False,
    ffsky=False,
    fiberweights=False,
    return_fiber_info=False,
    loglevel='INFO',
):
    """
    Function to retrieve PSF-weighted, ADR and aperture corrected
    spectral extractions of HETDEX fibers. It will search all shots
    within a specific HETDEX Data Release and return a table with
    one extracted spectrum per source per shot in which more than
    7 fibers are found.

    Parameters
    ----------
    coords
        list astropy coordinates
    ID
        list of ID names (must be same length as coords). Will
        generate a running index if no ID is given
    rad
        radius of circular aperture to be extracted in arcsec.
        Default is 3.5
    multiprocess: bool
        boolean flag to use multiprocessing. This will greatly
        speed up its operation as it will extract on 32 shots at
        time. But only use this when on a compute node. Use
        idev, a jupyter notebook, or submit the job as a single
        python slurm job. Default is True
    shotid: int
        list of integer shotids to do extractions on. By default
        it will search the whole survey except for shots located
        in the bad.shotlist file
    survey: str
        Survey you want to access. Note that HDR1 extractions
        are much slower than HDR2.
    tpmin: float
        Include only shots above tpmin. Default is 0.08
    ffsky: bool
        Use the full frame 2D sky subtraction model. Default is
        to use the local sky subtracted, flux calibrated fibers.
    fiberweights: bool
        Boolean flag to include fiber_weights tuple in source
        dictionary. This is used in Elixer, but is slow
        when used on large source lists.
    return_fiber_info: bool
        Return the fiber_info and weights of the fibers used
        in the extraction
    keep_bad_shots: bool
        Set this to True if you want to include fibers from bad
        shots. This is dangerous as it can include fibers with
        bad astrometry or bad calibration. Default is False.
    loglevel: str
        Level to set logging. Options are ERROR, WARNING, INFO,
        DEBUG. Defaults to INFO

    Returns
    -------
    sources
        an astropy table object of source spectra for all input
        coords/ID that have spectra in the survey shots. There
        is one row per source ID/shotid observation.
    """

    args = types.SimpleNamespace()

    args.multiprocess = multiprocess
    args.coords = coords
    args.rad = rad * u.arcsec
    args.survey = survey

    args.ffsky = ffsky
    args.fiberweights = fiberweights
    args.return_fiber_info = return_fiber_info

    args.keep_bad_shots = keep_bad_shots

    S = Survey(survey)

    if args.keep_bad_shots:
        ind_good_shots = np.ones_like(S.shotid, dtype=bool)
    else:
        ind_good_shots = S.remove_shots()

    if tpmin:
        ind_tp = S.response_4540 > tpmin
        args.survey_class = S[ind_good_shots * ind_tp]
    else:
        args.survey_class = S[ind_good_shots]

    if shotid is not None:
        try:
            if np.size(shotid) == 1:
                sel_shot = args.survey_class.shotid == int(shotid)
                # shut off multiprocess flag if it's just one shot
                args.multiprocess = False
            else:
                sel_shot = np.zeros(np.size(args.survey_class.shotid),
                                    dtype=bool)

                for shot_i in shotid:

                    sel_i = args.survey_class.shotid == int(shot_i)
                    sel_shot = np.logical_or(sel_shot, sel_i)

        except Exception:
            sel_shot = args.survey_class.datevobs == str(shotid)

        args.survey_class = args.survey_class[sel_shot]
    else:
        pass
        # sel_shot = args.survey_class.shotid > 20171200000
        # args.survey_class = args.survey_class[sel_shot]

    args.log = setup_logging()

    if loglevel == 'INFO':
        args.log.setLevel(logging.INFO)
    elif loglevel == 'ERROR':
        args.log.setLevel(logging.ERROR)
    elif loglevel == 'WARNING':
        args.log.setLevel(logging.WARNING)
    elif loglevel == 'DEBUG':
        args.log.setLevel(logging.DEBUG)
    else:
        args.log.warning('No loglevel set, using INFO')
        args.log.setLevel(logging.INFO)

    if ID is None:
        try:
            nobj = len(args.coords)
            if nobj > 1:
                args.ID = np.arange(1, nobj + 1)
            else:
                args.ID = 1
        except Exception:
            args.ID = 1
    else:
        args.ID = ID

    Source_dict = get_spectra_dictionary(args)

    args.survey_class.close()

    output = return_astropy_table(Source_dict,
                                  fiberweights=args.fiberweights,
                                  return_fiber_info=args.return_fiber_info)

    args.log.info("Retrieved " + str(np.size(output)) + " spectra.")

    return output
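A minimal usage sketch of get_spectra, assuming it has been imported from its host module; the coordinates and survey name are placeholder values:

import astropy.units as u
from astropy.coordinates import SkyCoord

# Two example positions; a running ID (1, 2, ...) is generated when no
# ID list is supplied.
coords = SkyCoord([150.025, 150.170] * u.deg, [2.087, 2.211] * u.deg)

# Returns an astropy table with one row per source/shot combination.
sources = get_spectra(coords, rad=3.5, survey="hdr3", multiprocess=False)
print(len(sources))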
Пример #19
0
def main(argv=None):
    """ Main Function """
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(description="""Create HDF5 file.""",
                               add_help=True)

    parser.add_argument(
        "-d",
        "--date",
        help="""Date, e.g., 20170321, YYYYMMDD""",
        type=str,
        default=None,
    )

    parser.add_argument(
        "-o",
        "--observation",
        help='''Observation number, "00000007" or "7"''',
        type=str,
        default=None,
    )

    parser.add_argument(
        "-r",
        "--rootdir",
        help="""Root Directory for Reductions""",
        type=str,
        default="/data/05350/ecooper/",
    )

    parser.add_argument(
        "-of",
        "--outfilename",
        type=str,
        help="""Relative or absolute path for output HDF5
                        file.""",
        default=None,
    )

    parser.add_argument(
        "-a",
        "--append",
        help="""Appending to existing file.""",
        action="count",
        default=0,
    )

    parser.add_argument(
        "-dp",
        "--detect_path",
        help="""Path to detections""",
        type=str,
        #        default="/work/00115/gebhardt/maverick/detect",
        default="/data/00115/gebhardt/detect")

    parser.add_argument("-survey",
                        "--survey",
                        help="""{hdr1, hdr2, hdr2.1}""",
                        type=str,
                        default="hdr2.1")

    parser.add_argument("-tar",
                        "--tar",
                        help="""Flag to open tarred multifits""",
                        action="store_true")

    parser.add_argument("-tp", "--tmppath", type=str, default=os.getcwd())

    args = parser.parse_args(argv)
    args.log = setup_logging()

    global hdr_survey
    hdr_survey = args.survey
    if args.survey != hdr_survey:
        args.log.warning("Hard coded hdr_survey does not match input survey.")
        sys.exit()

    config = HDRconfig(args.survey)
    badshots = np.loadtxt(config.badshot, dtype=int)

    # Get the daterange over which reduced files will be collected
    files = get_files(args)
    datestr = "%sv%03d" % (args.date, int(args.observation))
    filepath = "%s/%s/dithall.use" % (args.detect_path, datestr)
    shotid = int(str(args.date) + str(args.observation).zfill(3))
    badshotflag = False

    if shotid in badshots:
        badshotflag = True
        args.log.warning('Shot is in badshot list. Ingesting anyway')

    try:
        T = Table.read(filepath, format="ascii")
    except:
        T = None
        if badshotflag:
            args.log.info("Could not open the dithall file from %s" % filepath)
        else:
            args.log.error("Could not open the dithall file from %s" %
                           filepath)

    # Creates a new file if the "--append" option is not set or the file
    # does not already exist.
    does_exist = False
    if op.exists(args.outfilename) and args.append:
        # for now this is used to update the shot table
        fileh = tb.open_file(args.outfilename, "a")
        fileh.remove_node(fileh.root, "Shot")
        fileh.create_table(fileh.root, "Shot", VIRUSShot, "Shot Info")
        #does_exist = True
    else:
        outfile = op.join(args.tmppath, args.outfilename)
        fileh = tb.open_file(outfile, "w")
        group = fileh.create_group(fileh.root, "Data",
                                   "VIRUS Fiber Data and Metadata")
        fileh.create_table(group, "Fibers", VIRUSFiber, "Fiber Info")
        fileh.create_table(fileh.root, "Shot", VIRUSShot, "Shot Info")
        fileh.create_table(group, "Images", VIRUSImage, "Image Info")
        fileh.create_table(group, "FiberIndex", VIRUSFiberIndex,
                           "Fiber Coord Info")

    # Grab the fiber table and amplifier table for writing
    fibtable = fileh.root.Data.Fibers
    shottable = fileh.root.Shot
    imagetable = fileh.root.Data.Images
    fibindextable = fileh.root.Data.FiberIndex

    if does_exist:
        cnt = shottable[-1]["obsind"]
    else:
        cnt = 1

    if args.tar == True:

        shot = shottable.row
        n_ifu = {}
        for file_i in files:
            tar = tarfile.open(name=file_i, mode="r")

            members = tar.getmembers()
            fn = tar.extractfile(members[0])

            filename = fn.name
            idx = filename.find("exp")
            expn = filename[idx:idx + 5]

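            # four amplifier files (LL, LU, RL, RU) per IFU, so members/4 counts the IFUs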
            n_ifu[expn] = int(len(members) / 4)

            success = append_shot_to_table(shot, shottable, fn, cnt)

            if args.append:
                continue

            for member in members:
                fn = tar.extractfile(member)

                args.log.info("Working on %s" % member.name)
                fib = fibtable.row
                im = imagetable.row
                fibindex = fibindextable.row

                success = append_fibers_to_table(fibindex, fib, im, fn, cnt, T,
                                                 args)
                if success:
                    fibtable.flush()
                    imagetable.flush()
                    fibindextable.flush()

        shot["n_ifu"] = n_ifu["exp01"]

    else:

        shot = shottable.row

        try:

            filename = files[0]

            idx = filename.find("exp")
            expn = filename[idx:idx + 5]

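            # build filenames for the three exposures (exp01-exp03) from whichever file was found first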
            f_exp01 = re.sub(expn, "exp01", filename)
            f_exp02 = re.sub(expn, "exp02", filename)
            f_exp03 = re.sub(expn, "exp03", filename)

            success = append_shot_to_table(shot, shottable, f_exp01, cnt)
            success = append_shot_to_table(shot, shottable, f_exp02, cnt)
            success = append_shot_to_table(shot, shottable, f_exp03, cnt)

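            # 3 exposures x 4 amplifiers = 12 files per IFU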
            n_ifu = int(len(files) / 12)

            shot["n_ifu"] = n_ifu

        except Exception:
            if badshotflag:
                args.log.info('No multifits files for %s' % datestr)
            else:
                args.log.error('No multifits files for %s' % datestr)

        if args.append:

            args.log.info('Appending shot table only.')

        else:
            for fn in files:
                args.log.info("Working on %s" % fn)
                fib = fibtable.row
                im = imagetable.row
                fibindex = fibindextable.row

                success = append_fibers_to_table(fibindex, fib, im, fn, cnt, T,
                                                 args)
                if success:
                    fibtable.flush()
                    imagetable.flush()
                    fibindextable.flush()

    shot.append()

    # create completely sorted indexes on the ra and multiframe columns to make
    # queries against those columns much faster; multiframe starts with specid
    # (from the old multi*fits naming), which is fixed for any given shot,
    # unlike ifuslot and ifuid
    if not args.append:
        fibtable.cols.ra.create_csindex()
        fibindextable.cols.ra.create_csindex()

        imagetable.cols.multiframe.create_csindex()
        fibindextable.cols.multiframe.create_csindex()
        fibtable.cols.multiframe.create_csindex()

        fibtable.flush()
        fibindextable.flush()
        imagetable.flush()

    shottable.flush()
    fileh.close()

    # remove all temporary multifits
    if args.tar:
        datestr = "d%ss%03d" % (args.date, int(args.observation))
        datepath = op.join(args.tmppath, datestr)
        shutil.rmtree(datepath, ignore_errors=True)
        outfile = op.join(args.tmppath, args.outfilename)
        try:
            shutil.move(outfile, args.outfilename)
        except Exception:
            # destination already exists (or the move failed); remove it and retry
            os.remove(args.outfilename)
            shutil.move(outfile, args.outfilename)
Example #20
def main(argv=None):
    """ Main Function """
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(
        description="""Append calfib arrays to HDF5 file.""", add_help=True)

    parser.add_argument(
        "-d",
        "--date",
        help="""Date, e.g., 20170321, YYYYMMDD""",
        type=str,
        default=None,
    )

    parser.add_argument(
        "-o",
        "--observation",
        help='''Observation number, "00000007" or "7"''',
        type=str,
        default=None,
    )

    parser.add_argument(
        "-r",
        "--rootdir",
        help="""Root Directory for Reductions""",
        type=str,
        #        default="/scratch/03946/hetdex/alldet/cal_out/",
        default="/scratch/00115/gebhardt/alldet/cal_out/")

    parser.add_argument(
        "-of",
        "--outfilename",
        type=str,
        help="""Relative or absolute path for output HDF5
                        file.""",
        default=None,
    )

    parser.add_argument(
        "-survey",
        "--survey",
        help="""{hdr1, hdr2, hdr2.1, hdr3}""",
        type=str,
        default="hdr3",
    )

    args = parser.parse_args(argv)
    args.log = setup_logging()

    calfiles = get_cal_files(args)

    datestr = "%sv%03d" % (args.date, int(args.observation))

    shotid = int(str(args.date) + str(args.observation).zfill(3))

    # check if shotid is in badlist
    config = HDRconfig(args.survey)
    badshots = np.loadtxt(config.badshot, dtype=int)

    badshotflag = False

    if shotid in badshots:
        badshotflag = True

    if len(calfiles) == 0:
        if badshotflag:
            args.log.warning("No calfits file to append for %s" % datestr)
        else:
            args.log.error("No calfits file to append for %s" % datestr)

        sys.exit("Exiting cal append script for %s" % datestr)

    if op.exists(args.outfilename):
        fileh = tb.open_file(args.outfilename, "a")
    else:
        args.log.error("Problem opening : " + args.outfilename)
        sys.exit("Exiting Script")

    args.log.info("Appending calibrated fiber arrays to " + args.outfilename)

    fibtable = fileh.root.Data.Fibers

    for calfile in calfiles:

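        # Parse the 11-character IFU identifier (specid_ifuslot_ifuid) from the
        # calfits filename; the fixed slice below assumes the standard naming scheme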
        #        multi  = calfile[49:60]
        ifuname = calfile.split("/")[-1]
        multi = ifuname[13:24]

        try:
            cal_table = get_cal_table(calfile)
        except Exception:
            args.log.error("Could not ingest calfile: %s" % calfile)
            continue

        args.log.info("Working on IFU " + multi)

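        # Update matching fiber rows in place, one amplifier at a time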
        for amp_i in ["LL", "LU", "RL", "RU"]:

            multiframe_i = "multi_" + multi + "_" + amp_i

            for fibrow in fibtable.where("multiframe == multiframe_i"):

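                # match the calibration row on exposure number, multiframe name, and fiber index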
                idx = ((cal_table["expnum"] == fibrow["expnum"]) *
                       (cal_table["multiframe"]
                        == fibrow["multiframe"].decode()) *
                       (cal_table["fibidx"] == fibrow["fibidx"]))

                if np.sum(idx) >= 1:
                    fibrow["calfib"] = cal_table["calfib"][idx]
                    fibrow["calfibe"] = cal_table["calfibe"][idx]
                    fibrow["calfib_counts"] = cal_table["calfib_counts"][idx]
                    fibrow["calfibe_counts"] = cal_table["calfibe_counts"][idx]
                    fibrow["calfib_ffsky"] = cal_table["calfib_ffsky"][idx]
                    fibrow.update()
                # else:
                # args.log.warning("No fiber match for %s" % fibrow['fiber_id'])

    args.log.info("Flushing and closing H5 file")
    fibtable.flush()
    fileh.close()
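
# A minimal usage sketch; the script name and argument values below are illustrative:
#   python append_calfib.py -d 20200101 -o 7 -of 20200101v007.h5 -survey hdr3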