import numpy as np
from astropy.table import Table


def coarse_calib_configure_tables(dict_of_dicts):
    maxlines = np.max([len(val['clines']) for val in dict_of_dicts.values()])
    coeftab, metrictab, linestab, pixtab = Table(), Table(), Table(), Table()

    for fib, fibdict in dict_of_dicts.items():
        coefcolvals = np.array(fibdict['coefs'])
        coefcol = Table.Column(name=fib, data=coefcolvals)
        coeftab.add_column(coefcol)

        metcol = Table.Column(name=fib, data=np.array([fibdict['metric']]))
        metrictab.add_column(metcol)

        lines = np.append(fibdict['clines'],
                          np.zeros(maxlines - len(fibdict['clines'])))
        pixels = np.append(fibdict['pixels'],
                           np.zeros(maxlines - len(fibdict['clines'])))

        linecol = Table.Column(name=fib, data=lines)
        linestab.add_column(linecol)

        pixelcol = Table.Column(name=fib, data=pixels)
        pixtab.add_column(pixelcol)

    out = {
        'coefs': coeftab,
        'metric': metrictab,
        'clines': linestab,
        'pixels': pixtab
    }
    return out
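# Hedged usage sketch (not from the source): the function above assumes a
# dict keyed by fiber name, each value holding 'coefs', 'metric', 'clines'
# and 'pixels'. The fiber names and numbers below are hypothetical.
calib = {
    'r101': {'coefs': [0.10, 2.5e-3], 'metric': 0.98,
             'clines': [5460.7, 5875.6], 'pixels': [412.3, 1550.8]},
    'r102': {'coefs': [0.12, 2.4e-3], 'metric': 0.95,
             'clines': [5460.7], 'pixels': [408.9]},
}
tables = coarse_calib_configure_tables(calib)
print(tables['clines'])  # one column per fiber, zero-padded to equal length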
Example #2
    def __init__(self, n_grid, mag, noise=False):
        
        # Prepare a grid of positions
        img_size = 10 # Approximate size of the image (")
        row = np.delete(np.arange(-(img_size / 2), (img_size / 2), (img_size / (n_grid + 1))), 0)
        grid = np.asarray([row] * n_grid)
        
        if noise:
            x_noise = np.random.uniform(low=-noise/2, high=noise/2, size=len(grid.flatten()))
            y_noise = np.random.uniform(low=-noise/2, high=noise/2, size=len(grid.flatten()))
        else:
            x_noise = np.zeros(len(grid.flatten()))
            y_noise = np.zeros(len(grid.flatten()))
        
        x_now = Table.Column(data=grid.flatten()+x_noise, name='x')
        y_now = Table.Column(data=grid.flatten('F')+y_noise, name='y')

        # Use the same magnitude calibration as in GCstars
        aper_corr = 387.605
        ZP_flux = (24000.0 * 4 / 2.8) * aper_corr
        ZP_mag = 9.0

        f_now = Table.Column(data=[10 ** ((mag - ZP_mag) / -2.5) * ZP_flux] * (n_grid ** 2), name='flux')  # renamed from duplicate 'Kmag'
        mag_now = Table.Column(data=[mag] * (n_grid ** 2), name='Kmag')
        name_now = Table.Column(data=['dummy_star'] * (n_grid ** 2), name='name')

        super().__init__(x_now, y_now, f_now, mag_now, name_now)

        return
Example #3
    def load_matchdata(self, matchcatfilename=None):
        """Load a 3DHST catalog to identify galaxies that match the
        properties of the SN host galaxies.
        """
        if len(self.matchdata) > 0:
            print("SNANA sim outputs already matched to 3DHST." +
                  "No changes done.")
            return
        if matchcatfilename is None:
            matchcatfilename = '3DHST/3dhst_master.phot.v4.1.cat.FITS'

        if self.verbose:
            print("Loading observed galaxy data from the 3DHST catalogs")
        matchdata = fits.getdata(matchcatfilename)
        f160 = matchdata['f_F160W']
        zspec = matchdata['z_spec']
        zphot = matchdata['z_peak']
        zbest = np.where(zspec > 0, zspec, zphot)
        usephot = matchdata['use_phot']
        ivalid = np.where(((f160 > 0) & (zbest > 0)) & (usephot == 1))[0]
        isort = np.argsort(zbest[ivalid])
        z3d = zbest[ivalid][isort]
        idgal = matchdata['id'][ivalid][isort].astype(int)
        field = matchdata['field'][ivalid][isort]
        mag3d = (-2.5 * np.log10(f160[ivalid]) + 25)[isort]
        id3d = np.array([
            '{}.{:04d}'.format(field[i], idgal[i]) for i in range(len(field))
        ])
        self.matchdata.add_column(Table.Column(data=z3d, name='z3D'))
        self.matchdata.add_column(Table.Column(data=mag3d, name='mag3D'))
        self.matchdata.add_column(Table.Column(data=id3d, name='id3D'))
        return
Example #4
def clusters(cat, mask, colors):

    table = cat[mask]
    table.keep_columns(colors)
    data = table.to_pandas()

    clusterer = HDBSCAN(min_cluster_size=20)  # use 100 for the real run
    clusterer.fit(data)

    labels = Table.Column(clusterer.labels_, name='ct')
    proba = Table.Column(clusterer.probabilities_, name='prob_ct')
    stars = Table([cat[mask]['XMMSRCID'], labels, proba])

    return stars
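# Hedged usage sketch (not from the source): imports and file name are
# assumptions. 'cat' needs an 'XMMSRCID' column plus one column per colour
# name passed in 'colors'; 'mask' is any boolean row selection.
import numpy as np
from astropy.table import Table
from hdbscan import HDBSCAN  # assumed; the source may import it differently

cat = Table.read('xmm_sources.fits')   # hypothetical input catalogue
mask = np.ones(len(cat), dtype=bool)   # keep every row
stars = clusters(cat, mask, colors=['gmr', 'gmi', 'rmi'])
print(stars['ct', 'prob_ct'])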
Example #5
    def pick_random_matches(self, dz=0.05, dmag=0.2):
        """For each simulated SN host gal, find all observed galaxies (from
        the 3DHST catalogs) that have similar redshift and magnitude---i.e.,
        a redshift within dz of the simulated z, and an H band mag within
        dmag of the simulated H band mag.

        Pick one at random, and adopt it as the template for our simulated SN
        host gal (to be used for simulating the host gal spectrum).
        """
        if self.matchdata is None:
            self.load_matchdata()
        zsim = self.simdata['zsim']
        magsim = self.simdata['magsim']
        z3d = self.matchdata['z3D']
        mag3d = self.matchdata['mag3D']
        id3d = self.matchdata['id3D']

        nsim = len(zsim)
        if self.verbose:
            print("Finding observed galaxies that ~match simulated SN host" +
                  "\ngalaxy properties (redshift and magnitude)...")

        # TODO: find the nearest 10 or 100 galaxies, instead of all within
        # a specified dz and dmag range.

        nmatch, magmatch, zmatch, idmatch = [], [], [], []
        for i in range(nsim):
            isimilar = np.where((z3d + dz > zsim[i]) &
                                (z3d - dz < zsim[i]) &
                                (mag3d + dmag > magsim[i]) &
                                (mag3d - dmag < magsim[i]))[0]
            nmatch.append(len(isimilar))
            irandmatch = np.random.choice(isimilar)
            magmatch.append(mag3d[irandmatch])
            zmatch.append(z3d[irandmatch])
            idmatch.append(id3d[irandmatch])

        # record the 3DHST data for each galaxy we have randomly picked:
        #   z, mag, id (field name + 3DHST catalog index)
        # TODO: don't use add_column... we should update columns if they
        # already exist.
        self.simdata.add_column(
            Table.Column(data=np.array(idmatch), name='idmatch'))
        self.simdata.add_column(
            Table.Column(data=np.array(nmatch), name='nmatch'))
        self.simdata.add_column(
            Table.Column(data=np.array(magmatch), name='magmatch'))
        self.simdata.add_column(
            Table.Column(data=np.array(zmatch), name='zmatch'))
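# Hedged sketch (an assumption, not the author's code) of the TODO above:
# draw the random match from the k nearest galaxies in (z, mag) space,
# using dz and dmag as the scale of each axis, instead of a box cut.
import numpy as np

def pick_from_k_nearest(z3d, mag3d, zsim_i, magsim_i, k=100, dz=0.05, dmag=0.2):
    dist = np.hypot((z3d - zsim_i) / dz, (mag3d - magsim_i) / dmag)
    inearest = np.argsort(dist)[:k]
    return np.random.choice(inearest)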
Example #6
    def __load_data(self, event_path):
        """Returns dataframe with raw data."""
        # I'm not sure that time for MOA data is in HJD
        with open(event_path) as f:
            contents = f.readlines()
            processed = ""
            for line in contents:
                processed += re.sub(r"\s+", ",", line.strip()) + "\n"
            t = Table.read(processed, format="ascii")
            t.keep_columns(["col1", "col2", "col3"])
            t.rename_column("col1", "HJD")
            t.rename_column("col2", "flux")
            t.rename_column("col3", "flux_err")
            t.meta = {"filter": "I", "observatory": "MOA"}

            # Remove the random rows with zero time and negative time
            t = t[t["HJD"] > 0]

            # Add mask column
            mask = Table.Column(np.ones(len(t["HJD"]), dtype=bool),
                                name="mask",
                                dtype=bool)
            t.add_column(mask)  # appended as the last table column

        self._Data__tables.append(t)
Example #7
    def add_snana_simdata(self, infilename):
        """Read in a catalog of SN host galaxy data. Initialize a new
        catalog from a SNANA head.fits file.
        """
        simdata = Table()
        hdulist = fits.open(infilename)
        bindata = hdulist[1].data
        zsim = bindata['SIM_REDSHIFT_HOST']
        if 'HOSTGAL_MAG_H' in [col.name for col in bindata.columns]:
            magsim = bindata['HOSTGAL_MAG_H']
        else:
            magsim = bindata['HOSTGAL_MAG_J']
        simdata.add_column(Table.Column(data=magsim, name='magsim'))
        simdata.add_column(Table.Column(data=zsim, name='zsim'))
        self.simdata = table.vstack([self.simdata, simdata])
        self.simfilelist.append(infilename)
Example #8
    def simulate_host_spectra(self,
                              indexlist=None,
                              outdir='3DHST/sedsim.output',
                              clobber=False):
        """Use Gabe Brammer's EAZY code to simulate the host gal spectrum
        for every host galaxy in the sample.
        """
        if 'idmatch' not in self.simdata.colnames:
            print("No idmatch data. Run 'pick_random_matches()'")
            return
        if indexlist is None:
            indexlist = self.simdata['index']
        if self.verbose:
            print("Using Gabe Brammer's EAZY code to generate "
                  "the best-fit SEDs of the observed galaxies that "
                  "we have matched up to the SNANA simulation hostgal data.")
        if not os.path.isdir(outdir):
            os.mkdir(outdir)
        sedoutfilelist = []
        for idx in self.simdata['index']:
            fieldidx = self.simdata['idmatch'][idx]
            fieldstr, idxstr = fieldidx.split('.')
            field3dhst = fieldstr.lower().replace('-', '')
            idx3dhst = int(idxstr)
            thiseazydat = self.eazydata[field3dhst]
            sedoutfilename = os.path.join(
                outdir, 'wfirst_simsed.{:06d}.dat'.format(idx))
            sedoutfilelist.append(sedoutfilename)
            headerstring = """# WFIRST SN Host Gal SED simulated with EAZYpy
# field3d={:s}
# idx3d={:d}
# z3d={:.3f}
# mag3d={:.3f}
# zsim={:.3f}
# magsim={:.3f}
# idxsim={:d}
# wave_nm   mag_AB\n""".format(field3dhst, idx3dhst,
                               self.simdata['zmatch'][idx],
                               self.simdata['magmatch'][idx],
                               self.simdata['zsim'][idx],
                               self.simdata['magsim'][idx],
                               self.simdata['index'][idx])
            if idx not in indexlist:
                if self.verbose > 1:
                    print("Skipping SED simulation for idx={:d}".format(idx))
                continue
            if clobber or not os.path.isfile(sedoutfilename):
                if self.verbose > 1:
                    print("Generating {:s}".format(sedoutfilename))
                simulate_eazy_sed(fieldidx=fieldidx,
                                  eazydata=thiseazydat,
                                  savetofile=sedoutfilename,
                                  headerstring=headerstring)
            else:
                if self.verbose > 1:
                    print(
                        "{:s} exists. Not clobbering.".format(sedoutfilename))
        # assert(len(self.simdata['zsim']) == len(sedoutfilelist))
        self.simdata.add_column(
            Table.Column(data=sedoutfilelist, name='sedoutfile'))
Example #9
    def __load_data(self, event_dir):
        """Returns a table with raw data."""
        tables = []
        for obs in ("KMTA", "KMTC", "KMTS"):
            t = Table.read(event_dir + "/{}01_I.diapl".format(obs),
                           format="ascii")
            t["col1"] += 2450000
            t.keep_columns(("col1", "col2", "col3"))
            t.rename_column("col1", "HJD")
            t.rename_column("col2", "flux")
            t.rename_column("col3", "flux_err")
            t.meta = {"filter": "I", "observatory": obs}
            tables.append(t)

        self._Data__tables = tables

        for t in self._Data__tables:
            # Add mask column
            mask = Table.Column(np.ones(len(t["HJD"]), dtype=bool),
                                name="mask",
                                dtype=bool)
            t.add_column(mask)  # appended as the last table column
Example #10
def merge_cat(tmass, ukidss, vista):
    moc_vista = MOC()
    read_moc_fits(moc_vista, '../data/vista/moc_vista.fits')
    moc_ukidss = MOC()
    read_moc_fits(moc_ukidss, '../data/ukidss/moc_ukidss.fits')

    moc_order = moc_vista.order
    hp = HEALPix(nside=2**moc_order, order='nested', frame=ICRS())
    moc_allsky = MOC(0, tuple(range(12)))
    moc_tmass = moc_allsky - moc_ukidss - moc_vista
    moc_vista = moc_vista - moc_ukidss

    vista_final = utils.sources_inmoc(vista,
                                      hp,
                                      moc_vista,
                                      moc_order=moc_order,
                                      ra='posRA',
                                      dec='posDec')
    vista_final.rename_column('NVTobjID', 'NIRobjID')
    vista_final.add_column(
        Table.Column(['VISTA'] * len(vista_final), name='NIR_SURVEY'))

    tmass_final = utils.sources_inmoc(tmass,
                                      hp,
                                      moc_tmass,
                                      moc_order=moc_order,
                                      ra='posRA',
                                      dec='posDec')
    tmass_final.rename_column('NTMobjID', 'NIRobjID')
    tmass_final.add_column(
        Table.Column(['2MASS'] * len(tmass_final), name='NIR_SURVEY'))

    ukidss.rename_column('NUKobjID', 'NIRobjID')
    ukidss.add_column(Table.Column(['UKIDSS'] * len(ukidss),
                                   name='NIR_SURVEY'))

    xmatchcat_final = vstack([tmass_final, vista_final, ukidss])

    msk = np.logical_or(xmatchcat_final['NIR_SURVEY'] == 'UKIDSS',
                        xmatchcat_final['NIR_SURVEY'] == '2MASS')
    msk = np.logical_and(xmatchcat_final['NIRobjID'].mask, msk)

    return xmatchcat_final
Example #11
def find(cat):

    colors = [
        'gmr', 'gmi', 'gmz', 'gmy', 'rmi', 'rmz', 'rmy', 'imz', 'imy', 'zmy'
    ]

    train_file = '../photoz/samples/training/stars.dat'
    test_file = '../photoz/samples/stars.dat'
    run_file = '../photoz/runs/stars.input'

    results_folder = '../photoz/results/stars'
    results = os.path.join(results_folder, 'stars')
    if not os.path.exists(results_folder):
        os.makedirs(results_folder)

    training = irsample(cat)
    columns = make_training(training, colors, train_file)
    testids = make_testing(cat, columns, test_file)
    make_tpzinput(run_file,
                  train_file,
                  test_file,
                  results,
                  columns=columns,
                  zmin=0,
                  zmax=1,
                  znbins=2,
                  nrandom=10,
                  ntrees=15,
                  pmode='TPZ_C',
                  pclass='Class')
    runtpz(run_file, testids, results, pdfs=False)

    stars_tpz = Table.read(results + '.fits', memmap=True)
    msk = stars_tpz['zmode0'] == 1
    stars_tpz.keep_columns(['id', 'zmode0'])
    stars_tpz.rename_column('id', 'PSobjID')
    stars_tpz.rename_column('zmode0', 'STARS_TPZ')
    stars_tpz = stars_tpz[msk]

    stars_nirmir = training[training['STARS_MIRNIR']]
    stars_nirmir.keep_columns(['PSobjID', 'STARS_MIRNIR'])
    stars_nirmir.replace_column('STARS_MIRNIR', np.ones(len(stars_nirmir)))

    newcat = join(cat, stars_nirmir, keys='PSobjID', join_type='left')
    newcat = join(newcat, stars_tpz, keys='PSobjID', join_type='left')

    stars_any = np.logical_or(newcat['STARS_MIRNIR'] == True,
                              newcat['STARS_TPZ'] == True)
    stars_any = Table.Column(stars_any, name='STAR')
    newcat.add_column(stars_any)

    return newcat
Example #12
def mod_evt(label):
    print('run')
    epoch_info = np.loadtxt(path + label[2] + '_epoch.txt')
    TSTART = epoch_info[:, 0]
    TSTOP = epoch_info[:, 1]
    obsID = epoch_info[:, 2]
    exptime = epoch_info[:, 3]
    file_o = 'LW_merged_evt.fits'
    hdul = fits.open(path + file_o)
    hd1 = hdul[1]
    time = hdul[1].data.field(0)
    FID = []
    for i in range(len(obsID)):
        if i >= 1:
            if get_index_in_list(time, TSTART[i])[0] <= get_index_in_list(time, TSTOP[i - 1])[-1]:
                ## avoid double-counting when several photons share the same arrival time
                FID.append(np.zeros(get_index_in_list(time, TSTOP[i])[-1] - get_index_in_list(time, TSTOP[i - 1])[-1]) + obsID[i])
            else:
                FID.append(np.zeros(get_index_in_list(time, TSTOP[i])[-1] - get_index_in_list(time, TSTART[i])[0] + 1) + obsID[i])
        else:
            FID.append(
                np.zeros(get_index_in_list(time, TSTOP[i])[-1] - get_index_in_list(time, TSTART[i])[0] + 1) + obsID[i])

    temp = 0
    for i in range(len(FID)):
        temp += len(FID[i])
    if temp != len(time):
        print(temp)
        print(len(time))
        print('error')
        return 'error'

    FID_1d = np.concatenate(FID)
    FID_1d = FID_1d.astype('int')
    # get the first table
    t1 = Table.read(path + file_o)
    col1 = Table.Column(name='FID', data=FID_1d)
    t1.add_column(col1)
    # get the second table
    print(t1)
    t1.write(path + 'LW_merged_evt_FID.fits', format='fits', overwrite=True)

#mod_evt(label)
Example #13
    def __load_data(self, event_dir):
        """Returns a table with raw data."""
        t = Table.read(event_dir + "/phot.dat", format="ascii")

        # Remove additional columns
        t.columns[0].name = "HJD"
        t.columns[1].name = "mag"
        t.columns[2].name = "mag_err"
        t.keep_columns(("HJD", "mag", "mag_err"))

        # Add mask column
        mask = Table.Column(np.ones(len(t["HJD"]), dtype=bool),
                            name="mask",
                            dtype=bool)
        t.add_column(mask)  # appended as the last table column

        # Add 2450000 if necessary
        if t["HJD"][0] < 2450000:
            t["HJD"] += 2450000

        t.meta = {"filter": "I", "observatory": "OGLE"}
        self._Data__tables.append(t)
Example #14
    def generate_time_interpolated_calibration(self, science_hdu):
        if len(self.interpolated_coef_fits) == 0:
            self.interpolate_final_calibrations()
        mean_timestamp, mean_datetime, night = get_meantime_and_date(
            science_hdu.header)

        if night in self.interpolated_coef_fits.keys():
            night_interpolator, fit_model = self.interpolated_coef_fits[night]
        else:
            print(
                "WARNING: Requested {}, but that isn't a known night. Possible options were: "
                .format(night), self.interpolated_coef_fits.keys())
            timestamps, nights = [], []
            for (cal_timestamp, cal_datetime,
                 cal_night) in self.fine_calibration_date_info.values():
                timestamps.append(cal_timestamp)
                nights.append(cal_night)
            # use absolute offsets so argmin finds the nearest night, and
            # don't shadow mean_timestamp from the science exposure
            time_devs = np.abs(np.array(timestamps) - mean_timestamp)
            closest_night_loc = np.argmin(time_devs)
            closest_night = nights[closest_night_loc]
            print(
                "Using night: {} instead, as it's nearest at {:0.01f} minutes away"
                .format(closest_night, time_devs[closest_night_loc] / 60.))
            night_interpolator, fit_model = self.interpolated_coef_fits[
                closest_night]

        out_cols = []
        for fib, coefs in night_interpolator.items():
            fitted_coefs = []
            for coef_name, coefvals in coefs.items():
                if fit_model is None:
                    fitted_coef = coefvals(mean_timestamp)
                else:
                    fitted_coef = fit_model(mean_timestamp, *coefvals)
                fitted_coefs.append(fitted_coef)
            out_cols.append(Table.Column(name=fib, data=fitted_coefs))
        out_table = Table(out_cols)
        return out_table
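# Hedged usage sketch (not from the source): 'calib' stands for an instance
# of the class this method belongs to, and 'science.fits' is a hypothetical
# science frame whose header carries the observation time.
from astropy.io import fits

science_hdu = fits.open('science.fits')[0]
coef_table = calib.generate_time_interpolated_calibration(science_hdu)
print(coef_table.colnames)  # one column of fitted coefficients per fiber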
Example #15
File: binning.py  Project: ruizca/xmmpzcat
def final(obsids_table, data_folder, nir_survey='2MASS'):
    """
    Group observations in obsids_table combining the bins defined by
    sky density of optical and X-ray sources.
    """
    ### Get optical bins
    optbins = np.unique(obsids_table['OPTBIN'])
    obsids_table_bins = Table()
    stats = Table()

    ### Define Texp bins for each optical bin
    binid_first = 1
    for obin in tqdm(optbins, desc='Binning OBSIDs'):
        msk_bin = np.logical_and(obsids_table['OPTBIN'] == obin,
                                 ~obsids_table['SKY_OUTLIER'])

        bin_table = obsids_table[msk_bin]
        bin_table, bin_stats = xrays(bin_table, 45, binid_first)

        obsids_table_bins = vstack([obsids_table_bins, bin_table])
        stats = vstack([stats, bin_stats])

        binid_first += len(bin_stats)

    stats_filename = '{}_bins.fits'.format(nir_survey.lower())
    stats.write(os.path.join(data_folder, stats_filename),
                format='fits',
                overwrite=True)

    msk_outliers = obsids_table['SKY_OUTLIER']
    outliers_table = obsids_table[msk_outliers]
    outliers_col = Table.Column(len(outliers_table) * [np.nan], name='BIN_ID')
    outliers_table.add_column(outliers_col)

    obsids_table_bins = vstack([obsids_table_bins, outliers_table])

    return obsids_table_bins
Example #16
def sdss(obsids_table,
         data_folder,
         moc_folder,
         nir_moc=None,
         data_release=14,
         radius=15 * u.arcmin,
         moc_order=15,
         overwrite=True):
    """
    Get SDSS data using astroquery.
    For each observation in obsids_table, saves a fits file with
    name 'OBS_ID.fits' in 'data_folder/groups'.

    The function sends a query and selects all sources within 'radius'
    of the RA,DEC of the observation, then it filters the result
    selecting the sources in the corresponding MOC stored in 'moc_folder/mocs'
    (moc_order must be consistent with the order used to calculate the MOC).

    If overwrite is True, a new fits file is always created. If False, an
    existing file is used to calculate the number of SDSS sources in the
    field; if it doesn't exist, the file is created.

    The function returns obsids_table with an additional column 'NSRC_SDSS'
    with the number of sources in the field.
    """
    # Groups folder
    if nir_moc is None:
        groups_folder = os.path.join(data_folder, 'groups')

    else:
        root, _ = os.path.splitext(os.path.basename(nir_moc))
        survey = root.split('_')[-1]
        groups_folder = os.path.join(data_folder, 'groups_' + survey)

        moc_nirsurvey = MOC()
        read_moc_fits(moc_nirsurvey, nir_moc)

    if not os.path.exists(groups_folder):
        os.makedirs(groups_folder)

    moc_folder = os.path.join(moc_folder, 'mocs')

    nsources_field = np.full((len(obsids_table), ), np.nan)
    hp = HEALPix(nside=2**moc_order, order='nested', frame=ICRS())
    photoobj_fields = ['objID', 'mode', 'ra', 'dec', 'raErr', 'decErr']

    for i, row in enumerate(tqdm(obsids_table, desc="Making SDSS groups")):
        ## Group file name
        field_table_file = os.path.join(data_folder, groups_folder,
                                        '{}.fits'.format(row['OBS_ID']))
        is_field_table = os.path.exists(field_table_file)

        if overwrite or not is_field_table:
            ## Select all sources in the field
            field_coords = SkyCoord(ra=row['RA'] * u.deg,
                                    dec=row['DEC'] * u.deg)

            src_table = SDSS.query_region(field_coords,
                                          radius=radius,
                                          photoobj_fields=photoobj_fields,
                                          data_release=data_release)
            # Filter table
            # In ARCHES, the only filter is selecting primary objects,
            # no filtering in the quality of photometry (clean).
            src_table = src_table[src_table['mode'] == 1]

            ## Select sources in the non-overlapping area
            moc_field = MOC()
            read_moc_fits(
                moc_field,
                os.path.join(moc_folder, '{}.moc'.format(row['OBS_ID'])))
            if nir_moc is not None:
                moc_field = moc_nirsurvey.intersection(moc_field)

            inmoc_table = sources_inmoc(src_table,
                                        hp,
                                        moc_field,
                                        moc_order=moc_order,
                                        ra='ra',
                                        dec='dec',
                                        units=u.deg)
            ## Save sources
            inmoc_table.remove_columns(['mode'])
            inmoc_table.meta['description'] = 'SDSS'
            inmoc_table.write(field_table_file, overwrite=True)

        else:
            inmoc_table = Table.read(field_table_file)

        nsources_field[i] = len(inmoc_table)

    colsrc = Table.Column(nsources_field, name='NSRC_SDSS')
    obsids_table.add_column(colsrc)

    return obsids_table
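# Hedged usage sketch (not from the source): paths and the input table are
# hypothetical. obsids_table must provide OBS_ID, RA and DEC columns.
from astropy import units as u
from astropy.table import Table

obsids = Table.read('obsids.fits')
obsids = sdss(obsids, 'data', 'data', data_release=14,
              radius=15 * u.arcmin, moc_order=15, overwrite=False)
print(obsids['OBS_ID', 'NSRC_SDSS'])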
Example #17
def pstarrs(obsids_table,
            data_folder,
            moc_folder,
            nir_moc=None,
            radius=15 * u.arcmin,
            moc_order=16,
            overwrite=True):
    """
    Get Pan-STARRS data using astroquery and Vizier.
    For each observation in obsids_table, saves a fits file with
    name 'OBS_ID.fits' in 'data_folder/groups'.

    The function sends a Vizier query and selects all sources within 'radius'
    of the RA,DEC of the observation, then it filters the result
    selecting the sources in the corresponding MOC stored in 'moc_folder/mocs'
    (moc_order must be consistent with the order used to calculate the MOC).

    If overwrite is True, a new fits file is always created. If False, an
    existing file is used to calculate the number of Pan-STARRS sources in
    the field; if it doesn't exist, the file is created.

    The function returns obsids_table with an additional column 'NSRC_PS' with
    the number of sources in the field.
    """

    # Groups folder
    if nir_moc is None:
        groups_folder = os.path.join(data_folder, 'groups')
    else:
        root, _ = os.path.splitext(os.path.basename(nir_moc))
        survey = root.split('_')[-1]
        groups_folder = os.path.join(data_folder, 'groups_' + survey)

        moc_nirsurvey = MOC()
        read_moc_fits(moc_nirsurvey, nir_moc)

    if not os.path.exists(groups_folder):
        os.makedirs(groups_folder)

    moc_folder = os.path.join(moc_folder, 'mocs')

    nsources_field = np.full((len(obsids_table), ), np.nan)
    hp = HEALPix(nside=2**moc_order, order='nested', frame=ICRS())

    v = Vizier(columns=[
        'objID', 'RAJ2000', 'DEJ2000', 'e_RAJ2000', 'e_DEJ2000', 'Nd', 'Qual'
    ],
               column_filters={"Nd": ">1"},
               row_limit=np.inf,
               timeout=6000)

    for i, row in enumerate(tqdm(obsids_table,
                                 desc="Making Pan-STARRS groups")):
        ## Group file name
        field_table_file = os.path.join(data_folder, groups_folder,
                                        '{}.fits'.format(row['OBS_ID']))
        is_field_table = os.path.exists(field_table_file)

        if overwrite or not is_field_table:
            ## Select all sources in the field
            field_coords = SkyCoord(ra=row['RA'] * u.deg,
                                    dec=row['DEC'] * u.deg)

            vrsp = v.query_region_async(field_coords,
                                        radius=radius,
                                        catalog='II/349',
                                        return_type='asu-tsv')

            # Fix bug in the vizier response
            # (returns the objID as a short int and fails to load
            # properly as an astropy table)
            with open('/tmp/tmp.tab', 'wb') as tmpfile:
                tmpfile.write(vrsp.content)

            src_table = Table.read('/tmp/tmp.tab', format='ascii.tab')
            src_table = src_table[2:]

            objid = np.array(src_table['objID']).astype(np.int64)
            ra = np.array(src_table['RAJ2000']).astype(float) * u.deg
            dec = np.array(src_table['DEJ2000']).astype(float) * u.deg

            err_ra = np.array(src_table['e_RAJ2000'])
            err_ra[err_ra == '            '] = '-1'
            err_ra = err_ra.astype(float) * u.arcsec
            err_ra[err_ra == -1] = np.nan

            err_dec = np.array(src_table['e_DEJ2000'])
            err_dec[err_dec == '            '] = '-1'
            err_dec = err_dec.astype(float) * u.arcsec
            err_dec[err_dec == -1] = np.nan

            flag = np.array(src_table['Qual']).astype(np.int32)

            src_table = Table([objid, ra, dec, err_ra, err_dec, flag],
                              names=[
                                  'objID', 'RAJ2000', 'DEJ2000', 'e_RAJ2000',
                                  'e_DEJ2000', 'Qual'
                              ])
            # Filter table
            msk_good = (src_table['Qual'] & 16) != 0
            src_table_new = src_table[msk_good]

            ## Select sources in the non-overlapping area
            moc_field = MOC()
            read_moc_fits(
                moc_field,
                os.path.join(moc_folder, '{}.moc'.format(row['OBS_ID'])))

            if nir_moc is not None:
                moc_field = moc_nirsurvey.intersection(moc_field)

            inmoc_table = sources_inmoc(src_table_new,
                                        hp,
                                        moc_field,
                                        moc_order=moc_order,
                                        ra='RAJ2000',
                                        dec='DEJ2000')
            ## Save sources
            inmoc_table.remove_columns(['Qual'])
            inmoc_table.meta['description'] = 'Pan-STARRS'
            inmoc_table.write(field_table_file, overwrite=True)

        else:
            inmoc_table = Table.read(field_table_file)

        nsources_field[i] = len(inmoc_table)

    colsrc = Table.Column(nsources_field, name='NSRC_PS')
    obsids_table.add_column(colsrc)

    return obsids_table
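# Hedged usage sketch (not from the source), mirroring the sdss() example
# above with the same hypothetical inputs.
obsids = Table.read('obsids.fits')
obsids = pstarrs(obsids, 'data', 'data',
                 radius=15 * u.arcmin, moc_order=16, overwrite=False)
print(obsids['OBS_ID', 'NSRC_PS'])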
Example #18
def assemble_fibermap(night, expid, force=False):
    """
    Create a fibermap for a given night and expid

    Args:
        night (int): YEARMMDD night of sunset
        expid (int): exposure ID

    Options:
        force (bool): create fibermap even if missing coordinates/guide files
    """

    log = get_logger()

    #- Find fiberassign file
    fafile = find_fiberassign_file(night, expid)

    #- Find coordinates file in same directory
    dirname, filename = os.path.split(fafile)
    globfiles = glob.glob(dirname + '/coordinates-*.fits')
    if len(globfiles) == 1:
        coordfile = globfiles[0]
    elif len(globfiles) == 0:
        message = f'No coordinates*.fits file in fiberassign dir {dirname}'
        if force:
            log.error(message + '; continuing anyway')
            coordfile = None
        else:
            raise FileNotFoundError(message)

    elif len(globfiles) > 1:
        raise RuntimeError(
            f'Multiple coordinates*.fits files in fiberassign dir {dirname}')

    #- And guide file
    dirname, filename = os.path.split(fafile)
    globfiles = glob.glob(dirname + '/guide-????????.fits.fz')
    if len(globfiles) == 1:
        guidefile = globfiles[0]
    elif len(globfiles) == 0:
        message = f'No guide-*.fits.fz file in fiberassign dir {dirname}'
        if force:
            log.error(message + '; continuing anyway')
            guidefile = None
        else:
            raise FileNotFoundError(message)

    elif len(globfiles) > 1:
        raise RuntimeError(
            f'Multiple guide-*.fits.fz files in fiberassign dir {dirname}')

    #- Preflight announcements
    log.info(f'Night {night} spectro expid {expid}')
    log.info(f'Fiberassign file {fafile}')
    log.info(f'Platemaker coordinates file {coordfile}')
    log.info(f'Guider file {guidefile}')

    #----
    #- Read and assemble

    fa = Table.read(fafile, 'FIBERASSIGN')
    fa.sort('LOCATION')

    if coordfile is not None:
        pm = Table.read(coordfile, 'DATA')  #- PM = PlateMaker
        pm['LOCATION'] = 1000 * pm['PETAL_LOC'] + pm['DEVICE_LOC']
        keep = np.in1d(pm['LOCATION'], fa['LOCATION'])
        pm = pm[keep]
        pm.sort('LOCATION')
        log.info('{}/{} fibers in coordinates file'.format(len(pm), len(fa)))

        #- Count offset iterations by counting columns with name OFFSET_{n}
        # numiter = len([col for col in pm.colnames if col.startswith('FVC_X_')])
        numiter = len([col for col in pm.colnames if col.startswith('EXP_X_')])

        #- Create fibermap table to merge with fiberassign file
        fibermap = Table()
        fibermap['LOCATION'] = pm['LOCATION']
        fibermap['NUM_ITER'] = numiter

        #- Sometimes these columns are missing in the coordinates files, maybe
        #- only when numiter=1, i.e. only a blind move but not corrections?
        if f'FPA_X_{numiter-1}' in pm.colnames:
            fibermap['FIBER_X'] = pm[f'FPA_X_{numiter-1}']
            fibermap['FIBER_Y'] = pm[f'FPA_Y_{numiter-1}']
            fibermap['DELTA_X'] = pm[f'DX_{numiter-1}']
            fibermap['DELTA_Y'] = pm[f'DY_{numiter-1}']
        else:
            log.warning(
                'No FIBER_X/Y or DELTA_X/Y information from platemaker')
            fibermap['FIBER_X'] = np.zeros(len(pm))
            fibermap['FIBER_Y'] = np.zeros(len(pm))
            fibermap['DELTA_X'] = np.zeros(len(pm))
            fibermap['DELTA_Y'] = np.zeros(len(pm))

        #- pre-parse which positioners were good
        expflag = pm[f'FLAGS_EXP_{numiter-1}']
        good = ((expflag & 4) != 0) & (expflag < 200)

        flags_cnt_colname = f'FLAGS_CNT_{numiter-1}'
        if flags_cnt_colname in pm.colnames:
            cntflag = pm[flags_cnt_colname]
            good &= ((cntflag & 1) != 0)
        else:
            log.warning(f'coordinates file missing column {flags_cnt_colname}')

        bad = ~good

        fibermap['_BADPOS'] = np.zeros(len(fibermap), dtype=bool)
        fibermap['_BADPOS'][bad] = True

        #- Missing columns from coordinates file...
        log.warning('No FIBER_RA or FIBER_DEC from platemaker yet')
        fibermap['FIBER_RA'] = np.zeros(len(pm))
        fibermap['FIBER_DEC'] = np.zeros(len(pm))

        fibermap = join(fa, fibermap, join_type='left')

        #- Set fiber status bits
        missing = np.in1d(fibermap['LOCATION'], pm['LOCATION'], invert=True)
        fibermap['FIBERSTATUS'][missing] |= fibermask.MISSINGPOSITION

        badpos = fibermap['_BADPOS']
        fibermap['FIBERSTATUS'][badpos] |= fibermask.BADPOSITION
        fibermap.remove_column('_BADPOS')

    else:
        #- No coordinates file; just use fiberassign + dummy columns
        fibermap = fa
        # Include NUM_ITER that is added if coord file exists
        fibermap['NUM_ITER'] = 0
        fibermap['FIBER_X'] = 0.0
        fibermap['FIBER_Y'] = 0.0
        fibermap['DELTA_X'] = 0.0
        fibermap['DELTA_Y'] = 0.0
        fibermap['FIBER_RA'] = 0.0
        fibermap['FIBER_DEC'] = 0.0
        # Update data types to be consistent with updated value if coord file was used.
        for val in ['FIBER_X', 'FIBER_Y', 'DELTA_X', 'DELTA_Y']:
            old_col = fibermap[val]
            fibermap.replace_column(
                val, Table.Column(name=val, data=old_col.data, dtype='>f8'))
        for val in ['LOCATION', 'NUM_ITER']:
            old_col = fibermap[val]
            fibermap.replace_column(
                val, Table.Column(name=val, data=old_col.data, dtype=np.int64))

    #- Update SKY and STD target bits to be in both CMX_TARGET and DESI_TARGET
    #- i.e. if they are set in one, also set in the other.  Ditto for SV*
    for targetcol in ['CMX_TARGET', 'SV0_TARGET', 'SV1_TARGET', 'SV2_TARGET']:
        if targetcol in fibermap.colnames:
            for mask in [
                    desi_mask.SKY, desi_mask.STD_FAINT, desi_mask.STD_BRIGHT
            ]:
                ii = (fibermap[targetcol] & mask) != 0
                iidesi = (fibermap['DESI_TARGET'] & mask) != 0
                fibermap[targetcol][iidesi] |= mask
                fibermap['DESI_TARGET'][ii] |= mask

    #- Add header info from guide file
    if guidefile is not None:
        hdr = fits.getheader(guidefile, 0)

        skipkeys = [
            'EXTNAME', 'COMMENT', 'CHECKSUM', 'DATASUM', 'PCOUNT', 'GCOUNT',
            'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'XTENSION', 'TFIELDS'
        ]
        if fibermap.meta['TILEID'] != hdr['TILEID']:
            raise RuntimeError('fiberassign tile {} != guider tile {}'.format(
                fibermap.meta['TILEID'], hdr['TILEID']))

        for key, value in hdr.items():
            if key not in skipkeys \
                   and not key.startswith('TTYPE') \
                   and not key.startswith('TFORM') \
                   and not key.startswith('TUNIT'):

                if key not in fibermap.meta:
                    fibermap.meta[key] = value
                elif fibermap.meta[key] != hdr[key]:
                    fmval = fibermap.meta[key]
                    log.warning(
                        f'fibermap[{key}] {fmval} != guide[{key}] {value}')

    fibermap.meta['EXTNAME'] = 'FIBERMAP'

    #- Record input guide and coordinates files
    if guidefile is not None:
        fibermap.meta['GUIDEFIL'] = os.path.basename(guidefile)
    else:
        fibermap.meta['GUIDEFIL'] = 'MISSING'

    if coordfile is not None:
        fibermap.meta['COORDFIL'] = os.path.basename(coordfile)
    else:
        fibermap.meta['COORDFIL'] = 'MISSING'

    #- Some code incorrectly relies upon the fibermap being sorted by
    #- fiber number, so accommodate that before returning the table
    fibermap.sort('FIBER')

    return fibermap
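# Hedged usage sketch (not from the source): night and expid are
# hypothetical values.
fibermap = assemble_fibermap(20201220, 68000)
fibermap.write('fibermap-00068000.fits', overwrite=True)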
Example #19
lw_col = []
for row in cat_leaves_general:
    if row['_idx'] in lw_map:
        lw_col.append(lw_map[row['_idx']])
    else:
        lw_col.append('-')

name_col = []
for row in cat_leaves_general:
    if row['_idx'] in name_map:
        name_col.append(name_map[row['_idx']])
    else:
        name_col.append('-')

cat_leaves_general.add_column(Table.Column(data=v_col, name='v_cen'))
cat_leaves_general.add_column(Table.Column(data=mom2_col, name='mean_mom2'))
cat_leaves_general.add_column(Table.Column(data=lw_col, name='fitted_lw'))
cat_leaves_general.add_column(Table.Column(data=name_col, name='Common_Name'))

# Format columns as desired.
cat_leaves_general['area_exact'].format = '6.0f'
cat_leaves_general['l_cen'].format = '6.3f'
cat_leaves_general['b_cen'].format = '6.3f'
cat_leaves_general['mass'].format = '%.1E'
cat_leaves_general['median_column'].format = '%.1E'
cat_leaves_general['peak_column'].format = '%.1E'
cat_leaves_general['median_tem'].format = '6.0f'
cat_leaves_general['peak_tem'].format = '6.0f'

# Output catalogue
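# Hedged completion (not in the source): a write call to match the
# 'Output catalogue' comment above, with a hypothetical file name.
cat_leaves_general.write('cat_leaves_general.ecsv', format='ascii.ecsv',
                         overwrite=True)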
Example #20
    def __load_data(self, event_dir):
        """Returns tables with raw data."""
        count = 0
        for file in os.listdir(event_dir):
            if file.endswith(".tbl"):
                t = Table.read(os.path.join(event_dir, file), format="ascii")

                if t.colnames[0] == "JD":
                    t.rename_column("JD", "HJD")
                elif t.colnames[0] == "HJD":
                    pass
                else:
                    raise ValueError("No column named HJD or JD.")

                if t.colnames[1] == "Relative_Flux":
                    m, m_err = self.__fluxes_to_magnitudes(
                        t["Relative_Flux"], t["Relative_Flux_Uncertainty"])
                    t["Relative_Flux"] = m
                    t["Relative_Flux_Uncertainty"] = m_err
                    t.rename_column("Relative_Flux", "mag")
                    t.rename_column("Relative_Flux_Uncertainty", "mag_err")
                    t.keep_columns(["HJD", "mag", "mag_err"])
                elif t.colnames[1] == "RELATIVE_MAGNITUDE":
                    t.rename_column("RELATIVE_MAGNITUDE", "mag")
                    t.rename_column("MAGNITUDE_UNCERTAINTY", "mag_err")
                    t.keep_columns(["HJD", "mag", "mag_err"])
                else:
                    raise ValueError(
                        "No columns specifying flux or magnitude.")

                info = t.meta["keywords"]

                # Save coordinates of event, check they're consistent between
                # datasets
                if count == 0:
                    ra = info["RA"]["value"]
                    dec = info["DEC"]["value"]
                    self.__coordinates = SkyCoord(ra, dec)
                elif ra != info["RA"]["value"] or dec != info["DEC"]["value"]:
                    raise ValueError("Event coordinates don't match between "
                                     "different datasets.")

                # Save event name
                if count == 0:
                    self.__event_name = info["STAR_ID"]["value"]
                elif self.__event_name != info["STAR_ID"]["value"]:
                    self.__event_name += info["STAR_ID"]["value"]

                # Check that all times are HJD in epoch J2000.0
                if info["EQUINOX"]["value"] != "J2000.0":
                    raise ValueError(
                        "Equinox for the dataset ",
                        info["OBSERVATORY_SITE"]["value"],
                        "is not J2000.",
                    )
                if info["TIME_REFERENCE_FRAME"]["value"] != "Heliocentric JD":
                    raise ValueError(
                        "Time reference frame for ",
                        info["OBSERVATORY_SITE"]["value"],
                        "is not HJD.",
                    )

                # Save information about observatory name and filter used
                t.meta = {
                    "observatory": info["OBSERVATORY_SITE"]["value"],
                    "filter": info["TIME_SERIES_DATA_FILTER"]["value"],
                }

                t = Table(t, masked=False)

                # Add mask column
                mask = Table.Column(np.ones(len(t["HJD"]), dtype=bool),
                                    name="mask",
                                    dtype=bool)
                t.add_column(mask)  # appended as the last table column

                self._Data__tables.append(t)

                count = count + 1
Example #21
def main(mtlz_path,
         mtlz_name,
         correlation_cut=0.2,
         summary_column_subset=True):

    maskname = mtlz_name.split('_')[2]
    print(maskname)
    tab = Table.read(os.path.join(mtlz_path, mtlz_name), format='ascii.csv')

    ## Make appropriate cuts
    if 'SDSS_only' in tab.colnames:
        if type(tab['SDSS_only'][0]) in [bool, np.bool_]:
            boolcut = [(not row) for row in tab['SDSS_only']]
        else:
            boolcut = [row.lower() == 'false' for row in tab['SDSS_only']]
        tab = tab[boolcut]
    if 'cor' in tab.colnames:
        tab = tab[tab['cor'] >= correlation_cut]
    if 'ID' in tab.colnames:
        tab = tab[['GAL' in id for id in tab['ID']]]
        outnames = []
        for name in tab['ID']:
            outnames.append(name.replace('GAL', '{}-'.format(maskname)))

        tab.remove_column('ID')
        tab.add_column(Table.Column(name='TARGETID', data=outnames))

    ## Make sure the names look good
    # ra_colname_bool = np.any([col.strip(' \t\r').upper() == 'RA' for col in tab.colnames])
    # if not ra_colname_bool:
    if 'RA' not in tab.colnames:
        tab.rename_column('RA_targeted', 'RA')
        tab.rename_column('DEC_targeted', 'DEC')

    # sdss_colname_bool = np.any([col.strip(' \t\r').upper() == 'SDSS_SDSS12' for col in tab.colnames])
    # if sdss_colname_bool:
    if 'sdss_SDSS12' in tab.colnames:
        newcol = []
        for ii in range(len(tab)):
            newcol.append('SDSS' + str(tab['sdss_SDSS12'][ii]))
        tab.add_column(Table.Column(data=newcol, name='SDSS12_OBJID'))
        tab.remove_column('sdss_SDSS12')
    else:
        tab.add_column(
            Table.MaskedColumn(
                data=[''] * len(tab),
                name='SDSS12_OBJID'))  #,mask=np.ones(len(tab)).astype(bool)))
    if 'sdss_zsp' in tab.colnames:
        tab.rename_column('sdss_zsp', 'SDSS_zsp')
    else:
        tab.add_column(
            Table.MaskedColumn(
                data=np.zeros(len(tab)),
                name='SDSS_zsp'))  #,mask=np.ones(len(tab)).astype(bool)))
    if 'z_est_bary' in tab.colnames:
        tab.add_column(Table.Column(data=tab['z_est_bary'].copy(), name='z'))
    if 'Proj_R_asec' in tab.colnames:
        tab.rename_column('Proj_R_asec', 'R [asec]')
    if 'velocity' in tab.colnames:
        tab.rename_column('velocity', 'v [km/s]')
    if 'FIBNAME' in tab.colnames:
        tab.rename_column('FIBNAME', 'FIBERNUM')
    ## Load up in the right order
    if summary_column_subset:
        tab = tab[[
            'TARGETID', 'FIBERNUM', 'RA', 'DEC', 'SDSS12_OBJID', 'z',
            'R [asec]', 'v [km/s]', 'SDSS_zsp'
        ]]

    if 'description' in dict(tab.meta).keys():
        desc = tab.meta.pop('description')
        tab.meta['DESCRP'] = desc

    return tab.filled()
Example #22
def generate_output_file(ecsv_file_list, output_filename, startingDT, clobber):
    """Generate combined output ecsv file.

    Parameters
    ----------
    ecsv_file_list : list
        List of ecsv full filenames (such as those generated by `find_files`)

    output_filename : string
         Name of the output combined .ecsv file.

    startingDT : datetime object
        starting date/time

    clobber : Boolean
        Overwrite existing files with the same name as output_filename?

    Returns
    -------
    Nothing.
    """
    n_found = len(ecsv_file_list)
    for filectr, ecsv_filename in enumerate(ecsv_file_list, start=1):
        table_data = ascii.read(ecsv_filename, format='ecsv')  # Read ecsv file

        # print incremental status update msg
        padding = " " * (len(str(n_found)) - len(str(filectr)))
        if len(table_data) < 2:
            plural_string = ""
        else:
            plural_string = "s"
        print("{}{}/{}: added {} row{} from {}.".format(
            padding, filectr, n_found, len(table_data), plural_string,
            ecsv_filename))
        # add new column with dataset name info to the 0th (left most) position in the table.
        dataset = os.path.basename(
            ecsv_filename)[:-5]  # scrape dataset name out of ecsv filename
        dataset_column = Table.Column(name='datasetName',
                                      data=[dataset] *
                                      len(table_data))  # make new column
        table_data.add_column(
            dataset_column,
            index=0)  # add dataset column to table data to append.

        if filectr == 1:  # use the data from the first ecsv file to initialize out_data
            out_data = table_data.copy()

        else:  # append out_data with the data from each subsequent ecsv file
            out_data = vstack([out_data, table_data])

    ascii.write(out_data, output_filename, format='ecsv',
                overwrite=clobber)  # write output file.

    if n_found == 1:
        file_plural_string = ""
    else:
        file_plural_string = "s"
    total_rows = len(out_data)  # display total number of rows in output file.
    if total_rows == 1:
        row_plural_string = ""
    else:
        row_plural_string = "s"
    print("\nWrote {} row{} from {} input file{} to output file {}".format(
        total_rows, row_plural_string, n_found, file_plural_string,
        output_filename))
    total_runtime = (datetime.datetime.now() - startingDT).total_seconds()
    print('Total processing time: {} seconds'.format(total_runtime))
    print('Average time per row:  {} seconds'.format(total_runtime /
                                                     total_rows))
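# Hedged usage sketch (not from the source): file list and names are
# hypothetical.
import datetime
import glob

ecsv_files = sorted(glob.glob('results/*.ecsv'))
generate_output_file(ecsv_files, 'combined.ecsv',
                     datetime.datetime.now(), clobber=True)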
Example #23
def make_mtl(io_config, science_filenum, vizier_catalogs, overwrite_field,
             overwrite_redshifts):
    catalog_loc = os.path.abspath(io_config['PATHS']['catalog_loc'])
    data_path = os.path.abspath(
        os.path.join(io_config['PATHS']['data_product_loc'],
                     io_config['DIRS']['oneD']))
    dataname = io_config['FILETEMPLATES']['oneds'].format(
        cam='{cam}', filenum=science_filenum, imtype='science')
    dataname = dataname + io_config['FILETAGS']['crrmvd'] + '.fits'

    plate_path = os.path.join(catalog_loc, io_config['DIRS']['plate'])
    plate_name = io_config['SPECIALFILES']['plate']
    print("In mask: {}".format(io_config['GENERAL']['mask_name']))
    if plate_name == 'None':
        print("No platename given")
        plate_name = None

    field_path = os.path.join(catalog_loc, io_config['DIRS']['field'])
    field_name = io_config['SPECIALFILES']['field']
    if field_name == 'None':
        print("No fieldname given")
        field_name = None

    redshifts_path = os.path.join(catalog_loc, io_config['DIRS']['redshifts'])
    redshifts_name = io_config['SPECIALFILES']['redshifts']
    if redshifts_name == 'None':
        print("No redshift filename given")
        redshifts_name = None

    targeting_path, targeting_name = None, None

    mtl_path = os.path.join(catalog_loc, io_config['DIRS']['mtl'])
    mtl_name = io_config['SPECIALFILES']['mtl']

    ## Housekeeping to make sure all the specified things are there to run
    paths = [plate_path, field_path, targeting_path, redshifts_path, mtl_path]
    names = [plate_name, field_name, targeting_name, redshifts_name, mtl_name]
    for path, filename in zip(paths, names):
        if filename is not None:
            if not os.path.exists(path):
                print("Creating {}".format(path))
                os.makedirs(path)
    del paths, names

    ## Get fiber info
    fiber_table = create_m2fs_fiber_info_table(data_path,
                                               dataname,
                                               cams=['r', 'b'])
    if fiber_table is None or len(fiber_table) == 0:
        print("No fiber table created!")

    ## Mine plate file for drilling info
    field_pathname = os.path.join(field_path, field_name)
    if plate_name is not None:
        if not os.path.exists(field_pathname) or overwrite_field:
            plate_pathname = os.path.join(plate_path, plate_name)
            create_drilled_field_file(
                plate_pathname,
                drilled_field_name_template=field_name.replace(
                    io_config['GENERAL']['mask_name'], '{}'),
                drilled_field_path=field_path,
                overwrite_file=overwrite_field)

        try:
            field_table = table.Table.read(field_pathname, format='ascii.tab')
        except Exception:
            field_table = table.Table.read(field_pathname,
                                           format='ascii.basic')
        if len(field_table.colnames) == 1:
            field_table = table.Table.read(field_pathname,
                                           format='ascii.basic')

        if 'RA_targeted' in field_table.colnames:
            field_table.rename_column('RA_targeted', 'RA')
            field_table.rename_column('DEC_targeted', 'DEC')
        if 'RA_drilled' in field_table.colnames:
            field_table.rename_column('RA_drilled', 'RA')
            field_table.rename_column('DEC_drilled', 'DEC')
    elif field_name is not None and os.path.exists(field_pathname):
        field_table = table.Table.read(field_pathname,
                                       format='ascii.basic')  # header_start=2,
        if 'RA_targeted' in field_table.colnames:
            field_table.rename_column('RA_targeted', 'RA')
            field_table.rename_column('DEC_targeted', 'DEC')
    else:
        print(
            "Couldn't manage to open a field file through any of the available means, returning None"
        )
        field_table = None

    ## Merge fiber and drill info
    if len(fiber_table) == 0:
        print(
            "No field file found and no plate file available for conversion.")
        print("Continuing with just the fiber data")
        observed_field_table = field_table
    else:
        try:
            observed_field_table = table.join(fiber_table,
                                              field_table,
                                              keys='ID',
                                              join_type='left')
        except Exception:
            print(
                "Something went wrong combining fiber table and field table.")
            if type(fiber_table) is table.Table:
                print("Fiber table: ", fiber_table.colnames, len(fiber_table))
            if field_table is not None and type(field_table) is table.Table:
                print("Field table: ", field_table.colnames, len(field_table))
            raise

    ## If there is targeting information, merge that in as well
    if targeting_name is not None:
        full_pathname = os.path.join(targeting_path, targeting_name)
        if not os.path.exists(full_pathname):
            raise IOError("The targeting file doesn't exist")
        else:
            targeting = table.Table.read(full_pathname, format='ascii.csv')
            ml = table.join(observed_field_table,
                            targeting,
                            keys='ID',
                            join_type='left')
    else:
        ml = observed_field_table

    ## If there is a separate redshifts file, merge that in
    ## Else query vizier (either sdss or panstarrs) to get redshifts and merge that in
    if redshifts_name is not None and os.path.exists(
            os.path.join(redshifts_path,
                         redshifts_name)) and not overwrite_redshifts:
        full_pathname = os.path.join(redshifts_path, redshifts_name)
        redshifts = table.Table.read(full_pathname, format='ascii.csv')
        mtl = table.join(ml, redshifts, keys='ID', join_type='left')
    else:
        if 'DEC' in ml.colnames:
            dec_name = 'DEC'
        else:
            dec_name = 'DEC_targeted'
        if len(vizier_catalogs) == 1 and vizier_catalogs[
                0] == 'sdss12' and ml[dec_name][0] < -20:
            matches = None
        else:
            matches = get_vizier_matches(ml, vizier_catalogs)

        # print(len(fiber_table),len(drilled_field_table),len(observed_field_table),len(joined_field_table),len(mtl),len(matches))
        if matches is not None:
            full_pathname = os.path.join(
                redshifts_path,
                redshifts_name.format(zsource=vizier_catalogs[0]))
            matches.write(full_pathname, format='ascii.csv', overwrite=True)
            mtl = table.join(ml, matches, keys='ID', join_type='left')
        else:
            mtl = ml

    if 'sdss_SDSS12' not in mtl.colnames:
        mtl.add_column(Table.Column(data=[''] * len(mtl), name='sdss_SDSS12'))
    all_most_interesting = [
        'ID', 'TARGETNAME', 'FIBNAME', 'sdss_SDSS12', 'RA', 'DEC', 'sdss_zsp',
        'sdss_zph', 'sdss_rmag', 'MAG'
    ]
    all_cols = mtl.colnames
    final_order = []
    for name in all_most_interesting:
        if name in all_cols:
            final_order.append(name)
            all_cols.remove(name)

    ## Save the completed merged target list as a csv
    outname = os.path.join(mtl_path, mtl_name)

    mtl.meta['comments'] = []
    subtable = mtl[final_order]
    subtable.write(outname + '_selected.csv',
                   format='ascii.csv',
                   overwrite=True)

    final_order.extend(all_cols)
    fulltable = mtl[final_order]
    fulltable.write(outname + '_full.csv', format='ascii.csv', overwrite=True)
Example #24
def make_mtlz(mtl_table, hdus, find_more_redshifts=False, outfile='mtlz.csv', \
              vizier_catalogs=['sdss12']):
    if len(hdus) == 2:
        hdu1, hdu2 = hdus
        if len(Table(hdu1.data)) == 0 and len(Table(hdu2.data)) == 0:
            print("No data found!")
            print(hdus)
            raise IOError
        elif len(Table(hdu1.data)) == 0:
            hdu1 = hdu2.copy()
            hdu2 = None
        elif len(Table(hdu2.data)) == 0:
            hdu2 = None
    elif len(hdus) == 1:
        hdu1 = hdus[0]
        hdu2 = None
    else:
        print("No data found!")
        print(hdus)
        raise IOError("Expected one or two HDUs containing data")

    # apperature/FIBNUM, redshift_est, cor, template
    # ID,FIBNAME,sdss_SDSS12,RA,DEC,sdss_zsp,sdss_zph,sdss_rmag,MAG

    table1 = Table(hdu1.data)
    header1 = hdu1.header

    cam1 = str(header1['SHOE']).lower()
    if 'apperature' in table1.colnames:
        if len(table1) > 0 and str(table1['apperature'][0])[0].lower() != cam1:
            print(
                "I couldn't match the camera between the header and data table for hdu1!"
            )
        table1.rename_column('apperature', 'FIBNAME')

    if hdu2 is not None:
        table2 = Table(hdu2.data)
        header2 = hdu2.header
        cam2 = str(header2['SHOE']).lower()
        if 'apperature' in table2.colnames:
            if len(table2) > 0 and str(
                    table2['apperature'][0])[0].lower() != cam2:
                print(
                    "I couldn't match the camera between the header and data table for hdu2!"
                )
            table2.rename_column('apperature', 'FIBNAME')

    mtl = Table(mtl_table)

    ra_clust, dec_clust = float(header1['RA_TARG']), float(header1['DEC_TARG'])
    cluster = SkyCoord(ra=ra_clust * u.deg, dec=dec_clust * u.deg)
    z_clust = float(header1['Z_TARG'])
    kpc_p_amin = Planck13.kpc_comoving_per_arcmin(z_clust)

    fibermap = {}
    for key, val in dict(header1).items():
        if key[:5] == 'FIBER':
            fibermap['{}{}'.format(cam1, key[5:])] = val.strip(' \t')

    for t in range(1, 9):
        for f in range(1, 17):
            testkey = 'FIBER{:d}{:02d}'.format(t, f)
            replacekey = '{}{:d}{:02d}'.format(cam1, t, f)
            if testkey in table1.meta.keys():
                table1.meta[replacekey] = table1.meta[testkey]
                table1.meta.pop(testkey)

    if hdu2 is not None:
        for key, val in dict(header2).items():
            if key[:5] == 'FIBER':
                fibermap['{}{}'.format(cam2, key[5:])] = val.strip(' \t')
        for t in range(1, 9):
            for f in range(1, 17):
                testkey = 'FIBER{:d}{:02d}'.format(t, f)
                replacekey = '{}{:d}{:02d}'.format(cam2, t, f)
                if testkey in table2.meta.keys():
                    table2.meta[replacekey] = table2.meta[testkey]
                    table2.meta.pop(testkey)

        for ii in range(len(mtl)):
            targ_id = mtl['ID'][ii]
            fbnm = mtl['FIBNAME'][ii]
            if fbnm not in fibermap.keys():
                print("{} not in fibermap!".format(fbnm))
            elif (fibermap[fbnm].upper().strip(' \t\r\n')
                  != targ_id.upper().strip(' \t\r\n')):
                print(ii, fbnm, fibermap[fbnm], targ_id)

        combined_table = vstack([table1, table2])
    else:
        combined_table = table1

    full_table = join(combined_table, mtl, 'FIBNAME', join_type='left')

    ## Add additional information
    if int(header1['UT-DATE'][:4]) > 2014:
        time = Time(header1['MJD'], format='mjd')
    else:
        time = Time(header1['UT-DATE'] + ' ' + header1['UT-TIME'])
    location = EarthLocation(lon=header1['SITELONG'] * u.deg, lat=header1['SITELAT'] * u.deg, \
                             height=header1['SITEALT'] * u.meter)
    bc_cor = cluster.radial_velocity_correction(kind='barycentric',
                                                obstime=time,
                                                location=location)
    dzb = bc_cor / consts.c

    hc_cor = cluster.radial_velocity_correction(kind='heliocentric',
                                                obstime=time,
                                                location=location)
    dzh = hc_cor / consts.c

    full_table.add_column(
        Table.Column(data=full_table['redshift_est'] / (1 + dzb),
                     name='z_est_bary'))
    full_table.add_column(
        Table.Column(data=full_table['redshift_est'] / (1 + dzh),
                     name='z_est_helio'))
    full_table.add_column(
        Table.Column(data=np.ones(len(full_table)) * z_clust,
                     name='z_clust_lit'))

    if type(full_table['RA'][1]) is str and ':' in full_table['RA'][1]:
        all_coords = SkyCoord(ra=full_table['RA'],
                              dec=full_table['DEC'],
                              unit=(u.hour, u.deg))
        newras = Table.Column(data=all_coords.icrs.ra.deg, name='RA')
        newdecs = Table.Column(data=all_coords.icrs.dec.deg, name='DEC')
        full_table.replace_column('RA', newras)
        full_table.replace_column('DEC', newdecs)
    else:
        all_coords = SkyCoord(ra=full_table['RA'],
                              dec=full_table['DEC'],
                              unit=(u.deg, u.deg))

    seps = cluster.separation(all_coords)
    full_table.add_column(
        Table.Column(data=seps.to(u.arcsec).value, name='Proj_R_asec'))
    full_table.add_column(
        Table.Column(data=(kpc_p_amin * seps).to(u.Mpc).value,
                     name='Proj_R_Comoving_Mpc'))

    dvs = consts.c.to(u.km / u.s).value * (
        z_clust - full_table['z_est_bary']) / (1. + z_clust)
    full_table.add_column(Table.Column(data=dvs, name='velocity'))

    if find_more_redshifts:
        radius = 5 * u.Mpc / kpc_p_amin
        Vizier.ROW_LIMIT = -1
        result = Vizier.query_region(cluster,
                                     radius=radius,
                                     catalog=vizier_catalogs)
        if len(result) > 0 and type(result) is not table.Table:
            res_tab = result[0]

            if np.all(res_tab['zsp'].mask):
                if np.all(res_tab['zph'].mask):
                    sdss_archive_table = res_tab
                else:
                    cut_tab = res_tab[np.where(~res_tab['zph'].mask)]
                    sdss_archive_table = cut_tab[np.where(
                        cut_tab['zph'] > -99)]
            else:
                sdss_archive_table = res_tab[np.where(~res_tab['zsp'].mask)]

            for col in sdss_archive_table.colnames:
                sdss_archive_table.rename_column(col, 'sdss_' + col)

            for ii in np.arange(len(sdss_archive_table))[::-1]:
                if sdss_archive_table['sdss_SDSS12'][ii] in full_table[
                        'sdss_SDSS12']:
                    print("Removing: {}".format(
                        sdss_archive_table['sdss_SDSS12'][ii]))
                    sdss_archive_table.remove_row(ii)

            sdss_archive_table.add_column(
                Table.Column(data=sdss_archive_table['sdss_RA_ICRS'],
                             name='RA'))
            sdss_archive_table.add_column(
                Table.Column(data=sdss_archive_table['sdss_DE_ICRS'],
                             name='DEC'))
            sdss_archive_table.add_column(
                Table.Column(data=['T'] * len(sdss_archive_table),
                             name='TYPE'))
            sdss_archive_table.add_column(
                Table.Column(data=[2000.0] * len(sdss_archive_table),
                             name='EPOCH'))

            all_sdss_coords = SkyCoord(ra=sdss_archive_table['sdss_RA_ICRS'],
                                       dec=sdss_archive_table['sdss_DE_ICRS'])
            seps = cluster.separation(all_sdss_coords)
            sdss_archive_table.add_column(
                Table.Column(data=seps.to(u.arcsec).value, name='Proj_R_asec'))
            sdss_archive_table.add_column(
                Table.Column(data=(kpc_p_amin * seps).to(u.Mpc).value,
                             name='Proj_R_Comoving_Mpc'))
            sdss_archive_table.add_column(
                Table.Column(data=sdss_archive_table['sdss_zsp'],
                             name='z_est_helio'))
            dvs = consts.c.to(u.km / u.s).value * (
                z_clust - sdss_archive_table['z_est_helio']) / (1. + z_clust)
            sdss_archive_table.add_column(
                Table.Column(data=dvs, name='velocity'))
            #
            #
            full_table.add_column(
                Table.Column(data=[False] * len(full_table), name='SDSS_only'))
            sdss_archive_table.add_column(
                Table.Column(data=[True] * len(sdss_archive_table),
                             name='SDSS_only'))

            sdss_archive_table.convert_bytestring_to_unicode()
            convert = []
            for row in sdss_archive_table['sdss_q_mode']:
                convert.append(float(row.strip(' ') == '+'))

            new_sdssq_col = Table.Column(data=convert, name='sdss_q_mode')
            sdss_archive_table.replace_column('sdss_q_mode', new_sdssq_col)

            mega_table = vstack([full_table, sdss_archive_table])
        else:
            full_table.add_column(
                Table.Column(data=[False] * len(full_table), name='SDSS_only'))
            mega_table = full_table
    else:
        full_table.add_column(
            Table.Column(data=[False] * len(full_table), name='SDSS_only'))
        mega_table = full_table

    for key, val in header1.items():
        if 'FIBER' in key:
            continue
        elif 'TFORM' in key:
            continue
        elif 'TTYPE' in key:
            continue
        elif 'NAXIS' in key:
            continue
        elif key in [
                'BITPIX', 'XTENSION', 'PCOUNT', 'GCOUNT', 'TFIELDS', 'COMMENT'
        ]:
            continue
        if key in mega_table.meta.keys() and key != 'HISTORY':
            print(
                "There was a conflicting key that I've overwritten: {}".format(
                    key))
            print("Values of the conflict: {}  {}".format(
                val, mega_table.meta[key]))
        mega_table.meta[key] = val

    if 'description' in dict(mega_table.meta).keys():
        desc = mega_table.meta.pop('description')
        mega_table.meta['DESCRP'] = desc

    if 'full' not in outfile:
        outfile = outfile + '_full'
    if '.csv' not in outfile:
        outfile = outfile + '.csv'

    mega_table.write(outfile.replace('.csv', '.fits'),
                     format='fits',
                     overwrite=True)

    mega_table.meta['comments'] = []
    mega_table.write(outfile, format='ascii.csv', overwrite=True)
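
# A hypothetical driver for make_mtlz, assuming an existing merged target
# list CSV and a FITS file whose table extensions hold the redshift fits;
# the file names below are placeholders, not outputs of this module.
from astropy.io import fits
from astropy.table import Table

mtl = Table.read('mtl_field1_full.csv', format='ascii.csv')
with fits.open('zfits_field1.fits') as hdul:
    # skip the (empty) primary HDU and pass the table extensions
    make_mtlz(mtl, hdul[1:], find_more_redshifts=True,
              outfile='mtlz_field1', vizier_catalogs=['sdss12'])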
Example #25
def wise(obsids_table,
         data_folder,
         moc_folder,
         nir_moc=None,
         opt_moc=None,
         radius=15 * u.arcmin,
         moc_order=16,
         overwrite=True):
    """
    Get All-WISE data using astroquery and Vizier
    For each observation in obsids_table, saves a fits file with
    name 'OBS_ID.fits' in 'data_folder/groups'.

    The function sends a Vizier query and selects all sources within 'radius'
    arcmin of the RA,DEC of the observation, then it filters the result
    selecting the sources in the corresponding MOC stored in 'moc_folder/mocs'
    (moc_order must be consistent with the order used to calculate the moc).

    If overwrite is True, always create a new fits file. If False, checks for
    an existing file and uses it to calculate the number of WISE sources
    in the field. If it doesn't exist, creates the file.

    The function returns obsids_table with an additional column 'NSRC_WS'
    with the number of sources in the field.
    """
    # Groups folder
    if nir_moc is None:
        groups_folder = os.path.join(data_folder, 'groups')
    else:
        root, _ = os.path.splitext(os.path.basename(nir_moc))
        survey = root.split('_')[-1]
        groups_folder = os.path.join(data_folder, 'groups_' + survey)

        moc_nirsurvey = MOC()
        read_moc_fits(moc_nirsurvey, nir_moc)

    if not os.path.exists(groups_folder):
        os.makedirs(groups_folder)

    moc_folder = os.path.join(moc_folder, 'mocs')

    if opt_moc is not None:
        moc_optsurvey = MOC()
        read_moc_fits(moc_optsurvey, opt_moc)

    nsources_field = np.full((len(obsids_table), ), np.nan)
    hp = HEALPix(nside=2**moc_order, order='nested', frame=ICRS())

    v = Vizier(columns=['ID', 'RAJ2000', 'DEJ2000', 'eeMaj', 'eeMin', 'eePA'],
               row_limit=-1,  # astroquery convention for "no limit"
               timeout=6000)

    for i, row in enumerate(tqdm(obsids_table, desc="Making WISE groups")):
        ## Group file name
        field_table_file = os.path.join(groups_folder,
                                        '{}.fits'.format(row['OBS_ID']))
        is_field_table = os.path.exists(field_table_file)

        if overwrite or not is_field_table:
            ## Select all sources in the field
            field_coords = SkyCoord(ra=row['RA'] * u.deg,
                                    dec=row['DEC'] * u.deg)

            vrsp = v.query_region(field_coords,
                                  radius=radius,
                                  catalog='II/328/allwise')

            ## Select sources in the non-overlapping area
            moc_field = MOC()
            read_moc_fits(
                moc_field,
                os.path.join(moc_folder, '{}.moc'.format(row['OBS_ID'])))
            if opt_moc is not None:
                moc_field = moc_optsurvey.intersection(moc_field)

            if nir_moc is not None:
                moc_field = moc_nirsurvey.intersection(moc_field)

            inmoc_table = sources_inmoc(vrsp[0],
                                        hp,
                                        moc_field,
                                        moc_order=moc_order,
                                        ra='RAJ2000',
                                        dec='DEJ2000')
            ## Save sources

            inmoc_table.meta['description'] = 'AllWISE'
            inmoc_table.write(field_table_file, overwrite=True)

        else:
            inmoc_table = Table.read(field_table_file)

        nsources_field[i] = len(inmoc_table)

    colsrc = Table.Column(nsources_field, name='NSRC_WS')
    obsids_table.add_column(colsrc)

    return obsids_table
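
# A sketch of driving wise(), assuming obsids_table carries the OBS_ID,
# RA and DEC columns the function indexes and that the per-OBS_ID MOC
# files exist under moc_folder/mocs; paths and values are placeholders.
# With overwrite=False, existing group files are reused.
from astropy.table import Table
import astropy.units as u

obsids = Table({'OBS_ID': ['0001', '0002'],
                'RA': [210.80, 210.95],
                'DEC': [54.35, 54.27]})
obsids = wise(obsids, data_folder='data', moc_folder='data',
              radius=15 * u.arcmin, moc_order=16, overwrite=False)
print(obsids['NSRC_WS'])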
Example #26
def vista(obsids_table,
          data_folder,
          moc_folder,
          opt_moc=None,
          radius=15 * u.arcmin,
          moc_order=16,
          overwrite=True):
    """
    Get VISTA-VHS data using astroquery and the VISTA Science Archive.
    For each observation in obsids_table, saves a fits file with
    name 'OBS_ID.fits' in 'data_folder/groups'.

    The function sends a query and selects all sources within 'radius'
    arcmin of the RA,DEC of the observation, then it filters the result
    selecting the sources in the corresponding MOC stored in 'moc_folder/mocs'
    (moc_order must be consistent with the order used to calculate the moc).

    If overwrite is True, always create a new fits file. If False, checks for
    an existing file and uses it to calculate the number of VISTA sources
    in the field. If it doesn't exist, creates the file.

    The function returns obsids_table with an additional column 'NSRC_VT'
    with the number of sources in the field.
    """
    # Groups folder
    groups_folder = os.path.join(data_folder, 'groups')
    if not os.path.exists(groups_folder):
        os.makedirs(groups_folder)

    moc_folder = os.path.join(moc_folder, 'mocs')

    if opt_moc is not None:
        moc_optsurvey = MOC()
        read_moc_fits(moc_optsurvey, opt_moc)

    nsources_field = np.full((len(obsids_table), ), np.nan)
    hp = HEALPix(nside=2**moc_order, order='nested', frame=ICRS())

    v = Vista()
    columns = 'sourceID, RA, Dec'
    constraint = '(jppErrBits | hppErrBits | ksppErrBits) < 65536'

    for i, row in enumerate(tqdm(obsids_table, desc="Making VISTA groups")):
        ## Group file name
        field_table_file = os.path.join(groups_folder,
                                        '{}.fits'.format(row['OBS_ID']))
        is_field_table = os.path.exists(field_table_file)

        if overwrite or not is_field_table:
            ## Select all sources in the field
            field_coords = SkyCoord(ra=row['RA'] * u.deg,
                                    dec=row['DEC'] * u.deg)

            vrsp = v.query_region(field_coords,
                                  radius=radius,
                                  database='VHSDR4',
                                  programme_id='VHS',
                                  select=columns,
                                  where=constraint)

            # Add error column and units
            err = np.full((len(vrsp), ), 0.1) * u.arcsec
            vrsp.add_column(Table.Column(err, name='RADECERR'))

            vrsp['RA'] = vrsp['RA'] * u.deg
            vrsp['Dec'] = vrsp['Dec'] * u.deg
            vrsp.rename_column('sourceID',
                               'objID')  # for consistency with 2MASS
            vrsp.remove_column('distance')

            ## Select sources in the non-overlapping area
            moc_field = MOC()
            read_moc_fits(
                moc_field,
                os.path.join(moc_folder, '{}.moc'.format(row['OBS_ID'])))
            if opt_moc is not None:
                moc_field = moc_optsurvey.intersection(moc_field)

            inmoc_table = sources_inmoc(vrsp,
                                        hp,
                                        moc_field,
                                        moc_order=moc_order,
                                        ra='RA',
                                        dec='Dec')
            ## Save sources

            inmoc_table.meta['description'] = 'VISTA'
            inmoc_table.write(field_table_file, overwrite=True)

        else:
            inmoc_table = Table.read(field_table_file)

        nsources_field[i] = len(inmoc_table)

    colsrc = Table.Column(nsources_field, name='NSRC_VT')
    obsids_table.add_column(colsrc)

    return obsids_table
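
# The WHERE constraint above keeps sources whose J, H and Ks quality bit
# masks stay below 2**16; a sketch of the same cut applied client-side to
# a table that already has the three error-bit columns (values invented).
import numpy as np
from astropy.table import Table

t = Table({'jppErrBits': [0, 65536, 16],
           'hppErrBits': [16, 0, 0],
           'ksppErrBits': [0, 0, 32]})
good = (t['jppErrBits'] | t['hppErrBits'] | t['ksppErrBits']) < 65536
print(t[good])  # drops only the row with a bit at or above 2**16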
Example #27
def tmass(obsids_table,
          data_folder,
          moc_folder,
          opt_moc=None,
          radius=15 * u.arcmin,
          moc_order=16,
          overwrite=True):
    """
    Get 2MASS data using astroquery and Vizier
    For each observation in obsids_table, saves a fits file with
    name 'OBS_ID.fits' in 'data_folder/groups'.

    The function sends a Vizier query and selects all sources within 'radius'
    arcmin of the RA,DEC of the observation, then it filters the result
    selecting the sources in the corresponding MOC stored in 'moc_folder/mocs'
    (moc_order must be consistent with the order used to calculate the moc).

    If overwrite is True, always create a new fits file. If False, checks for
    an existing file and uses it to calculate the number of 2MASS sources
    in the field. If it doesn't exist, creates the file.

    The function returns obsids_table with an additional column 'NSRC_2M'
    with the number of sources in the field.
    """
    # Groups folder
    groups_folder = os.path.join(data_folder, 'groups')
    if not os.path.exists(groups_folder):
        os.makedirs(groups_folder)

    moc_folder = os.path.join(moc_folder, 'mocs')

    if opt_moc is not None:
        moc_optsurvey = MOC()
        read_moc_fits(moc_optsurvey, opt_moc)

    nsources_field = np.full((len(obsids_table), ), np.nan)
    hp = HEALPix(nside=2**moc_order, order='nested', frame=ICRS())

    v = Vizier(columns=[
        'Cntr', 'RAJ2000', 'DEJ2000', 'errMaj', 'errMin', 'errPA', 'Qflg'
    ],
               row_limit=-1,  # astroquery convention for "no limit"
               timeout=6000)

    for i, row in enumerate(tqdm(obsids_table, desc="Making 2MASS groups")):
        ## Group file name
        field_table_file = os.path.join(groups_folder,
                                        '{}.fits'.format(row['OBS_ID']))
        is_field_table = os.path.exists(field_table_file)

        if overwrite or not is_field_table:
            ## Select all sources in the field
            field_coords = SkyCoord(ra=row['RA'] * u.deg,
                                    dec=row['DEC'] * u.deg)

            vrsp = v.query_region_async(field_coords,
                                        radius=radius,
                                        catalog='II/246/out',
                                        return_type='asu-tsv')

            # Fix bug in the vizier response
            # (returns the id as a short int and fails to load
            # properly as an astropy table)
            with open('/tmp/tmp.tab', 'wb') as tmpfile:
                tmpfile.write(vrsp.content)

            src_table = Table.read('/tmp/tmp.tab', format='ascii.tab')
            src_table = src_table[2:]

            objid = np.array(src_table['Cntr']).astype(np.int64)
            ra = np.array(src_table['RAJ2000']).astype(float) * u.deg
            dec = np.array(src_table['DEJ2000']).astype(float) * u.deg

            errMaj = np.array(src_table['errMaj']).astype(float) * u.arcsec
            errMin = np.array(src_table['errMin']).astype(float) * u.arcsec
            errPA = np.array(src_table['errPA']).astype(float) * u.deg
            flag = np.array(src_table['Qflg'])

            src_table = Table([objid, ra, dec, errMaj, errMin, errPA, flag],
                              names=[
                                  'objID', 'RAJ2000', 'DEJ2000', 'errMaj',
                                  'errMin', 'errPA', 'Qflg'
                              ])
            # Filter table
            # Keep sources detected with SNR>=5 (Qflg A, B or C) in J, H and K
            flgJ = [f[0] in ['A', 'B', 'C'] for f in src_table['Qflg']]
            flgH = [f[1] in ['A', 'B', 'C'] for f in src_table['Qflg']]
            flgK = [f[2] in ['A', 'B', 'C'] for f in src_table['Qflg']]
            msk_good = np.logical_and(flgJ, np.logical_and(flgH, flgK))
            src_table_new = src_table[msk_good]

            ## Select sources in the non-overlapping area
            moc_field = MOC()
            read_moc_fits(
                moc_field,
                os.path.join(moc_folder, '{}.moc'.format(row['OBS_ID'])))
            if opt_moc is not None:
                moc_field = moc_optsurvey.intersection(moc_field)

            inmoc_table = sources_inmoc(src_table_new,
                                        hp,
                                        moc_field,
                                        moc_order=moc_order,
                                        ra='RAJ2000',
                                        dec='DEJ2000')
            ## Save sources

            inmoc_table.meta['description'] = '2MASS'
            inmoc_table.remove_column('Qflg')
            inmoc_table.write(field_table_file, overwrite=True)

        else:
            inmoc_table = Table.read(field_table_file)

        nsources_field[i] = len(inmoc_table)

    colsrc = Table.Column(nsources_field, name='NSRC_2M')
    obsids_table.add_column(colsrc)

    return obsids_table
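
# The workaround above writes the raw Vizier response to a hard-coded
# /tmp path; a more portable sketch of the same fix using tempfile
# (behaviour otherwise unchanged, vrsp is the async query response).
import tempfile
from astropy.table import Table

with tempfile.NamedTemporaryFile(suffix='.tab', delete=False) as tmpfile:
    tmpfile.write(vrsp.content)
    tmpname = tmpfile.name

src_table = Table.read(tmpname, format='ascii.tab')
src_table = src_table[2:]  # drop the two header rows in the TSV response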
Example #28
    def run_final_calibrations(self, initial_priors='parametric'):
        self.generate_evolution_tables()
        output_names = [
            'calib coefs', 'fit variances', 'wavelengths', 'pixels'
        ]
        all_output_names = [
            'calib coefs', 'fit variances', 'wavelengths', 'pixels', 'linelist'
        ]
        mock_spec_w, mock_spec_f = self.mock_spec_w, self.mock_spec_f

        if not self.do_fine_calib:
            print(
                "There doesn't seem to be a fine calibration defined. Using the supplied coarse calibs"
            )

        select_lines = True

        dev_allowance = 1.
        devs = 2.
        using_defaults = False
        if initial_priors == 'defaults':
            if self.default_calibration_coefs is None:
                print(
                    "Couldn't find the default calibration coefficients, so using a parametrization of the coarse coefs"
                )
                initial_coef_table = Table(
                    self.get_parametricfits_of(caltype='coarse'))
            else:
                need_to_parametrize = False
                initial_coef_table = Table(self.default_calibration_coefs)
                for fib in self.instrument.full_fibs[self.camera]:
                    if fib not in initial_coef_table.colnames:
                        need_to_parametrize = True
                        break
                if need_to_parametrize:
                    paramd_table = Table(
                        self.get_parametricfits_of(caltype='default'))
                    for fib in self.instrument.full_fibs[self.camera]:
                        if fib not in initial_coef_table.colnames:
                            initial_coef_table[fib] = paramd_table[fib]
                using_defaults = True
        elif initial_priors == 'medians':
            initial_coef_table = Table(
                self.get_medianfits_of(self.coarse_calibration_coefs))
        else:
            initial_coef_table = Table(
                self.get_parametricfits_of(caltype='coarse'))

        for pairnum, filnums in self.pairings.items():
            if pairnum > 0:
                coarse_table_differences = self.evolution_in_coarse_coefs[
                    pairnum]
                for column in coarse_table_differences.colnames:
                    initial_coef_table[column] = initial_coef_table[
                        column] + coarse_table_differences[column]
            ## HACK!!
            if pairnum == 0 and self.camera == 'r':
                continue
            ## END HACK!
            filenum = filnums[self.filenum_ind]
            data = Table(self.fine_calibrations[filenum].data)

            linelist = self.selected_lines

            effective_iteration = pairnum  #np.max([pairnum,int(using_defaults)])
            if effective_iteration == 0:
                user_input = 'some'
            elif effective_iteration == 1:
                user_input = 'minimal'
            elif effective_iteration > 1:  # and devs < dev_allowance:
                user_input = 'single'  #'none'

            hand_fit_subset = []
            cam = self.camera
            if user_input == 'all':
                hand_fit_subset = list(initial_coef_table.colnames)
            elif user_input in ['some', 'minimal', 'single']:
                if cam == 'r':
                    # specific_set = [cam + '101', cam + '416']
                    specific_set = [
                        cam + '101', cam + '816', cam + '416', cam + '501'
                    ]
                else:
                    specific_set = [
                        cam + '116', cam + '801', cam + '516', cam + '401'
                    ]
                for i, fib in enumerate(specific_set):
                    outfib = ensure_match(fib, data.colnames, hand_fit_subset,
                                          cam)
                    hand_fit_subset.append(outfib)

                if user_input == 'some':
                    seed = int(filenum)
                    np.random.seed(seed)
                    randfibs = [
                        '{:02d}'.format(x)
                        for x in np.random.randint(1, 17, 4)  # fibers 01-16
                    ]
                    for tetn, fibn in zip([2, 3, 6, 7], randfibs):
                        fib = '{}{}{}'.format(cam, tetn, fibn)
                        outfib = ensure_match(fib, data.colnames,
                                              hand_fit_subset, cam)
                        hand_fit_subset.append(outfib)
                elif user_input == 'single':
                    hand_fit_subset = hand_fit_subset[:1]
            else:
                pass

            # hand_fit_subset = np.asarray(hand_fit_subset)

            ##HACK!
            # if pairnum == 0 and self.camera=='r':
            #     altered_coef_table = initial_coef_table.copy()
            #     hand_fit_subset = np.asarray(['r101','r816','r416','r501','r210','r602','r715'])
            #     altered_coef_table = {}#initial_coef_table.copy()
            #     altered_coef_table['r101'] = [5071.8187300612035, 0.9930979838081959, -5.769775729541421e-06,
            #                                   1.6219475654346627e-08, -1.060536238512127e-11, 2.027614894968671e-15]
            #
            #     altered_coef_table['r816'] = [5064.941399949152, 0.9887048293667995, 4.829092351762018e-06,
            #                                   5.280389577236655e-09, -5.618906483279477e-12, 1.1981097537960155e-15]
            #
            #     altered_coef_table['r416'] = [4966.43139830805, 0.9939388787553181, 5.244911711992524e-06,
            #                                   1.2291548669411035e-09, - 2.0296595329597448e-12, 2.9050877132565224e-16]
            #
            #     altered_coef_table['r501'] = [4965.341783218052, 0.9873531089008049, 2.4560812264245633e-05,
            #                                   -2.0293237635901715e-08, 8.081202360788054e-12, -1.397383927434781e-15]
            #
            #     altered_coef_table['r210'] = [5009.879532180203, 0.986418938077269,
            #                                   2.1117286784979934e-05, - 1.612921025968839e-08, 6.307242237439978e-12,
            #                                   -1.175841190977326e-15]
            #
            #     altered_coef_table['r309'] = [4981.847585300046, 0.9953409249278389, 6.616819915490353e-09,
            #                                   7.072942793437885e-09, -4.7799815890757634e-12, 7.369734622022845e-16]
            #
            #     altered_coef_table['r602'] = [4975.080088016758, 0.9916173886456268, 7.811003804278236e-06,
            #                                   1.1977785560589788e-09, -3.3762927213375386e-12, 7.593041888780153e-16]
            #
            #     altered_coef_table['r715'] = [5014.023681360571, 0.99147302071155, 4.748885129798807e-06,
            #                                   3.1454713162197196e-09, -3.4683774647827705e-12, 6.101876288746191e-16]
            #     handfit_fitting_dict = {}
            #     handfit_fitting_dict['calib coefs'] = altered_coef_table
            #     wm,fm = linelist['ThAr']
            # else:
            #     if pairnum==1 and self.camera=='r':
            #         # altered_coef_table,thetype = self.filemanager.locate_calib_dict(fittype='full-ThAr', camera=self.camera,
            #         #                                                  config=self.config, filenum=filenum)
            #         # print(thetype, altered_coef_table)
            #         altered_coef_table = self.filemanager.load_calib_dict(fittype='full-ThAr',cam=self.camera,config=self.config,filenum=1490,timestamp=679621)
            #         initial_coef_table = Table(altered_coef_table['CALIB COEFS'].data)
            ## End HACK!


            handfit_fitting_dict, wm, fm = wavelength_fitting_by_line_selection(
                data, initial_coef_table, self.all_lines, linelist,
                self.mock_spec_w, self.mock_spec_f, select_lines=select_lines,
                save_plots=self.save_plots,
                savetemplate_funcs=self.savetemplate_funcs, filenum=filenum,
                subset=hand_fit_subset, completed_coefs={})

            if select_lines:
                linelistdict = {'ThAr': (wm, fm)}
            else:
                linelistdict = self.selected_lines

            if self.single_core:
                full_fitting_dict, badfits = \
                    auto_wavelength_fitting_by_lines(data, initial_coef_table, handfit_fitting_dict['calib coefs'].copy(), self.all_lines, linelistdict.copy(),\
                                                          mock_spec_w=mock_spec_w,  mock_spec_f=mock_spec_f,\
                                                          filenum=filenum, \
                                                          save_plots=self.save_plots, savetemplate_funcs=self.savetemplate_funcs)

                # for datainfoname,datainfo in handfit_fitting_dict.items():
                #     for fib in datainfo.keys():
                #         full_fitting_dict[datainfoname][fib] = datainfo[fib]

                badfits = np.array(badfits)
            else:
                fib1s = self.instrument.lower_half_fibs[self.camera]
                fib2s = self.instrument.upper_half_fibs[self.camera]

                obs1 = {
                    'comp': data[fib1s.tolist()], 'fulllinelist': self.all_lines.copy(),
                    'coarse_coefs': initial_coef_table, 'linelistdict':linelistdict.copy(), \
                    'mock_spec_w':mock_spec_w.copy(), 'mock_spec_f': mock_spec_f.copy(), \
                    'out_coefs':handfit_fitting_dict['calib coefs'].copy(),'filenum':filenum,
                    'save_plots':self.save_plots, "savetemplate_funcs":self.savetemplate_funcs
                }
                obs2 = {
                    'comp': data[fib2s.tolist()], 'fulllinelist': self.all_lines.copy(),
                    'coarse_coefs': initial_coef_table.copy(), 'linelistdict':linelistdict.copy(), \
                    'mock_spec_w':mock_spec_w.copy(), 'mock_spec_f': mock_spec_f.copy(), \
                    'out_coefs':handfit_fitting_dict['calib coefs'].copy(),'filenum':filenum,
                    'save_plots': self.save_plots, "savetemplate_funcs": self.savetemplate_funcs
                }

                all_obs = [obs1, obs2]
                NPROC = np.clip(len(all_obs), 1, 4)

                with Pool(NPROC) as pool:
                    tabs = pool.map(auto_wavelength_fitting_by_lines_wrapper,
                                    all_obs)

                full_fitting_dict, badfits = tabs[0]
                full_fitting_dict2, badfits2 = tabs[1]

                # ## The hand fit calibrations are in both returned dicts, remove from the second
                # ## Assign the other calibration info from hand fits to the output dicts
                # for datainfoname, datainfo in handfit_fitting_dict.items():
                #     ## use the autofitted wavelength solution even for hand fits, note we're not
                #     ## assigning these values to the output array
                #     if 'coef' in datainfoname:
                #         for fib in datainfo.keys():
                #             full_fitting_dict2[datainfoname].pop(fib)
                #     else:
                #         for fib in datainfo.keys():
                #             full_fitting_dict[datainfoname][fib] = datainfo[fib]

                ## The hand fit calibrations are in both returned dicts, remove from the second
                ## Assign the other calibration info from hand fits to the output dicts
                for datainfoname, datainfo in full_fitting_dict2.items():
                    for fib in datainfo.keys():
                        full_fitting_dict[datainfoname][fib] = datainfo[fib]

                badfits = np.unique(np.append(badfits, badfits2))

            handfit_bad_subset_dict, wm, fm = \
                                                wavelength_fitting_by_line_selection(data, initial_coef_table, \
                                                     self.all_lines, linelistdict, self.mock_spec_w, self.mock_spec_f, \
                                                     select_lines=select_lines, save_plots=self.save_plots,
                                                     savetemplate_funcs=self.savetemplate_funcs, \
                                                     filenum=filenum, subset=badfits,
                                                     completed_coefs=full_fitting_dict['calib coefs'].copy())
            for datainfoname, datainfo in handfit_bad_subset_dict.items():
                for fib in datainfo.keys():
                    full_fitting_dict[datainfoname][fib] = datainfo[fib]

            if select_lines:
                self.selected_lines = full_fitting_dict['linelist'].copy()
                select_lines = False

            ## Zero pad rows so that the table won't throw an error for unequal sizes
            maxlams = int(
                np.max([
                    len(full_fitting_dict['wavelengths'][fib])
                    for fib in full_fitting_dict['wavelengths'].keys()
                ]))

            for fib in full_fitting_dict['wavelengths'].keys():
                nlams = len(full_fitting_dict['wavelengths'][fib])
                if nlams != maxlams:
                    full_fitting_dict['wavelengths'][fib] = np.append(
                        full_fitting_dict['wavelengths'][fib],
                        np.zeros(maxlams - nlams))
                    full_fitting_dict['pixels'][fib] = np.append(
                        full_fitting_dict['pixels'][fib],
                        np.zeros(maxlams - nlams))

            ## Create hdulist to export
            out_hdus = [
                fits.PrimaryHDU(header=self.fine_calibrations[filenum].header)
            ]
            for out_name in output_names:
                curtab = Table()
                curdict = full_fitting_dict[out_name]
                for key in self.instrument.full_fibs[self.camera]:
                    curtab.add_column(Table.Column(data=curdict[key],
                                                   name=key))

                out_hdus.append(
                    fits.BinTableHDU(data=curtab.copy(), name=out_name))

            hdulist = fits.HDUList(out_hdus)

            #out_calib_table = out_calib_table[np.sort(out_calib_table.colnames)]
            self.fine_calibration_coefs[pairnum] = full_fitting_dict[
                'calib coefs'].copy()

            if pairnum > 0:
                devs = find_devs(initial_coef_table,
                                 full_fitting_dict['calib coefs'])

            initial_coef_table = Table(full_fitting_dict['calib coefs'].copy())

            self.final_calibrated_hdulists[pairnum] = hdulist
            self.filemanager.save_full_calib_dict(hdulist,
                                                  self.lampstr_f,
                                                  self.camera,
                                                  self.config,
                                                  filenum=filenum)

            gc.collect()
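
# A minimal sketch of the parallel branch above: split the work into
# per-half dictionaries, map a wrapper over them with a small Pool, then
# fold the second half's fits into the first. The worker here is a
# stand-in for auto_wavelength_fitting_by_lines_wrapper.
from multiprocessing import Pool

def fit_worker(obs):
    # pretend "fit": average the pixel values of each fiber
    coefs = {fib: sum(v) / len(v) for fib, v in obs['comp'].items()}
    return coefs, []  # (fitted coefs, bad-fiber list)

if __name__ == '__main__':
    all_obs = [{'comp': {'r101': [1.0, 2.0], 'r102': [3.0, 4.0]}},
               {'comp': {'r801': [5.0, 6.0], 'r802': [7.0, 8.0]}}]
    with Pool(min(len(all_obs), 4)) as pool:
        results = pool.map(fit_worker, all_obs)
    merged, badfits = results[0]
    for coefs, bad in results[1:]:
        merged.update(coefs)
        badfits.extend(bad)
    print(merged)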
Example #29
def combine_modeloutputs(outputname='xxRENAMExx_binary_Zcombined.txt',
                         datatype='bin',
                         verbose=True):
    """
    Combine the model outputs into a single 'master model' with all variables included.

    --- EXAMPLE OF USE ---
    import BPASSmodels as bm
    bm.combine_modeloutputs(outputname='nebular_emission_BPASS_binaries_Zcombined.txt',datatype='bin')
    bm.combine_modeloutputs(outputname='nebular_emission_BPASS_singles_Zcombined.txt',datatype='sin')

    """
    outputname = '/Users/kschmidt/work/catalogs/BPASSbasedNebularEmission/' + outputname
    zvals = [
        0.040, 0.030, 0.020, 0.014, 0.010, 0.008, 0.006, 0.004, 0.003, 0.002,
        0.001, 0.0001, 0.00001
    ]

    if datatype == 'bin':
        binaries = True
    elif datatype == 'sin':
        binaries = False
    else:
        raise ValueError("datatype must be 'bin' or 'sin'")

    outputtable = None
    filelist = []
    for zval in zvals:
        models_bin_uv, fileloaded_uv = bm.load_model(
            zval,
            filepath=
            '/Users/kschmidt/work/catalogs/BPASSbasedNebularEmission/UV/',
            binaries=binaries)
        filelist.append(fileloaded_uv)
        models_bin_op, fileloaded_op = bm.load_model(
            zval,
            filepath=
            '/Users/kschmidt/work/catalogs/BPASSbasedNebularEmission/Optical/',
            binaries=binaries)
        filelist.append(fileloaded_op)

        col_Zval = Table.Column(name='Zgas',
                                data=models_bin_uv['logU'] * 0.0 + zval)
        models_bin_uv.add_column(col_Zval, index=0)

        for cn in models_bin_op.colnames:
            if cn in ['No', 'logU', 'lognH', 'logAge']:
                coldiff = models_bin_op[cn] - models_bin_uv[cn]
                Ndiffobj = len(np.where(coldiff != 0.0)[0])
                if Ndiffobj > 0:
                    print(
                        '\n The column ' + cn +
                        ' does not match for the UV and Optical fields for Z='
                        + str(zval) + '; stopping for investigation.\n')
                    pdb.set_trace()
            else:
                coldat = Table.Column(name=cn, data=models_bin_op[cn])
                models_bin_uv.add_column(coldat)

        if outputtable is None:
            outputtable = models_bin_uv
        else:
            outputtable = vstack([outputtable, models_bin_uv])

    if verbose: print(' - Writing output to:\n   ' + outputname)
    aascii.write(outputtable, output=outputname, format='commented_header')
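
# The accumulator above starts from None and vstacks inside the loop; an
# equivalent sketch that collects the per-Z tables in a list and stacks
# once at the end, which avoids repeated intermediate copies.
from astropy.table import Table, vstack

tables = []
for zval in [0.020, 0.008]:  # placeholder metallicities
    t = Table({'logU': [-2.0, -3.0]})
    t.add_column(Table.Column(name='Zgas', data=[zval] * len(t)), index=0)
    tables.append(t)

outputtable = vstack(tables)  # one stack instead of N-1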
Example #30
#!/usr/bin/python
import numpy as np
from astropy.table import Table
t = Table([[1, 2, 3], [0.1, 0.2, -0.3]], names=('a', 'b'))
t.add_column(Table.Column(data=[0.1, 0.3, -0.55], name='x', dtype=np.float32))
t.meta['table_name'] = 'defa'
# NB: core astropy does not register a 'sql' writer; this call assumes a
# third-party connector that provides format='sql' with a dbtype keyword.
t.write('a.sqlite', format='sql', dbtype='sqlite')
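
# A standard-library alternative that writes the same table to SQLite
# with sqlite3, avoiding any dependence on a third-party 'sql' writer.
import sqlite3
from astropy.table import Table

t = Table([[1, 2, 3], [0.1, 0.2, -0.3]], names=('a', 'b'))

conn = sqlite3.connect('a.sqlite')
conn.execute('CREATE TABLE IF NOT EXISTS defa (a INTEGER, b REAL)')
rows = [(int(a), float(b)) for a, b in zip(t['a'], t['b'])]
conn.executemany('INSERT INTO defa VALUES (?, ?)', rows)
conn.commit()
conn.close()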