# Example 1
def grab_calfiles(username=""):
    """
    Download CRIRES calibration frames (flats, darks and flat-darks) that
    match the science frames found in ``obj/*.fits``.

    The first science frame's header supplies the query parameters
    (observation date, exposure time, detector window size and central
    wavelength).  The ESO archive is searched over an 18-hour window
    starting at DATE-OBS, candidate frames whose detector window size
    differs from the science frames are discarded, and the survivors are
    downloaded and decompressed into the ``flats``, ``flatdarks`` and
    ``darks`` directories (which are emptied/created first).

    :param username:
        ESO archive username used to log in.  Defaults to ``""`` for
        backward compatibility with the old hard-coded constant -- set
        your username here or pass it explicitly.
    """
    workingdir = os.getcwd()
    handler = Eso()

    handler.ROW_LIMIT = 10000
    handler.login(username, store_password=True)

    def mkdir_safe(dirname):
        """Create ``dirname`` if missing; otherwise delete its contents."""
        if os.path.isdir(dirname):
            for f in glob.glob(os.path.join(dirname, "*")):
                os.remove(f)
        else:
            os.mkdir(dirname)

    for dirname in ("flats", "flatdarks", "darks"):
        mkdir_safe(dirname)

    # Read the first FITS in the folder.
    filelist = glob.glob("obj/*.fits")
    print(os.getcwd())
    temphdu = fits.open(filelist[0])
    header = temphdu[0].header
    print("FITS header loaded")

    # Extract relevant query params from the science frame.
    prop_ID = header["HIERARCH ESO OBS PROG ID"]
    date = Time(header["DATE-OBS"])
    sci_exp = header["EXPTIME"]
    # Set start and end time of the search - may need to tweak this manually
    # to find the right calfiles.  Best to be generous with this window since
    # we need to find flats, darks, AND flat-darks for the pipeline to run.
    stime = date
    etime = date + 18 * u.hour
    win_size = header["HIERARCH ESO DET WINDOW NY"]
    sci_wav = header["HIERARCH ESO INS WLEN CWLEN"]

    def fetch(extra_filters, dest):
        """
        Query CRIRES frames inside the [stime, etime] window with the given
        extra column filters, drop frames whose detector window size differs
        from the science frames, download the rest into ``dest``, and return
        the filtered result table.
        """
        filters = {'stime': stime.value, 'etime': etime.value}
        filters.update(extra_filters)
        table = handler.query_instrument("crires", column_filters=filters)
        headers = handler.get_headers(table["DP.ID"])
        mask = headers["HIERARCH ESO DET WINDOW NY"] != win_size
        table = table[~mask]
        for f in handler.retrieve_data(table["DP.ID"]):
            shutil.copy(f, dest)
        return table

    # Query flat frames matching the science wavelength setting.
    print("Querying ESO Archive")
    flat_table = fetch({'dp_type': 'FLAT', 'ins_wlen_cwlen': sci_wav},
                       "flats")
    # If the flat exposure times are not all the same value, use the highest
    # one when searching for matching flat-darks.
    flat_exp_time = np.max(flat_table["EXPTIME"])

    # Grab the dark frames matching the science exposure time.
    fetch({'dp_type': 'DARK', 'exptime': sci_exp}, "darks")

    # Grab darks matched to the flat fields.
    fetch({'dp_type': 'DARK', 'exptime': flat_exp_time}, "flatdarks")

    print("Unpacking and moving!")

    # Unpack all the files.  pigz is a parallel gzip, but it cannot
    # decompress in parallel; for maximum compatibility replace the command
    # with "gzip -d *.Z", or for maximum SPEED try "ls *.Z | parallel pigz -d"
    # if GNU parallel is installed.
    for dirname in ("flats", "flatdarks", "darks"):
        os.chdir(os.path.join(workingdir, dirname))
        os.system("pigz -d *.Z")

    # Restore the original working directory explicitly (more robust than the
    # old relative 'os.chdir("../")', which assumed we finished in "darks").
    os.chdir(workingdir)
    print("Calibration selection complete!")
import pg
import re
import time
import yaml
from astropy.extern.six import BytesIO, cPickle as pickle
from astropy.table import Table
from astroquery.eso import Eso as ESO
from astroquery.eso.core import _check_response

# Load local catalog of positions.
local_catalog = Table.read("data/HARPS_all.csv")

# Login to ESO.
eso = ESO()
eso.login("andycasey")
eso.ROW_LIMIT = 100000  # Maximum possible number of observations per star

# Connect to the PostgreSQL database.
cwd = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(cwd, "../db/credentials.yaml"), "r") as fp:
    # safe_load: plain yaml.load without an explicit Loader is deprecated and
    # can execute arbitrary Python via YAML tags; credentials are plain data.
    credentials = yaml.safe_load(fp)
connection = pg.connect(**credentials)


def query_harps_phase3_by_position(ra, dec, **kwargs):
    """
    Query the ESO Phase 3 science archive by position.

    :param ra:
        Right ascension [degrees].
# Example 3
def wrangle_eso_for_rv_availability(ra, dec):
    """
    Checks via ESO query for available RVs on:
        ['HARPS', 'ESPRESSO', 'FORS2', 'UVES', 'XSHOOTER']

    Possible future expansion: actually get the RVs. (For now, this is
    just used as a flag to let the user know the RVs might exist!)

    :param ra: right ascension [degrees]
    :param dec: declination [degrees]

    Returns tuple of:
        (nan, nan, provenance)
    """
    eso = Eso()
    eso.ROW_LIMIT = 9999

    coord = SkyCoord(ra=ra, dec=dec, unit=(u.deg, u.deg), frame='icrs')
    print('begin ESO search for {}'.format(repr(coord)))

    # The ESO form wants space-separated sexagesimal strings, so strip the
    # astropy 'h'/'d'/'m'/'s' unit markers.
    rastr = (str(coord.ra.to_string(u.hour)).replace('h', ' ').replace(
        'm', ' ').replace('s', ' '))

    decstr = (str(coord.dec.to_string()).replace('d', ' ').replace(
        'm', ' ').replace('s', ' '))

    # search within 10 arcsec of given position
    boxsize = '00 00 10'
    res = eso.query_main(column_filters={
        'ra': rastr,
        'dec': decstr,
        'box': boxsize
    })

    if res is None:
        return np.nan, np.nan, np.nan

    # limit search to the following instruments, in order of preference
    instruments = ['HARPS', 'ESPRESSO', 'FORS2', 'UVES', 'XSHOOTER']
    sel = np.zeros((len(res))).astype(bool)
    for instrument in instruments:
        sel |= (nparr(res['Instrument']) == instrument)
    res = res[sel]

    # Exclude the listed categories.  Keep a row only if its Category differs
    # from EVERY bad category (intersection, hence &=; the old |= over !=
    # only worked because the list had a single entry).
    badcategories = ['CALIB']
    sel = np.ones((len(res))).astype(bool)
    for badcategory in badcategories:
        sel &= (nparr(res['Category']) != badcategory)
    res = res[sel]

    if len(res) >= 1:

        # XSHOOTER doesn't seem to give archival RVs. would need to derive
        # from spectra yourself
        if np.all(nparr(res['Instrument']) == 'XSHOOTER'):
            return np.nan, np.nan, 'XSHOOTER'

        # Embargo lasts a year on all ESO observations.
        # NOTE(review): MJD-OBS > (now - 365 d) selects observations taken
        # WITHIN the last year, i.e. frames that would still be embargoed --
        # this comparison looks inverted; confirm the intended direction.
        nt = Time.now()
        embargo_end = nt.mjd - 365
        if np.all(nparr(res['MJD-OBS']) > embargo_end):
            return np.nan, np.nan, np.unique(res['Instrument'])[0]

        # HARPS gives archival RVs. downloading them can be done... but for
        # s6+s7, only a few objects are viable.
        if np.all(nparr(res['Instrument']) == 'HARPS'):
            print('WARNING: SKIPPING AUTOMATION OF HARPS ARCHIVAL RV GETTING')
            return np.nan, np.nan, 'HARPS'

        # BUG FIX: previously control fell off the end here and returned an
        # implicit None for mixed-instrument results, breaking the documented
        # (nan, nan, provenance) tuple contract.
        return np.nan, np.nan, np.nan

    else:
        return np.nan, np.nan, np.nan