Example #1
    def locate_postcard(self, local):
        """ Finds the eleanor postcard, if available, this star falls on.
        
        Attributes
        ---------- 
        postcard : str
        postcard_bkg : str
        postcard_path : str
        position_on_postcard : list
        all_postcards : list
        mast_list : astropy.table.Table
        """
        self.mast_list = None

        info_str = "{0:04d}-{1}-{2}-{3}".format(self.sector, self.camera,
                                                self.chip, "cal")
        postcard_fmt = "postcard-s{0}-{{0:04d}}-{{1:04d}}"
        postcard_fmt = postcard_fmt.format(info_str)

        eleanorpath = os.path.dirname(__file__)

        guide_url = eleanorpath + '/postcard_centers.txt'
        guide = Table.read(guide_url, format="ascii")

        col, row = self.position_on_chip[0], self.position_on_chip[1]

        post_args = np.where((np.abs(guide['x'].data - col) <= 100)
                             & (np.abs(guide['y'].data - row) <= 100))

        post_cens = guide[post_args]

        # Finds the closest postcard
        closest_x, closest_y = np.argmin(
            np.abs(post_cens['x'] - col)), np.argmin(
                np.abs(post_cens['y'] - row))
        self.postcard = postcard_fmt.format(post_cens['x'].data[closest_x],
                                            post_cens['y'].data[closest_y])

        # Keeps track of all postcards that the star falls on
        all_postcards = []
        for i in range(len(post_cens)):
            name = postcard_fmt.format(post_cens['x'].data[i],
                                       post_cens['y'].data[i])
            all_postcards.append(name)
        self.all_postcards = np.array(all_postcards)

        if not local:

            postcard_obs = Observations.query_criteria(
                provenance_name="ELEANOR",
                target_name=self.postcard,
                obs_collection="HLSP")

            if len(postcard_obs) > 0:
                product_list = Observations.get_product_list(postcard_obs)
                self.pointing = check_pointing(self.sector, self.camera,
                                               self.chip, self.pm_dir)

                if self.pointing is None:
                    extension = ["pc.fits", "bkg.fits", "pm.txt"]
                else:
                    extension = ["pc.fits", "bkg.fits"]

                results = Observations.download_products(
                    product_list,
                    extension=extension,
                    download_dir=self.fn_dir)
                postcard_path = results['Local Path'][0]
                self.postcard_path = '/'.join(
                    e for e in postcard_path.split('/')[:-1])
                self.postcard = results['Local Path'][1].split('/')[-1]
                self.postcard_bkg = results['Local Path'][0].split('/')[-1]
                self.mast_results = results
                self.cutout = None  # Attribute for TessCut only
                # only downloaded the pointing model if the search for it above failed, so only
                # update it in that case here
                if self.pointing is None:
                    self.pm_dir = self.postcard_path
                    self.pointing = check_pointing(self.sector, self.camera,
                                                   self.chip, self.pm_dir)

            else:
                print(
                    "No eleanor postcard has been made for your target (yet). Using TessCut instead."
                )
                self.locate_with_tesscut()

        else:
            self.cutout = None  # Attribute for TessCut only
            self.postcard_bkg = 'hlsp_eleanor_tess_ffi_' + self.postcard + '_tess_v2_bkg.fits'
            self.postcard = 'hlsp_eleanor_tess_ffi_' + self.postcard + '_tess_v2_pc.fits'

            self.pointing = check_pointing(self.sector, self.camera, self.chip,
                                           self.pm_dir)
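
A minimal usage sketch (not from the source); "source" below stands in for an eleanor Source-like object that already carries the sector, camera, chip, position_on_chip, fn_dir, and pm_dir attributes this method reads:

# Hypothetical usage of the method above.
source.locate_postcard(local=False)
print(source.postcard)        # closest postcard name
print(source.all_postcards)   # every postcard the star falls on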
Example #2
 
from astroquery.mast import Catalogs
from astroquery.vizier import Vizier
from astroquery.mast import Observations
import urllib.request
import os


filter_wide = ["F555W","F606W"]
for f in filter_wide:
    if not os.path.isdir("data/" + f + "/"):
        os.makedirs("data/" + f + "/")
    obsTable = Observations.query_criteria(calib_level=3, dataproduct_type='image',
                                           obs_collection=["HLA"],
                                           instrument_name=["WFC3/UVIS"], filters=[f])

    for url in obsTable['dataURL']:
        # Skip masked/missing entries; download only FITS files.
        if isinstance(url, str) and url.endswith(".fits"):
            file = "data/" + f + "/" + url.split('/')[-1]
            urllib.request.urlretrieve(url, file)
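
astroquery can also handle the download itself; a hedged variant of the same loop using Observations.download_file, assuming each dataURL value is a URI that download_file accepts:

# Sketch: let astroquery fetch each FITS product instead of urllib.
for url in obsTable['dataURL']:
    if isinstance(url, str) and url.endswith(".fits"):
        Observations.download_file(url, local_path="data/" + f + "/" + url.split('/')[-1])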
Example #3
import configparser
import os

from astroquery.mast import Observations

config = configparser.ConfigParser()
config.read(os.path.expanduser('~/.aws/credentials'))
os.environ["AWS_ACCESS_KEY_ID"] = config.get('default', 'aws_access_key_id')
os.environ["AWS_SECRET_ACCESS_KEY"] = config.get('default',
                                                 'aws_secret_access_key')

# Create a directory for downloaded files
if not os.path.exists('DATA'):
    os.makedirs('DATA')

# This downloads all the F160W DRZ images from CANDELS in the GOODS-South field
print('\nQuerying the MAST archive.\n')
obsTable = Observations.query_criteria(obs_collection='HST',
                                       filters='F160W',
                                       instrument_name='WFC3/IR',
                                       proposal_id=['12061', '12062'])

# Grab the list of available data products for these observations
products = Observations.get_product_list(obsTable)

# Select only drizzled (DRZ) files
filtered = Observations.filter_products(products,
                                        mrp_only=False,
                                        productSubGroupDescription='DRZ')

# Enable 'S3 mode' for the module, which will return S3-like URLs for FITS files
# e.g. s3://stpubdata/hst/public/icde/icde43l0q/icde43l0q_drz.fits
Observations.enable_s3_hst_dataset()

# Grab the S3 URLs for each of the observations
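
The snippet is truncated here; a plausible completion, mirroring the cloud-URI lookup used in later examples in this collection, might be:

# Plausible completion of the truncated example: resolve S3 URIs for the
# filtered DRZ products.
s3_urls = Observations.get_cloud_uris(filtered)
print("Found {} S3 URIs".format(len(s3_urls)))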
Example #4
def _query_mast(target, radius=None, project=('Kepler', 'K2', 'TESS')):
    """Helper function which wraps `astroquery.mast.Observations.query_criteria()`
    to returns a table of all Kepler or K2 observations of a given target.

    Parameters
    ----------
    target : str, int, or `astropy.coordinates.SkyCoord` object
        See docstrings above.
    radius : float or `astropy.units.Quantity` object
        Conesearch radius.  If a float is given it will be assumed to be in
        units of arcseconds.  If `None` then we default to 0.0001 arcsec.
    project : str, list of str
        'Kepler', 'K2', and/or 'TESS'.

    Returns
    -------
    obs : astropy.Table
        Table detailing the available observations on MAST.
    """
    # If passed a SkyCoord, convert it to an RA and Dec
    if isinstance(target, SkyCoord):
        target = '{}, {}'.format(target.ra.deg, target.dec.deg)

    project = np.atleast_1d(project)

    if radius is None:
        radius = .0001 * u.arcsec
    elif not isinstance(radius, u.quantity.Quantity):
        radius = radius * u.arcsec

    try:
        # If `target` looks like a KIC or EPIC ID, we will pass the exact
        # `target_name` under which MAST will know the object to prevent
        # source confusion (see GitHub issue #148).
        target = int(target)
        if (target > 0) and (target < 200000000):
            target_name = 'kplr{:09d}'.format(target)
        elif (target > 200000000) and (target < 300000000):
            target_name = 'ktwo{:09d}'.format(target)
        else:
            raise ValueError(
                "{:09d}: not in the KIC or EPIC ID range".format(target))

        # query_criteria does not allow a cone search when target_name is passed in
        # so first grab desired target with ~0 arcsecond radius
        with warnings.catch_warnings():
            # suppress misleading AstropyWarning
            warnings.simplefilter('ignore', AstropyWarning)
            from astroquery.mast import Observations
            log.debug(
                "Started querying MAST for observations within {} of target_name='{}'."
                "".format(radius.to(u.arcsec), target_name))
            target_obs = Observations.query_criteria(target_name=target_name,
                                                     radius=str(
                                                         radius.to(u.deg)),
                                                     project=project,
                                                     obs_collection=project)

        if len(target_obs) == 0:
            raise ValueError(
                "No observations found for '{}'.".format(target_name))

        # check if a cone search is being performed
        # if yes, perform a cone search around coordinates of desired target
        if radius < (0.1 * u.arcsec):
            obs = target_obs
            # astroquery does not return distance if target_name is given;
            # we add it here so that the table returned always has this column.
            obs['distance'] = 0.
        else:
            ra = target_obs['s_ra'][0]
            dec = target_obs['s_dec'][0]
            with warnings.catch_warnings():
                # suppress misleading AstropyWarning
                warnings.simplefilter('ignore', AstropyWarning)
                from astroquery.mast import Observations
                log.debug(
                    "Started querying MAST for observations within {} of coordinates='{} {}'."
                    "".format(radius.to(u.arcsec), ra, dec))
                obs = Observations.query_criteria(coordinates='{} {}'.format(
                    ra, dec),
                                                  radius=str(radius.to(u.deg)),
                                                  project=project,
                                                  obs_collection=project)
            obs.sort('distance')
        return obs
    except ValueError:
        pass

    # If `target` did not look like a KIC or EPIC ID, then we let MAST
    # resolve the target name to a sky position. Convert radius from arcsec
    # to degrees for query_criteria().
    from astroquery.exceptions import ResolverError
    try:
        with warnings.catch_warnings():
            # suppress misleading AstropyWarning
            warnings.simplefilter('ignore', AstropyWarning)
            from astroquery.mast import Observations
            log.debug(
                "Started querying MAST for observations within {} of objectname='{}'."
                "".format(radius.to(u.arcsec), target))
            obs = Observations.query_criteria(objectname=target,
                                              radius=str(radius.to(u.deg)),
                                              project=project,
                                              obs_collection=project)
        obs.sort('distance')
        return obs
    except ResolverError as exc:
        raise SearchError(exc)
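
A usage sketch for the helper above; the KIC ID and radius are illustrative, not from the source:

# Illustrative call: 20-arcsec cone search around KIC 11904151 (Kepler-10).
obs = _query_mast(11904151, radius=20, project=('Kepler',))
print(len(obs), obs['distance'][:3])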
Example #5
sector = 1

tpeak = 2458341.89227  # Julian Day

# %%
'''
### Querying MAST

#### Mission light curves

Here we choose the TESS mission (`obs_collection`) and query on our TIC ID and sector.
'''

# %%
mission_res = Observations.query_criteria(obs_collection="TESS",
                                          target_name=tic_id,
                                          sequence_number=sector)
mission_res

# %%
'''
#### TASOC light curves

MAST also hosts a variety of community-contributed High Level Science Products (HLSPs), all of which are given the mission "HLSP". In this case we will specifically search for HLSPs in the TESS project, which will return the light curves provided by TASOC (note the `provenance_name` of "TASOC").
'''

# %%
tasoc_res = Observations.query_criteria(target_name=tic_id,
                                        obs_collection="HLSP",
                                        project="TESS",
                                        sequence_number=sector)
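
To go from the query result to files on disk, the usual follow-up (a sketch, not part of the original notebook) is:

# Sketch: list and download the data products behind the TASOC query.
tasoc_products = Observations.get_product_list(tasoc_res)
tasoc_manifest = Observations.download_products(tasoc_products)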
Example #6
#    ofile1.write(",".join(fieldnames1)+'\n')
    for this_id in ids:
        try:
            target_name = this_id
            radius = 0.2
            catalogTIC = Catalogs.query_object(target_name, radius, catalog = "TIC")
            numObj = "Number of TIC objects within %f deg of %s: %u" % (radius, target_name, len(catalogTIC))
            where_dwarfs = np.where(catalogTIC['lumclass'] == 'DWARF')[0]
            where_giants = np.where(catalogTIC['lumclass'] == 'GIANT')[0]
            dwarfs = "Number of objects classified as 'DWARF' within %f deg of %s: %u" % (radius, target_name, len(where_dwarfs))
            giants = "Number of objects classified as 'GIANT' within %f deg of %s: %u" % (radius, target_name, len(where_giants))
            where_closest = np.argmin(catalogTIC['dstArcSec'])
            closest = "Closest TIC ID to %s: TIC %s, seperation of %f arcsec. and a TESS mag. of %f" % (target_name, catalogTIC['ID'][where_closest], catalogTIC['dstArcSec'][where_closest], catalogTIC['Tmag'][where_closest])

            #sectors_search = Observations.query_criteria(target_name=this_id, provenance_project='TASOC')
            sectors_search = Observations.query_criteria(target_name=this_id, obs_collection="HLSP", filters="TESS",
                                                         t_exptime=[1799, 1801])
            #print('Getting sectors')
            #import pdb; pdb.set_trace()
            sector_length = len(sectors_search)
            if sector_length != 0:
                sector = sectors_search['sequence_number']
                sector_length = sector_length - 1
                

            print('Getting times')
            #import pdb; pdb.set_trace()
            # Call the helpers once and unpack, instead of calling them twice.
            time, flux = gettimeflux_1800(this_id, str(sector[0]))[:2]
            print(time)
            Tmag, Teff = get_catalog_data(this_id)[:2]
def get_hlsp_lightcurves(tic_id,
                         hlsp_products=('CDIPS', 'TASOC', 'PATHOS'),
                         download_dir=None,
                         verbose=True):
    """This downloads TESS HLSP light curves for a given TIC ID.

    Parameters
    ----------
    tic_id : str
        The TIC ID of the object as a string.

    hlsp_products : sequence of str
        List of desired HLSP products to search. For instance, ["CDIPS"].

    download_dir : str
        Path of directory to which light-curve will be downloaded.

    Returns
    -------
    lcfiles : list or None
        List of light-curve file paths. None if none are found and downloaded.

    """

    if not astroquery_dependency:
        LOGERROR(
            "The astroquery package is required for this function to work.")
        return None

    lcfiles = []

    for hlsp in hlsp_products:

        obs_table = Observations.query_criteria(target_name=tic_id,
                                                provenance_name=hlsp)

        if verbose:
            LOGINFO(f'Found {len(obs_table)} {hlsp} light-curves.')

        if len(obs_table) == 0:
            if verbose:
                LOGINFO("Did not find light-curves. Escaping.")
            return None

        # Get list of available products for this Observation.
        cdips_products = Observations.get_product_list(obs_table)

        # Download the products for this Observation.
        manifest = Observations.download_products(cdips_products,
                                                  download_dir=download_dir)
        if verbose:
            LOGINFO("Done")

        if len(manifest) >= 1:
            lcfiles.append(list(manifest['Local Path']))

    #
    # flatten lcfiles list
    #
    if len(lcfiles) >= 1:
        return_lcfiles = [item for sublist in lcfiles for item in sublist]
    else:
        return_lcfiles = None

    return return_lcfiles
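
A hedged usage sketch for the function above; the TIC ID and directory are illustrative:

# Illustrative call: fetch only CDIPS light curves for one TIC ID.
lcfiles = get_hlsp_lightcurves('245821931', hlsp_products=('CDIPS',),
                               download_dir='./hlsp_lcs', verbose=True)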
Example #8
def run_query(box=None,
              get_exptime=True,
              rename_columns=DEFAULT_RENAME,
              sort_column=['obs_id', 'filter'],
              position_box=True,
              base_query=DEFAULT_QUERY_ASTROQUERY.copy(),
              **kwargs):
    """
    Run MAST query with astroquery.mast.  
    
    All columns listed at https://mast.stsci.edu/api/v0/_c_a_o_mfields.html 
    can be used for the query.
    
    position_box: query on s_ra / s_dec positions rather than position 
                  coordinates
    """
    # arguments
    frame = inspect.currentframe()
    msg = utils.log_function_arguments(None, frame, 'query.run_query')

    import time

    from astroquery.mast import Observations
    from astropy.coordinates import SkyCoord
    from astropy.io.misc import yaml

    import astropy.units as u

    query_args = {}
    for k in base_query:
        query_args[k] = base_query[k]

    # JWST "expected data" won't have datasets to query for actual exptimes...
    if 'obs_collection' in base_query:
        if 'JWST' in base_query['obs_collection']:
            get_exptime = False

    for k in kwargs:
        if k == 'instruments':
            query_args['instrument_name'] = kwargs[k]
        elif k == 'proposal_id':
            query_args['proposal_id'] = ['{0}'.format(p) for p in kwargs[k]]
        elif k == 'extensions':
            continue
        else:
            query_args[k] = kwargs[k]

    if (box is not None):
        ra, dec, radius = box
        #coo = SkyCoord(ra*u.deg, dec*u.deg)
        #query_args['coordinates'] = coo
        #query_args['radius'] = radius*u.arcmin
        cosd = np.cos(box[1] / 180 * np.pi)
        query_args['s_ra'] = box[0] + np.array([-1, 1]) * box[2] / 60 / cosd
        query_args['s_dec'] = box[1] + np.array([-1, 1]) * box[2] / 60

    try:
        tab = Observations.query_criteria(**query_args)
    except Exception:
        return query_args

    tab.meta['qtime'] = time.ctime(), 'Query timestamp'

    if box is not None:
        tab.meta['boxra'] = ra, 'Query RA, degrees'
        tab.meta['boxdec'] = dec, 'Query Decl., degrees'
        tab.meta['boxrad'] = radius, 'Query radius, arcmin'

    str_args = yaml.dump(query_args).replace('\n', ';;')
    tab.meta['obsquery'] = str_args, 'Full query string, replace ;; with newline'

    if len(tab) == 0:
        return tab

    tab = modify_table(tab,
                       get_exptime=get_exptime,
                       rename_columns=rename_columns,
                       sort_column=sort_column)
    return tab
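
A usage sketch for run_query; the coordinates are illustrative, and box is (ra_deg, dec_deg, radius_arcmin) per the code above:

# Illustrative query: a 3-arcmin box around (RA, Dec) = (150.1, 2.2).
tab = run_query(box=(150.1, 2.2, 3.0), instruments=['WFC3/IR'])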
Example #9
#!/usr/bin/env python3

from astroquery.mast import Observations
import IPython
import boto3

from s3_query import find_product_in_s3

Observations.enable_s3_hst_dataset()

obs = Observations.query_criteria(
    dataproduct_type=['image'],
    project='HST',
    instrument_name='ACS/WFC',
    filters='F555W',
    calib_level=3,
)

print("Observations: ", len(obs))

products = Observations.get_product_list(obs)
print("Products: ", len(products))

filtered_products = Observations.filter_products(
    products,
    productType='SCIENCE',
    extension='fits',
    description='DADS FLT file - Calibrated exposure ACS/WFC3/STIS/COS',
)
print("Filtered products: ", len(filtered_products))
print()
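
The snippet ends before using the imported find_product_in_s3 helper, whose signature is not shown; a generic continuation using astroquery's own cloud-URI lookup might be:

# Plausible continuation (find_product_in_s3's interface is unknown here):
# resolve cloud locations for the filtered products via astroquery itself.
s3_uris = Observations.get_cloud_uris(filtered_products)
print("S3 URIs: ", len(s3_uris))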
Example #10
from astroquery.mast import Observations
import numpy as np
import os,sys

search_radius = 120

## import files ##
inp0    = 'ALMA_z6qso_all.list'
data    = np.loadtxt(inp0,comments='#',dtype={'names':('proj_id','name','ra','dec','PI'),'formats':('S32','S32','f16','f16','S32')})
proj_id = data["proj_id"]
name    = data["name"]
ra      = data["ra"]
dec     = data["dec"]

for i in range(len(name)):
    # dtype 'S32' yields bytes in Python 3; decode before building paths.
    target = name[i].decode() if isinstance(name[i], bytes) else name[i]
    obs_table = Observations.query_criteria(coordinates="%3.5f %3.5f" % (ra[i], dec[i]),
                                            radius="%3.5f arcsec" % search_radius,
                                            intentType=["science", "SCIENCE"],
                                            obs_collection=["HST"])
    __, uidx = np.unique(obs_table['target_name'], return_index=True)
    target_table = obs_table[uidx]['target_name', 's_ra', 's_dec', 'filters',
                                   't_exptime', 'proposal_id', 'dataURL', 'obsid']
    if len(uidx) > 0:
        os.makedirs('download_sh/' + target, exist_ok=True)
        for u in range(len(uidx)):
            data_products = Observations.get_product_list(target_table['obsid'][u])
            Observations.download_products(data_products, calib_level=[2, 3],
                                           productType="SCIENCE", curl_flag=True,
                                           mrp_only=True,
                                           download_dir='download_sh/' + target)

Example #11
def _query_mast(target, radius=None, cadence='long'):
    """Helper function which wraps `astroquery.mast.Observations.query_criteria()`
    to returns a table of all Kepler or K2 observations of a given target.

    Parameters
    ----------
    target : str, int, or `astropy.coordinates.SkyCoord` object
        See docstrings above.
    radius : float or `astropy.units.Quantity` object
        Conesearch radius.  If a float is given it will be assumed to be in
        units of arcseconds.  If `None` then we default to 0.0001 arcsec.
    cadence: 'short' or 'long'
        Specify short (1-min) or long (30-min) cadence data.

    Returns
    -------
    obs : astropy.Table
        Table detailing the available observations on MAST.
    """
    # If passed a SkyCoord, convert it to an RA and Dec
    if isinstance(target, SkyCoord):
        target = '{}, {}'.format(target.ra.deg, target.dec.deg)

    if radius is None:
        radius = .0001 * u.arcsec
    elif not isinstance(radius, u.quantity.Quantity):
        radius = radius * u.arcsec

    try:
        # If `target` looks like a KIC or EPIC ID, we will pass the exact
        # `target_name` under which MAST will know the object to prevent
        # source confusion (see GitHub issue #148).
        target = int(target)
        if (target > 0) and (target < 200000000):
            target_name = 'kplr{:09d}'.format(target)
        elif (target > 200000000) and (target < 300000000):
            target_name = 'ktwo{:09d}'.format(target)
        else:
            raise ValueError("{:09d}: not in the KIC or EPIC ID range".format(target))

        # query_criteria does not allow a cone search when target_name is passed in
        # so first grab desired target with ~0 arcsecond radius
        target_obs = Observations.query_criteria(target_name=target_name,
                                                 radius=str(radius.to(u.deg)),
                                                 project=["Kepler", "K2"],
                                                 obs_collection=["Kepler", "K2"])
        if len(target_obs) == 0:
            raise ValueError("No observations found for {}".format(target_name))

        # check if a cone search is being performed
        # if yes, perform a cone search around coordinates of desired target
        if radius < (0.1 * u.arcsec):
            obs = target_obs
            # astroquery does not return distance if target_name is given;
            # we add it here so that the table returned always has this column.
            obs['distance'] = 0.
        else:
            ra = target_obs['s_ra'][0]
            dec = target_obs['s_dec'][0]
            obs = Observations.query_criteria(coordinates='{} {}'.format(ra, dec),
                                              radius=str(radius.to(u.deg)),
                                              project=["Kepler", "K2"],
                                              obs_collection=["Kepler", "K2"])
    except ValueError:
        # If `target` did not look like a KIC or EPIC ID, then we let MAST
        # resolve the target name to a sky position. Convert radius from arcsec
        # to degrees for query_criteria().
        try:
            obs = Observations.query_criteria(objectname=target,
                                              radius=str(radius.to(u.deg)),
                                              project=["Kepler", "K2"],
                                              obs_collection=["Kepler", "K2"])
        except ResolverError as exc:
            raise SearchError(exc)

    obs.sort('distance')  # ensure table returned is sorted by distance
    return obs
def retrieve_observation(obsid, suffix=['FLC'], archive=False, clobber=False):
    """Simple interface for retrieving an observation from the MAST archive

    If the input obsid is for an association, it will request all members with
    the specified suffixes.

    Parameters
    -----------
    obsid : string
        ID for observation to be retrieved from the MAST archive.  Only the
        IPPSSOOT (rootname) of exposure or ASN needs to be provided; eg., ib6v06060.

    suffix : list
        List containing suffixes of files which should be requested from MAST.

    archive : Boolean
        Retain copies of the downloaded files in the astroquery created sub-directories? Default is 'False'.

    clobber : Boolean
        Download and Overwrite existing files? Default is 'False'.

    Returns
    -------
    local_files : list
        List of filenames
    """
    local_files = []

    # Query MAST for the data with an observation type of either "science" or "calibration"
    obsTable = Observations.query_criteria(obs_id=obsid, obstype='all')
    # Catch the case where no files are found for download
    if len(obsTable) == 0:
        log.info("WARNING: Query for {} returned NO RESULTS!".format(obsid))
        return local_files

    dpobs = Observations.get_product_list(obsTable)
    dataProductsByID = Observations.filter_products(dpobs,
                                              productSubGroupDescription=suffix,
                                              extension='fits',
                                              mrp_only=False)

    # After the filtering has been done, ensure there is still data in the table for download.
    # If the table is empty, look for FLT images in lieu of FLC images. Only want one
    # or the other (not both!), so just do the filtering again.
    if len(dataProductsByID) == 0:
        log.info("WARNING: No FLC files found for {} - will look for FLT files instead.".format(obsid))
        suffix = ['FLT']
        dataProductsByID = Observations.filter_products(dpobs,
                                              productSubGroupDescription=suffix,
                                              extension='fits',
                                              mrp_only=False)

        # If still no data, then return.  An exception will eventually be thrown in
        # the higher level code.
        if len(dataProductsByID) == 0:
            log.info("WARNING: No FLC or FLT files found for {}.".format(obsid))
            return local_files
    allImages = []
    for tableLine in dataProductsByID:
        allImages.append(tableLine['productFilename'])
    log.info(allImages)
    if not clobber:
        rowsToRemove = []
        for rowCtr in range(0,len(dataProductsByID)):
            if os.path.exists(dataProductsByID[rowCtr]['productFilename']):
                log.info("{} already exists. File download skipped.".format(dataProductsByID[rowCtr]['productFilename']))
                rowsToRemove.append(rowCtr)
        if rowsToRemove:
            rowsToRemove.reverse()
            for rowNum in rowsToRemove:
                dataProductsByID.remove_row(rowNum)

    manifest = Observations.download_products(dataProductsByID, mrp_only=False)

    if not clobber:
        rowsToRemove.reverse()
        for rownum in rowsToRemove:
            if not manifest:
                local_files = allImages
                return local_files
            else:
                manifest.insert_row(rownum,vals=[allImages[rownum],"LOCAL","None","None"])

    download_dir = None
    for file,fileStatus in zip(manifest['Local Path'],manifest['Status']):
        if fileStatus != "LOCAL":
            # Identify what sub-directory was created by astroquery for the download
            if download_dir is None:
                file_path = file.split(os.sep)
                file_path.remove('.')
                download_dir = file_path[0]
            # Move or copy downloaded file to current directory
            local_file = os.path.abspath(os.path.basename(file))
            if archive:
                shutil.copy(file, local_file)
            else:
                shutil.move(file, local_file)
            # Record what files were downloaded and their current location
            local_files.append(os.path.basename(local_file))
        else:
            local_files.append(file)
    if not archive:
        # Remove astroquery created sub-directories
        shutil.rmtree(download_dir)
    return local_files
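
A hedged usage sketch, reusing the rootname quoted in the docstring:

# Illustrative call: fetch the FLC members of association ib6v06060 into
# the current directory, discarding astroquery's sub-directories.
local_files = retrieve_observation('ib6v06060', suffix=['FLC'])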
Example #13
def get_mast_file_list(star_id, provenance, sequence_name):
    """
    Queries MAST to retrieve archive data based on object name (``star_id``),
    data provenance, and sequence name.

    Parameters
    ----------
    star_id: int, str
        EPIC ID of the star.

    provenance: str
        Provenance name in MAST archive, e.g., ``'K2'``, ``'EVEREST'``,
        ``'K2SFF'``.

    sequence_name: str
        Campaign number.

    Returns
    -------
    file_names: list of str
        A Python list of all the file names retrieved from archive.

    """
    star_id = str(star_id)
    target_name = '*' + star_id.split()[-1]

    # Make sure that there are data for the criteria
    mission = _MISSION_MAPPING[provenance]

    print("\n=====  Retrieving data for observation:  =====")
    print("** star_id: {}\n** provenance: {}\n** sequence_name: {}".format(
        star_id, provenance, sequence_name))

    if sequence_name == '*':
        sequence_name = ''

    obs_count = Observations.query_criteria_count(
        obs_collection=mission,
        dataproduct_type=["timeseries"],
        instrument_name="Kepler",
        objectname=star_id,
        target_name=target_name,
        project="K2",
        provenance_name=provenance,
        sequence_number=sequence_name + '*')
    if obs_count == 0:
        raise RuntimeError("No data found in archive.")

    obs_table = Observations.query_criteria(obs_collection=mission,
                                            dataproduct_type=["timeseries"],
                                            instrument_name="Kepler",
                                            objectname=star_id,
                                            target_name=target_name,
                                            project="K2",
                                            provenance_name=provenance,
                                            sequence_number=sequence_name +
                                            '*')

    data_products = Observations.get_product_list(obs_table)

    lc_mask = [
        "lightcurve" in x or "light curve" in x
        for x in map(str.lower, data_products['description'])
    ]
    if not any(lc_mask):
        raise RuntimeError("Retrieved data products do not contain light "
                           "curve data.")

    data_products = data_products[lc_mask]  # keep only rows with light curves
    manifest = Observations.download_products(data_products)
    files = list(manifest['Local Path'])  # get local file names

    # sort results:
    idx = np.argsort(files)
    file_names = [files[i] for i in idx]

    print("Download Status: SUCCESS\n")

    return file_names
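
A usage sketch for get_mast_file_list; the EPIC ID and campaign number are illustrative:

# Illustrative call: EVEREST light curves for EPIC 201367065, campaign 1.
file_names = get_mast_file_list(201367065, 'EVEREST', '1')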
            if simbadResults is not None: break
    if simbadResults is not None:
        coord = SkyCoord(simbadResults["RA"][0],
                         simbadResults["DEC"][0],
                         unit=[u.hourangle, u.deg])
        ras.append(coord.ra.value)
        raUnit = coord.ra.unit
        decs.append(coord.dec.value)
        decUnit = coord.dec.unit
    else:
        ras.append(StarListTics["ra"][where_closest])
        decs.append(StarListTics["dec"][where_closest])

    #Retrieve data
    obsTable = Observations.query_criteria(filters=["TESS"],
                                           objectname=targetName,
                                           dataproduct_type=["TIMESERIES"],
                                           radius="0.01 deg")

    proceed = True
    if len(obsTable) == 0:
        proceed = False
        obsIDList.append(np.nan)
        pathList.append("NA")

        sectorsTxt, sectorsNb = TESSSectorLookUp(targetName)
        if len(sectorsNb) == 0:
            sectorList.append("")
        elif len(sectorsNb) == 1:
            sectorList.append(sectorsNb[0])
        else:
            sectorList.append(",".join(["{}".format(s) for s in sectorsNb]))
Example #15
def main():

    # Read in the Pantheon+ catalog from Ben
    adap_dir = home + '/Documents/adap2021/'
    pantheon_datadir = adap_dir + 'pantheon_data/'
    cat = np.genfromtxt(adap_dir + 'pantheon_plus.csv', 
        dtype=None, names=True, delimiter=',', encoding='ascii')

    print("Read in Pantheon+ catalog with the following header names:")
    print(cat.dtype.names)

    num_orig_cols = len(cat.dtype.names)

    # Open a new file to write an updated catalog
    # Adds the following columns 
    # HST data
    # GALEX data
    # if yes to any of the above observatories then give 
    # Inst/Camera field, and filters.
    # if no then leave these cols blank.
    fh = open(adap_dir + 'pantheon_plus_data.csv', 'w')

    # Write header
    fh.write("Serial_num,CID,CIDint,IDSURVEY,zHEL,zHELERR,zCMB,zCMBERR,zHD,zHDERR," +\
        "HOST_LOGMASS,HOST_LOGMASS_ERR,RA,DEC,HOST_RA,HOST_DEC," +\
        "HST_data,Inst/Cam,Filters" + "\n")

    # Loop over all objects in the catalog
    # and search for HST data at the SN and Host location
    for i in range(27, len(cat)): #tqdm(range(len(cat)), desc="Processing SN"):

        # Get coords
        sn_ra = cat['RA'][i]
        sn_dec = cat['DEC'][i]
        host_ra = cat['HOST_RA'][i]
        host_dec = cat['HOST_DEC'][i]

        # Print info
        #print(f"{bcolors.CYAN}")
        #print("SN identifier:", cat['CID'][i], " at:", sn_ra, sn_dec)
        #print("Host galaxy coords:", host_ra, host_dec)
        #print(f"{bcolors.ENDC}")

        # Set up query
        sn_coords = SkyCoord(ra=sn_ra*u.degree, dec=sn_dec*u.degree, frame='icrs')

        print("SN coordinates:", sn_coords)

        obs_table = Observations.query_criteria(coordinates=sn_coords, radius="0.5 arcsec", \
            intentType='science', obs_collection=['HST'])

        #print(obs_table)
        print(obs_table.columns)
        print("\nRows in obs table:", len(obs_table))
        print("HST filters available for this SN:")
        all_instr = np.unique(obs_table['instrument_name'])
        print(all_instr)
        print("--------------------------------------\n")

        sys.exit(0)

        # Download any existing wfc3 data
        for r in range(len(obs_table)):

            instr = obs_table['instrument_name'][r]

            if 'WFC3' in instr:

                data_products = Observations.get_product_list(obs_table[r])
                Observations.download_products(data_products, download_dir=pantheon_datadir, 
                    productType="SCIENCE", mrp_only=True)

        sys.exit(0)

        # Now loop over all the observations
        ra_one = sn_ra
        dec_one = sn_dec

        dist = []
        inst_cam = []
        exptimes = []
        filt = []

        for o in range(len(obs_table)):

            ra_two = obs_table['s_ra'][o]
            dec_two = obs_table['s_dec'][o]

            dist_to_sn = np.arccos(np.cos(dec_one*np.pi/180) * \
                np.cos(dec_two*np.pi/180) * np.cos(ra_one*np.pi/180 - ra_two*np.pi/180) + \
                np.sin(dec_one*np.pi/180) * np.sin(dec_two*np.pi/180))

            # print("{:.7}".format(ra_two), "{:.7}".format(dec_two))
            #print("Distance from SN [arcsec]:", "{:.5}".format(dist_to_sn * (180/np.pi) * 3600), \
            #    "Inst/Cam:", obs_table['instrument_name'][o], "Filter(s):", obs_table['filters'][o], \
            #    "ExpTime:", obs_table['t_exptime'][o])

            dist.append(dist_to_sn * (180/np.pi) * 3600)  # The dist returned by the line above is in radians
            inst_cam.append(obs_table['instrument_name'][o])
            filt.append(obs_table['filters'][o])
            exptimes.append(obs_table['t_exptime'][o])

        # Add to original catalog
        # Need to loop over the original row to do this
        for j in range(num_orig_cols):
            fh.write(str(cat[i][j]) + ',')

        # Now add the new cols
        # First convert to numpy arrays
        inst_cam = np.unique(np.asarray(inst_cam))
        filt = np.unique(np.asarray(filt))

        if len(obs_table) > 0:
            hst_data = True
        else:
            hst_data = False

        fh.write(str(hst_data) + ",")

        # Write scalar entries directly to avoid emitting a numpy array repr
        # (str(inst_cam) on an array would write e.g. "['WFC3/IR']").
        if len(inst_cam) > 1:
            for w in range(len(inst_cam)):
                fh.write(str(inst_cam[w]) + ";")
            fh.write(",")
        elif len(inst_cam) == 1:
            fh.write(str(inst_cam[0]) + ",")
        else:
            fh.write(",")

        if len(filt) > 1:
            for v in range(len(filt)):
                fh.write(str(filt[v]) + ";")
        elif len(filt) == 1:
            fh.write(str(filt[0]))
        fh.write("\n")

        # Check that the distances are within FoV of the instrument specified
        # dist = np.asarray(dist) * (180/np.pi) * 3600  # radians to degrees to arcseconds

    fh.close()

    return None
Example #16
#!/usr/bin/env python3

import numpy
import sys
import boto3
import astropy.io.fits

from astroquery.mast import Observations

s3 = boto3.resource('s3')
stpubdata = s3.Bucket('stpubdata')

Observations.enable_s3_hst_dataset()
obs = Observations.query_criteria(
    project='HST',
    instrument_name='ACS/WFC',
)
print("Observations: ", len(obs), file=sys.stderr)

proposal_ids = numpy.unique(obs['proposal_id'])
print("Proposals: ", len(proposal_ids), file=sys.stderr)

shape_tally = {}

for prop in proposal_ids:
    proposal_obs = Observations.query_criteria(
        project='HST',
        instrument_name='ACS/WFC',
        proposal_id=[prop],
    )
Example #17
def retrieve_observation(obsid, suffix=['FLC'], archive=False, clobber=False):
    """Simple interface for retrieving an observation from the MAST archive

    If the input obsid is for an association, it will request all members with
    the specified suffixes.

    Parameters
    -----------
    obsid : string
        ID for observation to be retrieved from the MAST archive.  Only the
        IPPSSOOT (rootname) of exposure or ASN needs to be provided; eg.,
        ib6v06060.

    suffix : list, optional
        List containing suffixes of files which should be requested from MAST.
        Default value  "['FLC']".

    archive : Boolean, optional
        Retain copies of the downloaded files in the astroquery created
        sub-directories? Default is "False".

    clobber : Boolean, optional
        Download and Overwrite existing files? Default is "False".

    Returns
    -------
    local_files : list
        List of filenames
    """
    local_files = []

    if Observations is None:
        log.warning("The astroquery package was not found.  No files retrieved!")
        return local_files

    # Query MAST for the data with an observation type of either "science" or
    # "calibration"
    obs_table = Observations.query_criteria(obs_id=obsid, obstype='all')
    # Catch the case where no files are found for download
    if not obs_table:
        log.info("WARNING: Query for {} returned NO RESULTS!".format(obsid))
        return local_files

    dpobs = Observations.get_product_list(obs_table)
    data_products_by_id = Observations.filter_products(dpobs,
                                                       productSubGroupDescription=suffix,
                                                       extension='fits',
                                                       mrp_only=False)

    # After the filtering has been done, ensure there is still data in the
    # table for download. If the table is empty, look for FLT images in lieu
    # of FLC images. Only want one or the other (not both!), so just do the
    # filtering again.
    if not data_products_by_id:
        log.info("WARNING: No FLC files found for {} - will look for FLT "
                 "files instead.".format(obsid))
        suffix = ['FLT']
        data_products_by_id = Observations.filter_products(dpobs,
                                                           productSubGroupDescription=suffix,
                                                           extension='fits',
                                                           mrp_only=False)

        # If still no data, then return.  An exception will eventually be
        # thrown in the higher level code.
        if not data_products_by_id:
            log.info(
                "WARNING: No FLC or FLT files found for {}.".format(obsid))
            return local_files
    all_images = data_products_by_id['productFilename'].tolist()
    log.info(all_images)
    if not clobber:
        rows_to_remove = []
        for row_idx, row in enumerate(data_products_by_id):
            fname = row['productFilename']
            if os.path.isfile(fname):
                log.info(fname + " already exists. File download skipped.")
                rows_to_remove.append(row_idx)
        data_products_by_id.remove_rows(rows_to_remove)

    manifest = Observations.download_products(data_products_by_id,
                                              mrp_only=False)

    if not clobber:
        for rownum in rows_to_remove[::-1]:
            if manifest:
                manifest.insert_row(rownum,
                                    vals=[all_images[rownum], "LOCAL", "None", "None"])
            else:
                return all_images

    download_dir = None
    for file, file_status in zip(manifest['Local Path'], manifest['Status']):
        if file_status != "LOCAL":
            # Identify what sub-directory was created by astroquery for the
            # download
            if download_dir is None:
                download_dir = os.path.dirname(os.path.abspath(file))
            # Move or copy downloaded file to current directory
            local_file = os.path.abspath(os.path.basename(file))
            if archive:
                shutil.copy(file, local_file)
            else:
                shutil.move(file, local_file)
            # Record what files were downloaded and their current location
            local_files.append(os.path.basename(local_file))
        else:
            local_files.append(file)
    if not archive:
        # Remove astroquery created sub-directories
        shutil.rmtree(download_dir)
    return local_files
import os
import time

import boto3
import numpy as np
from astropy.io import fits
from astroquery.mast import Observations

# NOTE: Use your own key values here.
os.environ['AWS_ACCESS_KEY_ID'] = 'somekey'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'somesecret'

# NOTE: Change TESS observation ID as needed.
obs_id = 'tess-s0001-1-1'

# Find full frame dataset for the observation ID.
obs_table = Observations.query_criteria(obs_id=obs_id)
products = Observations.get_product_list(obs_table)
filtered = Observations.filter_products(products,
                                        productSubGroupDescription="FFIC",
                                        mrp_only=False)

# Set up AWS S3 bucket to pull data from.
Observations.enable_cloud_dataset()
s3_urls = Observations.get_cloud_uris(filtered, include_bucket=False)
s3 = boto3.resource('s3')
bucket = s3.Bucket('stpubdata')


def time_mean():
    """Loop through full frame files, extract a subarray, and calculate mean.
    This must be done in a way that the file is deleted as soon as it is
Example #19
def download_lc(target_name: str, flux_type='PDCSAP', mission: str = 'TESS',
                sigma_clip=4, iters=1, do_pca: bool = False,
                do_psf: bool = False) -> Tuple[LightCurve, Union[List[Figure], None]]:
    """
    Downloads a light curve using the TESS mission. If the star has been observed in the SC mode, it
    will download the original light curve from MAST. You can also choose the flux type you want to use.

    If it wasn't observed in SC mode, it will try to extract a light curve from the FFIs if the target has
    been observed by TESS.

    You can also download light curves of stars that are observed by the K2 or Kepler mission, by setting
    the mission parameter.

    :param target_name: Name of the target. You can either provide the TIC ID (TIC ...), Kepler ID (KIC ...), K2 ID(EPIC ...) or a name that is resolvable by Simbad.
    :param flux_type: Type of flux in the SC mode. Can be either PDCSAP or SAP or PSF for long cadence data
    :param mission: Mission from which the light curves are extracted. By default TESS only is used. You can consider all missions by passing 'all' (TESS, Kepler, K2)
    :param sigma_clip: Sigma clip parameter. Defines the number of standard deviations that are clipped.
    :param iters: Iterations for the sigma clipping
    :param do_pca: Perform PCA-based detrending when extracting a light curve from the FFIs.
    :param do_psf: Perform PSF photometry when extracting a light curve from the FFIs.
    :return: lightkurve.LightCurve object and validation page if extracted from FFI
    """
    chosen_mission = [mission] if mission != 'all' else ('Kepler', 'K2', 'TESS')
    mprint(f"Searching processed light curves for {target_name} on mission(s) {','.join(chosen_mission)} ... ", log)

    if chosen_mission == ['TESS']:
        if target_name.startswith('TIC'):
            tic_id = re.findall(r'\d+', target_name)
            if len(tic_id) == 0:
                raise ValueError(ctext("A Tic ID needs to consist of TIC and a number!", error))
            tic_id = int(tic_id[0])
        else:
            mprint(f"Resolving {target_name} to TIC using MAST ...",log)
            try:
                tic_id = Catalogs.query_object(target_name,catalog='TIC',radius=0.003)[0]['ID']
            except KeyError:
                raise ValueError(ctext(f"No TESS observations available for {target_name}", error))

            mprint(f"TIC ID for {target_name}: TIC {tic_id}",log)

        o = Observations.query_criteria(objectname=target_name, radius=str(0 * u.deg), project='TESS',
                                        obs_collection='TESS').to_pandas()

        if len(o) > 0 and len(o[o.target_name != 'TESS FFI']) > 0:
            mprint(f"Short cadence observations available for {target_name}. Downloading ...",info)
            res = search_lightcurvefile(target_name, mission=chosen_mission)
        else: #Only FFI available
            mprint(f"No short cadence data available for {target_name}, extracting from FFI ...",info)
            lc, fig,_ = cut_ffi(tic_id,sigma_clip,iters,do_pca,do_psf,flux_type)
            mprint(f"Total observation length: {'%.2f' % (lc.time[-1] - lc.time[0])} days.", log)
            return lc, fig
    else:
        res = search_lightcurvefile(target_name, mission=chosen_mission)

    if len(res) != 0:
        fig = None
        mprint(f"Found processed light curve for {target_name}!", info)
        res = res.download_all()
        types = []

        for d in res.data:
            lc_type = 'TESS' if isinstance(d, lk.TessLightCurveFile) else 'Kepler'
            if lc_type not in types:
                types.append(lc_type)

        mprint(f"Using {','.join(types)} observations! Combining sectors ...", log)

        if flux_type == 'PSF':
            mprint(f"PSF not available for short cadence data. Reverting to PDCSAP",warn)
            flux_type = 'PDCSAP'

        if flux_type == 'PDCSAP':
            lc_set: List[Union[lk.TessLightCurve, lk.KeplerLightCurve]] = [i for i in res.PDCSAP_FLUX.data]
        elif flux_type == 'SAP':
            lc_set: List[Union[lk.TessLightCurve, lk.KeplerLightCurve]] = [i for i in res.SAP_FLUX.data]
        else:
            raise ValueError(ctext("Flux type needs to be either PDCSAP or SAP", error))
        lc = combine_light_curves(lc_set,sigma_clip=sigma_clip,iters=iters)
    else:
        raise ValueError(ctext(f"No light curve available for {target_name} on mission(s) {chosen_mission}",error))

    mprint(f"Total observation length: {'%.2f' % (lc.time[-1] - lc.time[0])} days.", log)
    return lc, fig
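
A usage sketch for download_lc; the TIC ID is illustrative:

# Illustrative call: PDCSAP light curve of one TESS short-cadence target.
lc, figs = download_lc('TIC 261136679', flux_type='PDCSAP', mission='TESS')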
Example #20
def _query_mast(target,
                radius=None,
                project=('Kepler', 'K2', 'TESS'),
                provenance_name=("Kepler", "K2", "SPOC"),
                t_exptime=(0, 9999),
                sequence_number=None,
                **extra_query_criteria):
    """Helper function which wraps `astroquery.mast.Observations.query_criteria()`
    to return a table of all Kepler/K2/TESS observations of a given target.

    By default only the official data products are returned, but this can be
    adjusted by adding alternative data product names into `provenance_name`.

    Parameters
    ----------
    target : str, int, or `astropy.coordinates.SkyCoord` object
        See docstrings above.
    radius : float or `astropy.units.Quantity` object
        Conesearch radius.  If a float is given it will be assumed to be in
        units of arcseconds.  If `None` then we default to 0.0001 arcsec.
    project : str, list of str
        Mission name.  Typically 'Kepler', 'K2', or 'TESS'.
        This parameter is case-insensitive.
    provenance_name : str, list of str
        Provenance of the observation.  Common options include 'Kepler', 'K2',
        'SPOC', 'K2SFF', 'EVEREST', 'KEPSEISMIC'.
        This parameter is case-insensitive.
    t_exptime : (float, float) tuple
        Exposure time range in seconds. Common values include `(59, 61)`
        for Kepler short cadence and `(1799, 1801)` for Kepler long cadence.
    sequence_number : int, list of int
        Quarter, Campaign, or Sector number.
    **extra_query_criteria : kwargs
        Extra criteria to be passed to `astroquery.mast.Observations.query_criteria`.

    Returns
    -------
    obs : astropy.Table
        Table detailing the available observations on MAST.
    """
    # Local astroquery import because the package is not used elsewhere
    from astroquery.mast import Observations
    from astroquery.exceptions import ResolverError, NoResultsWarning

    # If passed a SkyCoord, convert it to an "ra, dec" string for MAST
    if isinstance(target, SkyCoord):
        target = '{}, {}'.format(target.ra.deg, target.dec.deg)

    # We pass the following `query_criteria` to MAST regardless of whether
    # we search by position or target name:
    query_criteria = {'project': project, **extra_query_criteria}
    if provenance_name is not None:
        query_criteria['provenance_name'] = provenance_name
    if sequence_number is not None:
        query_criteria['sequence_number'] = sequence_number
    if t_exptime is not None:
        query_criteria['t_exptime'] = t_exptime

    # If an exact KIC ID is passed, we will search by the exact `target_name`
    # under which MAST will know the object to prevent source confusion.
    # For discussion, see e.g. GitHub issues #148, #718.
    exact_target_name = None
    target_lower = str(target).lower()
    # Was a Kepler target ID passed?
    kplr_match = re.match(r"^(kplr|kic) ?(\d+)$", target_lower)
    if kplr_match:
        exact_target_name = f"kplr{kplr_match.group(2).zfill(9)}"
    # Was a K2 target ID passed?
    ktwo_match = re.match(r"^(ktwo|epic) ?(\d+)$", target_lower)
    if ktwo_match:
        exact_target_name = f"ktwo{ktwo_match.group(2).zfill(9)}"
    # Was a TESS target ID passed?
    tess_match = re.match(r"^(tess|tic) ?(\d+)$", target_lower)
    if tess_match:
        exact_target_name = f"{tess_match.group(2).zfill(9)}"

    if exact_target_name and radius is None:
        log.debug("Started querying MAST for observations with the exact "
                  f"target_name='{exact_target_name}'.")
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=NoResultsWarning)
            obs = Observations.query_criteria(target_name=exact_target_name,
                                              **query_criteria)
        if len(obs) > 0:
            # astroquery does not report distance when querying by `target_name`;
            # we add it here so that the table returned always has this column.
            obs['distance'] = 0.
            return obs
        else:
            log.debug("No observations found. Now performing a cone search instead.")

    # If the above did not return a result, then do a cone search using the MAST name resolver
    # `radius` defaults to 0.0001 and unit arcsecond
    if radius is None:
        radius = .0001 * u.arcsec
    elif not isinstance(radius, u.quantity.Quantity):
        radius = radius * u.arcsec
    query_criteria['radius'] = str(radius.to(u.deg))

    try:
        log.debug("Started querying MAST for observations within "
                  f"{radius.to(u.arcsec)} arcsec of objectname='{target}'.")
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=NoResultsWarning)
            obs = Observations.query_criteria(objectname=target,
                                              **query_criteria)
        obs.sort('distance')
        return obs
    except ResolverError as exc:
        # MAST failed to resolve the object name to sky coordinates
        raise SearchError(exc) from exc
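
A usage sketch; target and sector values are illustrative:

# Illustrative call: SPOC observations of pi Mensae (TIC 261136679), sector 1.
obs = _query_mast('TIC 261136679', project='TESS',
                  provenance_name='SPOC', sequence_number=1)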
Example #21
def mvm_id_filenames(sky_coord, cutout_size, log_level=logutil.logging.INFO):
    """
    This function retrieves a table of MVM drizzled image filenames with additional
    information from the archive.  The user can then further cull the table to use as
    input to obtain a list of files from the archive.  This function will return filter-level
    products. At this time, both ACS and WFC3 are searched by default.

    Parameters
    ----------
    sky_coord : str or `~astropy.coordinates.SkyCoord` object
        The position around which to cutout. It may be specified as a string ("ra dec" in degrees)
        or as the appropriate `~astropy.coordinates.SkyCoord` object.

    cutout_size : int, array-like, `~astropy.units.Quantity`
        The size of the cutout array. If ``cutout_size`` is a scalar number or a scalar
        `~astropy.units.Quantity`, then a square cutout of ``cutout_size`` will be created.
        If ``cutout_size`` has two elements, they should be in ``(ny, nx)`` order.  Scalar numbers
        in ``cutout_size`` are assumed to be in units of arcseconds. `~astropy.units.Quantity` objects
        must be in angular units.

    log_level : int, optional
        The desired level of verbosity in the log statements displayed on the screen and written to the
        .log file. Default value is 20, or 'info'.

    Returns
    -------
    final_table : `~astropy.table.Table` object

    This utility also writes an output ECSV file version of the in-memory filtered data product table,
    final_table.  The output filename is in the form:
    mvm_query-ra<###>d<####>-dec<n|s><##>d<####>_<radius>_cutout.ecsv
    (e.g., mvm_query-ra84d8208-decs69d8516_354_cutout.ecsv, where radius has been computed from the
    cutout dimensions).
    """

    # set logging level to user-specified level
    log.setLevel(log_level)

    # If the cutout_size is not an astropy.units.Quantity object, the scalar(s)
    # are assumed to be arcseconds.  The variable must be cast as a Quantity.
    if not isinstance(cutout_size, Quantity):
        cutout_size *= u.arcsec
        cutout_size = np.atleast_1d(cutout_size)
        if len(cutout_size) == 1:
            cutout_size = np.repeat(cutout_size, 2)

    if not isinstance(sky_coord, SkyCoord):
        sky_coord = SkyCoord(sky_coord, unit="deg")

    # From HST data, Search for the list of images based upon: coordinates, search region, data
    # product type, and the instrument name (with wildcard), project (HAP), and observation
    # collection (HST).  Use the wildcard to get all the detectors for the instrument.  Multiple
    # instruments cannot be searched at the same time.  Use the diagonal of the cutout to define
    # the search radius for the archive.  Images which fall outside the desired cutout need to
    # be filtered from the solution later.
    radius = math.ceil(
        math.sqrt(
            math.pow(cutout_size.value[0], 2) +
            math.pow(cutout_size.value[1], 2)) / 2.0)

    # Careful - the radius must be a str or Quantity
    radius *= u.arcsec
    log.info("Radius for query: {}.".format(radius))
    log.info("Performing query for ACS images.")

    acs_query_table = Observations.query_criteria(coordinates=sky_coord,
                                                  radius=radius,
                                                  dataproduct_type="IMAGE",
                                                  instrument_name="ACS*",
                                                  project="HAP",
                                                  obs_collection="HST")

    log.info("Performing query for WFC3 images.")
    wfc3_query_table = Observations.query_criteria(coordinates=sky_coord,
                                                   radius=radius,
                                                   dataproduct_type="IMAGE",
                                                   instrument_name="WFC3*",
                                                   project="HAP",
                                                   obs_collection="HST")

    query_table = vstack([acs_query_table, wfc3_query_table])
    del acs_query_table
    del wfc3_query_table

    str_ra = "{:.4f}".format(sky_coord.ra.degree)
    str_dec = "{:.4f}".format(sky_coord.dec.degree)

    # Catch the case where no files are found which satisfied the query
    if not query_table:
        log.warning(
            "Query for objects within {} of {} returned NO RESULTS!".format(
                radius, (str_ra, str_dec)))
        return query_table

    # Compute the limits of the cutout region
    deg_cutout_size = cutout_size.to(u.deg)
    ra_min = sky_coord.ra.degree - deg_cutout_size.value[0]
    ra_max = sky_coord.ra.degree + deg_cutout_size.value[0]
    dec_min = sky_coord.dec.degree - deg_cutout_size.value[1]
    dec_max = sky_coord.dec.degree + deg_cutout_size.value[1]

    # Filter the output as necessary to include only MVM filenames (MVM prefix: hst_skycell).
    # Also, filter out images which are not actually in the requested cutout region as the
    # archive search had to be done using a radius.
    good_rows = []
    updated_query_table = None
    for old_row in query_table:
        if old_row["obs_id"].startswith("hst_skycell"):
            if old_row["s_ra"] >= ra_min and old_row["s_ra"] <= ra_max and \
               old_row["s_dec"] >= dec_min and old_row["s_dec"] <= dec_max:
                good_rows.append(old_row)

    # Catch the case where no files are found which satisfy the clean up criteria
    if len(good_rows) == 0:
        log.warning(
            "Query for objects within cutout {} of {} returned NO RESULTS!".
            format(cutout_size, (str_ra, str_dec)))
        return updated_query_table

    # Make the cleaned up table
    updated_query_table = Table(rows=good_rows, names=query_table.colnames)
    del query_table

    # Get the data product list associated with the elements of the table
    log.info("Get the product list for all entries in the query table.")
    dp_table = Observations.get_product_list(updated_query_table)
    del updated_query_table

    # Filter on MVM drizzled products only
    suffix = ["DRZ", "DRC"]
    log.info(
        "Filter the product list table for only {} filenames.".format(suffix))
    filtered_dp_table = Observations.filter_products(
        dp_table, productSubGroupDescription=suffix, extension="fits")

    if not filtered_dp_table:
        log.warning(
            "No MVM drizzle product datasets (DRZ/DRC) found within {} of {}.".
            format(radius, (str_ra, str_dec)))
        return filtered_dp_table
    del dp_table

    # Need to filter out any non-hst-skycell entries AGAIN which may have
    # crept back into the list via the get_product_list() function.
    good_rows = []
    output_table = None
    for old_row in filtered_dp_table:
        if old_row["obs_id"].startswith("hst_skycell"):
            good_rows.append(old_row)

    # Catch the case where no files are found which satisfy the criteria
    if len(good_rows) == 0:
        log.warning(
            "After filtering datasets there are NO RESULTS within {} of {}!".
            format(radius, (str_ra, str_dec)))
        return output_table

    # Make the output table
    output_table = Table(rows=good_rows, names=filtered_dp_table.colnames)
    del filtered_dp_table

    # Finally, make sure the entries are unique
    final_table = unique(output_table, keys="productFilename")
    del output_table

    # Write the table to a file.  This allows for further manipulation of
    # the information before a list of filenames is distilled from the table.
    # Output filename in the form: mvm_query-ra<###>d<####>-dec<n|s><##>d<####>_<radius>_cutout.ecsv
    # (e.g., mvm_query-ra84d9208-decs69d1483_71_cutout.ecsv), where radius has been computed from the
    # cutout dimensions.
    #
    # Get the whole number and fractional components of the RA and Dec
    ns = "s" if sky_coord.dec.degree < 0.0 else "n"
    ra_whole = int(sky_coord.ra.value)
    ra_frac = str(sky_coord.ra.value).split(".")[1][0:4]
    dec_whole = abs(int(sky_coord.dec.value))
    dec_frac = str(sky_coord.dec.value).split(".")[1][0:4]
    log.info("coords2. {} {} {}".format(sky_coord.ra.value,
                                        sky_coord.dec.value, dec_frac))

    query_filename = "mvm_query-ra" + str(ra_whole) + "d" + ra_frac + "-dec" + ns + \
                     str(dec_whole) + "d" + dec_frac + "_{:.0f}".format(radius.value) + "_cutout.ecsv"

    log.info(
        "Writing out the MVM product list table to {}.".format(query_filename))
    log.info("Number of entries in table: {}.".format(len(final_table)))
    final_table.write(query_filename, format="ascii.ecsv")

    return final_table
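
# A minimal usage sketch for mvm_id_filenames (hedged: the coordinates and
# cutout size below are purely illustrative).
from astropy.coordinates import SkyCoord
import astropy.units as u

target = SkyCoord(84.8208, -69.8516, unit="deg")

# A 30 x 30 arcsec cutout; the archive search radius is derived from the
# cutout diagonal, and the filtered table is also written to an ECSV file.
mvm_table = mvm_id_filenames(target, 30 * u.arcsec)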
Example #22
def hst_button(
    galaxies,
    skymethod='globalmin+match',
    instruments="ACS/WFC",
    prop_ids=None,
    filters=None,
    radius=None,
    filepath=None,
    download_data=True,
    correct_astrometry=True,
    create_mosaic=True,
    jy_conversion=True,
    verbose=False,
    log_filename='hst.log',
):
    """Create a HST mosaic, given a galaxy name.
    
    Using a galaxy name and radius, queries around that object, 
    downloads available HST data and mosaics into a final product. It
    will create separate mosaics for each proposal ID, and the file structure
    will look like ``/galaxy/HST/proposal_id/galaxy_instrument_filter_proposal_id.fits``.
    
    N.B. I must confess to not being well-versed with HST data, so if 
    anyone can help improve this please let me know.
    
    This data button uses a number of tools included in the drizzlepac
    Python package. This includes alignimages/tweakreg and astrodrizzle, 
    which correct astrometry and are specifically tailored for the setup 
    of HST data. This means that 1) creating mosaics with this will likely 
    take a long time and 2) you will need a beefy computer (especially with
    regards to hard drive space).
    
    Args:
        galaxies (str or list): Names of galaxies to create mosaics for.
            Resolved by NED.
        skymethod (str, optional): Method used for AstroDrizzle's background
            matching step. In general, this can be left untouched but for
            mosaics with little overlap, it may be worth playing around 
            with this. For instance, I've had some luck when there isn't
            much overlap between exposures using 'globalmin'. Options are 
            'localmin', 'globalmin+match', 'globalmin', and 'match'. Defaults 
            to 'globalmin+match'.
        instruments (str or list, optional): Instrument to download data 
            for.  Can be any combination of 'ACS/WFC', 'WFC3/IR', 
            'WFC3/UVIS', 'WFPC2/PC', or 'WFPC2/WFC'. If you want all 
            available data for all these instruments, select 'all', but 
            this is not recommended! Defaults to 'ACS/WFC'.
        prop_ids (str or list, optional): Proposal IDs to download data for.
            Defaults to None, which will pull out all proposal IDs for each
            instrument.
        filters (str or list, optional): Filters to download data for.
            The script will look for each filter, for each instrument.
            Defaults to None, which will pull out all applicable filters
            for each instrument, for each proposal ID.
        radius (astropy.units.Quantity, optional): Radius around the 
            galaxy to search for observations. Defaults to None, where
            it will query Ned to get size.
        filepath (str, optional): Path to save the working and output
            files to. If not specified, saves to current working 
            directory.
        download_data (bool, optional): If True, will download data from 
            MAST. Defaults to True.
        correct_astrometry (bool, optional): If True, will perform astrometric
            corrections to the downloaded data using alignimages. Defaults
            to True.
        create_mosaic (bool, optional): Switching this to True will 
            mosaic the data using astrodrizzle as appropriate. Defaults 
            to True.
        jy_conversion (bool, optional): Convert the mosaicked file from
            raw units to Jy/pix. Defaults to True.
        verbose (bool, optional): Can be used to suppress most of the
            output messages produced during the process. Mainly useful
            for debugging. Defaults to False.
        log_filename (str, optional): Will produce a stripped down log
            of what data the code is reducing. By default, will save to
            galaxy/hst.log.
    
    """
    
    if isinstance(galaxies, str):
        galaxies = [galaxies]
        
    # Check for 'all' before converting a string to a list, otherwise
    # the 'all' shortcut would never trigger.
    if instruments == 'all':
        instruments = ['ACS/WFC',
                       'WFC3/IR', 'WFC3/UVIS',
                       'WFPC2/PC', 'WFPC2/WFC']
    elif isinstance(instruments, str):
        instruments = [instruments]
        
    if isinstance(filters,str):
        filters = [filters]
        
    if isinstance(prop_ids,str):
        prop_ids = [prop_ids]

    if filepath is not None:
        os.chdir(filepath)
        
    orig_dir = os.getcwd()
    
    if radius is not None:
        original_radius = radius.copy()
    else:
        original_radius = None
        
    steps = []
    
    if download_data:
        steps.append(1)
    if correct_astrometry:
        steps.append(2)
    if create_mosaic:
        steps.append(3)
    if jy_conversion:
        steps.append(4)
        
    # Set up folders for various corrections
    
    os.environ['CRDS_SERVER_URL'] = 'https://hst-crds.stsci.edu'
    os.environ['CRDS_PATH'] = orig_dir+'/reference_files'
    os.environ['iref'] = orig_dir+'/reference_files/references/hst/wfc3/'
    os.environ['jref'] = orig_dir+'/reference_files/references/hst/acs/'
    os.environ['uref'] = orig_dir+'/reference_files/references/hst/wfpc2/'
    
    # For large proposals, astrodrizzle can run into file open
    # issues so raise the max file open amount.
    
    _, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE,(hard,hard))
    
    # Change the temp directory -- if this gets filled up it can cause
    # problems.
    
    orig_tmpdir = os.environ.get('TMPDIR', '')  # may be unset
    
    if not os.path.exists('tmp'):
        os.mkdir('tmp')
    os.environ['TMPDIR'] = orig_dir+'/tmp'
        
    for galaxy in galaxies:
        
        if not os.path.exists(galaxy):
            os.mkdir(galaxy)
            
        if not os.path.exists(galaxy+'/HST'):
            os.mkdir(galaxy+'/HST')
            
        if not verbose:
            
            # Various packages used here put out a lot of messages. Silence info messages.
            
            loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
            for logger in loggers:
                logger.setLevel(logging.ERROR)
                    
        # Even if verbose is not True, still print out some useful messages to the 
        # console.
        
        hst_logger = logging.getLogger('data_buttons')
        handler = logging.FileHandler(galaxy+'/'+log_filename,mode='w')
        hst_logger.addHandler(handler)
        hst_logger.addHandler(logging.StreamHandler())
        hst_logger.setLevel(logging.INFO)
        hst_logger.info('Beginning '+galaxy)
        hst_logger.info(' ')
        hst_logger.info(' ')
        
        if radius is None:
            
            try:
 
                size_query = Ned.get_table(galaxy,table='diameters')
                radius = np.max(size_query['NED Major Axis'])/2*u.arcsec
                radius = radius.to(u.deg)
     
            except Exception:
                
                hst_logger.warning(galaxy+' not resolved by Ned, using 0.2deg radius.')
                radius = 0.2*u.degree
 
        obs_table = Observations.query_criteria(objectname=galaxy,
                                                radius=radius,
                                                obs_type='all',
                                                obs_collection='HST')
        
        # Ignore any calibration observations.
        obs_table = obs_table[obs_table['intentType'] == 'science']
        
        for instrument in instruments:
    
            # Pixel sizes for final mosaics selected to match the HLA.
            
            pix_size = {'ACS/HRC':0.025,
                        'ACS/SBC':0.03,
                        'ACS/WFC':0.05,
                        'NICMOS/NIC1':0.025,
                        'NICMOS/NIC2':0.05,
                        'NICMOS/NIC3':0.1,
                        'WFC3/IR':0.09,
                        'WFC3/UVIS':0.04,
                        'WFPC2/PC':0.05,
                        'WFPC2/WFC':0.1}[instrument]
                        
            # Bits to consider good for drizzling.
            
            bits = {'ACS/HRC':256,
                    'ACS/SBC':256,
                    'ACS/WFC':256,
                    'NICMOS/NIC1':0,
                    'NICMOS/NIC2':0,
                    'NICMOS/NIC3':0,
                    'WFC3/IR':768,
                    'WFC3/UVIS':256,
                    'WFPC2/PC':'8,1024',
                    'WFPC2/WFC':'8,1024'}[instrument]
                        
            # Filename extension, in order of preference.
            
            suffixes = {'ACS/WFC':['FLC','FLT'],
                        'WFC3/IR':['FLT'],
                        'WFC3/UVIS':['FLC','FLT'],
                        'WFPC2/PC':[['C0M','C1M']],
                        'WFPC2/WFC':[['C0M','C1M']],
                        }[instrument]
            
            # The instruments often have / in the name, so account for 
            # this in making folders and files.
            
            hst_logger.info('Beginning '+instrument)
            
            if not os.path.exists(galaxy+'/HST/'+instrument.replace('/','_')):
                os.mkdir(galaxy+'/HST/'+instrument.replace('/','_'))
                
            reset_filters = False
            
            instrument_table = obs_table[obs_table['instrument_name'] == instrument]
            
            reset_prop_ids = False
                
            if not prop_ids:
                prop_ids = list(np.unique(instrument_table['proposal_id']))
                reset_prop_ids = True
                
            hst_logger.info('Available proposal IDs: '+','.join(prop_ids))
            hst_logger.info(' ')
                
            for prop_id in prop_ids:
                
                hst_logger.info('Proposal ID: '+str(prop_id))
                
                prop_table = instrument_table[instrument_table['proposal_id'] == prop_id]
            
                if not filters:
                    filters = list(np.unique(prop_table['filters']))
                    reset_filters = True
                    
                hst_logger.info('Available filters: '+','.join(filters))
            
                for hst_filter in filters:
                    
                    # If the name doesn't look like a real filter
                    # (i.e. doesn't start with 'F'), just skip.
                    # TODO: This needs to be sorted for some fringe
                    # cases, probably.
                     
                    if not hst_filter[0] == 'F':
                        continue
                    
                    hst_logger.info('Filter: '+str(hst_filter))

                    # Pull out available data and download.

                    filter_table = prop_table[prop_table['filters'] == hst_filter]
                    
                    if len(filter_table) == 0:
                        hst_logger.warning('No available data to download. Skipping...')
                        continue
                 
                    data_products_id = Observations.get_product_list(filter_table)
                    
                    for suffix in suffixes:
                    
                        download_table = Observations.filter_products(data_products_id,
                                                                      productSubGroupDescription=suffix,
                                                                      mrp_only=False)
                        
                        if len(download_table) > 0:
                            break
                        
                    if isinstance(suffix,list):
                        filename_exts = [ext.lower() for ext in suffix]
                    else:
                        filename_exts = [suffix.lower()]
                    
                    hst_logger.info(instrument+'/'+prop_id+'/'+hst_filter)
                        
                    if not os.path.exists(galaxy+
                                          '/HST/'+
                                          instrument.replace('/','_')+
                                          '/'+
                                          hst_filter):
                        os.mkdir(galaxy+
                                 '/HST/'+
                                 instrument.replace('/','_')+
                                 '/'+
                                 hst_filter) 
                        
                    if not os.path.exists(galaxy+'/HST/'+prop_id):
                        os.mkdir(galaxy+'/HST/'+prop_id) 
                        
                    full_filepath =  (galaxy+
                                          '/HST/'+
                                          instrument.replace('/','_')+
                                          '/'+
                                          hst_filter+
                                          '/'
                                          +prop_id)
                    
                    if not os.path.exists(full_filepath):
                        os.mkdir(full_filepath)
                     
                    if 1 in steps:
                            
                        # Download files
                        
                        download_mast(download_table,
                                      download_dir="hst_temp/" + galaxy)  
                                    
                        if not os.path.exists(full_filepath+'/raw'):
                            os.mkdir(full_filepath+'/raw')
                        if not os.path.exists(full_filepath+'/outputs'):
                            os.mkdir(full_filepath+'/outputs')    
                                    
                        # Pull out the relevant files, and move to base folder.
                        
                        for filename_ext in filename_exts:
                
                            matches = []
                            for root, _, filenames in os.walk("hst_temp/" + galaxy):
                                for filename in fnmatch.filter(
                                    filenames, "*_"+filename_ext+".fits"
                                ):
                                    matches.append(os.path.join(root, filename))
                    
                            for match in matches:
                                
                                filename = match.split('/')
                    
                                os.rename(match,full_filepath+'/raw/'+filename[-1])
                            
                        # Clean up any temporary files.
        
                        shutil.rmtree("hst_temp/" + galaxy, ignore_errors=True)
                        
                    filename_ext = filename_exts[0]
                        
                    hst_files = glob.glob(full_filepath+'/raw/*_'+filename_ext+'.fits')
                        
                    if 2 in steps:
                            
                        # First, update the WCS information in case it's 
                        # required.
                        
                        for filename_ext in filename_exts:
                             
                            hst_files = glob.glob(full_filepath+'/raw/*_'+filename_ext+'.fits')
                            
                            crds.assign_bestrefs(hst_files,
                                                 sync_references=True)
                            
                            # For WFPC2, the CRDS doesn't download everything
                            # needed. Download the GEIS data files and
                            # rerun the bestrefs assignment.
                            
                            if 'WFPC2' in instrument:
                                 
                                geis_hdrs = glob.glob(os.environ['uref']+'/*h')
                                 
                                for geis_hdr in geis_hdrs:
                                     
                                    geis_data = geis_hdr[:-1]+'d'
                                     
                                    if not os.path.exists(geis_data):
                                         
                                        geis_data = geis_data.split('/')[-1]
                                         
                                        hst_logger.info('Downloading GEIS data file '+geis_data+' to '+os.environ['uref'])
                                         
                                        wget.download(
                                            os.environ['CRDS_SERVER_URL']+'/unchecked_get/references/hst/'+geis_data,
                                            out=os.environ['uref'])
                                         
                                crds.assign_bestrefs(hst_files,sync_references=True)
                 
                            for hst_file in hst_files:
                              
                                stwcs.updatewcs.updatewcs(hst_file,
                                                          use_db=False)
                            
                        os.chdir(full_filepath+'/raw')
                        
                        filename_ext = filename_exts[0]
                            
                        hst_files = glob.glob('*_'+filename_ext+'.fits')
                        
                        # Normalize all files.
                        
                        photeq.photeq(', '.join(hst_files),readonly=False)
                        os.rename('photeq.log','../outputs/photeq.log')
                        
                        if 'WFPC' in instrument:
                            
                            # Using tweakreg, align each frame to GAIA.
                            
                            gaia_table = Gaia.query_object_async(coordinate=galaxy, 
                                                                 radius=2*radius)
                            ras = gaia_table['ra']
                            decs = gaia_table['dec']
                             
                            source_table = Table([ras,decs])
                            source_table.write('gaia.cat',
                                               format='ascii.fast_commented_header')
                            
                            tweakreg.TweakReg(hst_files,
                                              imagefindcfg={'threshold':5,'conv_width':3},
                                              refcat='gaia.cat',
                                              #expand_refcat=True,
                                              enforce_user_order=False,
                                              shiftfile=True,
                                              outshifts='shifts.txt',
                                              searchrad=10,
                                              minobj=5,
                                              separation=0,
                                              updatehdr=True,
                                              reusename=True,
                                              wcsname='TWEAK',
                                              interactive=False,
                                              fitgeometry='general',
                                              clean=True,
                                              see2dplot=False
                                              )
                            
                            # Update the c1m files to use the TWEAK
                            # wcs
                            
                            for hst_file in hst_files:
                                
                                dq_file = hst_file.replace('c0','c1')
                                
                                tweakback.tweakback(hst_file,
                                                    dq_file,
                                                    newname='TWEAK')
                            
                            plot_files = glob.glob('*.png')
                            for plot_file in plot_files:
                                os.remove(plot_file)
                                
                            cat_files = glob.glob('*.coo')
                            for cat_file in cat_files:
                                os.remove(cat_file)
                                
                            os.rename('shifts_wcs.fits','../outputs/shifts_wcs.fits')
                            os.rename('tweakreg.log','../outputs/tweakreg.log')
                            os.rename('shifts.txt','../outputs/shifts.txt')
                        
                        elif 'ACS' in instrument or 'WFC3' in instrument:
                             
                            # Correct astrometry using alignimages. First,
                            # correct each frame separately.
                        
                            pool = mp.Pool(mp.cpu_count())
                            
                            suitable_hst_files = pool.map(astrometric_correction,
                                                          hst_files)
                            
                            pool.close()
                            
                            suitable_hst_files = [x for x in suitable_hst_files 
                                                  if x is not None]
                            
                            if len(suitable_hst_files) == 0:
                                hst_logger.warning('Failure with astrometry corrections. Skipping')
                                os.chdir(orig_dir)
                                continue
                            
                            # Now, align every suitable frame simultaneously. 
    
                            output_table = astrometric_correction(suitable_hst_files)
                             
                            with open('../outputs/astrometry.pkl','wb') as table_file:
                                pickle.dump(output_table,table_file)
                            
                        else:
                            
                            raise Exception('Unknown instrument!')
                        
                        os.chdir(orig_dir)
                        
                    os.chdir(full_filepath)
                            
                    if 3 in steps:    
                        
                        os.chdir('raw')
                        
                        if 'WFPC2' in instrument:
                            
                            hst_files = glob.glob('*_c0m.fits')
                            
                            wcskey = 'TWEAK'
                            
                        elif 'ACS' in instrument or 'WFC3' in instrument:
                            
                            with open('../outputs/astrometry.pkl','rb') as table_file:
                             
                                output_table = pickle.load(table_file)
                                    
                            # We only want fits where an acceptable astrometric
                            # solution has been found.
                            
                            suitable_fits = np.where(output_table['fit_qual'] < 5)
                            
                            hst_files = list(output_table[suitable_fits]['imageName'])
                            
                            if len(output_table[suitable_fits]) == 0:
                                hst_logger.warning('Failure with astrometry corrections. Skipping')
                                os.chdir(orig_dir)
                                continue
                            
                            wcskey = ' '
                            
                        else:
                            
                            raise Exception('Unknown instrument!')
                        
                        # Following Dalcanton+ (2012), group exposures into
                        # long (>50s) and short (<=50s), and process for cosmic
                        # rays separately
                        
                        exp_times = []
                        
                        for hst_file in hst_files:

                            # Read EXPTIME from the primary header without
                            # leaving a file handle open.
                            exp_times.append(fits.getheader(hst_file)['EXPTIME'])
                            
                        for exp_group in ['short','long']:
                            
                            hst_files_group = []
                            
                            for i in range(len(exp_times)):
                            
                                if exp_times[i] > 50 and exp_group == 'long':
                                    hst_files_group.append(hst_files[i])
                                elif exp_times[i] <= 50 and exp_group == 'short':
                                    hst_files_group.append(hst_files[i])
                                    
                            if len(hst_files_group) == len(hst_files):
                                
                                exp_group = ''
                                
                            if len(hst_files_group) == 0:
                                continue
                                
                            if len(exp_group) > 0:
                                
                                output_name = '../outputs/'+galaxy+'_'+exp_group
                                drizzle_log_name = '../outputs/astrodrizzle_'+exp_group+'.log'
                                
                            else:
                                
                                output_name = '../outputs/'+galaxy
                                drizzle_log_name = '../outputs/astrodrizzle.log'
        
                            # Perform the mosaicking. Generally, use iminmed;
                            # however, iminmed will sometimes fail, so fall
                            # back to imedian in that case.
    
                            combine_types = ['iminmed','imedian']
                            
                            if 'WFPC2' in instrument:
                                combine_nhigh = 1
                            else:
                                combine_nhigh = 0
                            
                            for combine_type in combine_types:
                             
                                try:
                                    
                                    astrodrizzle.AstroDrizzle(
                                        input=hst_files_group,
                                        output=output_name,
                                        preserve=False,
                                        clean=True,
                                        combine_type=combine_type,
                                        combine_nhigh=combine_nhigh,
                                        skymethod=skymethod,
                                        sky_bits=bits,
                                        driz_sep_bits=bits,
                                        driz_sep_fillval=99999,
                                        combine_hthresh=90000,
                                        final_scale=pix_size,
                                        final_bits=bits,
                                        final_fillval=0,
                                        wcskey=wcskey,
                                        final_rot=0,
                                        )
                                    
                                    break
                                    
                                except ValueError:
                                
                                    pass
                                
                            # Move the AstroDrizzle log.
                            
                            os.rename('astrodrizzle.log',
                                      drizzle_log_name)
                            
                    # Move back to the original directory.
                        
                    os.chdir(orig_dir)
                    
                    if 4 in steps:
                        
                        mosaic_outputs = glob.glob(full_filepath+'/outputs/*_sci.fits')
                         
                        for mosaic_output in mosaic_outputs:
                            
                            # Replace any fillvals with NaNs.
                            
                            hdu = fits.open(mosaic_output)[0]
                            hdu.data[hdu.data == 0] = np.nan
                            
                            fits.writeto(mosaic_output,
                                         hdu.data,hdu.header,
                                         overwrite=True)
                            
                            if '_long_' in mosaic_output.split('/')[-1]:
                                
                                new_filename = (galaxy+
                                            '/HST/'
                                            +prop_id
                                            +'/'
                                            +galaxy
                                            +'_'
                                            +instrument.replace('/','_')
                                            +'_'
                                            +hst_filter
                                            +'_'
                                            +prop_id
                                            +'_long.fits')
                                
                            elif '_short_' in mosaic_output.split('/')[-1]:
                                
                                new_filename = (galaxy+
                                            '/HST/'
                                            +prop_id
                                            +'/'
                                            +galaxy
                                            +'_'
                                            +instrument.replace('/','_')
                                            +'_'
                                            +hst_filter
                                            +'_'
                                            +prop_id
                                            +'_short.fits')
                                
                            else:
                                
                                new_filename = (galaxy+
                                            '/HST/'
                                            +prop_id
                                            +'/'
                                            +galaxy
                                            +'_'
                                            +instrument.replace('/','_')
                                            +'_'
                                            +hst_filter
                                            +'_'
                                            +prop_id
                                            +'.fits')
                                
                                
                            convert_to_jy(mosaic_output,
                                          new_filename)
                            
                if reset_filters:
                    filters = None
                    
                hst_logger.info(' ')
        
            if reset_prop_ids:
                prop_ids = None
                
            hst_logger.info(' ')
            
        if original_radius is None:
            radius = None
        else:
            radius = original_radius.copy()
            
    # Clear out the tmp folder and reset to the original.
    
    shutil.rmtree('tmp/', ignore_errors=True)
    os.environ['TMPDIR'] = orig_tmpdir
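
# A minimal usage sketch for hst_button (hedged: the galaxy name and radius
# are illustrative; drizzlepac, CRDS access, and plenty of disk space are
# assumed, as the docstring above warns).
import astropy.units as u

hst_button(
    'ngc300',
    instruments='ACS/WFC',
    radius=0.1 * u.degree,
)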
Example #23
def lambda_handler(event, context):
    """Extract light curve data from one TESS full frame image.

    Parameters
    ----------
    event : dict
        API Gateway Lambda Proxy Input Format.
        Event doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format

    context : object
        Lambda Context runtime methods and attributes.
        Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html

    Returns
    -------
    result : dict
        API Gateway Lambda Proxy Output Format.
        Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html

    """  # noqa
    obs_id = event['id']  # TESS observation ID; Example: 'tess-s0001-1-1'

    # TODO: Calculate some of these from the 10th frame?
    # For now, take these from the event and pass them on to the worker:
    payload = {
        'xpos': event['xpos'],
        'ypos': event['ypos'],
        'radius': event['radius'],
        'bright_pixel_threshold': event['bright_pixel_threshold']
    }

    # Find full frame dataset for the observation ID.
    obs_table = Observations.query_criteria(obs_id=obs_id)
    products = Observations.get_product_list(obs_table)
    filtered = Observations.filter_products(products,
                                            productSubGroupDescription="FFIC",
                                            mrp_only=False)

    # Use AWS S3 bucket to pull data from.
    Observations.enable_cloud_dataset()  # TODO: verbose=False ?
    s3_urls = Observations.get_cloud_uris(filtered, include_bucket=False)

    # TODO: Timed out! Try https://docs.python.org/3/library/asyncio.html ?
    # TODO: Handle same Lambda call invoked multiple times by AWS?
    # Call tess_fullframe_worker AWS Lambda function in parallel
    # https://aws.amazon.com/blogs/compute/parallel-processing-in-python-with-aws-lambda/
    parent_connections = []
    processes = []
    data = []
    for url in s3_urls[:2]:  # TODO: Remove [:2] when done testing
        payload['key'] = url
        parent_conn, child_conn = Pipe()
        parent_connections.append(parent_conn)
        arg = json.dumps(payload)
        process = Process(target=_pipe_worker, args=(arg, child_conn))
        processes.append(process)

    for process in processes:
        process.start()

    for process in processes:
        process.join()

    for parent_connection in parent_connections:
        try:
            response = parent_connection.recv()[0]
        except EOFError:
            response = {}
        if 'body' not in response:  # Worker Lambda threw exception
            continue
        body = json.loads(response['body'])
        row = (body['midtime'], body['signal'], body['background'])
        if np.all(list(map(np.isfinite, row))):
            data.append(row)

    # TODO: Save data as table.
    # filename = f'/tmp/{obs_id}_lightcurve.csv'
    # with open(filename, 'w') as fout:
    #     for row in data:
    #         fout.write(f'{row[0]},{row[1]},{row[2]}{os.linesep}')

    # TODO: Upload table to S3 and then delete the table locally.
    # TODO: Return table S3 URL below.
    # TODO: Do we want to plot it and upload the plot too?
    #       If so, need to add matplotlib as dependency.

    return {
        "statusCode": 200,
        "body": json.dumps({
            'n_rows': len(data),
            'data_url': 'TODO'
        })
    }
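
# A minimal local invocation sketch for lambda_handler (hedged: the event
# values below are illustrative, and the unused context argument is passed
# as None).
event = {
    'id': 'tess-s0001-1-1',
    'xpos': 1024,
    'ypos': 1024,
    'radius': 2.5,
    'bright_pixel_threshold': 500,
}
result = lambda_handler(event, None)
print(result['statusCode'], result['body'])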
Example #24
def galex_button(
    galaxies,
    filters="both",
    radius=None,
    filepath=None,
    download_data=True,
    create_mosaic=True,
    jy_conversion=True,
    verbose=False,
):
    """Create a GALEX mosaic, given a galaxy name.
    
    Using a galaxy name and radius, queries around that object, 
    downloads available GALEX data and mosaics into a final product.
    
    Because GALEX images are in counts/s and the integrations may be
    different lengths, we convert back to raw counts, add the frames,
    and convert back to counts/s at the end. This effectively weights
    each frame by its exposure time.
    
    Args:
        galaxies (str or list): Names of galaxies to create mosaics for.
            Resolved by NED.
        filters (str, optional): One of 'FUV', 'NUV', or 'both'. Selects
            which GALEX filters to create a mosaic for. Defaults to
            'both'.
        radius (astropy.units.Quantity, optional): Radius around the 
            galaxy to search for observations. Defaults to None, where
            it will query Ned to get size.
        filepath (str, optional): Path to save the working and output
            files to. If not specified, saves to current working 
            directory.
        download_data (bool, optional): If True, will download data from 
            MAST. Defaults to True.
        create_mosaic (bool, optional): Switching this to True will 
            mosaic data as appropriate. Defaults to True.
        jy_conversion (bool, optional): Convert the mosaicked file from
            raw units to Jy/pix. Defaults to True.
        verbose (bool, optional): Print out messages during the process.
            Useful mainly for debugging purposes or large images. 
            Defaults to False.
    
    """

    if isinstance(galaxies, str):
        galaxies = [galaxies]

    if filters == "both":
        filters = ["NUV", "FUV"]

    elif isinstance(filters, str):
        filters = [filters]

    if filepath is not None:
        os.chdir(filepath)

    if radius is not None:
        original_radius = radius.copy()
    else:
        original_radius = None

    steps = []

    if download_data:
        steps.append(1)
    if create_mosaic:
        steps.append(2)
    if jy_conversion:
        steps.append(3)

    for galaxy in galaxies:

        if verbose:
            print('Beginning ' + galaxy)

        if radius is None:

            try:

                size_query = Ned.get_table(galaxy, table='diameters')
                radius = 1.2 * np.max(
                    size_query['NED Major Axis']) / 2 * u.arcsec
                radius = radius.to(u.deg)

            except Exception:

                print(galaxy +
                      ' not resolved by Ned, using 0.2deg radius.')
                radius = 0.2 * u.degree

        if not os.path.exists(galaxy):
            os.mkdir(galaxy)

        obs_table = Observations.query_criteria(objectname=galaxy,
                                                radius=radius,
                                                obs_type='all',
                                                obs_collection='GALEX')

        # Ignore any calibration observations.
        obs_table = obs_table[obs_table['intentType'] == 'science']

        for galex_filter in filters:

            if verbose:
                print('Beginning GALEX ' + galex_filter)

            if 1 in steps:

                # Pull out available data, and download it

                query_results = np.where(
                    obs_table["filters"] == galex_filter)[0]

                # If there isn't any GALEX coverage, just skip

                if len(query_results) == 0:
                    print(galaxy + " has no " + galex_filter +
                          " coverage, skipping.")

                    continue

                # We only want to print out download messages if
                # verbose is True, so redirect otherwise.

                if not verbose:
                    sys.stdout = open(os.devnull, 'w')

                dataProductsByID = Observations.get_product_list(
                    obs_table[query_results])

                Observations.download_products(
                    dataProductsByID,
                    download_dir="galex_temp/" + galaxy,
                    mrp_only=True,
                )

                # And set back to the original for printing.

                if not verbose:

                    sys.stdout = sys.__stdout__

                if not os.path.exists(galaxy + "/GALEX/"):
                    os.mkdir(galaxy + "/GALEX/")
                if not os.path.exists(galaxy + "/GALEX/" + galex_filter):
                    os.mkdir(galaxy + "/GALEX/" + galex_filter)
                if not os.path.exists(galaxy + "/GALEX/" + galex_filter +
                                      '/raw'):
                    os.mkdir(galaxy + "/GALEX/" + galex_filter + '/raw')
                if not os.path.exists(galaxy + "/GALEX/" + galex_filter +
                                      '/data'):
                    os.mkdir(galaxy + "/GALEX/" + galex_filter + '/data')
                if not os.path.exists(galaxy + "/GALEX/" + galex_filter +
                                      '/reprojected'):
                    os.mkdir(galaxy + "/GALEX/" + galex_filter +
                             '/reprojected')
                if not os.path.exists(galaxy + "/GALEX/" + galex_filter +
                                      '/weight'):
                    os.mkdir(galaxy + "/GALEX/" + galex_filter + '/weight')
                if not os.path.exists(galaxy + "/GALEX/" + galex_filter +
                                      '/outputs'):
                    os.mkdir(galaxy + "/GALEX/" + galex_filter + '/outputs')

                ext_name = {"NUV": "nd", "FUV": "fd"}[galex_filter]

                # Pull out the relevant filter files (either *nd*
                # or *fd*), extract and move to the base folder.

                matches = []
                for root, _, filenames in os.walk("galex_temp/" + galaxy):
                    for filename in fnmatch.filter(
                            filenames, "*" + ext_name + "-int.fits.gz"):
                        matches.append(os.path.join(root, filename))

                for match in matches:
                    with gzip.open(match, "rb") as f_in:
                        with open(match[:-3], "wb") as f_out:
                            shutil.copyfileobj(f_in, f_out)

                    filename = match[:-3].split('/')

                    os.rename(
                        match[:-3],
                        galaxy + "/GALEX/" + galex_filter + "/raw/" +
                        filename[-1],
                    )

                # Clean up any temporary files.

                shutil.rmtree("galex_temp/" + galaxy, ignore_errors=True)

            # Montage uses its size as the side length of the square;
            # since we have a radius, use twice that.

            mHdr(
                galaxy,
                2 * radius.value,
                2 * radius.value,
                galaxy + "/GALEX/" + galex_filter + "/outputs/header.hdr",
                resolution=1.5,
            )

            if 2 in steps:

                # Read in these files and set anything more than 35
                # arcmin out to NaN.

                if verbose:
                    print('Performing initial weighted reprojections')

                galex_files = glob.glob(galaxy + "/GALEX/" + galex_filter +
                                        "/raw/*")

                for galex_file in galex_files:
                    hdu = fits.open(galex_file)[0]

                    i = np.linspace(-hdu.data.shape[0] / 2,
                                    hdu.data.shape[0] / 2, hdu.data.shape[0])

                    j = np.linspace(-hdu.data.shape[1] / 2,
                                    hdu.data.shape[1] / 2, hdu.data.shape[1])

                    # Use 'ij' indexing so the mask aligns with the data
                    # array even for non-square images.
                    iv, jv = np.meshgrid(i, j, indexing='ij')
                    r = iv**2 + jv**2

                    hdu.data[r >= 1400**2] = np.nan

                    hdu.writeto(galex_file.replace('/raw/', '/data/'),
                                overwrite=True)

                    # Also create a weight map (sqrt EXPTIME).

                    exp_time = np.ones(
                        hdu.data.shape) * hdu.header['EXPTIME']**0.5
                    exp_time[np.isnan(hdu.data)] = np.nan

                    fits.writeto(galex_file.replace('/raw/', '/weight/'),
                                 exp_time,
                                 hdu.header,
                                 overwrite=True)

                    # And reproject each map separately using this weighting.

                    mProject(galex_file.replace('/raw/', '/data/'),
                             galex_file.replace('/raw/', '/reprojected/'),
                             galaxy + "/GALEX/" + galex_filter +
                             "/outputs/header.hdr",
                             weight_file=galex_file.replace(
                                 '/raw/', '/weight/'))

                # And mosaic! (The header file was already generated above.)

                tools.mosaic(
                    galaxy + "/GALEX/" + galex_filter + '/reprojected',
                    header=galaxy + "/GALEX/" + galex_filter +
                    "/outputs/header.hdr",
                    verbose=verbose,
                    reproject=False,
                    haveAreas=True,
                )

                os.rename(
                    "mosaic/mosaic.fits", galaxy + "/GALEX/" + galex_filter +
                    "/outputs/" + galaxy + '.fits')

                shutil.rmtree("mosaic/", ignore_errors=True)

            if 3 in steps:

                if verbose:
                    print('Converting to Jy')

                # Convert to Jy.

                convert_to_jy(galaxy + "/GALEX/" + galex_filter + '/outputs/' +
                              galaxy + ".fits",
                              galex_filter,
                              hdu_out=galaxy + "/GALEX/" + galaxy + '_' +
                              galex_filter + ".fits")

        if original_radius is None:
            radius = None
        else:
            radius = original_radius.copy()
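
# A minimal usage sketch for galex_button (hedged: the galaxy name and
# radius are illustrative, and Montage plus its Python bindings (mHdr,
# mProject) are assumed to be installed, as the function body requires).
import astropy.units as u

galex_button('ngc300', filters='NUV', radius=0.15 * u.degree, verbose=True)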
Example #25
def bert_tess_fullframe_main_2():
    """Continuation of main function to run it across different sectors."""

    import os
    import time

    import boto3
    from astropy.io import fits
    from astropy.wcs import WCS
    from astroquery.mast import Observations

    s3 = boto3.resource('s3')
    bucket = s3.Bucket(name=os.environ.get('AWSBUCKETNAME'))
    outbucket = s3.Bucket(name=os.environ.get('CACHEBUCKETNAME'))
    homedir = os.environ.get('HOME')

    work_queue, done_queue, ologger = utils.comm_binders(
        bert_tess_fullframe_main_2)

    # Example event:
    # {
    #   "tic_id": "25155310",
    #   "sec_id": "tess-s0001-4-1",
    #   "ra": 63.3739396231274,
    #   "dec": -69.226822697583,
    #   "radius": 2.5,
    #   "cutout_width": 30,
    #   "use_cache": "true"
    # }
    #
    # work_queue populated by calling Lambda
    for event in work_queue:
        tic_id = event['tic_id']
        sec_id = event['sec_id']

        basename = f'{sec_id}_s3_uris.txt'  # noqa
        filename = os.path.join(homedir, basename)

        try:
            # Check if URI list already cached.
            # According to MAST, there is no need to invalidate cache here.
            ologger.info(f'Attempting to download {basename} from S3')
            outbucket.download_file(
                basename, filename,
                ExtraArgs={"RequestPayer": "requester"})
        except Exception:
            # Find full frame dataset for the observation ID.
            ologger.info('Started querying Observations...')
            obs_table = Observations.query_criteria(obs_id=sec_id)
            products = Observations.get_product_list(obs_table)
            filtered = Observations.filter_products(
                products, productSubGroupDescription="FFIC",
                mrp_only=False)

            # Use AWS S3 bucket to pull data from.
            Observations.enable_cloud_dataset(verbose=False)
            ologger.info('Started obtaining cloud URIs...')
            t_start = time.time()
            s3_urls = Observations.get_cloud_uris(
                filtered, include_bucket=False)
            t_end = time.time()
            ologger.info(f'Got {len(s3_urls)} URIs in {t_end - t_start} s')

            # Upload URI list to cache.
            with open(filename, 'w') as fout:
                for url in s3_urls:
                    fout.write(url + os.linesep)
            try:
                outbucket.upload_file(
                    filename, basename,
                    ExtraArgs={"RequestPayer": "requester"})
            except Exception as exc:
                ologger.error(str(exc))
            else:
                ologger.info(f'Uploaded {basename} to S3')
        else:
            # Use cache if it exists.
            with open(filename, 'r') as fin:
                s3_urls = [url.strip() for url in fin.readlines()]
            ologger.info(f'Read {len(s3_urls)} URIs from {basename}')
        finally:
            # Clean up
            if os.path.exists(filename):
                os.remove(filename)

        ra = float(event['ra'])
        dec = float(event['dec'])

        # TODO: Cache good WCS for a given sector/camera/ccd combo and use
        #       known good cache if available.
        # Find pixel coordinates from sky from first frame header.
        key = s3_urls[0]
        basename = key.split('/')[-1]
        filename = os.path.join(homedir, basename)
        ologger.info(f'Resolving WCS from {key}')
        bucket.download_file(
            key, filename, ExtraArgs={"RequestPayer": "requester"})
        hdr = fits.getheader(filename, ext=1)
        if hdr.get('WCSAXES', 0) != 2:  # Good WCS according to MIT
            ologger.error(f'{key} has invalid WCS')
            continue
        w = WCS(hdr)
        pix = w.all_world2pix(ra, dec, 0)
        xpos = round(float(pix[0]))  # float needed to get rid of 0-D array
        ypos = round(float(pix[1]))

        # Clean up
        os.remove(filename)

        # The star needs to be at least 2*radius pixels away from the
        # edge in both X and Y.
        radius = float(event['radius'])
        edge_r = 2 * radius
        naxis1, naxis2 = w.pixel_shape  # X Y
        if (xpos < edge_r or xpos >= (naxis1 - edge_r) or
                ypos < edge_r or ypos >= (naxis2 - edge_r)):
            ologger.error(
                f'TIC {tic_id} in {sec_id}: X={xpos},Y={ypos} not at least '
                f'{edge_r} pixels away from the edge, skipping...')
            continue

        # Pass data into the next AWS Lambda function.
        ologger.info(f'TIC {tic_id} in {sec_id}: Started processing '
                     'full frame URIs...')
        for url in s3_urls:
            done_queue.put({
                'key': url,
                'tic_id': tic_id,
                'ra': ra,
                'dec': dec,
                'xpos': xpos,
                'ypos': ypos,
                'radius': radius,
                'cutout_width': event['cutout_width'],
                'use_cache': event['use_cache']})
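
# A standalone sketch of the sky-to-pixel step used above (hedged:
# 'ffi.fits' is a hypothetical full frame image with a valid celestial
# WCS in extension 1, which is what the worker expects).
from astropy.io import fits
from astropy.wcs import WCS

hdr = fits.getheader('ffi.fits', ext=1)
w = WCS(hdr)
xpos, ypos = (round(float(p)) for p in w.all_world2pix(63.374, -69.227, 0))

# Same edge check as above: the target must sit at least 2*radius
# pixels from every edge.
radius = 2.5
edge_r = 2 * radius
naxis1, naxis2 = w.pixel_shape  # X, Y
in_bounds = (edge_r <= xpos < naxis1 - edge_r and
             edge_r <= ypos < naxis2 - edge_r)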
Example #26
def get_lc_file_and_data(yourpath, target):
    """ goes in, grabs the data for the target, gets the time index, intensity,and TIC
    if connection error w/ MAST, skips it.
    Also masks any flagged data points according to the QUALITY column.
    parameters: 
        * yourpath, where you want the files saved to. must end in /
        * targets, target list of all TICs 
    modified [lcg 07082020] - fixed handling no results, fixed deleting download folder"""
    fitspath = yourpath + 'mastDownload/TESS/' # >> download directory
    targ = "TIC " + str(int(target))
    print(targ)
    try:
        #find and download data products for your target
        obs_table = Observations.query_criteria(obs_collection='TESS',
                                        dataproduct_type='timeseries',
                                        target_name=str(int(target)),
                                        objectname=targ)
        data_products_by_obs = Observations.get_product_list(obs_table[0:8])
            
        filter_products = Observations.filter_products(data_products_by_obs,
                                                       description = 'Light curves')
        if len(filter_products) != 0:
            manifest = Observations.download_products(filter_products, download_dir= yourpath, extension='fits')
        else: 
            print("Query yielded no matching data produts for ", targ)
            time1 = 0
            i1 = 0
            ticid = 0
            
        #get all the paths to lc.fits files
        filepaths = []
        for root, dirs, files in os.walk(fitspath):
            for name in files:
                #print(name)
                if name.endswith("lc.fits"):
                    filepaths.append(root + "/" + name)
        #print(len(filepaths))
        #print(filepaths)
        
        if len(filepaths) == 0: #if no lc.fits were downloaded, move on
            print("No lc.fits files available for TIC ", targ)
            time1 = 0
            i1 = 0
            ticid = 0
        else: #if there are lc.fits files, open the first one, get the goods, and close it
            f = fits.open(filepaths[0], memmap=False)
            time1 = f[1].data['TIME']
            i1 = f[1].data['PDCSAP_FLUX']
            ticid = f[1].header["TICID"]
            quality = f[1].data['QUALITY']
            f.close()
            
            # >> mask out any nonzero points
            flagged_inds = np.nonzero(quality)
            i1[flagged_inds] = np.nan # >> will be interpolated later
                  
        #then delete all downloads in the folder, no matter what type
        if os.path.isdir(yourpath + "mastDownload"):
            shutil.rmtree(yourpath + "mastDownload")
            print("Download folder deleted.")
            
        # corrects for connection errors
    except (ConnectionError, OSError, TimeoutError, RemoteServiceError):
        print(targ, " could not be accessed due to an error.")
        i1 = 0
        time1 = 0
        ticid = 0
    
    return time1, i1, ticid
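
The flagged cadences above are only set to NaN; a minimal sketch of the later interpolation step, assuming simple linear interpolation (the actual scheme used downstream is not shown in the original):

import numpy as np

def interpolate_nans(time1, i1):
    """Linearly interpolate NaN flux values over the time array."""
    i1 = np.asarray(i1, dtype=float).copy()
    bad = np.isnan(i1)
    if bad.any() and (~bad).any():
        i1[bad] = np.interp(time1[bad], time1[~bad], i1[~bad])
    return i1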
Example #27
0
# PARAMETERS :
# - a (array_like) = obs_table
# RETURNS :
# - ndarray (index_array) = finds the specified images in obs_table
#
# **astroquery.mast.download_products** [[documentation](https://astroquery.readthedocs.io/en/latest/api/astroquery.mast.ObservationsClass.html#astroquery.mast.ObservationsClass.download_products)]
#
# PARAMETERS :
# - products
# - productSubGroupDescription = ['RAW']
# - mrp_only = False (*default False*)

# In[ ]:

obs_table = Observations.query_criteria(instrument_name='ACS/WFC',
                                        intentType='calibration',
                                        target_name='BIAS',
                                        calib_level=1)
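
# A sketch of the download call documented in the comment block above; the
# ['RAW'] filter and mrp_only=False mirror that parameter list. This cell is
# an editorial illustration, not part of the original notebook.
products = Observations.get_product_list(obs_table)
manifest = Observations.download_products(products,
                                          productSubGroupDescription=['RAW'],
                                          mrp_only=False)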

# below are images listed in Tables 01 - 07 of Grogin et al. 2011
#power-spectrum analysis [Table 05]
idx_ps = [
    'ja8wa2e8q', 'ja8wa3fpq', 'ja8wa6jmq', 'ja8wa7jqq', 'ja8waaloq',
    'ja8wablsq', 'ja8waedtq', 'ja8wafe2q', 'ja8wain1q', 'ja8wajnmq',
    'ja8wambyq', 'ja8wanc2q', 'ja8wb0n3q', 'ja8wb1ndq', 'ja8wb4izq',
    'ja8wb5j6q'
]
#striping mitigation testing [Table 06]
idx_sm = [
    'ja8wc2gjq', 'ja8wc3gvq', 'ja8wc6xlq', 'ja8wc7yaq', 'ja8wcae8q',
    'ja8wcbfbq', 'ja8wcetxq', 'ja8wcfugq', 'ja8wciipq', 'ja8wcjjdq',
    'ja8wcmxiq', 'ja8wcnxmq', 'ja8wd0t3q', 'ja8wd1tnq', 'ja8wd4hhq',
    # ... (remaining exposure IDs truncated in the source)
]

Example #28
0
def get_twomin_obs(ticlist):
    """
    Given a list of ticids, return a table of all the data that is available for those targets.
    The table is well formatted, with one line describing
    the available sectors per target.
    
    Input: list of ticids, int
    Return: full table (astropy table of tic, obsid, filenames, camera, ccd)
            for all relevant two minute data files.
            summary table (one line per ticid), list of sectors, list of camera and ccd
    """

    sectors = []
    gis = []
    obsid = []
    tmin = []
    tmax = []
    exptime = []
    target = []

    dv_sectors = []
    dv_obsid = []
    dv_tmin = []
    dv_tmax = []
    dv_target = []
    dv_exptime = []

    for tic in ticlist:
        observations = Observations.query_criteria(
            obs_collection="TESS",
            dataproduct_type=["timeseries"],
            target_name=tic)
        if len(observations) > 0:
            observations.sort(keys=['sequence_number', 'target_name'])

            for obs in observations:
                #Match for multi-sector observations
                match = re.search(r"s\d\d\d\d-s\d\d\d\d", obs['obs_id'])

                if match is None:

                    if int(obs['target_name']) == int(tic):
                        obsid.append(obs['obs_id'])
                        gis.append(obs['proposal_id'])
                        sectors.append(obs['sequence_number'])
                        tmin.append(obs['t_min'])
                        tmax.append(obs['t_max'])
                        exptime.append(int(obs['t_exptime'] / 60.0))
                        target.append(obs['target_name'])

                else:
                    dv_sectors.append(match.group(0))
                    dv_obsid.append(obs['obs_id'])
                    dv_tmin.append(obs['t_min'])
                    dv_tmax.append(obs['t_max'])
                    dv_target.append(obs['target_name'])
                    dv_exptime.append(int(obs['t_exptime'] / 60.0))

    ts_table = QTable([target, sectors, gis, obsid, exptime, tmin, tmax],
                      names=('target_tic', 'sectors', 'GI_nums', 'obs_id',
                             'Exp_time_min', 't_min', 't_max'),
                      meta={'name': 'observation table'})
    dv_table = QTable(
        [dv_target, dv_sectors, dv_obsid, dv_exptime, dv_tmin, dv_tmax],
        names=('target_tic', 'sector_range', 'obs_id', 'Exp_time_min', 't_min',
               't_max'),
        meta={'name': 'Multi-Sector Planet Search'})

    return ts_table, dv_table
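
A hypothetical usage sketch (the TIC ID below is an arbitrary placeholder, not taken from the original):

ts_table, dv_table = get_twomin_obs([261136679])
print(ts_table)  # one row per sector with two-minute data
print(dv_table)  # multi-sector planet-search (DV) products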
Example #29
0
def retrieve_observation(obsid,
                         suffix=['FLC'],
                         archive=False,
                         clobber=False,
                         product_type=None):
    """Simple interface for retrieving an observation from the MAST archive

    If the input obsid is for an association, it will request all members with
    the specified suffixes.

    Parameters
    -----------
    obsid : string or list of strings
        ID or list of IDs for observations to be retrieved from the MAST archive.
        Only the IPPSSOOT (rootname) of the exposure or ASN needs to be provided; e.g.,
        ib6v06060.  Additionally, a wild-carded `obsid` can be provided to
        retrieve all products for a visit; e.g., "ib6v06*".  Data from multiple
        ASNs, exposures or visits can be retrieved by simply providing them as a list.

    suffix : list, optional
        List containing suffixes of files which should be requested from MAST.
        Default value is "['FLC']".

    archive : Boolean, optional
        Retain copies of the downloaded files in the astroquery created
        sub-directories? Default is "False".

    clobber : Boolean, optional
        Download and overwrite existing files? Default is "False".

    product_type : str, optional
        Specify what type of product you want from the archive, either 'pipeline'
        or 'HAP' or 'both' (default).  By default, all versions of the products
        processed for the requested datasets will be returned.  This would include:
          - pipeline : files processed by `runastrodriz` to include the latest
                       distortion calibrations and the best possible alignment to GAIA
                       with `ipppssoot_fl[tc].fits` filenames for FLT/FLC files.
          - HAP : files processed as a single visit and aligned (as possible) to GAIA
                  with `hst_<propid>_<visit>_<instr>_<det>_<filter>_<ipppssoo>_fl[tc].fits`
                  filenames.

    Returns
    -------
    local_files : list
        List of filenames
    """
    local_files = []

    if Observations is None:
        log.warning(
            "The astroquery package was not found.  No files retrieved!")
        return local_files

    # Query MAST for the data with an observation type of either "science" or
    # "calibration"
    obs_table = Observations.query_criteria(obs_id=obsid)

    # Catch the case where no files are found for download
    if not obs_table:
        log.info("WARNING: Query for {} returned NO RESULTS!".format(obsid))
        return local_files

    dpobs = Observations.get_product_list(obs_table)
    if product_type:
        ptypes = [
            product_type_dict[product_type] in descr
            for descr in dpobs['description']
        ]
        dpobs = dpobs[ptypes]

    data_products_by_id = Observations.filter_products(
        dpobs,
        productSubGroupDescription=suffix,
        extension='fits',
        mrp_only=False)

    # After the filtering has been done, ensure there is still data in the
    # table for download. If the table is empty, look for FLT images in lieu
    # of FLC images. Only want one or the other (not both!), so just do the
    # filtering again.
    if not data_products_by_id:
        log.info("WARNING: No FLC files found for {} - will look for FLT "
                 "files instead.".format(obsid))
        suffix = ['FLT']
        data_products_by_id = Observations.filter_products(
            dpobs,
            productSubGroupDescription=suffix,
            extension='fits',
            mrp_only=False)

        # If still no data, then return.  An exception will eventually be
        # thrown in the higher level code.
        if not data_products_by_id:
            log.info(
                "WARNING: No FLC or FLT files found for {}.".format(obsid))
            return local_files
    all_images = data_products_by_id['productFilename'].tolist()
    log.info(all_images)
    if not clobber:
        rows_to_remove = []
        for row_idx, row in enumerate(data_products_by_id):
            fname = row['productFilename']
            if os.path.isfile(fname):
                log.info(fname + " already exists. File download skipped.")
                rows_to_remove.append(row_idx)
        data_products_by_id.remove_rows(rows_to_remove)

    manifest = Observations.download_products(data_products_by_id,
                                              mrp_only=False)

    if not clobber:
        for rownum in rows_to_remove[::-1]:
            if manifest:
                manifest.insert_row(
                    rownum, vals=[all_images[rownum], "LOCAL", "None", "None"])
            else:
                return all_images

    download_dir = None
    for file, file_status in zip(manifest['Local Path'], manifest['Status']):
        if file_status != "LOCAL":
            # Identify what sub-directory was created by astroquery for the
            # download
            if download_dir is None:
                download_dir = os.path.dirname(os.path.abspath(file))
            # Move or copy downloaded file to current directory
            local_file = os.path.abspath(os.path.basename(file))
            if archive:
                shutil.copy(file, local_file)
            else:
                shutil.move(file, local_file)
            # Record what files were downloaded and their current location
            local_files.append(os.path.basename(local_file))
        else:
            local_files.append(file)
    if not archive:
        # Remove astroquery created sub-directories
        shutil.rmtree('mastDownload')
    return local_files
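
A usage sketch built on the docstring's own example rootname, retrieving FLC files for that ASN (the function falls back to FLT files automatically if no FLC products exist):

local_files = retrieve_observation('ib6v06060', suffix=['FLC'])
print(local_files)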
Example #30
0
# Convert the duration to days from hours
duration_guess = np.array(tois.pl_trandurh, dtype=float) / 24.0

tois[["tid", "toi", "pl_orbper", "pl_trandep", "pl_trandurh"]]

# %% [markdown]
# Then we can search for and download the light curves.
# Note that this will fail if there is no 2-minute cadence light curve for this target.
# Typically this would be done with [lightkurve](https://docs.lightkurve.org) directly, but we'll use the MAST API here because lightkurve's light curve search is currently a little slow.

# %%
# Temporary workaround for slow MAST queries with lightkurve
observations = Observations.query_criteria(
    target_name=f"{tic}",
    radius=0.0001,
    project=["TESS"],
    obs_collection=["TESS"],
    provenance_name="SPOC",
    dataproduct_type="timeseries",
)
if not len(observations):
    raise RuntimeError("no 2-minute cadence data")
products = Observations.get_product_list(observations)
products = products[products["productSubGroupDescription"] == "LC"]
files = Observations.download_products(
    products, download_dir=tess_world.get_lightkurve_directory())
lcfs = lk.LightCurveCollection(
    [lk.open(file).PDCSAP_FLUX for file in files["Local Path"]])
lc = lcfs.stitch().remove_nans()

# Extract the data in the correct format
x = np.ascontiguousarray(lc.time, dtype=np.float64)
Example #31
0
#!/usr/bin/env python3

from astroquery.mast import Observations

import boto3
import json
import os

import IPython

wfc = Observations.query_criteria(
    dataproduct_type=['image'],
    project='HST',
    instrument_name='ACS/WFC',
)
print("WFC: ", len(wfc))

hrc = Observations.query_criteria(
    dataproduct_type=['image'],
    project='HST',
    instrument_name='ACS/HRC',
)
print("HRC: ", len(hrc))

IPython.embed()