Example #1
def get_hdul(fits_name,
             mode='readonly',
             checksum=True):  # Written by Ema 05.03.2020
    r"""Wrapper for astropy `fits.open`. It checks if the file exists and in case returns its HDUList.

    Args:
        fits_name (str): fits file name
        mode (str): Open mode for the file. Possibilities are: `readonly`, `update`, `append`, `denywrite`,
            or `ostream`
        checksum (bool): If True, verifies that both `DATASUM` and `CHECKSUM` card values (when present in the HDU
            header) match the header and data of all HDUs in the file. Updates to a file that already has a checksum
            will preserve and update the existing checksums unless this argument is given a value of `remove`,
            in which case the `CHECKSUM` and `DATASUM` values are not checked, and are removed when saving
            changes to the file

    Returns:
        hdul: list-like collection of HDU objects

    """
    if not checks.fits_file_is_valid(fits_name):
        msgs.error('Fits file not valid')
        return None
    else:
        hdul = fits.open(fits_name, mode=mode, checksum=checksum)
        msgs.info('The fits file {} contains {} HDUs'.format(
            fits_name, len(hdul)))
        msgs.info('Summary:')
        hdul.info()
        return hdul
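
A minimal usage sketch (the import path `ESOAsg.core.fitsfiles` is taken from Example #17/#23, which call `fitsfiles.get_hdul`; the file name is a placeholder):

from ESOAsg.core import fitsfiles

# Open a FITS file read-only and inspect the primary header
hdul = fitsfiles.get_hdul('spectrum.fits', mode='readonly', checksum=True)
if hdul is not None:
    print(repr(hdul[0].header))
    hdul.close()  # release the file handle when done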
Example #2
def check_disk_space(min_disk_space=float(
    default.get_value('min_disk_space'))) -> bool:
    r"""Check that there is enough space on the location where the code is running

    Given a disk space limit in GB, the macro returns `True` if the disk where the code is running has more free GB
    than the given limit.

    .. warning::
        The current implementation checks the disk where the code is running (i.e., from the directory: `./`).
        This may cause some trouble with shared disks.

    Args:
        min_disk_space (float): Size of free space on disk required

    Returns:
        bool: `True` if there is enough space on disk

    """
    total, used, free = shutil.disk_usage("./")
    total = total / (1024.**3.)
    used = used / (1024.**3.)
    free = free / (1024.**3.)
    msgs.info('Your disk has:')
    msgs.info('Total: {0:.2f} GB, Used: {1:.2f} GB, Free: {2:.2f} GB'.format(
        total, used, free))
    if free > min_disk_space:
        enough_space = True
    else:
        enough_space = False
        msgs.warning('Not enough space on disk')
    return enough_space
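
A short sketch of how this check could gate a long download (the module path `ESOAsg.ancillary.checks` is an assumption based on the `checks.check_disk_space` calls in the later examples):

from ESOAsg.ancillary import checks  # assumed module path

# Require at least 6 GB of free space before starting a download
if checks.check_disk_space(min_disk_space=6.):
    print('Enough free space on disk, starting the download')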
Example #3
def _run_query(query,
               verbose=False,
               remove_bytes=True,
               maxrec=default.get_value('maxrec')):
    r"""Run tap query and return result as a table

    Args:
        query (`str`):
            Query to be run
        verbose (`bool`):
            if set to `True` additional info will be displayed
        remove_bytes (`bool`):
            if set to `True`, it converts all bytes entries to standard strings
        maxrec (`int`):
            Define the maximum number of entries that a single query can return

    Returns:
        result_from_query (`astropy.Table`):
            Result from the query to the TAP service
    """
    # Load tap service
    tapobs = _define_tap_service(verbose=False)
    if verbose:
        msgs.info('The query is: \n {} \n'.format(str(query)))
    # Obtain the query results and convert them to an astropy table
    result_from_query = tapobs.search(query=query, maxrec=maxrec).to_table()
    # removing bytes code from some columns:
    if remove_bytes:
        for column_name in result_from_query.colnames:
            result_from_query[
                column_name].data.data[:] = checks.from_bytes_to_string(
                    result_from_query[column_name].data.data)
    return result_from_query
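
For reference, a hypothetical ADQL query string for this helper, modeled on the `ivoa.ObsCore` queries of Example #20 (the instrument name and `maxrec` value are illustrative):

# Select a few ObsCore columns for a single instrument
query = '''SELECT target_name, dp_id, s_ra, s_dec, instrument_name
           FROM ivoa.ObsCore
           WHERE instrument_name='MUSE' '''
result_table = _run_query(query, verbose=True, remove_bytes=True, maxrec=100)
print('{} rows returned'.format(len(result_table)))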
Example #4
def download(dp_ids,
             min_disk_space=float(default.get_value('min_disk_space'))):
    r"""Given a filename in the ADP format, the code download the file from the
    `ESO archive <http://archive.eso.org>`_

    Args:
        dp_ids (any): list data product ID (or single product ID) to be downloaded
        min_disk_space (float): the file will be downloaded only if there is this amount of space (in GB) free on the
            disk

    Returns:
        None

    """
    # Check for disk space
    checks.check_disk_space(min_disk_space=min_disk_space)
    # Cleaning list
    dp_ids_list = cleaning_lists.from_element_to_list(
        cleaning_lists.from_bytes_to_string(dp_ids), element_type=str)
    for dp_id in dp_ids_list:
        # Given a dp_id of a public file, the link to download it is constructed as follows:
        download_url = 'http://archive.eso.org/datalink/links?ID=ivo://eso.org/ID?{}&eso_download=file'.format(
            dp_id)
        msgs.work('Retrieving file {}.fits'.format(dp_id))
        urllib.request.urlretrieve(download_url, filename=dp_id + '.fits')
        msgs.info('File {}.fits downloaded'.format(dp_id))
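
A usage sketch (assuming `download` is exposed as `archive_observations.download`, as in Example #17; the data product IDs are placeholders):

from ESOAsg import archive_observations

# Download two (hypothetical) public data products from the ESO archive
archive_observations.download(['ADP.2020-06-26T17:03:57.771',
                               'ADP.2020-06-26T17:03:57.772'])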
Example #5
def query_ASP_from_polygons(polygons=None, open_link=False, show_link=False):
    r"""Query the ESO ASP service given a list of polygonal regions

    Args:
        polygons (`list`): list of strings defining the polygonal regions to query
        open_link (`bool`): open a link to the ASP page
        show_link (`bool`): show the link on the terminal

    """
    if polygons is not None:
        for iii, polygon in enumerate(polygons):
            url = 'http://archive.eso.org/scienceportal/home?' + 'poly=' + polygon + '&sort=-obs_date'
            if show_link:
                msgs.info('ASP link to region N.{} is:\n {}\n'.format(
                    str(iii + 1), url))
            if open_link:
                webbrowser.open(url)
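
The `polygons` entries appear to be plain strings of comma-separated RA/Dec vertices in degrees, matching both the ASP `poly=` URL parameter and the ADQL `POLYGON` clause of Example #20; a minimal sketch with made-up coordinates:

# One square region around RA=10, Dec=-20, written as 'ra1,dec1,ra2,dec2,...'
polygons = ['9.5,-20.5,10.5,-20.5,10.5,-19.5,9.5,-19.5']
query_ASP_from_polygons(polygons=polygons, show_link=True, open_link=False)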
Example #6
def query_ASP_from_radec(positions,
                         radius=None,
                         open_link=False,
                         show_link=False):
    r"""Query the ESO ASP service given a position in RA and Dec.

    The `positions` value (or list) needs to be given as an `astropy.coordinates.SkyCoord` object.

    Args:
        positions (`astropy.coordinates.SkyCoord`):
            Coordinates (or list of coordinates) of the sky you want to query in the format of an
            `astropy.coordinates.SkyCoord` object. For further detail see here:
            `astropy coordinates <https://docs.astropy.org/en/stable/coordinates/>`_
        radius (`float`):
            Search radius you want to query in arcseconds. Note that in case `None` is given, the query will be
            performed with the `CONTAINS(POINT('',RA,Dec), s_region)` clause instead of the
            `CONTAINS(s_region,CIRCLE('',RA,Dec,radius/3600.))` one. See here for further examples:
            `tap obs examples <http://archive.eso.org/tap_obs/examples>`_
        open_link (`bool`):
            open a link to the ASP page
        show_link (`bool`):
            show the link on the terminal

    """
    # Check inputs:
    # Working on positions
    if isinstance(positions, list):
        positions_list = positions
    else:
        positions_list = [positions]
    for position in positions_list:
        assert isinstance(
            position,
            coordinates.SkyCoord), r'Input positions not a SkyCoord object'
    # Working on radius
    if radius is not None:
        if isinstance(radius, int):
            radius = float(radius)
        else:
            assert isinstance(radius, float), r'Input radius is not a number'

    for iii, position in enumerate(positions_list):
        position.transform_to(ICRS)
        radec_string = str(float(position.ra.degree)) + ',' + str(
            float(position.dec.degree))
        url = 'http://archive.eso.org/scienceportal/home?' + 'pos=' + radec_string + '&sort=-obs_date'
        # append the search radius only if one is given, so that `radius=None`
        # does not raise a TypeError
        if radius is not None:
            url += '&r=' + str(radius / 3600.)
        if show_link:
            msgs.info('ASP link to region N.{} is:\n {}\n'.format(
                str(iii + 1), url))
        if open_link:
            webbrowser.open(url)
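
A usage sketch with a single made-up position and a 30 arcsec search radius (the `SkyCoord` construction mirrors Example #17):

from astropy import coordinates
from astropy import units as u

position = coordinates.SkyCoord(ra=53.16 * u.degree, dec=-27.78 * u.degree,
                                frame='icrs')
query_ASP_from_radec(position, radius=30., show_link=True, open_link=False)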
Example #7
def contours_from_gw_bayestar(file_name, credible_level=50.):
    r"""Given a bayestar.fits.gz HEALPix maps of a GW superevent it extracts contours at the given `credible_level`

    This is a wrapper around `ligo_skymap_contour`. The different contours are then counterclockwise oriented to be
    compatible with TAP and ASP queries.

    Args:
        file_name (str): Name of HEALPix maps
        credible_level (float): Probability level at which contours are returned

    Returns:
        list: [RA,Dec] list defining the contours location. These are counter-clockwise oriented as seen on the sky from
           inside the sphere. The length of `contours` represents the number of disconnected regions identified.
    """

    # Some checks
    assert isinstance(file_name, str), '{} is not a valid string'.format(file_name)
    if not checks.fits_file_is_valid(file_name):
        msgs.error('{} not a valid fits file'.format(file_name))
    assert isinstance(credible_level, (int, float)), '`credible_level` is not a float or an int'

    # Create temporary file where to store output from ligo_skymap_contour
    contour_tmp_file = '.' + file_name + '.tmp.json'
    if path.isfile(contour_tmp_file):
        remove(contour_tmp_file)

    # Use ligo_skymap_contour to compute contours
    ligo_skymap_contour(args=[file_name, '--output', contour_tmp_file, '--contour', str(credible_level)])

    # Parse the content of the resulting json file into a dict
    with open(contour_tmp_file, 'r') as json_file:
        json_data = json_file.read()
    contours_dict = json.loads(json_data)
    # cleaning up
    remove(contour_tmp_file)

    # Parse the resulting json to obtain a list of coordinates defining the vertices of the contours of each peak
    # contours is a list, each element of which contains the vertices of one contour encircling the desired significance
    contours = contours_dict['features'][0]['geometry']['coordinates']
    # Make sure that the orientation of the polygons on the sky is the correct one for the TAP queries,
    # i.e. counter-clockwise as seen on the sky from inside the sphere.
    for iii, contour in enumerate(contours):
        contours[iii] = _ensure_orientation(contour)

    # Quick summary:
    if len(contours) > 0:
        msgs.info('Extracted the contours for {} regions at {} credible level'.format(len(contours), credible_level))
    else:
        msgs.info('No contours extracted at {} credible level'.format(credible_level))
        contours = None

    return contours
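
A brief sketch of extracting contours from a previously downloaded sky map (the file name follows the `<superevent_name>_bayestar.fits.gz` convention of Example #15):

contours = contours_from_gw_bayestar('S191205ah_bayestar.fits.gz',
                                     credible_level=50.)
if contours is not None:
    print('Found {} disconnected regions'.format(len(contours)))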
Example #8
def which_service(tap_service):
    r"""Print a summary description of the TAP service used

    Args:
        tap_service (pyvo.dal.tap.TAPService): TAP service used for the queries

    Returns:
        None

    """
    msgs.info('The TAP service used is:')
    tap_service.describe()
    return
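
A quick sketch combining this helper with `_define_tap_service` from Example #13 (assuming both are importable in the same session):

# Print a description of the TAP service before running any query
tapobs = _define_tap_service(verbose=True)
which_service(tapobs)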
Example #9
def fits_file_is_valid(fits_file,
                       verify_fits=False,
                       overwrite=False) -> bool:  # Written by Ema 05.03.2020
    r"""Check if a file exists and has a valid extension

    The option `verify_fits` checks the header of the fits file using `astropy.io.fits.verify`

    Args:
        fits_file (str): fits file you would like to check
        verify_fits (bool): if set to `True`, it will verify that the fits file is compliant with the FITS standard.
        overwrite (bool): if `True`, overwrite the input fits file with the header corrections from `verify_fits`

    Returns:
        bool: `True` if the file exists and is valid, `False` (with a warning raised) if not.

    """
    is_fits = True
    # Checks if it is a string
    assert isinstance(fits_file, str), 'input `fits_file` needs to be a string'
    # Check for ending
    # ToDo
    # to be updated to: PERMITTED_FITS_ENDINGS
    if not fits_file.endswith('.fits') and not fits_file.endswith(
            '.fits.fz') and not fits_file.endswith('.fits.gz'):
        msgs.warning(
            'File: {} does not end with `fits` or `fits.fz` or `fits.gz`'.
            format(fits_file))
        is_fits = False
    # Check for existence
    if not os.path.exists(fits_file):
        msgs.warning('File: {} does not exist'.format(fits_file))
        is_fits = False
    # Check for compliance with FITS standard
    if verify_fits:
        if overwrite:
            hdul = fits.open(fits_file, mode='update', checksum=False)
            if not check_checksums(hdul):
                is_fits = False
            hdul.flush(output_verify='fix+warn', verbose=True)
            hdul.writeto(fits_file, checksum=True, overwrite=True)
            msgs.info('File checked and rewritten')
        else:
            hdul = fits.open(fits_file, mode='readonly', checksum=True)
            if not check_checksums(hdul):
                is_fits = False
            hdul.verify('fix+warn')
        hdul.close()
    else:
        if overwrite:
            msgs.error('The option overwrite works only if verify_fits = True')
    return is_fits
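
A usage sketch (the module path `ESOAsg.ancillary.checks` is an assumption, and the file name is a placeholder):

from ESOAsg.ancillary import checks  # assumed module path

# Validate name, existence, and FITS compliance before opening the file
if checks.fits_file_is_valid('spectrum.fits', verify_fits=True):
    print('spectrum.fits looks like a valid FITS file')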
Example #10
def prodcatg(self, prodcatg_type):
    self.__prodcatg = eso_prodcatg.ProdCatg(prodcatg_type=prodcatg_type)
    if self.is_primary is True:
        _prodcatg_value = self.get('PRODCATG', default=None)
        if _prodcatg_value is None:
            msgs.info(
                'Added PRODCATG = {} to the header'.format(prodcatg_type))
            self.set('PRODCATG', prodcatg_type)
        elif _prodcatg_value != prodcatg_type:
            msgs.warning('Updating value of PRODCATG from {} to {}'.format(
                _prodcatg_value, prodcatg_type))
            self.set('PRODCATG', prodcatg_type)
        else:
            msgs.info('PRODCATG = {}'.format(_prodcatg_value))
Example #11
def run_query(query_url, show_link=True, open_link=True):
    r"""Run the ASP query

    Args:
        query_url (str): url of the ASP query
        open_link (bool): open a link to the ASP page
        show_link (bool): show the link on the terminal

    Returns:
        None

    """
    if show_link:
        msgs.info('The ASP link is:\n {}\n'.format(query_url))
    if open_link:
        webbrowser.open(query_url)
    return
Example #12
def print_query(query):
    r"""Print the query on the terminal

    In case the `query` is empty, a warning is raised

    Args:
        query (str): String containing the query

    Returns:
          None

    """
    if query is None:
        msgs.warning('The query is empty')
    else:
        msgs.info('The query is:')
        msgs.info('{}'.format(query))
    return
Example #13
def _define_tap_service(verbose=False):
    r"""Load tap service from defaults

    The TAP service for raw, reduced, and ambient data is defined in `ESOAsg/default.txt` as `eso_tap_obs`

    Args:
        verbose (`bool`):
            if set to `True` additional info will be displayed

    Returns:
        tapobs (`pyvo.dal.tap.TAPService`):
            TAP service that will be used for the queries
    """
    if verbose:
        msgs.info('Querying the ESO TAP service at:')
        msgs.info('{}'.format(str(default.get_value('eso_tap_obs'))))
    tapobs = dal.tap.TAPService(default.get_value('eso_tap_obs'))
    return tapobs
Example #14
def download(dp_id,
             min_disk_space=np.float32(default.get_value('min_disk_space'))):
    r"""Given a filename in the ADP format, the code download the file from the
    `ESO archive <http://archive.eso.org>`_

    .. note::
        if `dp_id` is not a `str`, a WARNING message will be raised and the content of `dp_id` will be
        converted into a string.

    Args:
        dp_id (`str`):
            Data product ID to be downloaded.
        min_disk_space (`float`):
            The file will be downloaded only if there is this amount of space (in GB) free on the disk.
            By default it is set by the `default.txt` file.

    Returns:
        This downloads a fits ADP file with the same name as the input.
    """

    # Check for disk space
    checks.check_disk_space(min_disk_space=min_disk_space)

    # `dp_id` is expected to be an iterable of data product IDs
    for file_name in dp_id:
        # if the file name is in bytes, decode it
        if not isinstance(file_name, str):
            msgs.warning('The content of dp_id is not in a string format.')
            msgs.warning('The code is trying to fix this.')
            if isinstance(file_name, bytes):
                file_name = str(file_name.decode("utf-8"))
                msgs.warning('Converted to {}.'.format(type(file_name)))
            else:
                msgs.error(
                    'Unable to understand the format of the dp_id entry: {}'.
                    format(type(file_name)))

        # Given a dp_id of a public file, the link to download it is constructed as follows:
        download_url = 'http://archive.eso.org/datalink/links?ID=ivo://eso.org/ID?{}&eso_download=file'.format(
            str(file_name))
        msgs.work(
            'Downloading file {}. This may take some time.'.format(file_name +
                                                                   '.fits'))
        urllib.request.urlretrieve(download_url, filename=file_name + '.fits')
        msgs.info('File {} downloaded.'.format(file_name + '.fits'))
Example #15
def download_gw_bayestar(superevent_name, file_name='bayestar.fits.gz'):
    r"""Download the bayestar.fits.gz of a GW superevent

    This is simply checking the existence of: `https://gracedb.ligo.org/superevents/<superevent_name>/files/`
    and downloading the file: `https://gracedb.ligo.org/apiweb/superevents/<superevent_name>/files/bayestar.fits.gz`

    The downloaded file will be renamed as `superevent_name`_`file_name`

    Args:
        superevent_name (str): name of the superevent: e.g., 'S191205ah'
        file_name (str): name of the file to download. Default is `bayestar.fits.gz`

    Returns:
        bool: it returns `True` if data are retrieved and `False` otherwise.

    """

    # Some checks
    assert isinstance(superevent_name, str), '{} is not a valid string'.format(superevent_name)
    assert isinstance(file_name, str), '{} is not a valid string'.format(file_name)
    # Checks if superevent exists
    gw_files_url = 'https://gracedb.ligo.org/superevents/' + superevent_name + '/files/'
    checks.connection_to_website(gw_files_url)
    # download and save the superevent file
    gw_bayestar_url = 'https://gracedb.ligo.org/apiweb/superevents/' + superevent_name + '/files/' + file_name
    r = requests.get(gw_bayestar_url, allow_redirects=True)
    if r.status_code == 404:
        msgs.warning('Failed to access: {}'.format(gw_bayestar_url))
        return False
    with open(superevent_name + '_' + file_name, 'wb') as gw_file:
        gw_file.write(r.content)

    # check that the file actually arrived on disk
    if path.isfile(superevent_name + '_' + file_name):
        _test_header = fitsfiles.header_from_fits_file(superevent_name + '_' + file_name)
        if len(_test_header) > 0:
            msgs.info('File {}_{} successfully downloaded'.format(superevent_name, file_name))
        else:
            msgs.warning('Not a valid fits file in: {}'.format(gw_bayestar_url))
            return False
    else:
        msgs.warning('Failed to download: {}'.format(gw_bayestar_url))
        return False

    return True
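
A hedged end-to-end sketch chaining the download with the contour extraction of Example #7 (`S191205ah` is the superevent name quoted in the docstring, and both helpers are assumed importable in the same session):

if download_gw_bayestar('S191205ah', file_name='bayestar.fits.gz'):
    contours = contours_from_gw_bayestar('S191205ah_bayestar.fits.gz',
                                         credible_level=50.)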
Example #16
def _define_tap_service(verbose=False):
    r"""Load tap service from defaults

    The TAP service for scientific catalogues generated by ESO observing teams is defined in `ESOAsg/default.txt` as
    `eso_tap_cat`

    Args:
        verbose (`bool`):
            if set to `True` additional info will be displayed

    Returns:
        tapcat (`pyvo.dal.tap.TAPService`):
            TAP service that will be used for the queries. For more details on this see the
            `pyvo documentation <https://pyvo.readthedocs.io/en/latest/>`_
    """
    if verbose:
        msgs.info('Querying the ESO TAP service at:')
        msgs.info('{}'.format(str(default.get_value('eso_tap_cat'))))
    tapcat = dal.tap.TAPService(default.get_value('eso_tap_cat'))
    return tapcat
Example #17
def main(args):
    from astropy import coordinates
    from astropy import units as u

    from ESOAsg import archive_observations
    from ESOAsg import msgs
    from ESOAsg.ancillary import cleaning_lists

    ra_list = cleaning_lists.from_element_to_list(args.ra_degree,
                                                  element_type=float)
    dec_list = cleaning_lists.from_element_to_list(args.dec_degree,
                                                   element_type=float)
    if args.radius is None:
        radius = None
    else:
        radius = cleaning_lists.from_element_to_list(args.radius,
                                                     element_type=float)[0]
    instruments = cleaning_lists.from_element_to_list(args.instruments,
                                                      element_type=str)
    data_types = cleaning_lists.from_element_to_list(args.data_types,
                                                     element_type=str)

    msgs.start()
    for ra, dec in zip(ra_list, dec_list):
        position = coordinates.SkyCoord(ra=ra * u.degree,
                                        dec=dec * u.degree,
                                        frame='fk5')
        msgs.newline()
        msgs.info('Query for ESO archival data around the position: {}'.format(
            position.to_string('hmsdms')))
        result_from_query = archive_observations.query_from_radec(
            position,
            radius=radius,
            instruments=instruments,
            data_types=data_types,
            verbose=False)
        if len(result_from_query['dp_id']) > 0:
            archive_observations.download(result_from_query['dp_id'])
    msgs.end()
Example #18
def _run_query(query, verbose=False, maxrec=default.get_value('maxrec')):
    r"""Run tap query and return result as a table

    Args:
        query (`str`):
            Query to be run
        verbose (`bool`):
            if set to `True` additional info will be displayed
        maxrec (`int`, `None`):
            Define the maximum number of entries that a single query can return

    Returns:
        result_from_query (`astropy.Table`):
            Result from the query to the TAP service
    """
    # Load tap service
    tapcat = _define_tap_service(verbose=False)
    if verbose:
        msgs.info('The query is:')
        msgs.info('{}'.format(str(query)))
    # Obtain the query results and convert them to an astropy table
    result_from_query = tapcat.search(query=query, maxrec=maxrec).to_table()
    return result_from_query
Example #19
def run_query_async(tap_service, query, maxrec=default.get_value('maxrec')):
    r"""Run an asynchronous query to TAP service and return result as an `astropy.Table`

    Args:
        tap_service (pyvo.dal.tap.TAPService): TAP service that will be used for the query
        query (str): query to be run
        maxrec (int): define the maximum number of entries that a single query can return. Default is set
            by default.get_value('maxrec')

    Returns:
        astropy.table: result from the query to the TAP service

    """
    tap_job = tap_service.submit_job(query=query, maxrec=maxrec)
    tap_job.run()
    # Wait for Executing
    tap_job.wait(phases=["EXECUTING", "ERROR", "ABORTED"], timeout=10.)
    msgs.info('The query to the tap_service is in the status: {}'.format(str(tap_job.phase)))
    # Wait for Completed
    tap_job.wait(phases=["COMPLETED", "ERROR", "ABORTED"], timeout=10.)
    msgs.info('The query to the tap_service is in the status: {}'.format(str(tap_job.phase)))
    # Fetch the results
    tap_job.raise_if_error()
    return tap_job.fetch_result().to_table()
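
A usage sketch pairing this with a TAP service from `_define_tap_service` (the `TOP 5` query is illustrative ADQL, and the `ivoa.ObsCore` table assumes the observation TAP service of Example #13):

tap_service = _define_tap_service(verbose=False)
result_table = run_query_async(tap_service,
                               'SELECT TOP 5 target_name, dp_id FROM ivoa.ObsCore',
                               maxrec=5)
result_table.pprint()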
Example #20
def query_TAP_from_polygons(polygons=None,
                            merge=False,
                            instrument=None,
                            maxrec=default.get_value('maxrec'),
                            verbose=False):
    r"""Query the ESO TAP service for data products within a list of polygonal regions

    Args:
        polygons (`list`): list of strings defining the polygonal regions to query
        merge (`bool`): if `True`, a single query is run on the union of the polygons
        instrument (`str`): if not `None`, limit the query to the given instrument
        maxrec (`int`): maximum number of entries that a single query can return
        verbose (`bool`): if set to `True` additional info will be displayed

    Returns:
        list: results from the queries to the TAP service

    """
    tapobs = dal.tap.TAPService(default.get_value('eso_tap_obs'))
    msgs.info('Querying the ESO TAP service at:')
    msgs.info('{}'.format(str(default.get_value('eso_tap_obs'))))

    results_from_query = []

    if polygons is not None:
        polygon_union = ''
        for iii, polygon in enumerate(polygons):
            polygon_union += """intersects(s_region, POLYGON('', """ + polygon + """)) = 1 OR """
            if not merge:
                query = """SELECT
                               target_name, dp_id, s_ra, s_dec, t_exptime, em_min, em_max, 
                               dataproduct_type, instrument_name, abmaglim, proposal_id
                           FROM
                               ivoa.ObsCore
                           WHERE
                               intersects(s_region, POLYGON('', """ + polygon + """)) = 1 """
                if instrument is not None:
                    instrument_selection = str(
                        """                     AND instrument_name='{}'""".
                        format(str(instrument)))
                    query = '\n'.join([query, instrument_selection])
                if verbose:
                    msgs.info('The query is:')
                    msgs.info('{}'.format(str(query)))

                # Obtaining query results
                result_from_query = tapobs.search(query=query, maxrec=maxrec)
                if len(result_from_query) < 1:
                    msgs.warning('No data has been retrieved')
                else:
                    msgs.info(
                        'A total of {} entries have been retrieved for polygon N.{}'
                        .format(len(result_from_query), iii))
                    msgs.info('From the following instruments:')
                    for inst_name in np.unique(
                            result_from_query['instrument_name'].data):
                        msgs.info(' - {}'.format(inst_name.decode("utf-8")))
                    if verbose:
                        result_from_query.to_table().pprint(max_width=-1)
                results_from_query.append(result_from_query)

        polygon_union = polygon_union[:-4]
        if merge:
            query = """SELECT
                           target_name, dp_id, s_ra, s_dec, t_exptime, em_min, em_max, 
                           dataproduct_type, instrument_name, abmaglim, proposal_id
                       FROM
                           ivoa.ObsCore
                       WHERE
                            (""" + polygon_union + """)"""
            if instrument is not None:
                instrument_selection = str(
                    """                     AND instrument_name='{}'""".format(
                        str(instrument)))
                query = '\n'.join([query, instrument_selection])
            if verbose:
                msgs.info('The query is:')
                msgs.info('{}'.format(str(query)))
            # Obtaining query results
            result_from_query = tapobs.search(query=query, maxrec=maxrec)
            if len(result_from_query) < 1:
                msgs.warning('No data has been retrieved')
            else:
                msgs.info(
                    'A total of {} entries have been retrieved for the union of the polygons'
                    .format(len(result_from_query)))
                msgs.info('From the following instruments:')
                for inst_name in np.unique(
                        result_from_query['instrument_name'].data):
                    msgs.info(' - {}'.format(inst_name.decode("utf-8")))
                if verbose:
                    result_from_query.to_table().pprint(max_width=-1)
            results_from_query.append(result_from_query)
        return results_from_query
Example #21
def get_header_from_archive(file_id,
                            text_file=None):  # written by Ema. 04.03.2020
    r"""Given a file ID the macro download the corresponding header.

    Args:
        file_id (`str`):
            ESO file ID for which the header will be downloaded
        text_file (`str`):
            text file where the header will be saved. If `None` it will be set to the same
            string as `file_id` but with a `.hdr` extension.

    """

    # checks for connection to ESO archive
    archive_url = default.get_value('eso_archive_url')
    if not checks.connection_to_website(archive_url, timeout=1):
        msgs.error('Cannot connect to the ESO archive website:\n {}'.format(
            archive_url))

    # checks for file id
    assert isinstance(file_id, (str, list)), 'file_id needs to be a str or a list'
    if isinstance(file_id, str):
        list_of_files = [file_id]
    else:
        list_of_files = file_id
    list_of_files = [
        files if not files.endswith('.fits') else files.replace('.fits', '')
        for files in list_of_files
    ]

    # checks for text_file
    assert isinstance(text_file, (str, list, bytes, type(None))), \
        'text_file needs to be a str or a list'
    if isinstance(text_file, str):
        if len(list_of_files) == 1:
            list_of_outputs = [text_file]
        else:
            list_of_outputs = [output + text_file for output in list_of_files]
    elif isinstance(text_file, list):
        if len(list_of_files) == len(text_file):
            list_of_outputs = text_file
        else:
            list_of_outputs = [files + '.hdr' for files in list_of_files]
    else:
        list_of_outputs = [files + '.hdr' for files in list_of_files]

    # Downloading headers
    for file_name, file_out in zip(list_of_files, list_of_outputs):
        if os.path.isfile(file_out):
            msgs.warning('Overwriting existing text file: {}'.format(file_out))
            os.remove(file_out)
        url_for_header = archive_url + 'hdr?DpId=' + file_name
        response_url = requests.get(url_for_header, allow_redirects=True)
        # Removing html from text
        header_txt = response_url.text.split('<pre>')[1].split('</pre>')[0]
        if not header_txt.startswith('No info found for'):
            with open(file_out, 'w') as file_header:
                for line in header_txt.splitlines():
                    file_header.write(line + '\n')
            msgs.info('Header successfully saved in: {}'.format(file_out))
        else:
            msgs.warning(
                '{} is not present in the ESO archive'.format(file_name))

    return
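
A usage sketch with placeholder file IDs (each header is saved to `<file_id>.hdr` when `text_file` is `None`):

# Download the headers of two (hypothetical) archive files
get_header_from_archive(['ADP.2020-06-26T17:03:57.771',
                         'ADP.2020-06-26T17:03:57.772'], text_file=None)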
Example #22
if __name__ == '__main__':
    args = parse_arguments()

    # getting fits names
    input_fits = str(args.input_fits[0])
    if args.output_fits is None:
        output_fits = input_fits.replace('.fits', '_fixed.fits')
    else:
        output_fits = str(args.output_fits[0])

    msgs.start()

    hdul_original = fits.open(input_fits)

    # the step that creates `hdul` is missing from the snippet; as a minimal
    # reconstruction, copy the input file and re-open the copy in update mode
    hdul_original.writeto(output_fits, overwrite=True)
    hdul_original.close()
    hdul = fits.open(output_fits, mode='update', checksum=True)
    msgs.work('Updating checksum and datasum')
    hdul[0].add_datasum()
    hdul[1].add_datasum()
    hdul[0].add_checksum(override_datasum=True)
    hdul[1].add_checksum(override_datasum=True)
    msgs.work('Flushing changes.')
    hdul.flush()
    hdul.close()

    msgs.newline()
    msgs.info('File {} produced.'.format(output_fits))
    msgs.end()
Example #23
def main(args):
    import numpy as np
    import os
    import shutil
    from astropy.coordinates import SkyCoord
    from astropy.coordinates import name_resolve
    from astropy import units as u
    from astropy.io import fits
    from ESOAsg.ancillary import cleaning_lists
    from ESOAsg.core import fitsfiles
    from ESOAsg import msgs

    # Cleaning input lists
    input_fits_files = cleaning_lists.make_list_of_fits_files(args.input_fits)
    # Make whitelight images
    if args.whitelight:
        make_whitelight_image = True
    else:
        make_whitelight_image = False
    # Creating output list
    if args.suffix is None:
        overwrite = False
        msgs.warning('The input files will be overwritten')
    else:
        overwrite = True
    suffix_string = cleaning_lists.make_string(args.suffix)
    output_fits_files = cleaning_lists.make_list_of_fits_files_and_append_suffix(
        input_fits_files, suffix=suffix_string)
    if make_whitelight_image:
        output_whitelight_files = cleaning_lists.make_list_of_fits_files_and_append_suffix(
            input_fits_files, suffix=suffix_string + '_WL')
    else:
        output_whitelight_files = [None] * len(input_fits_files)
    '''
    # reference
    if args.referenc is not None:
        reference = str(args.referenc[0])
    else:
        reference = str(' ')

    # fluxcal
    if args.fluxcal == 'ABSOLUTE':
        fluxcal = 'ABSOLUTE'
    elif args.fluxcal == 'UNCALIBRATED':
        fluxcal = 'UNCALIBRATED'
    else:
        msgs.error('Possible values for fluxcal are: `ABSOLUTE` or `UNCALIBRATED`')

    # abmaglim
    if args.abmaglim is not None:
        abmaglim = args.abmaglim
        assert isinstance(abmaglim, (int, np.float_)), 'ABMAGLIM must be a float'
        if abmaglim < 0:
            msgs.error('ABMAGLIM must be positive')
    else:
        abmaglim = np.float_(-1.)
    '''

    msgs.start()

    for fits_in, fits_out, image_out in zip(input_fits_files,
                                            output_fits_files,
                                            output_whitelight_files):
        if os.path.exists(fits_out):
            shutil.copy(fits_out, fits_out.replace('.fit', '_old.fit'))
            msgs.warning('{} already exists. Backup created.'.format(fits_out))
        if image_out is not None:
            if os.path.exists(image_out):
                shutil.copy(image_out, image_out.replace('.fit', '_old.fit'))
                msgs.warning(
                    '{} already exists. Backup created.'.format(image_out))

        full_hdul = fitsfiles.get_hdul(fits_in)
        try:
            instrument = full_hdul[0].header['HIERARCH ESO SEQ ARM']
        except KeyError:
            msgs.error(
                'Failed to read the keyword HIERARCH ESO SEQ ARM from the primary header'
            )
        finally:
            if instrument in SUPPORTED_INSTRUMENT:
                msgs.info(
                    'The input file is from SPHERE/{}'.format(instrument))
            else:
                msgs.warning(
                    'Instrument SPHERE/{} not supported'.format(instrument))

        # ToDo
        # These needs to be transformed in objects
        if instrument.startswith('IFS'):
            msgs.work('Fixing header for SPHERE/{} file {}'.format(
                instrument, fits_in))

            # Create a copy of the file where there is a primary HDU and data are in the 'DATA" HDU
            msgs.work('Reshaping cube into PrimaryHEADER and Data Header')
            fitsfiles.new_fits_like(fits_in, [0],
                                    fits_out,
                                    overwrite=overwrite,
                                    fix_header=True)
            hdul = fitsfiles.get_hdul(fits_out, 'update', checksum=True)
            hdr0 = hdul[0].header
            hdr1 = hdul[1].header

            # Check for HISTORY
            # Primary Header
            if 'HISTORY' in hdr0.keys():
                history_cards_hdr0 = [
                    history_card_hdr0 for history_card_hdr0 in hdr0
                    if history_card_hdr0.startswith('HISTORY')
                ]
                history_values_hdr0 = [
                    hdr0[history_card_hdr0] for history_card_hdr0 in hdr0
                    if history_card_hdr0.startswith('HISTORY')
                ]
                for history_card_hdr0, history_value_hdr0 in zip(
                        history_cards_hdr0, history_values_hdr0):
                    msgs.work('Cleaning cards: {} = {}'.format(
                        history_card_hdr0, history_value_hdr0))
                del hdr0['HISTORY'][:]
            # Data Header
            if 'HISTORY' in hdr1.keys():
                history_values_hdr1 = hdr1['HISTORY'][:]
                for history_number in range(0, len(history_values_hdr1)):
                    clean_history = cleaning_lists.remove_non_ascii(
                        history_values_hdr1[history_number])
                    if len(clean_history) > 0:
                        hdr1['HISTORY'][history_number] = str(clean_history)
                    else:
                        hdr1['HISTORY'][history_number] = str(' ')

            # Update cards for headers:
            # Updating values with different CARD in the header
            cards_input = [
                'CRPIX4', 'CRVAL4', 'CTYPE4', 'CUNIT4', 'CD4_4', 'CD1_4',
                'CD2_4', 'CD4_1', 'CD4_2'
            ]
            cards_output = [
                'CRPIX3', 'CRVAL3', 'CTYPE3', 'CUNIT3', 'CD3_3', 'CD1_3',
                'CD2_3', 'CD3_1', 'CD3_2'
            ]
            fitsfiles.transfer_header_cards(hdr1,
                                            hdr1,
                                            cards_input,
                                            output_cards=cards_output,
                                            delete_card=True)
            # Remove not used values
            cards_to_be_removed_hdr1 = ['CD4_3', 'CD3_4']
            for card_to_be_removed_hdr1 in cards_to_be_removed_hdr1:
                hdr1.remove(card_to_be_removed_hdr1, ignore_missing=True)

            # Transfer cards from HDU1 to the PrimaryHDU
            not_to_be_transfer = [
                hdr1_card for hdr1_card in hdr1
                if hdr1_card.startswith('COMMENT') or hdr1_card.startswith(
                    'EXTNAME') or hdr1_card.startswith('BITPIX') or
                hdr1_card.startswith('NAXIS') or hdr1_card.startswith('CRPIX')
                or hdr1_card.startswith('CRVAL') or hdr1_card.startswith(
                    'CDELT') or hdr1_card.startswith('CTYPE')
                or hdr1_card.startswith('CD1_') or hdr1_card.startswith('CD2_')
                or hdr1_card.startswith('CD3_') or hdr1_card.startswith(
                    'CUNIT') or hdr1_card.startswith('CSYER') or hdr1_card.
                startswith('HDUCLAS') or hdr1_card.startswith('XTENSION')
                or hdr1_card.startswith('PCOUNT') or hdr1_card.startswith(
                    'GCOUNT') or hdr1_card.startswith('HDUDOC') or hdr1_card.
                startswith('HDUVER') or hdr1_card.startswith('HISTORY')
            ]
            cards_to_be_transfer = []
            for hdr1_card in hdr1:
                if hdr1_card not in not_to_be_transfer:
                    cards_to_be_transfer.append(hdr1_card)
            fitsfiles.transfer_header_cards(hdr1,
                                            hdr0,
                                            cards_to_be_transfer,
                                            with_comment=True,
                                            delete_card=True)

            # Try to guess coordinates
            if 'CRVAL1' not in hdr1.keys():
                msgs.warning('CRVAL position keywords not present')
                if 'OBJECT' in hdr0.keys():
                    try:
                        object_coordinate = SkyCoord.from_name(
                            str(hdr0['OBJECT']).strip())
                        ra_obj, dec_obj = object_coordinate.ra.degree, object_coordinate.dec.degree
                        if 'RA' in hdr0.keys() and 'DEC' in hdr0.keys():
                            pointing_coordinate = SkyCoord(float(hdr0['RA']),
                                                           float(hdr0['DEC']),
                                                           unit='deg')
                            msgs.work(
                                'Testing the separation from the pointing position'
                            )
                            separation = object_coordinate.separation(
                                pointing_coordinate).arcsec
                            if separation < 120.:
                                msgs.info('Object - Pointing separation is {}'.
                                          format(separation))
                                msgs.info(
                                    'Updating CRVAL1 = {}'.format(ra_obj))
                                msgs.info(
                                    'Updating CRVAL2 = {}'.format(dec_obj))
                                hdr1['CRVAL1'] = ra_obj
                                hdr1['CRVAL2'] = dec_obj
                                msgs.work('Updating CUNIT')
                                hdr1['CUNIT1'] = 'deg'
                                hdr1['CUNIT2'] = 'deg'
                                msgs.work('Updating CTYPE')
                                hdr1['CTYPE1'] = 'RA---TAN'
                                hdr1['CTYPE2'] = 'DEC--TAN'
                                msgs.work('Updating CRPIX')
                                hdr1['CRPIX1'] = float(
                                    hdul[1].data.shape[2]) / 2.
                                hdr1['CRPIX2'] = float(
                                    hdul[1].data.shape[1]) / 2.
                                msgs.info('Updating CD1 and CD2')
                                hdr1['CD1_1'] = 2.06E-06
                                hdr1['CD2_2'] = 2.06E-06
                                hdr1['CD1_2'] = 0.
                                hdr1['CD2_1'] = 0.
                                msgs.work('Updating RA, DEC')
                                hdr0['RA'] = ra_obj
                                hdr0.comments[
                                    'RA'] = object_coordinate.ra.to_string(
                                        u.hour)
                                hdr0['DEC'] = dec_obj
                                hdr0.comments[
                                    'DEC'] = object_coordinate.dec.to_string(
                                        u.degree, alwayssign=True)
                            else:
                                msgs.warning(
                                    'Object - Pointing separation is {}'.
                                    format(separation))
                                msgs.warning(
                                    'This is suspicious, CRVAL not updated')
                    except name_resolve.NameResolveError:
                        msgs.warning('Object {} not recognized'.format(
                            str(hdr0['OBJECT']).strip()))
                        msgs.warning('CRVAL not updated')

            # Updating file prodcatg
            msgs.work('Updating PRODCATG to SCIENCE.CUBE.IFS')
            hdr0['PRODCATG'] = str('SCIENCE.CUBE.IFS')
            # Some more updates
            msgs.work('Setting NAXIS = 0 in primary header')
            hdr0['NAXIS'] = 0
            if 'OBSTECH' not in hdr0.keys():
                msgs.warning('OBSTECH missing')
                if 'ESO PRO TECH' in hdr0.keys():
                    msgs.info('Deriving OBSTECH from HIERARCH ESO PRO TECH')
                    msgs.work('Updating OBSTECH to {}'.format(
                        str(hdr0['HIERARCH ESO PRO TECH'])))
                    hdr0['OBSTECH'] = str(hdr0['HIERARCH ESO PRO TECH'])
            if 'EXPTIME' not in hdr0.keys():
                msgs.warning('EXPTIME missing')
                if 'ESO DET SEQ1 REALDIT' in hdr0.keys(
                ) and 'ESO DET NDIT' in hdr0.keys():
                    msgs.info('Deriving EXPTIME and TEXPTIME as REALDIT * NDIT')
                    hdr0['EXPTIME'] = hdr0[
                        'HIERARCH ESO DET SEQ1 REALDIT'] * hdr0[
                            'HIERARCH ESO DET NDIT']
                    hdr0['TEXPTIME'] = hdr0[
                        'HIERARCH ESO DET SEQ1 REALDIT'] * hdr0[
                            'HIERARCH ESO DET NDIT']
                    msgs.work('Updating EXPTIME to {}'.format(
                        str(hdr0['EXPTIME'])))
                    msgs.work('Updating TEXPTIME to {}'.format(
                        str(hdr0['TEXPTIME'])))
            if 'WAVELMIN' not in hdr0.keys():
                msgs.warning('WAVELMIN missing')
                z_pixel = np.arange(int(hdul[1].data.shape[0]))
                z_wave = float(
                    hdr1['CRVAL3']) + (z_pixel * float(hdr1['CD3_3']))
                if str(hdr1['CUNIT3']).strip().upper() == 'MICRONS':
                    msgs.info('Deriving WAVELMIN and WAVELMAX from CRVAL3 and CD3_3')
                    z_wave = z_wave * 1000.  # convert to nanometers
                    hdr0['WAVELMIN'] = np.nanmin(z_wave)
                    hdr0['WAVELMAX'] = np.nanmax(z_wave)
                    msgs.work('Updating WAVELMIN to {}'.format(
                        str(hdr0['WAVELMIN'])))
                    msgs.work('Updating WAVELMAX to {}'.format(
                        str(hdr0['WAVELMAX'])))
                else:
                    msgs.warning(
                        'Unknown units {}. WAVELMIN and WAVELMAX not calculated'
                        .format(str(hdr1['CUNIT3'])))
            if 'SPEC_RES' not in hdr0.keys():
                msgs.warning('SPEC_RES missing')
                if 'WAVELMAX' in hdr0.keys():
                    msgs.info('Deriving SPEC_RES from WAVELMAX')
                    if (float(hdr0['WAVELMAX']) > 1300.) and (float(
                            hdr0['WAVELMAX']) < 1400.):
                        hdr0['SPEC_RES'] = 50.
                        msgs.work('Updating SPEC_RES to {}'.format(
                            str(hdr0['SPEC_RES'])))
                    elif (float(hdr0['WAVELMAX']) > 1600.) and (float(
                            hdr0['WAVELMAX']) < 1700.):
                        hdr0['SPEC_RES'] = 30.
                        msgs.work('Updating SPEC_RES to {}'.format(
                            str(hdr0['SPEC_RES'])))
                    else:
                        msgs.warning('WAVELMAX = {} is not in the expected '
                                     'range of possible values'.format(
                                         str(hdr0['WAVELMAX'])))
            if 'PROG_ID' not in hdr0.keys():
                msgs.warning('PROG_ID missing')
                if 'ESO OBS PROG ID' in hdr0.keys():
                    msgs.info('Deriving PROG_ID from HIERARCH ESO OBS PROG ID')
                    msgs.work('Updating PROG_ID to {}'.format(
                        str(hdr0['HIERARCH ESO OBS PROG ID'])))
                    hdr0['PROG_ID'] = str(hdr0['HIERARCH ESO OBS PROG ID'])
            if 'MJD-END' not in hdr0.keys():
                msgs.warning('MJD-END missing')
                if 'TEXPTIME' in hdr0.keys():
                    msgs.info('Deriving MJD-END from MJD-OBS and TEXPTIME')
                    texptime_sec = float(hdr0['TEXPTIME'])
                    texptime_day = texptime_sec / (60. * 60. * 24.)
                    mjdend = float(hdr0['MJD-OBS']) + texptime_day
                    fitsfiles.add_header_card(hdr0, 'MJD-END', mjdend,
                                              'End of observation')
                    msgs.work('MJD-OBS = {} and TEXPTIME = {} days'.format(
                        str(hdr0['MJD-OBS']), str(texptime_day)))
                    msgs.work('Updating MJD-END to {}'.format(
                        str(hdr0['MJD-END'])))

            # Remove not used values
            cards_to_be_removed_hdr0 = ['ERRDATA', 'QUALDATA', 'SCIDATA']
            for card_to_be_removed_hdr0 in cards_to_be_removed_hdr0:
                hdr0.remove(card_to_be_removed_hdr0, ignore_missing=True)
            cards_to_be_removed_hdr1 = ['HDUCLASS3']
            for card_to_be_removed_hdr1 in cards_to_be_removed_hdr1:
                hdr1.remove(card_to_be_removed_hdr1, ignore_missing=True)

            # Updating the FITS file definition comment line
            hdr0.add_comment(
                "  FITS (Flexible Image Transport System) format is defined in 'Astronomy"
                "  and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H",
                after='EXTEND')
            if 'COMMENT' in hdr1.keys():
                comment_values_hdr1 = hdr1['COMMENT'][:]
                for index, comment_value_hdr1 in enumerate(
                        comment_values_hdr1):
                    msgs.work('Removing COMMENT card : {}'.format(
                        comment_value_hdr1))
                hdr1.remove('COMMENT', ignore_missing=True, remove_all=True)

            # Creating white light image keyword:
            if make_whitelight_image:
                fitsfiles.add_header_card(
                    hdr0, 'ASSON1',
                    image_out.split('/')[-1],
                    'ANCILLARY.IMAGE.WHITELIGHT filename')
                msgs.work('Updating ASSON1 to {}'.format(hdr0['ASSON1']))

            # Actually creating the white-light image
            if make_whitelight_image:
                msgs.info('Making white light image')
                image_hdu = fits.PrimaryHDU()
                image_hdul = fits.HDUList([image_hdu])
                if str(hdr1['CUNIT3']).strip().upper() == 'MICRONS':
                    to_ang = 10000.
                else:
                    msgs.error('Spectral unit: {} not recognized'.format(
                        hdr1['CUNIT3']))
                delta_wave_bin = hdr1['CD3_3']
                image_hdul.append(
                    fits.ImageHDU(
                        to_ang * delta_wave_bin *
                        np.nansum(hdul[1].data, axis=0, dtype=np.float_)))
                image_hdr0 = image_hdul[0].header
                image_hdr1 = image_hdul[1].header
                card_for_image0 = [
                    'WAVELMIN', 'WAVELMAX', 'OBJECT', 'TELESCOP', 'INSTRUME',
                    'RADECSYS', 'RA', 'DEC', 'EQUINOX'
                ]
                fitsfiles.transfer_header_cards(hdr0,
                                                image_hdr0,
                                                card_for_image0,
                                                with_comment=True,
                                                delete_card=False)
                image_hdr0['PRODCATG'] = str('ANCILLARY.IMAGE.WHITELIGHT')

                card_for_image1 = [
                    'CRPIX1', 'CRPIX2', 'CRVAL1', 'CRVAL2', 'CUNIT1', 'CUNIT2',
                    'NAXIS1', 'NAXIS2', 'EXTNAME', 'CD1_1', 'CD1_2', 'CD2_1',
                    'CD2_2', 'CTYPE1', 'CTYPE2'
                ]
                fitsfiles.transfer_header_cards(hdr1,
                                                image_hdr1,
                                                card_for_image1,
                                                with_comment=True,
                                                delete_card=False)

            # Update checksum and datasum
            msgs.work('Updating checksum and datasum')
            hdul[0].add_checksum(override_datasum=False)
            hdul[1].add_datasum()
            hdul[1].add_checksum(override_datasum=True)
            hdul.flush(output_verify='fix')
            hdul.close()
            msgs.info('File {} produced.'.format(fits_out))
            if make_whitelight_image:
                image_hdul[0].add_datasum()
                image_hdul[1].add_datasum()
                image_hdul[0].add_checksum(override_datasum=True)
                image_hdul[1].add_checksum(override_datasum=True)
                image_hdul.writeto(image_out,
                                   overwrite=True,
                                   output_verify='fix')
                msgs.info('Image {} produced.'.format(image_out))

        elif instrument.startswith('IRDIS'):
            msgs.work('Fixing header for SPHERE/{} file {}'.format(
                instrument, fits_in))
            hdr = fitsfiles.header_from_fits_file(fits_in)
            if 'ESO DPR TECH' in hdr.keys():
                if str(hdr['ESO DPR TECH']).strip(
                ) == 'IMAGE,DUAL,CORONOGRAPHY':
                    msgs.work('Working with {} as observing technique'.format(
                        str(hdr['ESO DPR TECH']).strip()))
                elif 'DUAL' in str(hdr['ESO DPR TECH']).strip():
                    msgs.error('{} needs to be tested'.format(
                        str(hdr['ESO DPR TECH']).strip()))
                else:
                    msgs.error('Only DUAL imaging currently implemented')
            else:
                msgs.error('Cannot recognize the observing technique')

            # defining the two fits_out files:
            fits_out_index = [0, 1]
            fits_out_files = []
            for index in fits_out_index:
                fits_out_file = fits_out.replace('.fit',
                                                 '_' + str(index) + '.fit')
                if os.path.exists(fits_out_file):
                    shutil.copy(fits_out_file,
                                fits_out_file.replace('.fit', '_old.fit'))
                    msgs.warning('{} already exists. Backup created.'.format(
                        fits_out_file))
                fitsfiles.new_fits_like(fits_in, [0],
                                        fits_out_file,
                                        overwrite=overwrite,
                                        fix_header=True,
                                        empty_primary_hdu=False)
                fits_out_files.append(fits_out_file)

            for index, fits_out_file in zip(fits_out_index, fits_out_files):
                hdul = fitsfiles.get_hdul(fits_out_file,
                                          'update',
                                          checksum=True)
                hdr0 = hdul[0].header
                hdul[0].data = hdul[0].data[index, :, :]
                # Check for HISTORY
                # Primary Header
                if 'HISTORY' in hdr0.keys():
                    history_cards_hdr0 = [
                        history_card_hdr0 for history_card_hdr0 in hdr0
                        if history_card_hdr0.startswith('HISTORY')
                    ]
                    history_values_hdr0 = [
                        hdr0[history_card_hdr0] for history_card_hdr0 in hdr0
                        if history_card_hdr0.startswith('HISTORY')
                    ]
                    for history_card_hdr0, history_value_hdr0 in zip(
                            history_cards_hdr0, history_values_hdr0):
                        msgs.work('Cleaning cards: {} = {}'.format(
                            history_card_hdr0, history_value_hdr0))
                    del hdr0['HISTORY'][:]
                # Try to guess coordinates
                if 'CRVAL1' not in hdr0.keys():
                    msgs.warning('CRVAL position keywords not present')
                    if 'OBJECT' in hdr0.keys():
                        try:
                            object_coordinate = SkyCoord.from_name(
                                str(hdr0['OBJECT']).strip())
                            ra_obj, dec_obj = object_coordinate.ra.degree, object_coordinate.dec.degree
                            if 'RA' in hdr0.keys() and 'DEC' in hdr0.keys():
                                pointing_coordinate = SkyCoord(
                                    float(hdr0['RA']),
                                    float(hdr0['DEC']),
                                    unit='deg')
                                msgs.work(
                                    'Testing the separation from the pointing position'
                                )
                                separation = object_coordinate.separation(
                                    pointing_coordinate).arcsec
                                if separation < 120.:
                                    msgs.info(
                                        'Object - Pointing separation is {}'.
                                        format(separation))
                                    msgs.info(
                                        'Updating CRVAL1 = {}'.format(ra_obj))
                                    msgs.info(
                                        'Updating CRVAL2 = {}'.format(dec_obj))
                                    hdr0['CRVAL1'] = ra_obj
                                    hdr0['CRVAL2'] = dec_obj
                                    msgs.work('Updating CUNIT')
                                    hdr0['CUNIT1'] = 'deg'
                                    hdr0['CUNIT2'] = 'deg'
                                    msgs.work('Updating CTYPE')
                                    hdr0['CTYPE1'] = 'RA---TAN'
                                    hdr0['CTYPE2'] = 'DEC--TAN'
                                    msgs.work('Updating CRPIX')
                                    hdr0['CRPIX1'] = float(
                                        hdul[0].data.shape[1]) / 2.
                                    hdr0['CRPIX2'] = float(
                                        hdul[0].data.shape[0]) / 2.
                                    msgs.info('Updating CD1 and CD2')
                                    hdr0['CD1_1'] = hdr0[
                                        'PIXSCAL'] * 2.778E-4 / 1000.
                                    hdr0['CD2_2'] = hdr0[
                                        'PIXSCAL'] * 2.778E-4 / 1000.
                                    hdr0['CD1_2'] = 0.
                                    hdr0['CD2_1'] = 0.
                                    msgs.work('Updating RA, DEC')
                                    hdr0['RA'] = ra_obj
                                    hdr0.comments[
                                        'RA'] = object_coordinate.ra.to_string(
                                            u.hour)
                                    hdr0['DEC'] = dec_obj
                                    hdr0.comments[
                                        'DEC'] = object_coordinate.dec.to_string(
                                            u.degree, alwayssign=True)
                                else:
                                    msgs.warning(
                                        'Object - Pointing separation is {}'.
                                        format(separation))
                                    msgs.warning(
                                        'This is suspicious, CRVAL not updated'
                                    )
                        except name_resolve.NameResolveError:
                            msgs.warning('Object {} not recognized'.format(
                                str(hdr0['OBJECT']).strip()))
                            msgs.warning('CRVAL not updated')

                # Updating the PRODCATG card
                msgs.work('Updating PRODCATG to SCIENCE.IMAGE')
                hdr0['PRODCATG'] = str('SCIENCE.IMAGE')

                if 'PROG_ID' not in hdr0.keys():
                    msgs.warning('PROG_ID missing')
                    if 'ESO OBS PROG ID' in hdr0.keys():
                        msgs.info(
                            'Deriving PROG_ID from HIERARCH ESO OBS PROG ID')
                        msgs.work('Updating PROG_ID to {}'.format(
                            str(hdr0['HIERARCH ESO OBS PROG ID'])))
                        hdr0['PROG_ID'] = str(hdr0['HIERARCH ESO OBS PROG ID'])

                # Update checksum and datasum
                msgs.work('Updating checksum and datasum')
                hdul[0].add_checksum(override_datasum=False)
                hdul.flush(output_verify='fix')
                hdul.close()
        else:
            msgs.warning(
                'The Instrument {} is not supported \nThe file {} will not be processed'
                .format(instrument, fits_in))

    msgs.end()
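
# A minimal, self-contained sketch (not part of the original example) of the
# coordinate-guessing step above: resolve the OBJECT card with SkyCoord and
# accept the result only if it lies close to the telescope pointing given by
# the RA/DEC cards. The 120 arcsec tolerance mirrors the threshold used above;
# the helper name is illustrative.
from astropy.coordinates import SkyCoord
from astropy.coordinates.name_resolve import NameResolveError

def guess_crval_from_object(hdr, max_separation_arcsec=120.):
    """Return (ra, dec) in degrees resolved from OBJECT, or None if unsafe."""
    try:
        object_coord = SkyCoord.from_name(str(hdr['OBJECT']).strip())
    except NameResolveError:
        return None
    pointing = SkyCoord(float(hdr['RA']), float(hdr['DEC']), unit='deg')
    if object_coord.separation(pointing).arcsec < max_separation_arcsec:
        return object_coord.ra.degree, object_coord.dec.degree
    return None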
Exemple #24
0
                    (WAVELMAX + WAVELMIN) * to_ang / 2.,
                    2.) * whitelight_std * n_pixels_psf / (delta_wave_bin *
                                                           to_ang)
                hdr0['ABMAGLIM'] = -2.5 * np.log10(five_sigma_nu / 3631.)
                msgs.warning(
                    'ABMAGLIM={}. This is most probably not correct at the moment'
                    .format(hdr0['ABMAGLIM']))

        # 6. update checksum and datasum
        msgs.work('Updating checksum and datasum')
        hdul[0].add_datasum()
        hdul[1].add_datasum()
        hdul[0].add_checksum(override_datasum=True)
        hdul[1].add_checksum(override_datasum=True)
        image_hdul[0].add_datasum()
        image_hdul[1].add_datasum()
        image_hdul[0].add_checksum(override_datasum=True)
        image_hdul[1].add_checksum(override_datasum=True)
        """
        # CARDS are already present in hdr0
        # HDUCLASS', 'HDUCLAS1', 'HDUCLAS2', 'HDUDOC', 'HDUVERS'
        """

        image_hdul.writeto(image_out)
        hdul.flush()
        hdul.close()
        msgs.info('File {} produced.'.format(fits_out))
        msgs.info('Image {} produced.'.format(image_out))

    msgs.end()
Exemple #25
0
def main(args):
    from ESOAsg.ancillary import astro
    from ESOAsg.ancillary import cleaning_lists
    from ESOAsg.ancillary import polygons
    from ESOAsg import archive_science_portal
    from ESOAsg import archive_observations
    from ESOAsg import msgs

    # Cleaning input lists
    input_fits_files = cleaning_lists.make_list_of_fits_files(args.input_fits)
    instruments = cleaning_lists.from_element_to_list(args.instruments,
                                                      element_type=str)
    data_types = cleaning_lists.from_element_to_list(args.data_types,
                                                     element_type=str)

    # Cleaning input values
    confidence_level = cleaning_lists.from_element_to_list(
        args.confidence_level, element_type=float)[0]
    maxrec = cleaning_lists.from_element_to_list(args.maxrec,
                                                 element_type=int)[0]
    max_vertices = cleaning_lists.from_element_to_list(args.max_vertices,
                                                       element_type=int)[0]

    # Cleaning bool
    show_figure = args.show_sky
    # Show link
    show_asp = args.asp_link
    # Download data
    download_data = args.download_data

    msgs.start()

    for fits_file in input_fits_files:
        msgs.info(' ')
        msgs.info('Working on file: {}'.format(fits_file))
        msgs.info(' ')
        contours = astro.contours_from_gw_bayestar(
            fits_file, credible_level=confidence_level)
        astro.show_contours_from_gw_bayestar(
            fits_file,
            contours=contours,
            cmap='afmhot',
            contours_color='white',
            show_figure=show_figure,
            matplotlib_backend=STARTING_MATPLOTLIB_BACKEND)
        polygons_list = polygons.contours_to_polygons(
            contours, max_vertices=max_vertices)

        if show_asp:
            msgs.info('Opening links to ASP')
            archive_science_portal.query_from_polygons(polygons=polygons_list,
                                                       open_link=True)

        results_from_query = archive_observations.query_from_polygons(
            polygons=polygons_list,
            maxrec=maxrec,
            instruments=instruments,
            data_types=data_types,
            verbose=False)
        msgs.info(' ')
        if download_data:
            if results_from_query is None:
                msgs.warning('No data retrieved')
            elif isinstance(results_from_query, list):
                for idx_poly, result_from_query in enumerate(
                        results_from_query):
                    msgs.info(
                        'Downloading data for polygon N.{}'.format(idx_poly +
                                                                   1))
                    archive_observations.download(result_from_query['dp_id'])
            else:
                msgs.info('Downloading data')
                archive_observations.download(results_from_query['dp_id'])
        msgs.info(' ')

    msgs.end()
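
# Usage sketch (assumption: this script reads the attributes above from an
# argparse parser). An equivalent call from Python, with placeholder values:
#
#     from argparse import Namespace
#     main(Namespace(input_fits='bayestar.fits.gz',
#                    instruments=None, data_types=None,
#                    confidence_level=0.5, maxrec=100, max_vertices=30,
#                    show_sky=True, asp_link=False, download_data=False))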
Exemple #26
0
    change0_comments.append(' ')

    change0_cards.append('CONTNORM')
    change0_values.append(False)
    change0_comments.append(' ')

    change0_cards.append('TOT_FLUX')
    change0_values.append(False)
    change0_comments.append(' ')

    change0_cards.append('FLUXERR')
    change0_values.append(-2)
    change0_comments.append(' ')

    change0_cards.append('SNR')
    snr = np.nanmedian(FLUX[FLUX > 0.] / ERR[FLUX > 0.])
    msgs.info('Calculated SNR={}'.format(snr))
    change0_values.append(snr)
    change0_comments.append(' ')

    change0_cards.append('NCOMBINE')
    change0_values.append(len(fullHeader['MJD-END'][this_object].data))
    change0_comments.append(' ')

    change0_cards.append('DATE')
    change0_values.append(fullHeader['DATE'][this_object].data[0])
    change0_comments.append(' ')

    # SPEC_RES
    change0_cards.append('SPEC_RES')
    change0_values.append(3000)
    change0_comments.append('Spectral resolution')
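
# Hedged sketch (not from the original snippet): parallel card/value/comment
# lists like the ones built above can be written back to a primary header in
# one pass. `fits.open` is standard astropy; the helper itself is illustrative.
from astropy.io import fits

def apply_cards(fits_name, cards, values, comments):
    """Write parallel (card, value, comment) lists into the primary header."""
    with fits.open(fits_name, mode='update') as hdul:
        for card, value, comment in zip(cards, values, comments):
            # Assigning a (value, comment) tuple sets both fields of the card
            hdul[0].header[card] = (value, comment)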
Exemple #27
0
    def compare_with(self,
                     second_list,
                     check_cards=None,
                     on_terminal=True,
                     on_file=None):
        r"""Compare two list objects. The code will go through all cards and values present in the
        input list (or the `check_cards` subset if set not to None) and compare with cards and values in the
        `second_list`.

        Args:
            second_list (`Lists`):
                List that you want to compare with the input list.
            check_cards (`np.array`):
                List of cards to be compared. If `None`, all cards in the input list are used.
            on_terminal (`bool`):
                if `True`, the `cards` and respective `values` are printed on the terminal.
            on_file (`str`, `None`):
                if not `None`, `cards` and respective `values` will be stored in this text file

        Returns:
            first_cards (`np.array`):
                `cards` present in both the 1st and the 2nd lists.
            first_values (`np.array`):
                `values` from the 1st list associated to the `cards`
            second_values (`np.array`):
                `values` from the 2nd list associated to the `cards`
        """

        if not isinstance(second_list, Lists):
            msgs.error('The second list is not a Lists object.')
        if not isinstance(on_terminal, bool):
            msgs.error('The on_terminal option should be a bool.')

        if check_cards is None:
            check_cards, _ = self.get_cards(check_cards)

        # loading the first list:
        _first_cards, _first_values, _first_missing = self.get_values(
            check_cards=check_cards)
        msgs.work('Checking for cards in the second_list')
        # comparing with values from the second list:
        _second_cards, _second_values, _second_missing = second_list.get_values(
            check_cards=check_cards)

        # define where cards are present in both lists
        _overlap = np.isin(_first_cards, _second_cards)
        first_cards, first_values, first_missing = self.get_values(
            check_cards=_first_cards[_overlap])
        second_cards, second_values, second_missing = second_list.get_values(
            check_cards=_first_cards[_overlap])

        # Printing results
        if on_terminal:
            msgs.info(
                'Cards and values from both the first and the second list:')
        _print_1cards_2values(first_cards,
                              first_values,
                              second_values,
                              on_terminal=on_terminal,
                              on_file=on_file)

        if on_terminal:
            # Case of all cards present in the first and in the second lists
            if np.size(_first_missing) == 0 and np.size(_second_missing) == 0:
                msgs.info('All cards are present in both lists')
            # Case of some cards missing in the first list but present in the second:
            elif np.size(_first_missing) != 0 and np.size(
                    _second_missing) == 0:
                _only_second = np.isin(check_cards, _first_missing)
                _only_second_cards, _only_second_values, _only_second_missing = second_list.get_values(
                    check_cards=check_cards[_only_second])
                msgs.warning('{} Cards are missing in the first list,'.format(
                    np.size(_first_missing)))
                msgs.warning('but present in the second:')
                _print_1cards_1values(_only_second_cards,
                                      _only_second_values,
                                      on_terminal=on_terminal,
                                      on_file=None)
            # Case of some cards missing in the second list but present in the first:
            elif np.size(
                    _first_missing) == 0 and np.size(_second_missing) != 0:
                _only_first = np.isin(check_cards, _second_missing)
                _only_first_cards, _only_first_values, _only_first_missing = self.get_values(
                    check_cards=check_cards[_only_first])
                msgs.warning('{} Cards are missing in the second list,'.format(
                    np.size(_second_missing)))
                msgs.warning('but present in the first:')
                _print_1cards_1values(_only_first_cards,
                                      _only_first_values,
                                      on_terminal=on_terminal,
                                      on_file=None)
            # Case of some cards missing in the first and in the second list:
            else:
                _first_missing = ~np.isin(check_cards, _first_cards)
                _second_missing = ~np.isin(check_cards, _second_cards)
                _both_missing = _first_missing * _second_missing
                _first_only_missing = _first_missing * ~_both_missing
                _second_only_missing = _second_missing * ~_both_missing
                if np.size(check_cards[_both_missing]) != 0:
                    msgs.warning(
                        '{} Cards are missing from both lists:'.format(
                            np.size(check_cards[_both_missing])))
                    for both_missing in check_cards[_both_missing]:
                        print(' ----------> ' + both_missing)
                if np.size(check_cards[_second_only_missing]) != 0:
                    _only_first_cards, _only_first_values, _only_first_missing = self.get_values(
                        check_cards=check_cards[_second_only_missing])
                    msgs.warning(
                        '{} Cards are missing in the second list,'.format(
                            np.size(check_cards[_second_only_missing])))
                    msgs.warning('but present in the first:')
                    _print_1cards_1values(_only_first_cards,
                                          _only_first_values,
                                          on_terminal=on_terminal,
                                          on_file=None)
                if np.size(check_cards[_first_only_missing]) != 0:
                    _only_second_cards, _only_second_values, _only_second_missing = second_list.get_values(
                        check_cards=check_cards[_first_only_missing])
                    msgs.warning(
                        '{} Cards are missing in the first list,'.format(
                            np.size(check_cards[_first_only_missing])))
                    msgs.warning('but present in the second:')
                    _print_1cards_1values(_only_second_cards,
                                          _only_second_values,
                                          on_terminal=on_terminal,
                                          on_file=None)

        return first_cards, first_values, second_values
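
# Usage sketch (assumed API, mirroring the docstring above): compare the cards
# of two `Lists` objects, print the differences on the terminal, and also save
# them to a text file. How `Lists` is constructed depends on this module.
#
#     cards, values_1, values_2 = first_list.compare_with(second_list,
#                                                         check_cards=None,
#                                                         on_terminal=True,
#                                                         on_file='diff.txt')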
Exemple #28
0
def query_from_radec(positions,
                     radius=None,
                     instruments=None,
                     data_types=None,
                     verbose=False,
                     maxrec=default.get_value('maxrec')):
    r"""Query the ESO TAP service given a position in RA and Dec.

    The `positions` value (or list) needs to be given as an `astropy.coordinates.SkyCoord` object.

    Args:
        positions (`astropy.coordinates.SkyCoord`):
            Coordinates (or list of coordinates) of the sky you want to query in the format of an
            `astropy.coordinates.SkyCoord` object. For further detail see here:
            `astropy coordinates <https://docs.astropy.org/en/stable/coordinates/>`_
        radius (`float`):
            Search radius you want to query in arcseconds. Note that in case `None` is given, the query will be
            performed with the `INTERSECT(POINT('',RA,Dec), s_region)` clause instead of the
            `INTERSECT(s_region,CIRCLE('',RA,Dec,radius/3600.))` one. See here for further examples:
            `tap obs examples <http://archive.eso.org/tap_obs/examples>`_
        instruments (`list`):
            Limit the search to the selected list of instruments (e.g., `XSHOOTER`)
        data_types (`list`):
            Limit the search to the selected types of data (e.g., `spectrum`)
        verbose (`bool`):
            if set to `True` additional info will be displayed
        maxrec (`int`):
            Define the maximum number of files that a single query can return from the ESO archive. The default
            value is set in the `default.txt` file.

    Returns:
        results_from_query (`list`):
            Results from the query in a list with the same length as the input positions. Currently it contains:
            target_name, dp_id, s_ra, s_dec, t_exptime, em_min, em_max, dataproduct_type, instrument_name,
            abmaglim, proposal_id, obs_collection
    """
    # Check inputs:
    # Working on positions
    if isinstance(positions, list):
        positions_list = positions
    else:
        positions_list = [positions]
    for position in positions_list:
        assert isinstance(
            position,
            coordinates.SkyCoord), r'Input positions not a SkyCoord object'
    # Working on radius
    if radius is not None:
        if isinstance(radius, int):
            radius = float(radius)
        else:
            assert isinstance(radius, float), r'Input radius is not a number'
    # Working on instruments
    if instruments is not None:
        if isinstance(instruments, list):
            instruments_list = instruments
        else:
            instruments_list = [instruments]
        for instrument in instruments_list:
            assert isinstance(
                instrument,
                str), r'Input instrument: {} not valid'.format(instrument)
    # Working on data_types
    if data_types is not None:
        if isinstance(data_types, list):
            data_types_list = data_types
        else:
            data_types_list = [data_types]
        for data_type in data_types_list:
            assert isinstance(
                data_type,
                str), r'Input data type: {} not valid'.format(data_type)

    # Running over all positions
    if verbose:
        how_many_positions = len(positions_list)
        if how_many_positions > 1:
            msgs.work(
                'Exploring ESO archive around {} locations in the sky'.format(
                    how_many_positions))
        else:
            msgs.work(
                'Exploring ESO archive around the input location in the sky')

    results_from_query = []

    for idx, position in enumerate(positions_list):
        # transform_to returns a new SkyCoord; the result must be re-assigned
        position = position.transform_to(ICRS)
        ra, dec = float(position.ra.degree), float(position.dec.degree)
        msgs.work(
            'Running query {} to the ESO archive (out of {} total)'.format(
                idx + 1, len(positions_list)))

        # Define query
        # base query:
        query = _query_obscore_base()
        # selection of the location:
        query = query + _query_obscore_intersect_ra_dec(ra, dec, radius=radius)
        # selection of the instrument(s)
        if instruments is not None:
            query = query + _query_obscore_select_instruments(instruments_list)
        # selection of the data_type(s)
        if data_types is not None:
            query = query + _query_obscore_select_data_types(data_types_list)

        # running query and append results to the list
        result_from_query = _run_query(query,
                                       verbose=verbose,
                                       remove_bytes=True,
                                       maxrec=maxrec)

        if len(result_from_query) < 1:
            msgs.warning('No data has been retrieved')
        else:
            msgs.info('A total of {} entries have been retrieved'.format(
                len(result_from_query)))
            if verbose:
                msgs.info('For the following instrument(s):')
                for inst_name in np.unique(
                        result_from_query['instrument_name'].data):
                    msgs.info(' - {}'.format(inst_name))

        results_from_query.append(result_from_query)
    return results_from_query
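
# Usage sketch, following the signature above: query within 10 arcsec of a
# position for XSHOOTER spectra. The coordinates here are placeholders.
#
#     from astropy.coordinates import SkyCoord
#     position = SkyCoord(ra=53.16, dec=-27.78, unit='deg')
#     results = query_from_radec(position, radius=10.,
#                                instruments=['XSHOOTER'],
#                                data_types=['spectrum'],
#                                verbose=True)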
Exemple #29
0
def get_catalogues(collections=None,
                   tables=None,
                   columns=None,
                   type_of_query='sync',
                   all_versions=False,
                   maxrec=None,
                   verbose=False):
    r"""Query the ESO tap_cat service for specific catalogues

    There are two ways to select the catalogues you are interested in: either select directly the table_name (or the
    list of table_names) that you want to query, or select a collection (or a list of collections). In the latter
    case, the code searches in the background for the table(s) corresponding to the given collection and queries
    them.

    If you ask for more than one table, the result is returned as a list of `astropy.tables`, with one element per
    retrieved table.

    Args:
        collections (any): list of `str` (or a single `str`) containing the names of the collections to which
            the query will be limited
        tables (any): list of `str` containing the table_name of the tables to which the query will be limited
        columns (any): list of the `column_name` that you want to download. The full list of the columns in a
            table can be found by running `columns_info()`
        all_versions (bool): if set to `True` also obsolete versions of the catalogues are searched in case
            `collections` is given
        type_of_query (str): type of query to be run
        maxrec (int, optional): define the maximum number of entries that a single query can return. If it is `None` the
            value is set by the limit of the service.
        verbose (bool): if set to `True` additional info will be displayed

    Returns:
        any: `astropy.table` or `list` of `astropy.tables` containing the queried catalogues

    """
    # ToDo EMA: conditions to select properties from catalogues should be added in this. Both with ANDs and with ORs
    # Obtain list of all tables derived from the merger of collections and tables
    clean_tables = _is_collection_and_table_list_at_eso(
        collections=collections, tables=tables, all_versions=all_versions)

    # if maxrec is set to None, the entire length of the catalogue is considered:
    maxrec_list = _get_catalogue_length_from_tables(clean_tables,
                                                    maxrec=maxrec,
                                                    all_versions=all_versions)

    list_of_catalogues = []
    for table_name, maxrec_for_table in zip(clean_tables, maxrec_list):
        # test for columns
        columns_in_table = _is_column_list_in_catalogues(columns,
                                                         tables=table_name)
        # instantiate ESOCatalogues
        query_table = query_catalogues.ESOCatalogues(
            query=tap_queries.create_query_table(table_name,
                                                 columns=columns_in_table),
            type_of_query=type_of_query,
            maxrec=maxrec_for_table)
        if verbose:
            query_table.print_query()
        query_table.run_query(to_string=True)
        catalogue = query_table.get_result_from_query()
        list_of_catalogues.append(catalogue)
        msgs.info(
            'The query to {} returned {} entries (with a limit set to maxrec={})'
            .format(table_name, len(catalogue), maxrec_for_table))
    if len(list_of_catalogues) == 0:
        return None
    elif len(list_of_catalogues) == 1:
        return list_of_catalogues[0]
    else:
        return list_of_catalogues
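
# Usage sketch, following the docstring above: retrieve two columns from every
# table of a given collection. Collection and column names are placeholders.
#
#     catalogue = get_catalogues(collections='KiDS',
#                                columns=['RAJ2000', 'DECJ2000'],
#                                maxrec=100, verbose=True)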
Exemple #30
0
    # cards
    input_cards = cleaning_lists.make_list_of_strings(args.cards)

    # Starting to read the header(s)
    msgs.start()

    # Initialize the summary lists once, so the final report covers all files
    # and HDUs (re-initializing them inside the loop would keep only the last
    # iteration).
    original_files, prov_files, original_cards, prov_cards = [], [], [], []
    for fits_file in input_fits_files:
        for which_hdu in hdu_numbers:
            hdr = fitsfiles.header_from_fits_file(fits_file,
                                                  which_hdu=which_hdu)
            if 'PROV1' in hdr:
                for prov_number in hdr['PROV*']:
                    file_id = hdr[prov_number].replace('.fits', '')
                    phase3.get_header_from_archive(file_id,
                                                   text_file=file_id + '.hdr')
                    hdr_prov = fitsfiles.header_from_txt_file(file_id + '.hdr')
                    for card in input_cards:
                        original_files.append(fits_file)
                        prov_files.append(file_id)
                        original_cards.append(card)
                        prov_cards.append(hdr_prov[card])
    msgs.info('Summary:')
    msgs.info('Input <- Provenance   -  card -> Value')
    for original_file, prov_file, original_card, prov_card in zip(
            original_files, prov_files, original_cards, prov_cards):
        print('{} <- {}  -  {} -> {}'.format(original_file, prov_file,
                                             original_card, prov_card))

    msgs.end()
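
# Hedged aside: `hdr['PROV*']` above uses astropy's wildcard card access, which
# returns all keywords matching the pattern (PROV1, PROV2, ...); iterating over
# the result yields the matching keyword names. A minimal illustration:
#
#     for prov_key in hdr['PROV*']:
#         print(prov_key, '->', hdr[prov_key])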