Example #1
def download(dp_ids,
             min_disk_space=float(default.get_value('min_disk_space'))):
    r"""Given a filename in the ADP format, the code download the file from the
    `ESO archive <http://archive.eso.org>`_

    Args:
        dp_ids (any): list of data product IDs (or a single ID) to be downloaded
        min_disk_space (float): the file will be downloaded only if there is this amount of space (in Gb) free on the
            disk

    Returns:
        None

    """
    # Check for disk space
    checks.check_disk_space(min_disk_space=min_disk_space)
    # Cleaning list
    dp_ids_list = cleaning_lists.from_element_to_list(
        cleaning_lists.from_bytes_to_string(dp_ids), element_type=str)
    for dp_id in dp_ids_list:
        # Given a dp_id of a public file, the link to download it is constructed as follows:
        download_url = 'http://archive.eso.org/datalink/links?ID=ivo://eso.org/ID?{}&eso_download=file'.format(
            dp_id)
        msgs.work('Retrieving file {}.fits'.format(dp_id))
        urllib.request.urlretrieve(download_url, filename=dp_id + '.fits')
        msgs.info('File {}.fits downloaded'.format(dp_id))
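
A minimal usage sketch (not part of the original listing); the import path and the dp_id are illustrative assumptions, not taken from the code above:

# the module path and the dp_id below are assumptions for illustration only
from ESOAsg.archive_access import download  # hypothetical import path

download(['ADP.2020-06-26T01:05:29.773'])  # writes ADP...fits into the current directory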
Example #2
    def load_from_table(self,
                        table,
                        primary_header=None,
                        copy_header=True,
                        where_time='TIME',
                        where_time_bin='TIME_BIN',
                        where_flux='FLUX',
                        where_error='ERROR',
                        where_background='BACKGROUND',
                        where_quality='QUAL'):
        r"""Given a table put it in a LightCurves object

        Args:
            table (`fits.BinTableHDU`, `fits.TableHDU`): table to be loaded into the object
            primary_header (`fits.Header`, `None`): primary header to be attached to the object
            copy_header (`bool`): if `True`, the table header is copied into the object
            where_time (`str`): name of the column containing the time
            where_time_bin (`str`): name of the column containing the time bin
            where_flux (`str`): name of the column containing the flux
            where_error (`str`): name of the column containing the error on the flux
            where_background (`str`): name of the column containing the background
            where_quality (`str`): name of the column containing the quality flag
        """
        if checks.table_is_valid(table):
            msgs.work('Reading input table')

        if primary_header is not None:
            if len(primary_header) > 0:
                self.primary_header = primary_header
            else:
                msgs.warning('Empty `primary_header` provided')

        if copy_header:
            if len(table.header) > 0:
                self.header = table.header
            else:
                msgs.warning('No header found in the table')

        if isinstance(table, fits.BinTableHDU):
            self._load_from_BinTableHDU(table,
                                        copy_header=copy_header,
                                        where_time=where_time,
                                        where_time_bin=where_time_bin,
                                        where_flux=where_flux,
                                        where_error=where_error,
                                        where_background=where_background,
                                        where_quality=where_quality)
        elif isinstance(table, fits.TableHDU):
            # ToDo implement TableHDU case
            msgs.error('To be implemented')
        else:
            msgs.error('Unknown table type')
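
A minimal usage sketch, assuming a `LightCurves` class that exposes the method above; the table is built with astropy so the default `where_*` column names resolve:

from astropy.io import fits
import numpy as np

time = np.arange(10, dtype=np.float64)
table = fits.BinTableHDU.from_columns([
    fits.Column(name='TIME', format='D', array=time),
    fits.Column(name='FLUX', format='D', array=np.ones_like(time)),
])
# light_curve = LightCurves()        # hypothetical constructor
# light_curve.load_from_table(table)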
Example #3
def remove_header_cards(header, cards, ignore_missing=True):
    r"""

    Args:
        header:
        cards:
        ignore_missing:

    Returns:

    """
    for card in cards:
        header.remove(card, ignore_missing=ignore_missing)
        msgs.work('card {} removed from the header'.format(card))
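
A minimal usage sketch on a toy astropy header (with the `card`/`cards` fix above applied):

from astropy.io import fits

header = fits.Header()
header['OBJECT'] = 'NGC 1068'
header['AIRMASS'] = 1.2
remove_header_cards(header, ['AIRMASS', 'NOT_THERE'])  # missing cards are skipped silently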
Example #4
def _convert_to_numpy_array(input_list):
    r""" Convert an input to a numpy array
    
    Args:
        input_list:
            to be converted into a numpy array. At the moment the supported type for the input are: `list`,

    Returns:
        output_np_array (`np.array`):
            output array with the same dimension of `input_list`
    """
    if isinstance(input_list, list):
        output_np_array = np.copy(np.array(input_list))
        msgs.work('Converting list to np.array with dimension {}.'.format(
            np.shape(output_np_array)))
    else:
        output_np_array = np.copy(input_list)
    return output_np_array
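
A short usage sketch of the helper above:

cards = _convert_to_numpy_array(['NAXIS', 'BITPIX'])  # list -> np.array of shape (2,)
same_cards = _convert_to_numpy_array(cards)           # non-list input is returned as a copy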
Example #5
def download(dp_id,
             min_disk_space=np.float32(default.get_value('min_disk_space'))):
    r"""Given a filename in the ADP format, the code download the file from the
    `ESO archive <http://archive.eso.org>`_

    ..note::
        if dp_id is not a `numpy.str`, a WARNING message will be raised and the content of `dp_id` will be
        converted into a string.

    Args:
        dp_id (`numpy.str`):
            Data product ID to be downloaded.
        min_disk_space (`numpy.float`):
            The file will be downloaded only if there is this amount of space (in Gb) free on the disk.
            By default is set by the `default.txt` file.

    Returns:
        This downloads a fits ADP file with the same name of the input.
    """

    # Check for disk space
    checks.check_disk_space(min_disk_space=min_disk_space)

    for file_name in dp_id:
        # if the file name is in bytes, decode it.
        if not isinstance(file_name, str):
            msgs.warning('The content of dp_id is not in a string format.')
            msgs.warning('The code is trying to fix this.')
            if isinstance(file_name, bytes):
                file_name = str(file_name.decode("utf-8"))
                msgs.warning('Converted to {}.'.format(type(file_name)))
            else:
                msgs.error(
                    'Unable to understand the format of the dp_id entry: {}'.
                    format(type(file_name)))

        # Given a dp_id of a public file, the link to download it is constructed as follows:
        download_url = 'http://archive.eso.org/datalink/links?ID=ivo://eso.org/ID?{}&eso_download=file'.format(
            str(file_name))
        msgs.work(
            'Downloading file {}. This may take some time.'.format(file_name +
                                                                   '.fits'))
        urllib.request.urlretrieve(download_url, filename=file_name + '.fits')
        msgs.info('File {} downloaded.'.format(file_name + '.fits'))
Example #6
    def get_cards(self, check_cards=None):
        r"""Returns the `cards` present in a list object. If `check_cards` is not `None`, it checks that such cards are
        contained in the list and return them. If not present an empty `np.vector` is returned.

        Args:
            check_cards (`np.array`, `None`):
                List of cards that need to be checked.

        Returns:
            selected_cards (`np.array`):
                Cards present in the list.
            missing_cards (`np.array`):
                Subset of `check_cards` not present in the list. It is empty if all cards are present.
        """
        if check_cards is not None:
            _check_cards = np.isin(self.cards,
                                   _convert_to_numpy_array(check_cards))
            selected_cards = np.copy(self.cards[_check_cards])
            if np.size(selected_cards) < 1:
                msgs.warning(
                    'None of the cards in input are present in the list.')
                selected_cards = np.array([])
                missing_cards = np.array([])
            else:
                msgs.work('There are {} occurrences of the {} cards in input.'.
                          format(np.size(selected_cards),
                                 np.size(check_cards)))
                if np.all(np.isin(check_cards, selected_cards)):
                    msgs.work('All cards in input are present in the list.')
                    missing_cards = np.array([])
                else:
                    msgs.work(
                        'Not all the cards in input are present in the list.')
                    missing_cards = check_cards[np.logical_not(
                        np.isin(check_cards, selected_cards))]
                    msgs.work('The missing cards are:')
                    for missing_card in missing_cards:
                        msgs.work(' - {}'.format(missing_card))
        else:
            selected_cards = np.copy(self.cards)
            missing_cards = np.array([])
        return selected_cards, missing_cards
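
A minimal usage sketch, assuming the `Lists` class from Example #10 below:

import numpy as np

lists = Lists(cards=['NAXIS', 'BITPIX', 'OBJECT'], values=[2, -32, 'NGC 1068'])
selected, missing = lists.get_cards(check_cards=np.array(['NAXIS', 'EXPTIME']))
# selected -> array(['NAXIS']); missing -> array(['EXPTIME'])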
Example #7
def abmaglim(rms, seeing_fwhm, exptime=None, zero_point=None, sigma=5.):
    r"""Calculate the N-sigma magnituide limit.

    The code behaves in two different ways depending on whether `zero_point` is set to `None`.
    If `zero_point` is None:
        `rms` needs to carry units, `exptime` is not considered, and
        abmaglim = -2.5 * Log10( sigma * rms * PI*(.5*seeing_fwhm)**2. / (3631.*u.jansky) )
    else:
        abmaglim is calculated as:
        abmaglim = -2.5 * Log10( sigma * rms * PI*(.5*seeing_fwhm)**2. / exptime ) + zero_point

    Args:
        rms (`float`):
           Calculated rms on the image
        exptime (`float`):
            Exposure time in seconds
        zero_point (`float`):
            AB zero point of the image. If not set, the code will take the RMS with the associated
            astropy.units.
        seeing_fwhm (`float`):
            FWHM of the seeing in pixels
        sigma (`float`):
           Number of sigma to consider a detection significant

    Returns:
        abmaglim (`float`):
            AB magnitude limit of the image
    """
    msgs.work('Calculating {}-sigma AB mag limit'.format(sigma))

    if zero_point is None:
        if exptime is not None:
            msgs.error('`exptime` needs to be `None` if `zero_point` is `None`')
        rms_in_jansky = rms.to(u.jansky, equivalencies=u.spectral())
        abmaglim = -2.5 * np.log10(sigma * rms_in_jansky * np.pi * np.power(seeing_fwhm / 2., 2.) / (3631. * u.jansky))
    else:
        abmaglim = -2.5 * np.log10(sigma * rms * np.pi * np.power(seeing_fwhm / 2., 2.) / exptime) + zero_point
    return abmaglim
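
A minimal usage sketch for the `zero_point=None` branch, where `rms` must carry astropy units (the numbers are illustrative):

import astropy.units as u

rms = 1.e-7 * u.jansky                            # per-pixel rms of the image
maglim = abmaglim(rms, seeing_fwhm=4., sigma=5.)  # ~21.9 AB mag for these values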
Example #8
if __name__ == '__main__':
    args = parse_arguments()

    # getting fits names
    input_fits = str(args.input_fits[0])
    if args.output_fits is None:
        output_fits = input_fits.replace('.fits', '_fixed.fits')
    else:
        output_fits = str(args.output_fits[0])

    msgs.start()

    hdul_original = fits.open(input_fits)

    # [excerpt] the steps that copy `hdul_original` into `output_fits` and
    # re-open it as `hdul` in update mode are omitted from this snippet
    msgs.work('Updating checksum and datasum')
    hdul[0].add_datasum()
    hdul[1].add_datasum()
    hdul[0].add_checksum(override_datasum=True)
    hdul[1].add_checksum(override_datasum=True)
    msgs.work('Flushing changes.')
    hdul.flush()
    hdul.close()

    msgs.newline()
    msgs.info('File {} produced.'.format(output_fits))
    msgs.end()
Example #9
    def compare_with(self,
                     second_list,
                     check_cards=None,
                     on_terminal=True,
                     on_file=None):
        r"""Compare two list objects. The code will go through all cards and values present in the
        input list (or the `check_cards` subset if set not to None) and compare with cards and values in the
        `second_list`.

        Args:
            second_list (`Lists`):
                List that you want to compare with the input list.
            check_cards (`np.array`):
                List of cards that need to be compared (and returned).
            on_terminal (`bool`):
                if `True`, the `cards` and respective `values` are printed on the terminal.
            on_file (`str`, `None`):
                if not `None`, `cards` and respective `values` will be stored in this text file

        Returns:
            first_cards (`np.array`):
                `cards` present in both the 1st and the 2nd lists.
            first_values (`np.array`):
                `values` from the 1st list associated with the `cards`
            second_values (`np.array`):
                `values` from the 2nd list associated with the `cards`
        """

        if not isinstance(second_list, Lists):
            msgs.error('The second list is not a Lists object.')
        if not isinstance(on_terminal, bool):
            msgs.error('The on_terminal option should be a bool.')

        if check_cards is None:
            check_cards, _ = self.get_cards(check_cards)

        # loading the first list:
        _first_cards, _first_values, _first_missing = self.get_values(
            check_cards=check_cards)
        msgs.work('Checking for cards in the second_list')
        # comparing with values from the second list:
        _second_cards, _second_values, _second_missing = second_list.get_values(
            check_cards=check_cards)

        # define where cards are present in both lists
        _overlap = np.isin(_first_cards, _second_cards)
        first_cards, first_values, first_missing = self.get_values(
            check_cards=_first_cards[_overlap])
        second_cards, second_values, second_missing = second_list.get_values(
            check_cards=_first_cards[_overlap])

        # Printing results
        if on_terminal:
            msgs.info(
                'Cards and values from both the first and the second list:')
        _print_1cards_2values(first_cards,
                              first_values,
                              second_values,
                              on_terminal=on_terminal,
                              on_file=on_file)

        if on_terminal:
            # Case of all cards present in the first and in the second lists
            if np.size(_first_missing) == 0 and np.size(_second_missing) == 0:
                msgs.info('All cards are present in both lists')
            # Case of some cards missing in the first list but present in the second:
            elif np.size(_first_missing) != 0 and np.size(
                    _second_missing) == 0:
                _only_second = np.isin(check_cards, _first_missing)
                _only_second_cards, _only_second_values, _only_second_missing = second_list.get_values(
                    check_cards=check_cards[_only_second])
                msgs.warning('{} Cards are missing in the first list,'.format(
                    np.size(_first_missing)))
                msgs.warning('but present in the second:')
                _print_1cards_1values(_only_second_cards,
                                      _only_second_values,
                                      on_terminal=on_terminal,
                                      on_file=None)
            # Case of some cards missing in the second list but present in the first:
            elif np.size(
                    _first_missing) == 0 and np.size(_second_missing) != 0:
                _only_first = np.isin(check_cards, _second_missing)
                _only_first_cards, _only_first_values, _only_first_missing = self.get_values(
                    check_cards=check_cards[_only_first])
                msgs.warning('{} Cards are missing in the second list,'.format(
                    np.size(_second_missing)))
                msgs.warning('but present in the first:')
                _print_1cards_1values(_only_first_cards,
                                      _only_first_values,
                                      on_terminal=on_terminal,
                                      on_file=None)
            # Case of some cards missing in the first and in the second list:
            else:
                _first_missing = ~np.isin(check_cards, _first_cards)
                _second_missing = ~np.isin(check_cards, _second_cards)
                _both_missing = _first_missing * _second_missing
                _first_only_missing = _first_missing * ~_both_missing
                _second_only_missing = _second_missing * ~_both_missing
                if np.size(check_cards[_both_missing]) != 0:
                    msgs.warning(
                        '{} Cards are missing from both lists:'.format(
                            np.size(check_cards[_both_missing])))
                    for both_missing in check_cards[_both_missing]:
                        print(' ----------> ' + both_missing)
                if np.size(check_cards[_second_only_missing]) != 0:
                    # these cards are present only in the first list
                    _only_first_cards, _only_first_values, _only_first_missing = self.get_values(
                        check_cards=check_cards[_second_only_missing])
                    msgs.warning(
                        '{} Cards are missing in the second list,'.format(
                            np.size(check_cards[_second_only_missing])))
                    msgs.warning('but present in the first:')
                    _print_1cards_1values(_only_first_cards,
                                          _only_first_values,
                                          on_terminal=on_terminal,
                                          on_file=None)
                if np.size(check_cards[_first_only_missing]) != 0:
                    # these cards are present only in the second list
                    _only_second_cards, _only_second_values, _only_second_missing = second_list.get_values(
                        check_cards=check_cards[_first_only_missing])
                    msgs.warning(
                        '{} Cards are missing in the first list,'.format(
                            np.size(check_cards[_first_only_missing])))
                    msgs.warning('but present in the second:')
                    _print_1cards_1values(_only_second_cards,
                                          _only_second_values,
                                          on_terminal=on_terminal,
                                          on_file=None)

        return first_cards, first_values, second_values
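
A minimal usage sketch, assuming two `Lists` objects (see Example #10 below) and the `get_values` method they rely on:

first = Lists(cards=['NAXIS', 'OBJECT'], values=[2, 'NGC 1068'])
second = Lists(cards=['NAXIS', 'OBJECT'], values=[2, 'NGC 1365'])
cards, values_first, values_second = first.compare_with(second, on_terminal=True)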
Example #10
    def __init__(self,
                 cards=None,
                 values=None,
                 from_fits=None,
                 which_hdu=0,
                 from_txt=None,
                 data_start=None,
                 data_end=None):
        r"""

        Given the variety of possible formats of the input, particular care should be taken with the `from_txt` option.
        The options `data_start` and `data_end` can help with this, but if the format is not simple plain text
        with space/tab separated columns, reading the text file with another module and then storing the data as
        cards and values could be a safer option.

        Args:
            cards (`np.array` or `list`):
                array that you want to use as cards
            values (`np.array` or `list`):
                values associated with each element of `cards`. This implies that `cards` and `values` need to have
                the same length, although multiple sets of `values` can be associated with the same `cards`.
            from_fits (`str`):
                name of the fits file from which the header will be read.
            which_hdu (`int`):
                select from which HDU you are getting the header. See `fitsfiles` in `ESOAsg.core` for further
                details.
            from_txt (`str`, `None`):
                Ascii file from which cards and values will be read. The assumption here is that `cards` are stored
                in the first column, and `values` in the second one. The options `data_start` and `data_end` will
                select only the (`data_end`-`data_start`) lines following the line `data_start`. This is the same
                option present in `ascii.read` from astropy.
            data_start (`int`, `None`):
                First line to be read from the file `from_txt`.
            data_end (`int`, `None`):
                Last line to be read from the file `from_txt`.

        Returns:
            a list object containing cards and corresponding values
        """

        # Loading from input
        if cards is not None:
            # Loading cards
            msgs.work('Loading cards.')
            _cards = _convert_to_numpy_array(cards)
            self.cards = _cards
            # Loading values
            if values is not None:
                _values = _convert_to_numpy_array(values)
                if np.shape(_values) == np.shape(_cards):
                    msgs.work('Loading values.')
                    self.values = _values
                else:
                    if np.ndim(_values) == 1:
                        if np.shape(_values[:]) == np.shape(_cards):
                            self.values = _values
                        else:
                            msgs.error(
                                'Values and Cards should have the same length.'
                            )
                    elif np.ndim(_values) == 2:
                        for index in np.arange(np.shape(_values)[0],
                                               dtype=np.int_):
                            if np.shape(_values[index, :]) != np.shape(_cards):
                                msgs.error(
                                    'Values and Cards should have the same length.'
                                )
                        self.values = _values
                    else:
                        msgs.error(
                            'Values and Cards should have the same length.')
            else:
                msgs.work('Values is empty.')
                self.values = np.array([])

        # Loading from fits file
        elif from_fits is not None:
            msgs.work('Loading header from fits file: {}'.format(from_fits))
            _hdu = fitsfiles.header_from_fits_file(from_fits,
                                                   which_hdu=which_hdu)
            # start from empty arrays: seeding with np.zeros_like(0) would leave a spurious leading 0 entry
            _cards, _values = np.array([]), np.array([])
            for index in list(_hdu.keys()):
                if 'COMMENT' not in index:
                    _cards = np.append(_cards, index)
                    _values = np.append(_values, _hdu[index])
            self.cards = np.array(_cards)
            self.values = np.array(_values)

        # Loading from text file
        elif from_txt is not None:
            msgs.work('Loading list from text file: {}'.format(from_txt))
            _full_table = ascii.read(from_txt,
                                     guess=True,
                                     data_start=data_start,
                                     data_end=data_end)
            self.cards = np.array(_full_table[_full_table.colnames[0]].data)
            if np.size(_full_table.colnames) < 2:
                self.values = np.array([])
            elif np.size(_full_table.colnames) >= 2:
                _values = np.array(_full_table[_full_table.colnames[1]].data)
                if np.size(_full_table.colnames) > 2:
                    for index in _full_table.colnames[2:]:
                        _values = np.vstack((_values, _full_table[index].data))
                self.values = _values

        # Creating empty object
        else:
            msgs.work('Creating empty lists.Lists object.')
            self.cards = np.array([])
            self.values = np.array([])

        return
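
A short sketch of the three construction modes (the file names are illustrative placeholders):

lists_from_arrays = Lists(cards=['NAXIS', 'BITPIX'], values=[2, -32])
# lists_from_fits = Lists(from_fits='spectrum.fits', which_hdu=1)
# lists_from_txt = Lists(from_txt='cards_and_values.txt', data_start=0, data_end=10)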
Example #11
    if args.output_fits is None:
        output_fits = input_fits.replace('.fits', '_fixed.fits')
    else:
        output_fits = str(args.output_fits[0])

    msgs.start()

    # Copy relevant information from input into output file
    fitsfiles.new_fits_like(input_fits, [0], output_fits, overwrite=True)

    # This file will be modified in place
    hdul = fits.open(output_fits, 'update', checksum=True)
    hdr0 = hdul[0].header
    hdr1 = hdul[1].header

    msgs.work('Refactoring data structure.')
    msgs.work('Taking data from the file {}'.format(
        str(input_fits.replace('.fits', '.dat'))))
    spectra = ascii.read(input_fits.replace('.fits', '.dat'))
    WAVE = spectra['col1']
    FLUX = spectra['col2']
    ERR = spectra['col3']

    # double precision:
    col_dtype = np.float64
    col_format = str(len(np.array(WAVE, dtype=col_dtype))) + 'D'

    col1 = fits.Column(name='WAVE',
                       format=col_format,
                       unit='angstrom',
                       array=[np.array(WAVE, dtype=col_dtype)])
Example #12
def query_from_radec(positions,
                     radius=None,
                     instruments=None,
                     data_types=None,
                     verbose=False,
                     maxrec=default.get_value('maxrec')):
    r"""Query the ESO TAP service given a position in RA and Dec.

     The `positions` value (or list) needs to be given as an `astropy.coordinates.SkyCoord` object.
    
    Args:
        positions (`astropy.coordinates.SkyCoord`):
            Coordinates (or list of coordinates) of the sky you want to query in the format of an
            `astropy.coordinates.SkyCoord` object. For further detail see here:
            `astropy coordinates <https://docs.astropy.org/en/stable/coordinates/>`_
        radius (`float`):
            Search radius you want to query in arcseconds. Note that in case `None` is given, the query will be
            performed with the `INTERSECT(POINT('',RA,Dec), s_region)` clause instead of the
            `INTERSECT(s_region,CIRCLE('',RA,Dec,radius/3600.))` one. See here for further examples:
            `tap obs examples <http://archive.eso.org/tap_obs/examples>`_
        instruments (`list`):
            Limit the search to the selected list of instruments (e.g., `XSHOOTER`)
        data_types (`list`):
            Limit the search to the selected types of data (e.g., `spectrum`)
        verbose (`bool`):
            if set to `True` additional info will be displayed
        maxrec (`int`):
            Define the maximum number of files that a single query can return from the ESO archive. The default value
            is set in the `default.txt` file.

    Returns:
        results_from_query (`list`):
            Results from the query in a list with the same length as the input positions. Currently it contains:
            target_name, dp_id, s_ra, s_dec, t_exptime, em_min, em_max, dataproduct_type, instrument_name,
            abmaglim, proposal_id, obs_collection
    """
    # Check inputs:
    # Working on positions
    if isinstance(positions, list):
        positions_list = positions
    else:
        positions_list = [positions]
    for position in positions_list:
        assert isinstance(
            position,
            coordinates.SkyCoord), r'Input positions not a SkyCoord object'
    # Working on radius
    if radius is not None:
        if isinstance(radius, int):
            radius = float(radius)
        else:
            assert isinstance(radius, float), r'Input radius is not a number'
    # Working on instruments
    if instruments is not None:
        if isinstance(instruments, list):
            instruments_list = instruments
        else:
            instruments_list = [instruments]
        for instrument in instruments_list:
            assert isinstance(
                instrument,
                str), r'Input instrument: {} not valid'.format(instrument)
    # Working on data_types
    if data_types is not None:
        if isinstance(data_types, list):
            data_types_list = data_types
        else:
            data_types_list = [data_types]
        for data_type in data_types_list:
            assert isinstance(
                data_type,
                str), r'Input data type: {} not valid'.format(data_type)

    # Running over all positions
    if verbose:
        how_many_positions = len(positions_list)
        if how_many_positions > 1:
            msgs.work(
                'Exploring ESO archive around {} locations in the sky'.format(
                    how_many_positions))
        else:
            msgs.work(
                'Exploring ESO archive around the input location in the sky')

    results_from_query = []

    for idx, position in enumerate(positions_list):
        position = position.transform_to(ICRS)  # transform_to returns a new object; keep it
        ra, dec = np.float_(position.ra.degree), np.float_(position.dec.degree)
        msgs.work(
            'Running query {} to the ESO archive (out of {} total)'.format(
                idx + 1, len(positions_list)))

        # Define query
        # base query:
        query = _query_obscore_base()
        # selection of the location:
        query = query + _query_obscore_intersect_ra_dec(ra, dec, radius=radius)
        # selection of the instrument(s)
        if instruments is not None:
            query = query + _query_obscore_select_instruments(instruments_list)
        # selection of the data_type(s)
        if data_types is not None:
            query = query + _query_obscore_select_data_types(data_types_list)

        # running query and append results to the list
        result_from_query = _run_query(query,
                                       verbose=verbose,
                                       remove_bytes=True,
                                       maxrec=maxrec)

        if len(result_from_query) < 1:
            msgs.warning('No data has been retrieved')
        else:
            msgs.info('A total of {} entries has been retrieved'.format(
                len(result_from_query)))
            if verbose:
                msgs.info('For the following instrument:')
                for inst_name in np.unique(
                        result_from_query['instrument_name'].data):
                    msgs.info(' - {}'.format(inst_name))

        results_from_query.append(result_from_query)
    return results_from_query
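
A minimal usage sketch; the position is built as an `astropy.coordinates.SkyCoord`, as the docstring requires (the coordinates are illustrative):

from astropy import coordinates
from astropy import units as u

position = coordinates.SkyCoord(ra=10.684 * u.degree, dec=41.269 * u.degree, frame='icrs')
results = query_from_radec(position, radius=5., instruments=['MUSE'], data_types=['cube'])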
Example #13
def query_from_polygons(polygons,
                        instruments=None,
                        data_types=None,
                        verbose=False,
                        columns=None,
                        maxrec=None):
    r"""Query the ESO archive for data at a area in the sky defined by a polygon

    The `polygons` value (or list) needs to be given as a string defining the location in the sky of the polygon
    with RA, Dec, separated by commas and with the first RA, Dec pair that matches the last one (to close the
    polygon)

    The output is in an (list of) `astropy.table` with columns defined in: `core.tap_queries.COLUMNS_FROM_OBSCORE`
    It is possible to change the columns to query by setting the value of `columns`


    Args:
        polygons (list): list of `str` (or a single `str`) containing the coordinates of the polygon in the sky you
            want to query
        instruments (list): list of `str` (or single `str`) containing the instruments used to limit the search
        data_types (list): list of `str` (or single `str`) containing the data types used to limit the search
        columns (list): list of `str` (or single `str`) containing the columns to be queried
        verbose (bool): if set to `True` additional info will be displayed
        maxrec (int, optional): define the maximum number of entries that a single query can return. If it is `None` the
            value is set by the limit of the service.

    Returns:
        any: results from the queries

    """
    # Check inputs:
    # Working on polygons
    polygons_list = cleaning_lists.from_element_to_list(polygons,
                                                        element_type=str)
    # Working on instruments
    instruments_list = cleaning_lists.from_element_to_list(instruments,
                                                           element_type=str)
    # Working on data_types
    data_types_list = cleaning_lists.from_element_to_list(data_types,
                                                          element_type=str)
    # Working on columns
    columns_list = _is_column_list_in_obscore(columns)

    if verbose:
        how_many_polygons = len(polygons_list)
        if how_many_polygons > 1:
            msgs.work(
                'Exploring ESO archive around {} locations in the sky'.format(
                    how_many_polygons))
        else:
            msgs.work(
                'Exploring ESO archive around the input location in the sky')

    # Running over all positions
    results_from_query = []
    for idx, polygon in enumerate(polygons_list):
        msgs.work(
            'Running query {} to the ESO archive (out of {} total)'.format(
                idx + 1, len(polygons_list)))
        # Define query
        query = "{0}{1}{2}{3}".format(
            tap_queries.create_query_obscore_base(columns_list),
            tap_queries.condition_intersects_polygon(polygon),
            tap_queries.condition_instruments_like(instruments_list),
            tap_queries.condition_data_types_like(data_types_list))
        # instantiate ESOCatalogues
        query_for_observations = query_observations.ESOObservations(
            query=query, type_of_query='sync', maxrec=maxrec)
        # running query and append results to the list
        if verbose:
            query_for_observations.print_query()
        # Obtaining query results
        query_for_observations.run_query(to_string=True)
        result_from_query = query_for_observations.get_result_from_query()
        if len(result_from_query) < 1:
            msgs.warning('No data has been retrieved')
        else:
            msgs.info(
                'A total of {} entries has been retrieved (with maxrec={})'.
                format(len(result_from_query), maxrec))
            msgs.info('For the following instrument:')
            for inst_name in np.unique(
                    result_from_query['instrument_name'].data):
                msgs.info(' - {}'.format(inst_name))

        results_from_query.append(result_from_query)

    # Returning results
    return _return_results_from_query(results_from_query)
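
A minimal usage sketch; note that the first RA, Dec pair of the polygon string repeats as the last one to close the polygon (the coordinates are illustrative):

polygon = '10.0, 41.0, 10.5, 41.0, 10.5, 41.5, 10.0, 41.5, 10.0, 41.0'
results = query_from_polygons(polygons=polygon, instruments='MUSE', maxrec=100)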
Example #14
def transfer_header_cards(source_header,
                          output_header,
                          source_cards,
                          output_cards=None,
                          with_comment=True,
                          delete_card=True):
    r"""Transfer header cards from one header to another

    Cards, values (and optionally comments, if `with_comment`=`True`) from the header `source_header`  will be
    transfer to the header `output_header`.
    `source_cards` is a list containing all the cards that needs to be transfer. If `output_cards` is defined, the cards
    from `source_cards[i]` will be saved in the `output_header` has `output_cards[i]`. If `delete_card`=`True` the
    card will be removed from the `source_header`.

    ..note ::
        Both `source_header` and `output_header` are modified in place. I.e. there is no backup option for the
        original values.
        If a card is not present in `source_header` a warning is raised and will not be transferred to `output_header`

    Args:
        source_header (`hdu.header'):
            Header from which the cards will be taken.
        output_header (`hdu.header'):
            Header that will be modified with cards from `source_header`.
        source_cards (`list`):
            List of cards you want to transfer from `source_header` to `output_header`.
        output_cards (`list`):
            If not `None` the cards in `output_header` will be saved with the new names listed here.
        with_comment (`bool`):
            if true, also the associated comment will be copied
        delete_card (`bool`):
            if true, the card will be removed from the `source_header`

    Returns:
        `source_header` and `output_header` with update values.
    """
    if output_cards is None:
        output_cards = source_cards
    if len(output_cards) != len(source_cards):
        msgs.error(
            "Incompatible length between output and source cards lists.")

    for source_card, output_card in zip(source_cards, output_cards):
        msgs.work("Transferring header card {} to {}.".format(
            source_card, output_card))
        if source_card not in source_header:
            msgs.warning(
                '{} not present in `source_header`. The card will not be transferred'
                .format(source_card))
            continue
        if with_comment:
            add_header_card(output_header,
                            output_card,
                            source_header[source_card],
                            comment=source_header.comments[source_card])
        else:
            add_header_card(output_header,
                            output_card,
                            source_header[source_card],
                            comment=None)
        if delete_card:
            del source_header[source_card]
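
A minimal usage sketch on two toy astropy headers (it assumes `add_header_card`, used above, is available in the same module):

from astropy.io import fits

source_header = fits.Header()
source_header['EXPTIME'] = (120.0, 'Exposure time in seconds')
output_header = fits.Header()
transfer_header_cards(source_header, output_header, ['EXPTIME'],
                      output_cards=['TEXPTIME'], delete_card=True)
# output_header now holds TEXPTIME = 120.0 (comment included); EXPTIME is gone from source_header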
Example #15
def main(args):
    import numpy as np
    import os
    import shutil
    from astropy.coordinates import SkyCoord
    from astropy.coordinates import name_resolve
    from astropy import units as u
    from astropy.io import fits
    from ESOAsg.ancillary import cleaning_lists
    from ESOAsg.core import fitsfiles
    from ESOAsg import msgs

    # Cleaning input lists
    input_fits_files = cleaning_lists.make_list_of_fits_files(args.input_fits)
    # Make whitelight images
    if args.whitelight:
        make_whitelight_image = True
    else:
        make_whitelight_image = False
    # Creating output list
    if args.suffix is None:
        # no suffix: output names equal input names, so overwriting must be allowed
        overwrite = True
        msgs.warning('The output files will overwrite the input files')
    else:
        overwrite = False
    suffix_string = cleaning_lists.make_string(args.suffix)
    output_fits_files = cleaning_lists.make_list_of_fits_files_and_append_suffix(
        input_fits_files, suffix=suffix_string)
    if make_whitelight_image:
        output_whitelight_files = cleaning_lists.make_list_of_fits_files_and_append_suffix(
            input_fits_files, suffix=suffix_string + '_WL')
    else:
        output_whitelight_files = [None] * len(input_fits_files)
    '''
    # reference
    if args.referenc is not None:
        reference = str(args.referenc[0])
    else:
        reference = str(' ')

    # fluxcal
    if args.fluxcal == 'ABSOLUTE':
        fluxcal = 'ABSOLUTE'
    elif args.fluxcal == 'UNCALIBRATED':
        fluxcal = 'UNCALIBRATED'
    else:
        msgs.error('Possible values for fluxcal are: `ABSOLUTE` or `UNCALIBRATED`')

    # abmaglim
    if args.abmaglim is not None:
        abmaglim = args.abmaglim
        assert isinstance(abmaglim, (int, np.float_)), 'ABMAGLIM must be a float'
        if abmaglim < 0:
            msgs.error('ABMAGLIM must be positive')
    else:
        abmaglim = np.float_(-1.)
    '''

    msgs.start()

    for fits_in, fits_out, image_out in zip(input_fits_files,
                                            output_fits_files,
                                            output_whitelight_files):
        if os.path.exists(fits_out):
            shutil.copy(fits_out, fits_out.replace('.fit', '_old.fit'))
            msgs.warning('{} already exists. Backup created.'.format(fits_out))
        if image_out is not None:
            if os.path.exists(image_out):
                shutil.copy(image_out, image_out.replace('.fit', '_old.fit'))
                msgs.warning(
                    '{} already exists. Backup created.'.format(image_out))

        full_hdul = fitsfiles.get_hdul(fits_in)
        try:
            instrument = full_hdul[0].header['HIERARCH ESO SEQ ARM']
        except KeyError:
            msgs.error(
                'Failed to read the keyword HIERARCH ESO SEQ ARM from the primary header'
            )
        else:
            # `else` rather than `finally`: `instrument` is defined only when the keyword was read
            if instrument in SUPPORTED_INSTRUMENT:
                msgs.info(
                    'The input file is from SPHERE/{}'.format(instrument))
            else:
                msgs.warning(
                    'Instrument SPHERE/{} not supported'.format(instrument))

        # ToDo
        # These need to be transformed into objects
        if instrument.startswith('IFS'):
            msgs.work('Fixing header for SPHERE/{} file {}'.format(
                instrument, fits_in))

            # Create a copy of the file where there is a primary HDU and data are in the 'DATA' HDU
            msgs.work('Reshaping cube into PrimaryHEADER and Data Header')
            fitsfiles.new_fits_like(fits_in, [0],
                                    fits_out,
                                    overwrite=overwrite,
                                    fix_header=True)
            hdul = fitsfiles.get_hdul(fits_out, 'update', checksum=True)
            hdr0 = hdul[0].header
            hdr1 = hdul[1].header

            # Check for HISTORY
            # Primary Header
            if 'HISTORY' in hdr0.keys():
                history_cards_hdr0 = [
                    history_card_hdr0 for history_card_hdr0 in hdr0
                    if history_card_hdr0.startswith('HISTORY')
                ]
                history_values_hdr0 = [
                    hdr0[history_card_hdr0] for history_card_hdr0 in hdr0
                    if history_card_hdr0.startswith('HISTORY')
                ]
                for history_card_hdr0, history_value_hdr0 in zip(
                        history_cards_hdr0, history_values_hdr0):
                    msgs.work('Cleaning cards: {} = {}'.format(
                        history_card_hdr0, history_value_hdr0))
                del hdr0['HISTORY'][:]
            # Data Header
            if 'HISTORY' in hdr1.keys():
                history_values_hdr1 = hdr1['HISTORY'][:]
                for history_number in range(0, len(history_values_hdr1)):
                    clean_history = cleaning_lists.remove_non_ascii(
                        history_values_hdr1[history_number])
                    if len(clean_history) > 0:
                        hdr1['HISTORY'][history_number] = str(clean_history)
                    else:
                        hdr1['HISTORY'][history_number] = str(' ')

            # Update cards for headers:
            # Updating values with different CARD in the header
            cards_input = [
                'CRPIX4', 'CRVAL4', 'CTYPE4', 'CUNIT4', 'CD4_4', 'CD1_4',
                'CD2_4', 'CD4_1', 'CD4_2'
            ]
            cards_output = [
                'CRPIX3', 'CRVAL3', 'CTYPE3', 'CUNIT3', 'CD3_3', 'CD1_3',
                'CD2_3', 'CD3_1', 'CD3_2'
            ]
            fitsfiles.transfer_header_cards(hdr1,
                                            hdr1,
                                            cards_input,
                                            output_cards=cards_output,
                                            delete_card=True)
            # Remove not used values
            cards_to_be_removed_hdr1 = ['CD4_3', 'CD3_4']
            for card_to_be_removed_hdr1 in cards_to_be_removed_hdr1:
                hdr1.remove(card_to_be_removed_hdr1, ignore_missing=True)

            # Transfer cards from HDU1 to the PrimaryHDU
            prefixes_not_to_transfer = (
                'COMMENT', 'EXTNAME', 'BITPIX', 'NAXIS', 'CRPIX', 'CRVAL',
                'CDELT', 'CTYPE', 'CD1_', 'CD2_', 'CD3_', 'CUNIT', 'CSYER',
                'HDUCLAS', 'XTENSION', 'PCOUNT', 'GCOUNT', 'HDUDOC', 'HDUVER',
                'HISTORY')
            # str.startswith accepts a tuple of prefixes
            not_to_be_transfer = [
                hdr1_card for hdr1_card in hdr1
                if hdr1_card.startswith(prefixes_not_to_transfer)
            ]
            cards_to_be_transfer = []
            for hdr1_card in hdr1:
                if hdr1_card not in not_to_be_transfer:
                    cards_to_be_transfer.append(hdr1_card)
            fitsfiles.transfer_header_cards(hdr1,
                                            hdr0,
                                            cards_to_be_transfer,
                                            with_comment=True,
                                            delete_card=True)

            # Try to guess coordinates
            if 'CRVAL1' not in hdr1.keys():
                msgs.warning('CRVAL position keywords not present')
                if 'OBJECT' in hdr0.keys():
                    try:
                        object_coordinate = SkyCoord.from_name(
                            str(hdr0['OBJECT']).strip())
                        ra_obj, dec_obj = object_coordinate.ra.degree, object_coordinate.dec.degree
                        if 'RA' in hdr0.keys() and 'DEC' in hdr0.keys():
                            pointing_coordinate = SkyCoord(float(hdr0['RA']),
                                                           float(hdr0['DEC']),
                                                           unit='deg')
                            msgs.work(
                                'Testing separation from the pointing position'
                            )
                            separation = object_coordinate.separation(
                                pointing_coordinate).arcsec
                            if separation < 120.:
                                msgs.info('Object - Pointing separation is {}'.
                                          format(separation))
                                msgs.info(
                                    'Updating CRVAL1 = {}'.format(ra_obj))
                                msgs.info(
                                    'Updating CRVAL2 = {}'.format(dec_obj))
                                hdr1['CRVAL1'] = ra_obj
                                hdr1['CRVAL2'] = dec_obj
                                msgs.work('Updating CUNIT')
                                hdr1['CUNIT1'] = 'deg'
                                hdr1['CUNIT2'] = 'deg'
                                msgs.work('Updating CTYPE')
                                hdr1['CTYPE1'] = 'RA---TAN'
                                hdr1['CTYPE2'] = 'DEC--TAN'
                                msgs.work('Updating CRPIX')
                                hdr1['CRPIX1'] = float(
                                    hdul[1].data.shape[2]) / 2.
                                hdr1['CRPIX2'] = float(
                                    hdul[1].data.shape[1]) / 2.
                                msgs.info('Updating CD1 and CD2')
                                hdr1['CD1_1'] = 2.06E-06
                                hdr1['CD2_2'] = 2.06E-06
                                hdr1['CD1_2'] = 0.
                                hdr1['CD2_1'] = 0.
                                msgs.work('Updating RA, DEC')
                                hdr0['RA'] = ra_obj
                                hdr0.comments[
                                    'RA'] = object_coordinate.ra.to_string(
                                        u.hour)
                                hdr0['DEC'] = dec_obj
                                hdr0.comments[
                                    'DEC'] = object_coordinate.dec.to_string(
                                        u.degree, alwayssign=True)
                            else:
                                msgs.warning(
                                    'Object - Pointing separation is {}'.
                                    format(separation))
                                msgs.warning(
                                    'This is suspicious, CRVAL not updated')
                    except name_resolve.NameResolveError:
                        msgs.warning('Object {} not recognized'.format(
                            str(hdr0['OBJECT']).strip()))
                        msgs.warning('CRVAL not updated')

            # Updating file prodcatg
            msgs.work('Updating PRODCATG to SCIENCE.CUBE.IFS')
            hdr0['PRODCATG'] = str('SCIENCE.CUBE.IFS')
            # Some more updates
            msgs.work('Setting NAXIS = 0 in primary header')
            hdr0['NAXIS'] = 0
            if 'OBSTECH' not in hdr0.keys():
                msgs.warning('OBSTECH missing')
                if 'ESO PRO TECH' in hdr0.keys():
                    msgs.info('Deriving OBSTECH from HIERARCH ESO PRO TECH')
                    msgs.work('Updating OBSTECH to {}'.format(
                        str(hdr0['HIERARCH ESO PRO TECH'])))
                    hdr0['OBSTECH'] = str(hdr0['HIERARCH ESO PRO TECH'])
            if 'EXPTIME' not in hdr0.keys():
                msgs.warning('EXPTIME missing')
                if 'ESO DET SEQ1 REALDIT' in hdr0.keys(
                ) and 'ESO DET NDIT' in hdr0.keys():
                    msgs.info('Deriving EXPTIME and TEXPTIME as REALDIT * NDIT')
                    hdr0['EXPTIME'] = hdr0[
                        'HIERARCH ESO DET SEQ1 REALDIT'] * hdr0[
                            'HIERARCH ESO DET NDIT']
                    hdr0['TEXPTIME'] = hdr0[
                        'HIERARCH ESO DET SEQ1 REALDIT'] * hdr0[
                            'HIERARCH ESO DET NDIT']
                    msgs.work('Updating EXPTIME to {}'.format(
                        str(hdr0['EXPTIME'])))
                    msgs.work('Updating TEXPTIME to {}'.format(
                        str(hdr0['TEXPTIME'])))
            if 'WAVELMIN' not in hdr0.keys():
                msgs.warning('WAVELMIN missing')
                z_pixel = np.arange(int(hdul[1].data.shape[0]))
                z_wave = float(
                    hdr1['CRVAL3']) + (z_pixel * float(hdr1['CD3_3']))
                if str(hdr1['CUNIT3']).strip().upper() == 'MICRONS':
                    msgs.info('Deriving WAVELMIN and WAVELMAX from CRVAL3')
                    z_wave = z_wave * 1000.  # convert to nanometers
                    hdr0['WAVELMIN'] = np.nanmin(z_wave)
                    hdr0['WAVELMAX'] = np.nanmax(z_wave)
                    msgs.work('Updating WAVELMIN to {}'.format(
                        str(hdr0['WAVELMIN'])))
                    msgs.work('Updating WAVELMAX to {}'.format(
                        str(hdr0['WAVELMAX'])))
                else:
                    msgs.warning(
                        'Unknown units {}. WAVELMIN and WAVELMAX not calculated'
                        .format(str(hdr1['CUNIT3'])))
            if 'SPEC_RES' not in hdr0.keys():
                msgs.warning('SPEC_RES missing')
                if 'WAVELMAX' in hdr0.keys():
                    msgs.info('Deriving SPEC_RES from WAVELMAX')
                    if (float(hdr0['WAVELMAX']) > 1300.) and (float(
                            hdr0['WAVELMAX']) < 1400.):
                        hdr0['SPEC_RES'] = 50.
                        msgs.work('Updating SPEC_RES to {}'.format(
                            str(hdr0['SPEC_RES'])))
                    elif (float(hdr0['WAVELMAX']) > 1600.) and (float(
                            hdr0['WAVELMAX']) < 1700.):
                        hdr0['SPEC_RES'] = 30.
                        msgs.work('Updating SPEC_RES to {}'.format(
                            str(hdr0['SPEC_RES'])))
                    else:
                        msgs.warning('WAVELMAX = {} is not in the expected '
                                     'range of possible values'.format(
                                         str(hdr0['WAVELMAX'])))
            if 'PROG_ID' not in hdr0.keys():
                msgs.warning('PROG_ID missing')
                if 'ESO OBS PROG ID' in hdr0.keys():
                    msgs.info('Deriving PROG_ID from HIERARCH ESO OBS PROG ID')
                    msgs.work('Updating PROG_ID to {}'.format(
                        str(hdr0['HIERARCH ESO OBS PROG ID'])))
                    hdr0['PROG_ID'] = str(hdr0['HIERARCH ESO OBS PROG ID'])
            if 'MJD-END' not in hdr0.keys():
                msgs.warning('MJD-END missing')
                if 'TEXPTIME' in hdr0.keys():
                    msgs.info('Deriving MJD-END from MJD-OBS and TEXPTIME')
                    texptime_sec = float(hdr0['TEXPTIME'])
                    texptime_day = texptime_sec / (60. * 60. * 24.)
                    mjdend = float(hdr0['MJD-OBS']) + texptime_day
                    fitsfiles.add_header_card(hdr0, 'MJD-END', mjdend,
                                              'End of observation')
                    msgs.work('MJD-OBS = {} and TEXPTIME = {} days'.format(
                        str(hdr0['MJD-OBS']), str(texptime_day)))
                    msgs.work('Updating MJD-END to {}'.format(
                        str(hdr0['MJD-END'])))

            # Remove not used values
            cards_to_be_removed_hdr0 = ['ERRDATA', 'QUALDATA', 'SCIDATA']
            for card_to_be_removed_hdr0 in cards_to_be_removed_hdr0:
                hdr0.remove(card_to_be_removed_hdr0, ignore_missing=True)
            cards_to_be_removed_hdr1 = ['HDUCLASS3']
            for card_to_be_removed_hdr1 in cards_to_be_removed_hdr1:
                hdr1.remove(card_to_be_removed_hdr1, ignore_missing=True)

            # Updating the FITS file definition comment line
            hdr0.add_comment(
                "  FITS (Flexible Image Transport System) format is defined in 'Astronomy"
                " and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H",
                after='EXTEND')
            if 'COMMENT' in hdr1.keys():
                comment_values_hdr1 = hdr1['COMMENT'][:]
                for index, comment_value_hdr1 in enumerate(
                        comment_values_hdr1):
                    msgs.work('Removing COMMENT card : {}'.format(
                        comment_value_hdr1))
                hdr1.remove('COMMENT', ignore_missing=True, remove_all=True)

            # Creating white light image keyword:
            if make_whitelight_image:
                fitsfiles.add_header_card(
                    hdr0, 'ASSON1',
                    image_out.split('/')[-1],
                    'ANCILLARY.IMAGE.WHITELIGHT filename')
                msgs.work('Updating ASSON1 to {}'.format(hdr0['ASSON1']))

            # Actually creating the white-light image
            if make_whitelight_image:
                msgs.info('Making white light image')
                image_hdu = fits.PrimaryHDU()
                image_hdul = fits.HDUList([image_hdu])
                if str(hdr1['CUNIT3']).strip().upper() == 'MICRONS':
                    to_ang = 10000.
                else:
                    msgs.error('Spectral unit: {} not recognized'.format(
                        hdr1['CUNIT3']))
                delta_wave_bin = hdr1['CD3_3']
                image_hdul.append(
                    fits.ImageHDU(
                        to_ang * delta_wave_bin *
                        np.nansum(hdul[1].data, axis=0, dtype=np.float_)))
                image_hdr0 = image_hdul[0].header
                image_hdr1 = image_hdul[1].header
                card_for_image0 = [
                    'WAVELMIN', 'WAVELMAX', 'OBJECT', 'TELESCOP', 'INSTRUME',
                    'RADECSYS', 'RA', 'DEC', 'EQUINOX'
                ]
                fitsfiles.transfer_header_cards(hdr0,
                                                image_hdr0,
                                                card_for_image0,
                                                with_comment=True,
                                                delete_card=False)
                image_hdr0['PRODCATG'] = str('ANCILLARY.IMAGE.WHITELIGHT')

                card_for_image1 = [
                    'CRPIX1', 'CRPIX2', 'CRVAL1', 'CRVAL2', 'CUNIT1', 'CUNIT2',
                    'NAXIS1', 'NAXIS2', 'EXTNAME', 'CD1_1', 'CD1_2', 'CD2_1',
                    'CD2_2', 'CTYPE1', 'CTYPE2'
                ]
                fitsfiles.transfer_header_cards(hdr1,
                                                image_hdr1,
                                                card_for_image1,
                                                with_comment=True,
                                                delete_card=False)

            # Update checksum and datasum
            msgs.work('Updating checksum and datasum')
            hdul[0].add_checksum(override_datasum=False)
            hdul[1].add_datasum()
            hdul[1].add_checksum(override_datasum=True)
            hdul.flush(output_verify='fix')
            hdul.close()
            msgs.info('File {} produced.'.format(fits_out))
            if make_whitelight_image:
                image_hdul[0].add_datasum()
                image_hdul[1].add_datasum()
                image_hdul[0].add_checksum(override_datasum=True)
                image_hdul[1].add_checksum(override_datasum=True)
                image_hdul.writeto(image_out,
                                   overwrite=True,
                                   output_verify='fix')
                msgs.info('Image {} produced.'.format(image_out))

        elif instrument.startswith('IRDIS'):
            msgs.work('Fixing header for SPHERE/{} file {}'.format(
                instrument, fits_in))
            hdr = fitsfiles.header_from_fits_file(fits_in)
            if 'ESO DPR TECH' in hdr.keys():
                if str(hdr['ESO DPR TECH']).strip(
                ) == 'IMAGE,DUAL,CORONOGRAPHY':
                    msgs.work('Working with {} as observing technique'.format(
                        str(hdr['ESO DPR TECH']).strip()))
                elif 'DUAL' in str(hdr['ESO DPR TECH']).strip():
                    msgs.error('{} needs to be tested'.format(
                        str(hdr['ESO DPR TECH']).strip()))
                else:
                    msgs.error('Only DUAL imaging currently implemented')
            else:
                msgs.error('Cannot recognize the observing technique')

            # defining the two fits_out files:
            fits_out_index = [0, 1]
            fits_out_files = []
            for index in fits_out_index:
                fits_out_file = fits_out.replace('.fit',
                                                 '_' + str(index) + '.fit')
                if os.path.exists(fits_out_file):
                    shutil.copy(fits_out_file,
                                fits_out_file.replace('.fit', '_old.fit'))
                    msgs.warning(
                        '{} already exists. Backup created.'.format(
                            fits_out_file))
                fitsfiles.new_fits_like(fits_in, [0],
                                        fits_out_file,
                                        overwrite=overwrite,
                                        fix_header=True,
                                        empty_primary_hdu=False)
                fits_out_files.append(fits_out_file)

            for index, fits_out_file in zip(fits_out_index, fits_out_files):
                hdul = fitsfiles.get_hdul(fits_out_file,
                                          'update',
                                          checksum=True)
                hdr0 = hdul[0].header
                hdul[0].data = hdul[0].data[index, :, :]
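                # DUAL-band IRDIS frames stack the two filter channels along
                # the first axis: keep only the channel for this output file.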
                # Check for HISTORY
                # Primary Header
                if 'HISTORY' in hdr0.keys():
                    for history_value_hdr0 in hdr0['HISTORY']:
                        msgs.work('Cleaning card: HISTORY = {}'.format(
                            history_value_hdr0))
                    del hdr0['HISTORY']
                # Try to guess coordinates
                if 'CRVAL1' not in hdr0.keys():
                    msgs.warning('CRVAL position keywords not present')
                    if 'OBJECT' in hdr0.keys():
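                        # Resolve the OBJECT name (SkyCoord.from_name queries
                        # the Sesame service) and adopt its coordinates for
                        # the WCS only if they lie close to the RA / DEC
                        # pointing keywords.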
                        try:
                            object_coordinate = SkyCoord.from_name(
                                str(hdr0['OBJECT']).strip())
                            ra_obj, dec_obj = object_coordinate.ra.degree, object_coordinate.dec.degree
                            if 'RA' in hdr0.keys() and 'DEC' in hdr0.keys():
                                pointing_coordinate = SkyCoord(
                                    float(hdr0['RA']),
                                    float(hdr0['DEC']),
                                    unit='deg')
                                msgs.work(
                                    'Testing separation from the pointing position'
                                )
                                separation = object_coordinate.separation(
                                    pointing_coordinate).arcsec
                                if separation < 120.:
                                    msgs.info(
                                        'Object - Pointing separation is {} arcsec'.
                                        format(separation))
                                    msgs.info(
                                        'Updating CRVAL1 = {}'.format(ra_obj))
                                    msgs.info(
                                        'Updating CRVAL2 = {}'.format(dec_obj))
                                    hdr0['CRVAL1'] = ra_obj
                                    hdr0['CRVAL2'] = dec_obj
                                    msgs.work('Updating CUNIT')
                                    hdr0['CUNIT1'] = 'deg'
                                    hdr0['CUNIT2'] = 'deg'
                                    msgs.work('Updating CTYPE')
                                    hdr0['CTYPE1'] = 'RA---TAN'
                                    hdr0['CTYPE2'] = 'DEC--TAN'
                                    msgs.work('Updating CRPIX')
                                    hdr0['CRPIX1'] = float(
                                        hdul[0].data.shape[1]) / 2.
                                    hdr0['CRPIX2'] = float(
                                        hdul[0].data.shape[0]) / 2.
                                    msgs.info('Updating CD1 and CD2')
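                                     # PIXSCAL is the pixel scale in mas/px;
                                     # 1 arcsec = 2.778E-4 deg, so this factor
                                     # converts mas/px into deg/px for CD.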
                                    hdr0['CD1_1'] = hdr0[
                                        'PIXSCAL'] * 2.778E-4 / 1000.
                                    hdr0['CD2_2'] = hdr0[
                                        'PIXSCAL'] * 2.778E-4 / 1000.
                                    hdr0['CD1_2'] = 0.
                                    hdr0['CD2_1'] = 0.
                                    msgs.work('Updating RA, DEC')
                                    hdr0['RA'] = ra_obj
                                    hdr0.comments[
                                        'RA'] = object_coordinate.ra.to_string(
                                            u.hour)
                                    hdr0['DEC'] = dec_obj
                                    hdr0.comments[
                                        'DEC'] = object_coordinate.dec.to_string(
                                            u.degree, alwayssign=True)
                                else:
                                    msgs.warning(
                                        'Object - Pointing separation is {} arcsec'.
                                        format(separation))
                                    msgs.warning(
                                        'This is suspicious, CRVAL not updated'
                                    )
                        except name_resolve.NameResolveError:
                            msgs.warning('Object {} not recognized'.format(
                                str(hdr0['OBJECT']).strip()))
                            msgs.warning('CRVAL not updated')

                # Updating file prodcatg
                msgs.work('Updating PRODCATG to SCIENCE.IMAGE')
                hdr0['PRODCATG'] = str('SCIENCE.IMAGE')

                if 'PROG_ID' not in hdr0.keys():
                    msgs.warning('PROG_ID missing')
                    if 'ESO OBS PROG ID' in hdr0.keys():
                        msgs.info(
                            'Deriving PROG_ID from HIERARCH ESO OBS PROG ID')
                        msgs.work('Updating PROG_ID to {}'.format(
                            str(hdr0['HIERARCH ESO OBS PROG ID'])))
                        hdr0['PROG_ID'] = str(hdr0['HIERARCH ESO OBS PROG ID'])

                # Update checksum and datasum
                msgs.work('Updating checksum and datasum')
                hdul[0].add_checksum(override_datasum=False)
                hdul.flush(output_verify='fix')
                hdul.close()
        else:
            msgs.warning(
                'The instrument {} is not supported.\nThe file {} will not be processed'
                .format(instrument, fits_in))

    msgs.end()
Exemplo n.º 16
0
        cards_to_be_transfer = [
            hdr1_card for hdr1_card in hdr1
            if hdr1_card not in not_to_be_transfer
        ]
        fitsfiles.transfer_header_cards(hdr1,
                                        hdr0,
                                        cards_to_be_transfer,
                                        with_comment=True,
                                        delete_card=True)

        # Updating the FITS file definition comment line
        hdr0['COMMENT'] = "  FITS (Flexible Image Transport System) format is defined in 'Astronomy" \
                          + "  and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H"

        # Including specific cards
        msgs.work('Updating SPEC_RES')
        if hdr0['HIERARCH ESO INS GRAT1 NAME'].strip() == 'J':
            hdr0['SPEC_RES'] = 2000.
        elif hdr0['HIERARCH ESO INS GRAT1 NAME'].strip() == 'H':
            hdr0['SPEC_RES'] = 3000.
        elif hdr0['HIERARCH ESO INS GRAT1 NAME'].strip() == 'K':
            hdr0['SPEC_RES'] = 4000.
        elif hdr0['HIERARCH ESO INS GRAT1 NAME'].strip() in ('H + K', 'H+K'):
            hdr0['SPEC_RES'] = 1500.
        else:
            msgs.error('GRAT1 name: {} not recognized'.format(
                hdr0['HIERARCH ESO INS GRAT1 NAME']))

        msgs.work('Updating PRODCATG')
        hdr0['PRODCATG'] = str('SCIENCE.CUBE.IFS')
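
The grating-to-resolution mapping above can also be written as a dictionary lookup; a minimal sketch using the same header and `msgs` helper (the dict name is an illustration, not part of the original code):

SPEC_RES_FROM_GRAT1 = {'J': 2000., 'H': 3000., 'K': 4000.,
                       'H + K': 1500., 'H+K': 1500.}
grat1_name = hdr0['HIERARCH ESO INS GRAT1 NAME'].strip()
if grat1_name in SPEC_RES_FROM_GRAT1:
    hdr0['SPEC_RES'] = SPEC_RES_FROM_GRAT1[grat1_name]
else:
    msgs.error('GRAT1 name: {} not recognized'.format(grat1_name))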
Exemplo n.º 17
0
def query_from_radec(positions=None,
                     radius=None,
                     instruments=None,
                     data_types=None,
                     columns=None,
                     verbose=False,
                     maxrec=None):
    r"""Query the ESO archive for data at a given position in RA and Dec

    The `positions` value (or list) needs to be given as an
    `astropy.coordinates.SkyCoord <https://docs.astropy.org/en/stable/coordinates/>`_ object.

    The output is an `astropy.table` (or a list of tables) with the columns defined in
    `core.tap_queries.COLUMNS_FROM_OBSCORE`. It is possible to change the columns to
    query by setting the value of `columns`.


    .. note::
        If `radius` is set to `None`, the query will be performed with:
        `INTERSECT(POINT('',RA,Dec), s_region)`
        instead of:
        `INTERSECT(s_region,CIRCLE('',RA,Dec,radius/3600.))`.
        See here for further examples: `tap obs examples <http://archive.eso.org/tap_obs/examples>`_


    Args:
        positions (astropy.coordinates.SkyCoord): coordinates (or list of coordinates) of the sky you want to query
        radius (float, optional): search radius in arcseconds
        instruments (list): list of `str` (or single `str`) containing the instruments used to limit the search
        data_types (list): list of `str` (or single `str`) containing the data types used to limit the search
        columns (list): list of `str` (or single `str`) containing the columns to be queried
        verbose (bool): if set to `True` additional info will be displayed
        maxrec (int, optional): defines the maximum number of entries a single query can return. If `None`, the
            limit of the service is used.

    Returns:
        any: results from the queries

    """
    # Check inputs:
    # Working on positions
    positions_list = cleaning_lists.from_element_to_list(
        positions, element_type=coordinates.SkyCoord)
    # Working on radius
    if radius is not None:
        if isinstance(radius, int):
            radius = float(radius)
        else:
            assert isinstance(radius, float), r'Input radius is not a number'
    # Working on instruments
    instruments_list = cleaning_lists.from_element_to_list(instruments,
                                                           element_type=str)
    # Working on data_types
    data_types_list = cleaning_lists.from_element_to_list(data_types,
                                                          element_type=str)
    # Working on columns
    columns_list = _is_column_list_in_obscore(columns)

    if verbose:
        how_many_positions = len(positions_list)
        if how_many_positions > 1:
            msgs.work(
                'Exploring ESO archive around {} locations in the sky'.format(
                    how_many_positions))
        else:
            msgs.work(
                'Exploring ESO archive around the input location in the sky')

    # Running over all positions
    results_from_query = []
    for idx, position in enumerate(positions_list):
        position = position.transform_to(ICRS)
        ra, dec = np.float_(position.ra.degree), np.float_(position.dec.degree)
        msgs.work(
            'Running query {} to the ESO archive (out of {} total)'.format(
                idx + 1, len(positions_list)))
        # Define query
        query = "{0}{1}{2}{3}".format(
            tap_queries.create_query_obscore_base(columns_list),
            tap_queries.condition_intersects_ra_dec(ra, dec, radius=radius),
            tap_queries.condition_instruments_like(instruments_list),
            tap_queries.condition_data_types_like(data_types_list))
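        # The assembled ADQL has roughly the shape described in the docstring
        # note, e.g.:
        #   SELECT <columns> FROM ivoa.ObsCore
        #   WHERE INTERSECTS(s_region, CIRCLE('', ra, dec, radius/3600.)) = 1
        #   AND instrument_name LIKE '...' AND dataproduct_type LIKE '...'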
        # instantiate ESOCatalogues
        query_for_observations = query_observations.ESOObservations(
            query=query, type_of_query='sync', maxrec=maxrec)
        # running query and append results to the list
        if verbose:
            query_for_observations.print_query()
        # Obtaining query results
        query_for_observations.run_query(to_string=True)
        result_from_query = query_for_observations.get_result_from_query()
        if len(result_from_query) < 1:
            msgs.warning('No data has been retrieved')
        else:
            msgs.info('A total of {} entries have been retrieved'.format(
                len(result_from_query)))
            if verbose:
                msgs.info('For the following instruments:')
                for inst_name in np.unique(
                        result_from_query['instrument_name'].data):
                    msgs.info(' - {}'.format(inst_name))

        results_from_query.append(result_from_query)

    # Returning results
    return _return_results_from_query(results_from_query)
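
A minimal usage sketch of `query_from_radec` (the target position and the `data_types` value are illustrative, and the import of the module defining this function is assumed):

from astropy import coordinates

# Coordinates of an illustrative target
position = coordinates.SkyCoord(ra=53.16, dec=-27.78, unit='deg')
# Query 5 arcsec around the position for MUSE cubes, capped at 100 entries
results = query_from_radec(positions=position, radius=5.,
                           instruments='MUSE', data_types='cube',
                           verbose=True, maxrec=100)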