Example #1
def from_element_to_list(element, element_type=str):
    r"""Given an element it returns a list containing the element

    It also checks all the elements in the list have the same type defined by `element_type`

    Args:
        element (any): element that will be put in the list
        element_type (any): type of the element that should be contained in the list

    Returns:
        list: list containing `element`

    """
    if element is None:
        return None
    elif isinstance(element, list):
        for element_in_list in element:
            assert isinstance(element_in_list, element_type), r'{} must be a {}'.format(element_in_list, element_type)
        return element
    elif isinstance(element, np.ndarray):
        element_list: list = element.tolist()
        for element_in_list in element_list:
            assert isinstance(element_in_list, element_type), r'{} must be a {}'.format(element_in_list, element_type)
        return element_list
    elif isinstance(element, MaskedColumn):
        element_list = element.data.data.tolist()
        for element_in_list in element_list:
            assert isinstance(element_in_list, element_type), r'{} must be a {}'.format(element_in_list, element_type)
        return element_list
    elif isinstance(element, element_type):
        return [element]
    else:
        msgs.error('Not valid type for: {}'.format(element))
    return
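
A minimal usage sketch of the helper above (inputs are illustrative; the import path is inferred from Examples #5 and #30, where the module ESOAsg.ancillary.cleaning_lists is used):

# Hypothetical usage; inputs are placeholders.
from ESOAsg.ancillary.cleaning_lists import from_element_to_list

from_element_to_list('a.fits')                     # -> ['a.fits']
from_element_to_list(['a.fits', 'b.fits'])         # -> ['a.fits', 'b.fits']
from_element_to_list([1, 2, 3], element_type=int)  # -> [1, 2, 3]
from_element_to_list(None)                         # -> None
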
Example #2
def make_list_of_fits_files(args_input, verify_fits=False) -> list:
    r"""Cleaning an input list of fits files

    Args:
        args_input (list): input list of fits files that will be checked (usually is coming from `parse_arguments()` in
            a macro)
        verify_fits (bool): if set to `True`, it will verify that the fits file is complaint to the FITS standard

    Returns:
        list: list containing all the valid fits files given in input

    """
    list_of_fits_files = []
    if not isinstance(args_input, list):
        args_input_files: list = [args_input]
    else:
        args_input_files: list = args_input
    for args_input_file in args_input_files:
        if checks.fits_file_is_valid(args_input_file, overwrite=False, verify_fits=verify_fits):
            list_of_fits_files.append(args_input_file)
        else:
            msgs.warning('{} excluded because not a valid fits file'.format(args_input_file))
    if len(list_of_fits_files) == 0:
        msgs.error('No valid fits files present')
    return list_of_fits_files
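
A usage sketch (file names are placeholders; invalid entries are dropped with a warning, and an error is raised if nothing survives):

# Hypothetical usage; 'notes.txt' would be excluded as not a valid fits file.
from ESOAsg.ancillary.cleaning_lists import make_list_of_fits_files

clean_list = make_list_of_fits_files(['cube1.fits', 'notes.txt', 'cube2.fits.gz'])
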
Example #3
def make_list_of_int(args_input, length=None):
    r"""Cleaning an input list of int

    Args:
        args_input (`list`):
            input list of strings that will be checked (usually is coming from `parse_arguments()` in a macro).
        length (`int`):
            If set to a value, the code will check that the length of `list_of_string` will match `length`. If not
            the code will further check if `args_input` contains only one element. In this case the output list will
            contain this values `length` times. If this situation does not happen, an error is raised.
    Returns:
        list_of_int (`list`):
            list containing all the valid int given in input
    """
    if length is not None:
        assert isinstance(length, int), '`length` must be an integer'
    list_of_int = []
    if not isinstance(args_input, list):
        args_input_strings = [args_input]
    else:
        args_input_strings = args_input
    for args_input_string in args_input_strings:
        if isinstance(args_input_string, int):
            list_of_int.append(args_input_string)
        else:
            msgs.warning('{} excluded because not a valid int'.format(args_input_string))
    if len(list_of_int) == 0:
        msgs.error('No valid element present in the list')
    if length is not None:
        if len(list_of_int) != length:
            if len(list_of_int) == 1:
                list_of_int = [list_of_int[0]] * length
            else:
                msgs.error('List length: {} not matching {}'.format(len(list_of_int), length))
    return list_of_int
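
A sketch of the `length` broadcasting behaviour described in the docstring (inputs are illustrative):

from ESOAsg.ancillary.cleaning_lists import make_list_of_int

make_list_of_int(5)                 # -> [5]
make_list_of_int(5, length=3)       # -> [5, 5, 5]: the single value is repeated
make_list_of_int([1, 2], length=3)  # error raised: lengths do not match
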
Example #4
def run_query(tap_service, query, type_of_query, maxrec=default.get_value('maxrec')):
    r"""Run query to TAP service and return result as an `astropy.Table`

    If the job requires too much time to run, the code will move to an asynchronous query.

    Args:
        tap_service (pyvo.dal.tap.TAPService): TAP service that will be used for the query
        query (str): query to be run
        type_of_query (str): type of query to be run
        maxrec (int): define the maximum number of entries that a single query can return. Default is set
            by default.get_value('maxrec')

    Returns:
        astropy.table: result from the query to the TAP service

    """
    if type_of_query not in TAP_QUERY_TYPES:
        msgs.error('{} not a valid entry for the type of TAP query. Possibilities are: {}'.format(type_of_query,
                                                                                                  TAP_QUERY_TYPES))
    # Obtaining query results and convert it to an astropy table
    if query is not None:
        if type_of_query == 'sync':
            result_from_query = run_query_sync(tap_service, query, maxrec=maxrec)
        else:
            result_from_query = run_query_async(tap_service, query, maxrec=maxrec)
    else:
        msgs.warning('Empty query provided')
        result_from_query = None
    return result_from_query
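
A sketch of the intended call pattern, pairing this with `define_tap_service` from Example #19 (it assumes both helpers are imported from the same module, that 'sync' is among TAP_QUERY_TYPES, and that the ADQL string is illustrative):

# Hypothetical usage: synchronous query against the ESO raw/reduced data TAP service.
tap_service = define_tap_service('eso_tap_obs')
result_table = run_query(tap_service,
                         'SELECT TOP 5 target_name FROM ivoa.ObsCore',
                         'sync', maxrec=5)  # astropy Table, or None for an empty query
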
Example #5
def f_lambda2f_nu(f_lambdas, wavelengths):
    r"""Convert list of spectral flux densities per unit wavelength into spectral flux densities per unit frequency

    Args:
        f_lambdas (any): input list of spectral flux densities per unit wavelength to be converted. If
            `f_lambdas` does not have units, erg/s/cm**2/Ang is assumed.
        wavelengths (any): specific wavelengths at which to perform the conversion. If the input does not have units,
            Ang is assumed. It must be either a single value or a list with the same length as `f_lambdas`

    Returns:
        list: list of spectral flux densities per unit frequency

    """
    f_lambdas_list = cleaning_lists.from_element_to_list_of_quantities(f_lambdas,
                                                                       unit=u.erg*u.centimeter**-2.*u.second**-1.*u.AA**-1.)
    wavelengths_list = cleaning_lists.from_element_to_list_of_quantities(wavelengths, unit=u.AA)
    f_nus_list = []
    if len(wavelengths_list) == 1:
        for f_lambda in f_lambdas_list:
            f_nus_list.append(f_lambda.to(u.jansky,
                                          equivalencies=u.spectral_density(wavelengths_list[0])))
    elif len(wavelengths_list) == len(f_lambdas_list):
        for f_lambda, wavelength in zip(f_lambdas_list, wavelengths_list):
            f_nus_list.append(f_lambda.to(u.jansky,
                                          equivalencies=u.spectral_density(wavelength)))
    else:
        msgs.error('Length of `wavelengths` not compatible with the one of `f_lambdas`')
        return
    return f_nus_list
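
A sketch with explicit astropy units (numbers are illustrative; without units the inputs default to erg/s/cm**2/Ang and Ang respectively):

import astropy.units as u

# 1e-17 erg/s/cm**2/Ang at 5000 Ang -> flux density per unit frequency in jansky
f_nu_list = f_lambda2f_nu(1.e-17 * u.erg / u.s / u.cm**2 / u.AA, 5000. * u.AA)
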
Example #6
def sb2mag(surface_brightnesses, areas):
    r"""Convert list of surface brightness into AB magnitudes given an area

    .. math::

        m = SB - 2.5\,\log_{10}(area/arcsec^2)

    Args:
        surface_brightnesses (any): input list of surface brightnesses to be converted to AB magnitudes. If the
            input does not have units, mag/arcsec**2 is assumed.
        areas (any): area over which to calculate the magnitude. If the input does not have units, arcsec**2
            is assumed. It must be either a single value or a list with the same length as `surface_brightnesses`

    Returns:
        list: list of magnitudes. Each element of the list is an `astropy.units.Quantity` with mag as unit

    """
    surface_brightnesses_list = cleaning_lists.from_element_to_list_of_quantities(surface_brightnesses,
                                                                                  unit=u.mag/u.arcsec**2)
    areas_list = cleaning_lists.from_element_to_list_of_quantities(areas, unit=u.arcsec**2.)
    abmag_list = []
    if len(areas_list) == 1:
        for surface_brightness in surface_brightnesses_list:
            abmag_list.append((surface_brightness*u.arcsec**2.) - (2.5 * np.log10(areas_list[0]/u.arcsec**2.) * u.mag))
    elif len(areas_list) == len(surface_brightnesses_list):
        for surface_brightness, area in zip(surface_brightnesses_list, areas_list):
            abmag_list.append((surface_brightness*u.arcsec**2.) - (2.5 * np.log10(area/u.arcsec**2.) * u.mag))
    else:
        msgs.error('Length of `areas` not compatible with the one of `surface_brightnesses`')
        return
    return abmag_list
Example #7
def mag2sb(abmags, areas):
    r"""Convert list of AB magnitudes into surface brightness given an area

    .. math::

        SB = m + 2.5\,\log_{10}(area/arcsec^2)

    Args:
        abmags (any): input list of magnitudes in the AB system to be converted to surface brightnesses in
            mag/arcsec**2
        areas (any): area over which to calculate the surface brightness. If the input does not have units, arcsec**2
            is assumed. It must be either a single value or a list with the same length as `abmags`

    Returns:
        list: list of surface brightnesses. Each element of the list is an `astropy.units.Quantity` with
            mag/arcsec**2 as unit
    """
    abmags_list = cleaning_lists.from_element_to_list_of_quantities(abmags, unit=u.mag)
    areas_list = cleaning_lists.from_element_to_list_of_quantities(areas, unit=u.arcsec**2.)
    surface_brightnesses_list = []
    if len(areas_list) == 1:
        for abmag in abmags_list:
            surface_brightnesses_list.append((abmag + (2.5 * np.log10(areas_list[0]/u.arcsec**2.)))/u.arcsec**2.)
    elif len(areas_list) == len(abmags_list):
        for abmag, area in zip(abmags_list, areas_list):
            surface_brightnesses_list.append((abmag + (2.5 * np.log10(area/u.arcsec**2.)))/u.arcsec**2.)
    else:
        msgs.error('Length of `areas` not compatible with the one of `abmags`')
        return
    return surface_brightnesses_list
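
The two helpers above are inverses of each other; a hypothetical round trip over a 4 arcsec**2 aperture:

import astropy.units as u

sb_list = mag2sb(20. * u.mag, 4. * u.arcsec**2)  # -> [~21.51 mag / arcsec2]
mag_list = sb2mag(sb_list[0], 4. * u.arcsec**2)  # -> back to [20. mag]
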
Example #8
def columns_in_catalogue(table_name, verbose=False):
    r"""Return the columns present in a table

    Args:
        table_name (`str`):
            Table to be queried. To check the full list of catalogues run `all_catalogues()`
        verbose (`bool`):
            if set to `True` additional info will be displayed

    Returns:
        columns_table (`astropy.table`):
            List of all columns present in a table. Information is stored in `column_name`, `datatype`, `description`,
            and `unit`
    """
    # Check for the table
    if not _is_table_at_eso(table_name):
        msgs.error('{} is not a valid table'.format(table_name))
    query = '''
            SELECT
                column_name, datatype, description, table_name, unit 
            FROM
                columns
            WHERE
                table_name='{}'
            '''.format(table_name)
    # Obtaining query results
    columns_table = _run_query(query, maxrec=None, verbose=verbose)
    columns_table['column_name'].data.data[:] = checks.from_bytes_to_string(
        columns_table['column_name'].data.data)
    columns_table.remove_column('table_name')
    return columns_table
Example #9
def get_hdul(fits_name,
             mode='readonly',
             checksum=True):  # Written by Ema 05.03.2020
    r"""Wrapper for astropy `fits.open`. It checks if the file exists and in case returns its HDUList.

    Args:
        fits_name (str): fits file name
        mode (str): Open mode for the file. Possibilities are: `readonly`, `update`, `append`, `denywrite`,
            or `ostream`
        checksum (bool): If True, verifies that both `DATASUM` and `CHECKSUM` card values (when present in the HDU
            header) match the header and data of all HDUs in the file. Updates to a file that already has a checksum
            will preserve and update the existing checksums unless this argument is given a value of `remove`,
            in which case the `CHECKSUM` and `DATASUM` values are not checked, and are removed when saving
            changes to the file

    Returns:
        hdul: list-like collection of HDU objects

    """
    if not checks.fits_file_is_valid(fits_name):
        msgs.error('Fits file not valid')
        return None
    else:
        hdul = fits.open(fits_name, mode=mode, checksum=checksum)
        msgs.info('The fits file {} contains {} HDUs'.format(
            fits_name, len(hdul)))
        msgs.info('Summary:')
        hdul.info()
        return hdul
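
A usage sketch (the file name is a placeholder; the returned HDUList should be closed once it is no longer needed):

hdul = get_hdul('spectrum.fits', mode='readonly', checksum=True)
primary_header = hdul[0].header  # access HDUs as in any astropy HDUList
hdul.close()
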
Example #10
def _print_1cards_2values(cards,
                          values1,
                          values2,
                          on_terminal=True,
                          on_file=None):
    r""" Print on terminal (if `on_terminal` is `True`) and/or on a text file (if `on_file` is not `None`) cards
    and values for

    Args:
        cards (`np.array`):
            cards to be printed
        values1 (`np.array`):
            first values to be printed
        values2 (`np.array`):
            second values to be printed
        on_terminal (`bool`):
            if `True`, the `cards` and the two `values` are printed on the terminal.
        on_file (`str`, `None`):
            if not `None`, `cards` and `values` will be stored in this text file

    Returns:
        Nothing is returned, but if `on_file` is set, `cards` and `values` will be saved on a text file, and if
        `on_terminal` is `True` it will be also displayed on the terminal.
    """
    # the maximum number of chars required to display any item in list
    max_length = np.max(
        (_find_max_str_length(cards), _find_max_str_length(values1),
         _find_max_str_length(values2), 10))
    if np.ndim(cards) != 1:
        msgs.error('Cards should be a 1D array.')
    else:
        spaces_values1 = _get_y_dimension(values1)
        spaces_values2 = _get_y_dimension(values2)
        header_line = 'CARDS' + ' '*(max_length-4) + 'VALUES_1st' + ' '*((spaces_values1-1)*(max_length+1)) + \
                      ' '*(max_length-9) + 'VALUES_2nd' + ' '*((spaces_values2-1)*(max_length+1)) + \
                      ' '*(max_length-9)+'\n' + '-----' + ' '*(max_length-4)+'----------' + \
                      ' '*((spaces_values1-1)*(max_length+1)) + ' '*(max_length-9) + '----------'
        complete_matrix = np.vstack((cards, values1, values2))
        if on_file is None:
            filename = '_temp.txt'
        else:
            filename = on_file
        np.savetxt(filename,
                   complete_matrix.T,
                   fmt='%-' + str(max_length) + 's',
                   header=header_line)
        if on_terminal:
            print(" ")
            with open(filename) as text_file:
                line_number = 0  # plain int; np.int was removed from NumPy
                for line in text_file:
                    if line_number < 2:
                        print(line.strip())
                    else:
                        print('  ' + line.strip())
                    line_number += 1
            print(" ")
        if os.path.exists(filename) and on_file is None:
            os.remove(filename)
Example #11
def contours_from_gw_bayestar(file_name, credible_level=50.):
    r"""Given a bayestar.fits.gz HEALPix maps of a GW superevent it extracts contours at the given `credible_level`

    This is a wrapper around `ligo_skymap_contour`. The different contours are then counterclockwise oriented to be
    compatible with TAP and ASP queries.

    Args:
        file_name (str): Name of HEALPix maps
        credible_level (float): Probability level at which contours are returned

    Returns:
        list: [RA,Dec] list defining the contours location. These are counter-clockwise oriented as seen on the sky
           from inside the sphere. The length of `contours` represents the number of disconnected regions identified.
    """

    # Some checks
    assert isinstance(file_name, str), '{} is not a valid string'.format(file_name)
    if not checks.fits_file_is_valid(file_name):
        msgs.error('{} not a valid fits file'.format(file_name))
    assert isinstance(credible_level, (int, float)), '`credible_level` is not a float or an int'

    # Create temporary file where to store output from ligo_skymap_contour
    contour_tmp_file = '.' + file_name + '.tmp.json'
    if path.isfile(contour_tmp_file):
        remove(contour_tmp_file)

    # Use ligo_skymap_contour to compute contours
    ligo_skymap_contour(args=[file_name, '--output', contour_tmp_file, '--contour', str(credible_level)])

    # Parse the content of the resulting json file into a dict
    with open(contour_tmp_file, 'r') as json_file:
        json_data = json_file.read()
    contours_dict = json.loads(json_data)
    # cleaning up
    remove(contour_tmp_file)

    # Parse the resulting json to obtain a list of coordinates defining the vertices of the contours of each peak
    # contours is a list, each element of which contains the vertices of one contour encircling the desired significance
    contours = contours_dict['features'][0]['geometry']['coordinates']
    # Make sure that the orientation of the polygons on the sky is the correct one for the TAP queries,
    # i.e. counter-clockwise as seen on the sky from inside the sphere.
    for iii, contour in enumerate(contours):
        contours[iii] = _ensure_orientation(contour)

    # Quick summary:
    if len(contours) > 0:
        msgs.info('Extracted the contours for {} regions at {} credible level'.format(len(contours), credible_level))
    else:
        msgs.info('No contours extracted at {} credible level'.format(credible_level))
        contours = None

    return contours
Example #12
def fits_file_is_valid(fits_file,
                       verify_fits=False,
                       overwrite=False) -> bool:  # Written by Ema 05.03.2020
    r"""Check if a file exists and has a valid extension

    The option `verify_fits` checks the header of the fits file using `astropy.io.fits.verify`

    Args:
        fits_file (str): fits file you would like to check
        verify_fits (bool): if set to `True`, it will verify that the fits file is compliant with the FITS standard.
        overwrite (bool): if `True`, overwrite the input fits file with the header corrections from `verify_fits`

    Returns:
        bool: `True` if the file exists and looks valid, `False` (with a warning raised) otherwise.

    """
    is_fits = True
    # Checks if it is a string
    assert isinstance(fits_file, str), 'input `fits_file` needs to be a string'
    # Check for ending
    # ToDo
    # to be updated to: PERMITTED_FITS_ENDINGS
    if not fits_file.endswith('.fits') and not fits_file.endswith(
            '.fits.fz') and not fits_file.endswith('.fits.gz'):
        msgs.warning(
            'File: {} does not end with `fits` or `fits.fz` or `fits.gz`'.
            format(fits_file))
        is_fits = False
    # Check for existence
    if not os.path.exists(fits_file):
        msgs.warning('File: {} does not exist'.format(fits_file))
        is_fits = False
    # Check for compliance with FITS standard
    if verify_fits:
        if overwrite:
            hdul = fits.open(fits_file, mode='update', checksum=False)
            if not check_checksums(hdul):
                is_fits = False
            hdul.flush(output_verify='fix+warn', verbose=True)
            hdul.writeto(fits_file, checksum=True, overwrite=True)
            msgs.info('File checked and rewritten')
        else:
            hdul = fits.open(fits_file, mode='readonly', checksum=True)
            if not check_checksums(hdul):
                is_fits = False
            hdul.verify('fix+warn')
        hdul.close()
    else:
        if overwrite:
            msgs.error('The option overwrite works only if verify_fits = True')
    return is_fits
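
A sketch of the check-before-open pattern used by `get_hdul` in Example #9 (the file name is a placeholder, and `fits` and `msgs` are assumed to be in scope as in the module above):

if fits_file_is_valid('image.fits', verify_fits=True):
    hdul = fits.open('image.fits')  # existence and FITS compliance already checked
else:
    msgs.warning('image.fits skipped')
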
Example #13
    def load_from_table(self,
                        table,
                        primary_header=None,
                        copy_header=True,
                        where_time='TIME',
                        where_time_bin='TIME_BIN',
                        where_flux='FLUX',
                        where_error='ERROR',
                        where_background='BACKGROUND',
                        where_quality='QUAL'):
        r"""Given a table put it in a LightCurves object

        Args:
            where_quality:
            where_background:
            where_error:
            where_time_bin:
            where_time:
            copy_header:
            primary_header:
            where_flux :
        """
        if checks.table_is_valid(table):
            msgs.work('Reading input table')

        if primary_header is not None:
            if len(primary_header) > 0:
                self.primary_header = primary_header
            else:
                msgs.warning('Empty `primary_header` provided')

        if copy_header:
            if len(table.header) > 0:
                self.header = table.header
            else:
                msgs.warning('No header found in the table')

        if isinstance(table, fits.BinTableHDU):
            self._load_from_BinTableHDU(table,
                                        copy_header=copy_header,
                                        where_time=where_time,
                                        where_time_bin=where_time_bin,
                                        where_flux=where_flux,
                                        where_error=where_error,
                                        where_background=where_background,
                                        where_quality=where_quality)
        elif isinstance(table, fits.TableHDU):
            # ToDo implement TableHDU case
            msgs.error('To be implemented')
        else:
            msgs.error('Unknown table type')
Example #14
    def to_fits(self,
                fits_file_name,
                light_curve_name='LIGHTCURVE',
                overwrite=True,
                autocorrect=False):
        if not self.check(autocorrect=autocorrect):
            msgs.error(
                'the LightCurve object does not respect the requirements from ESO'
            )
        save_into_fits(fits_file_name,
                       self.primary_header,
                       self.header,
                       light_curve_name,
                       self,
                       overwrite=overwrite)
Example #15
    def prodcatg(self, prodcatg_type):
        self.__prodcatg = eso_prodcatg.ProdCatg(prodcatg_type=prodcatg_type)
        if self.is_primary is True:
            _prodcatg_value = self.get('PRODCATG', default=None)
            if _prodcatg_value is None:
                msgs.info(
                    'Added PRODCATG = {} to the header'.format(prodcatg_type))
                self.set('PRODCATG', prodcatg_type)
            elif _prodcatg_value != prodcatg_type:
                msgs.warning('Updating value of PRODCATG from {} to {}'.format(
                    _prodcatg_value, prodcatg_type))
                self.set('PRODCATG', prodcatg_type)
            elif _prodcatg_value == prodcatg_type:
                msgs.info('PRODCATG = {}'.format(_prodcatg_value))
            else:
                msgs.error('Cannot set the value of PRODCATG')
Example #16
def _get_connector(connector):
    r"""Check that the connector is valid

    Args:
        connector (str, optional): connector to be put in front of the condition (e.g., '&')

    Returns:
        str: valid connector

    """
    if connector is None:
        clean_connector = ''
    elif connector in CONNECTORS:
        clean_connector = connector
    else:
        msgs.error('Not a valid connector for the ASP query. Possible values are: {}'.format(CONNECTORS))
        clean_connector = None
    return clean_connector
Example #17
def download(dp_id,
             min_disk_space=np.float32(default.get_value('min_disk_space'))):
    r"""Given a filename in the ADP format, the code download the file from the
    `ESO archive <http://archive.eso.org>`_

    ..note::
        if dp_id is not a `numpy.str`, a WARNING message will be raised and the content of `dp_id` will be
        converted into a string.

    Args:
        dp_id (`numpy.str`):
            Data product ID to be downloaded.
        min_disk_space (`numpy.float`):
            The file will be downloaded only if there is this amount of space (in Gb) free on the disk.
            By default is set by the `default.txt` file.

    Returns:
        This downloads a fits ADP file with the same name of the input.
    """

    # Check for disk space
    checks.check_disk_space(min_disk_space=min_disk_space)

    for file_name in dp_id:
        # if the file name is in byte, this decode it.
        if not isinstance(file_name, str):
            msgs.warning('The content of dp_id is not in a string format.')
            msgs.warning('The code is trying to fix this.')
            if isinstance(file_name, bytes):
                file_name = str(file_name.decode("utf-8"))
                msgs.warning('Converted to {}.'.format(type(file_name)))
            else:
                msgs.error(
                    'Unable to understand the format of the dp_id entry: {}'.
                    format(type(file_name)))

        # Given a dp_id of a public file, the link to download it is constructed as follows:
        download_url = 'http://archive.eso.org/datalink/links?ID=ivo://eso.org/ID?{}&eso_download=file'.format(
            str(file_name))
        msgs.work(
            'Downloading file {}. This may take some time.'.format(file_name +
                                                                   '.fits'))
        urllib.request.urlretrieve(download_url, filename=file_name + '.fits')
        msgs.info('File {} downloaded.'.format(file_name + '.fits'))
Example #18
def header_from_fits_file(fits_name, which_hdu=0):  # Written by Ema 05.03.2020
    r"""Load an header with the information from a fits file

    Args:
        fits_name (`str`):
            fits file name
        which_hdu (`int`):
            select from which HDU you are getting the header. Default = 0

    Returns:
         header (`hdu.header`):
             the header corresponding to `which_hdu` from `fits_name`

    """

    assert isinstance(which_hdu, (int, np.int_)), 'which_hdu must be an int'
    if not checks.fits_file_is_valid(fits_name):
        msgs.error('Fits file not valid')
    else:
        header = fits.getheader(fits_name, which_hdu)
        return header
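
A usage sketch (placeholder file name):

hdr = header_from_fits_file('cube.fits', which_hdu=0)
print(hdr.get('OBJECT', 'keyword not present'))  # standard astropy Header lookup
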
Example #19
def define_tap_service(which_tap_service):
    r"""Load a Table Access Protocol (TAP) service from defaults

    Currently the supported `TAP services <http://archive.eso.org/programmatic/#TAP>`_ are:
    * `eso_tap_cat`: TAP service for scientific catalogues generated by ESO observing teams
    * `eso_tap_obs`: TAP service for raw, reduced, and ambient data

    See `pyvo docs <https://pyvo.readthedocs.io/en/latest/api/pyvo.dal.TAPService.html#>`_ for further details

    Args:
        which_tap_service (str): Select the `TAP services <http://archive.eso.org/programmatic/#TAP>`_ to be queried

    Returns:
        pyvo.dal.tap.TAPService: TAP service used for the queries

    """
    if which_tap_service not in TAP_SERVICES:
        msgs.error('{} not a valid entry for TAP services. Possibilities are: {}'.format(which_tap_service,
                                                                                         TAP_SERVICES))
    tap_service = dal.tap.TAPService(default.get_value(which_tap_service))
    return tap_service
Example #20
def from_element_to_list_of_quantities(element, unit=None):
    r"""Convert an input into a list of `astropy.units.Quantity`

    Args:
        element (int, float, np.ndarray, Quantity object, list): element that will be put in the list
        unit (UnitBase instance): An object that represents the unit to be associated with the input value

    Returns:
        list: list of quantities in the format `element`*`unit`

    """
    assert isinstance(unit, u.UnitBase), r'{} not a valid astropy unit'.format(unit)
    if isinstance(element, int):
        return [float(element)*unit]
    elif isinstance(element, float):
        return [element*unit]
    elif isinstance(element, np.ndarray) and not isinstance(element, u.Quantity):
        element_list_clean = []
        element_list: list = np.copy(element).tolist()
        for element_in_list in element_list:
            element_list_clean.append(element_in_list*unit)
        return element_list_clean
    elif isinstance(element, u.Quantity):
        element_list_clean = []
        element_converted = np.copy(element).to(unit)
        for element_in_list in np.nditer(element_converted):
            element_list_clean.append(element_in_list*unit)
        return element_list_clean
    elif isinstance(element, list):
        element_list_clean = []
        for element_in_list in element:
            if isinstance(element_in_list, u.Quantity):
                element_list_clean.append(element_in_list.to(unit))
            else:
                element_list_clean.append(element_in_list*unit)
        return element_list_clean
    else:
        msgs.error('The input cannot be converted into a list of quantities')
        return
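
A sketch of the accepted input flavours (values are illustrative):

import numpy as np
import astropy.units as u

from_element_to_list_of_quantities(1.5, unit=u.AA)                 # [1.5 AA]
from_element_to_list_of_quantities(np.array([1., 2.]), unit=u.AA)  # [1. AA, 2. AA]
from_element_to_list_of_quantities([3. * u.nm], unit=u.AA)         # [30. AA]
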
Example #21
def abmaglim(rms, seeing_fwhm, exptime=None, zero_point=None, sigma=5.):
    r"""Calculate the N-sigma magnituide limit.

    The code behaves in different ways depending if zero_point is set to None or not.
    If zero_points is None:
        rms needs to have units and exptime is not considered.
        abmaglim = -2.5 * Log10( sigma * rms * PI*(.5*seeing_fwhm)**2. / (3631.*u.jansky) )
    else:
        abmaglim is calculated as:
        abamglim = -2.5 * Log10( sigma * rms * PI*(.5*seeing_fwhm)**2. / exptime ) + zero_point

    Args:
        rms (`float`):
           Calculated rms on the image
        exptime (`float`):
            Exposure time in seconds
        zero_point (`float`):
            AB zero point of the image. If not set, the code will take the RMS with the associated
            astropy.units.
        seeing_fwhm (`float`):
            FWHM of the seeing in pixels
        sigma (`float`):
           Number of sigma to consider a detection significant

    Returns:
        abmaglim (`float`):
            AB magnitude limit of for an image
    """
    msgs.work('Calculating {}-sigma AB mag limit'.format(sigma))

    if zero_point is None:
        if exptime is not None:
            msgs.error('`exptime` needs to be `None` if `zero_point` is `None`')
        rms_in_jansky = rms.to(u.jansky, equivalencies=u.spectral())
        abmaglim = -2.5 * np.log10(sigma * rms_in_jansky * np.pi * np.power(seeing_fwhm / 2., 2.) / (3631. * u.jansky))
    else:
        abmaglim = -2.5 * np.log10(sigma * rms * np.pi * np.power(seeing_fwhm / 2., 2.) / exptime) + zero_point
    return abmaglim
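
A worked sketch of the `zero_point is None` branch (numbers are illustrative; the rms must carry units in this branch):

import astropy.units as u

# 5-sigma limit for rms = 3e-8 Jy over a seeing disk of FWHM = 4 pixels:
# -2.5*log10(5. * 3.e-8 * pi * 2.**2. / 3631.) is roughly 23.2 AB mag
limit = abmaglim(3.e-8 * u.jansky, 4., sigma=5.)
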
Example #22
def from_number_to_string(number):
    r"""Given an int or a float it returns a string

    If the input is a int, it is first converted to float and then to string

    Args:
        number (any): `int` or `float` that needs to be transformed into a string

    Returns:
        str: the same value as `number` but as a `str`

    """
    if number is None:
        return None
    elif isinstance(number, str):
        return number
    elif isinstance(number, int):
        return str(float(number))
    elif isinstance(number, float):
        return str(number)
    else:
        msgs.error('The value entered is not a string or a number. Type: {}'.format(type(number)))
        return
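
The behaviour at a glance (hypothetical calls):

from_number_to_string(3)       # -> '3.0': ints go through float first
from_number_to_string(2.75)    # -> '2.75'
from_number_to_string('text')  # -> 'text': strings pass through unchanged
from_number_to_string(None)    # -> None
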
Example #23
    def __init__(self,
                 cards=None,
                 values=None,
                 from_fits=None,
                 which_hdu=0,
                 from_txt=None,
                 data_start=None,
                 data_end=None):
        r"""

        Given the variety of possible formats of the input, particular care should be used with the `from_txt` option.
        The options `data_start` and `data_end` can help with this, but if the format is not simple plain text
        with space/tab separated columns, reading the text file with another module and then storing the data with
        cards and values could be a safer option.

        Args:
            cards (`np.array` or `list`):
                array that you want to use as cards
            values (`np.array` or `list`):
                values associated to each element of `cards`. This implies that `cards` and `values` need to have the
                same length. But multiple sets of `values` can be associated to the same `cards`.
            from_fits (`str`):
                fits file name from which to read the header.
            which_hdu (`int`):
                select from which HDU you are getting the header. See `fitsfiles` in `ESOAsg.core` for further
                details.
            from_txt (`str`, `None`):
                Ascii file from which cards and values will be read. The assumption here is that `cards` are stored
                in the first column, and `values` in the second one. The options `data_start` and `data_end` will
                select only the (`data_end`-`data_start`) lines following the line `data_start`. This is the same
                option present in `ascii.read` from astropy.
            data_start (`int`, `None`):
                First line to be read from the file `from_txt`.
            data_end (`int`, `None`):
                Last line to be read from the file `from_txt`.

        Returns:
            a list object containing cards and corresponding values
        """

        # Loading from input
        if cards is not None:
            # Loading cards
            msgs.work('Loading cards.')
            _cards = _convert_to_numpy_array(cards)
            self.cards = _cards
            # Loading values
            if values is not None:
                _values = _convert_to_numpy_array(values)
                if np.shape(_values) == np.shape(_cards):
                    msgs.work('Loading values.')
                    self.values = _values
                else:
                    if np.ndim(_values) == 1:
                        if np.shape(_values[:]) == np.shape(_cards):
                            self.values = _values
                        else:
                            msgs.error(
                                'Values and Cards should have the same length.'
                            )
                    elif np.ndim(_values) == 2:
                        for index in np.arange(np.shape(_values)[0],
                                               dtype=np.int_):
                            if np.shape(_values[index, :]) != np.shape(_cards):
                                msgs.error(
                                    'Values and Cards should have the same length.'
                                )
                        self.values = _values
                    else:
                        msgs.error(
                            'Values and Cards should have the same length.')
            else:
                msgs.work('Values is empty.')
                self.values = np.array([])

        # Loading from fits file
        elif from_fits is not None:
            msgs.work('Loading header from fits file: {}'.format(from_fits))
            _hdu = fitsfiles.header_from_fits_file(from_fits,
                                                   which_hdu=which_hdu)
            _cards, _values = np.zeros_like(0), np.zeros_like(0)
            for index in list(_hdu.keys()):
                if 'COMMENT' not in index:
                    _cards = np.append(_cards, index)
                    _values = np.append(_values, _hdu[index])
            self.cards = np.array(_cards)
            self.values = np.array(_values)

        # Loading from text file
        elif from_txt is not None:
            msgs.work('Loading list from text file: {}'.format(from_txt))
            _full_table = ascii.read(from_txt,
                                     guess=True,
                                     data_start=data_start,
                                     data_end=data_end)
            self.cards = np.array(_full_table[_full_table.colnames[0]].data)
            if np.size(_full_table.colnames) < 2:
                self.values = np.array([])
            elif np.size(_full_table.colnames) >= 2:
                _values = np.array(_full_table[_full_table.colnames[1]].data)
                if np.size(_full_table.colnames) > 2:
                    for index in _full_table.colnames[2:]:
                        _values = np.vstack((_values, _full_table[index].data))
                self.values = _values

        # Creating empty object
        else:
            msgs.work('Creating empty lists.Lists object.')
            self.cards = np.array([])
            self.values = np.array([])

        return
Example #24
def get_header_from_archive(file_id,
                            text_file=None):  # written by Ema. 04.03.2020
    r"""Given a file ID the macro download the corresponding header.

    Args:
        file_id (`str`):
            ESO file ID for which the header will be downloaded
        text_file (`str`):
            text file where the header will be stored. If `None`, it will be set to the same
            string as `file_id` but with a `.hdr` extension.

    """

    # checks for connection to ESO archive
    archive_url = default.get_value('eso_archive_url')
    if not checks.connection_to_website(archive_url, timeout=1):
        msgs.error('Cannot connect to the ESO archive website:\n {}'.format(
            archive_url))

    # checks for file id
    assert isinstance(file_id, (list, str)), 'file_id needs to be a str or a list'
    if isinstance(file_id, str):
        list_of_files = [file_id]
    else:
        list_of_files = file_id
    list_of_files = [
        files if not files.endswith('.fits') else files.replace('.fits', '')
        for files in list_of_files
    ]

    # checks for text_file
    assert isinstance(text_file, (list, str, type(None), bytes)), 'text_file needs to be a str or a list'
    if isinstance(text_file, str):
        if len(list_of_files) == 1:
            list_of_outputs = [text_file]
        else:
            list_of_outputs = [output + text_file for output in list_of_files]
    elif isinstance(text_file, list):
        if len(list_of_files) == len(text_file):
            list_of_outputs = text_file
        else:
            list_of_outputs = [files + '.hdr' for files in list_of_files]
    else:
        list_of_outputs = [files + '.hdr' for files in list_of_files]

    # Downloading headers
    for file_name, file_out in zip(list_of_files, list_of_outputs):
        if os.path.isfile(file_out):
            msgs.warning('Overwriting existing text file: {}'.format(file_out))
            os.remove(file_out)
        url_for_header = archive_url + 'hdr?DpId=' + file_name
        response_url = requests.get(url_for_header, allow_redirects=True)
        # Removing html from text
        header_txt = response_url.text.split('<pre>')[1].split('</pre>')[0]
        if not header_txt.startswith('No info found for'):
            with open(file_out, 'w') as file_header:
                for line in header_txt.splitlines():
                    file_header.write(line + '\n')
            msgs.info('Header successfully saved in: {}'.format(file_out))
        else:
            msgs.warning(
                '{} is not present in the ESO archive'.format(file_name))

    return
Example #25
def transfer_header_cards(source_header,
                          output_header,
                          source_cards,
                          output_cards=None,
                          with_comment=True,
                          delete_card=True):
    r"""Transfer header cards from one header to another

    Cards, values (and optionally comments, if `with_comment`=`True`) from the header `source_header` will be
    transferred to the header `output_header`.
    `source_cards` is a list containing all the cards that need to be transferred. If `output_cards` is defined, the
    card `source_cards[i]` will be saved in the `output_header` as `output_cards[i]`. If `delete_card`=`True` the
    card will be removed from the `source_header`.

    .. note::
        Both `source_header` and `output_header` are modified in place, i.e. there is no backup option for the
        original values.
        If a card is not present in `source_header` a warning is raised and it will not be transferred to
        `output_header`

    Args:
        source_header (`hdu.header`):
            Header from which the cards will be taken.
        output_header (`hdu.header`):
            Header that will be modified with cards from `source_header`.
        source_cards (`list`):
            List of cards you want to transfer from `source_header` to `output_header`.
        output_cards (`list`):
            If not `None` the cards in `output_header` will be saved with the new names listed here.
        with_comment (`bool`):
            if true, also the associated comment will be copied
        delete_card (`bool`):
            if true, the card will be removed from the `source_header`

    Returns:
        `source_header` and `output_header` with updated values.
    """
    if output_cards is None:
        output_cards = source_cards
    if len(output_cards) != len(source_cards):
        msgs.error(
            "Incompatible length between output and source cards lists.")

    for source_card, output_card in zip(source_cards, output_cards):
        msgs.work("Transferring header card {} to {}.".format(
            source_card, output_card))
        if source_card not in source_header:
            msgs.warning(
                '{} not present in `source_header`. The card will not be transferred'
                .format(source_card))
            continue
        if with_comment:
            add_header_card(output_header,
                            output_card,
                            source_header[source_card],
                            comment=source_header.comments[source_card])
        else:
            add_header_card(output_header,
                            output_card,
                            source_header[source_card],
                            comment=None)
        if delete_card:
            del source_header[source_card]
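
A usage sketch (headers and card names are placeholders; `RA` is copied under the new name `RA_DEG` and kept in the source header):

# Hypothetical usage with two astropy headers already in memory.
transfer_header_cards(source_header, output_header,
                      ['OBJECT', 'RA'],
                      output_cards=['OBJECT', 'RA_DEG'],
                      with_comment=True, delete_card=False)
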
Example #26
def query_catalogue(table_name,
                    which_columns=None,
                    maxrec=default.get_value('maxrec')):
    r"""Query the ESO tap_cat service (link defined in `ESOAsg\default.txt`) for a specific catalogue.
    
    Args:
        table_name (`str`):
            Table to be queried. To check the full list of catalogues run `all_catalogues()`
        which_columns (`list`):
            List of the columns that you want to download. The full list of the columns in a table can be found
            running `columns_in_catalogue(table_name)`
        maxrec (`int`):
            Define the maximum number of entries that a single query can return. If set to `None` the
            value is set by the limit of the service. The default value is set in `ESOAsg\default.txt` as `max_rec`

    Returns:
        catalogue (`astropy.table`):
            result of the query to the TAP service
    """
    # Check for the table
    if not _is_table_at_eso(table_name):
        msgs.error('{} is not a valid table'.format(table_name))

    # Check for columns and select the one present in the table
    if which_columns is not None:
        test_columns = _are_columns_in_table(which_columns, table_name)
        good_columns = [
            which_column
            for which_column, test_column in zip(which_columns, test_columns)
            if test_column
        ]
        bad_columns = [
            which_column
            for which_column, test_column in zip(which_columns, test_columns)
            if not test_column
        ]
        if len(bad_columns) > 0:
            msgs.warning(
                'The following columns will be excluded from the query: {}'.
                format(bad_columns))
        if len(good_columns) == 0:
            good_columns = ['*']
            msgs.warning(
                'No valid column selected. All the columns will be queried')
    else:
        good_columns = ['*']

    # Create string of all columns for the query
    good_columns_string = ', '.join(str(good_column) for good_column in good_columns)
    # query
    query = '''
            SELECT 
                {} 
            FROM 
                {}
            '''.format(good_columns_string, table_name)

    # Obtaining query results
    result_from_query = _run_query(query, maxrec=maxrec, verbose=True)
    return result_from_query
Example #27
    # File_names
    input_fits_files = []
    for file_name in args.input_fits:
        if not checks.fits_file_is_valid(file_name):
            msgs.warning(
                'File {} is not a valid fits file. Skipping the procedure'.
                format(file_name))
        elif len(fitsfiles.get_hdul(file_name)) > 1:
            msgs.warning(
                '{} cube already processed? Skipping the procedure'.format(
                    file_name))
        else:
            input_fits_files.append(file_name)
    if len(input_fits_files) == 0:
        msgs.error('No valid file to process')

    # output
    if args.output is not None:
        overwrite = False
        if len(input_fits_files) == 1:
            output_fits_files = [
                output_fits_temp if output_fits_temp.endswith('.fits') else
                str(output_fits_temp) + '.fits'
                for output_fits_temp in [args.output[0]]
            ]
            output_fits_images = [
                output_fits_image.replace('.fits', '_whitelight.fits')
                for output_fits_image in output_fits_files
            ]
        else:
Example #28
    def compare_with(self,
                     second_list,
                     check_cards=None,
                     on_terminal=True,
                     on_file=None):
        r"""Compare two list objects. The code will go through all cards and values present in the
        input list (or the `check_cards` subset if set not to None) and compare with cards and values in the
        `second_list`.

        Args:
            second_list (`Lists`):
                List that you want to compare with the input list.
            check_cards (`np.array`):
                List of cards that need to be returned.
            on_terminal (`bool`):
                if `True`, the `cards` and respective `values` are printed on the terminal.
            on_file (`str`, `None`):
                if not `None`, `cards` and respective `values` will be stored in this text file

        Returns:
            first_cards (`np.array`):
                `cards` present in both the 1st and the 2nd lists.
            first_values (`np.array`):
                `values` from the 1st list associated to the `cards`
            second_values (`np.array`):
                `values` from the 2nd list associated to the `cards`
        """

        if not isinstance(second_list, Lists):
            msgs.error('The second list is not a Lists object.')
        if not isinstance(on_terminal, bool):
            msgs.error('The on_terminal option should be a bool.')

        if check_cards is None:
            check_cards, _ = self.get_cards(check_cards)

        # loading the first list:
        _first_cards, _first_values, _first_missing = self.get_values(
            check_cards=check_cards)
        msgs.work('Checking for cards in the second_list')
        # comparing with values from the second list:
        _second_cards, _second_values, _second_missing = second_list.get_values(
            check_cards=check_cards)

        # define where cards are present in both lists
        _overlap = np.isin(_first_cards, _second_cards)
        first_cards, first_values, first_missing = self.get_values(
            check_cards=_first_cards[_overlap])
        second_cards, second_values, second_missing = second_list.get_values(
            check_cards=_first_cards[_overlap])

        # Printing results
        if on_terminal:
            msgs.info(
                'Cards and values from both the first and the second list:')
        _print_1cards_2values(first_cards,
                              first_values,
                              second_values,
                              on_terminal=on_terminal,
                              on_file=on_file)

        if on_terminal:
            # Case of all cards present in the first and in the second lists
            if np.size(_first_missing) == 0 and np.size(_second_missing) == 0:
                msgs.info('All cards are present in both lists')
            # Case of some cards missing in the first list but present in the second:
            elif np.size(_first_missing) != 0 and np.size(
                    _second_missing) == 0:
                _only_second = np.isin(check_cards, _first_missing)
                _only_second_cards, _only_second_values, _only_second_missing = second_list.get_values(
                    check_cards=check_cards[_only_second])
                msgs.warning('{} Cards are missing in the first list,'.format(
                    np.size(_first_missing)))
                msgs.warning('but present in the second:')
                _print_1cards_1values(_only_second_cards,
                                      _only_second_values,
                                      on_terminal=on_terminal,
                                      on_file=None)
            # Case of some cards missing in the second list but present in the first:
            elif np.size(
                    _first_missing) == 0 and np.size(_second_missing) != 0:
                _only_first = np.isin(check_cards, _second_missing)
                _only_first_cards, _only_first_values, _only_first_missing = self.get_values(
                    check_cards=check_cards[_only_first])
                msgs.warning('{} Cards are missing in the second list,'.format(
                    np.size(_second_missing)))
                msgs.warning('but present in the first:')
                _print_1cards_1values(_only_first_cards,
                                      _only_first_values,
                                      on_terminal=on_terminal,
                                      on_file=None)
            # Case of some cards missing in the first and in the second list:
            else:
                _first_missing = ~np.isin(check_cards, _first_cards)
                _second_missing = ~np.isin(check_cards, _second_cards)
                _both_missing = _first_missing * _second_missing
                _first_only_missing = _first_missing * ~_both_missing
                _second_only_missing = _second_missing * ~_both_missing
                if np.size(check_cards[_both_missing]) != 0:
                    msgs.warning(
                        '{} Cards are missing from both lists:'.format(
                            np.size(check_cards[_both_missing])))
                    for both_missing in check_cards[_both_missing]:
                        print(' ----------> ' + both_missing)
                if np.size(check_cards[_second_only_missing]) != 0:
                    _only_second_cards, _only_second_values, _only_second_missing = self.get_values(
                        check_cards=check_cards[_second_only_missing])
                    msgs.warning(
                        '{} Cards are missing in the first list,'.format(
                            np.size(check_cards[_second_only_missing])))
                    msgs.warning('but present in the second:')
                    _print_1cards_1values(_only_second_cards,
                                          _only_second_values,
                                          on_terminal=on_terminal,
                                          on_file=None)
                if np.size(check_cards[_first_only_missing]) != 0:
                    _only_first_cards, _only_first_values, _only_first_missing = second_list.get_values(
                        check_cards=check_cards[_first_only_missing])
                    msgs.warning(
                        '{} Cards are missing in the second list,'.format(
                            np.size(check_cards[_first_only_missing])))
                    msgs.warning('but present in the first:')
                    _print_1cards_1values(_only_first_cards,
                                          _only_first_values,
                                          on_terminal=on_terminal,
                                          on_file=None)

        return first_cards, first_values, second_values
Example #29
def show_contours_from_gw_bayestar(file_name, contours=None, cmap='cylon', contours_color='C3',
                                   show_figure=True, save_figure=None, matplotlib_backend=None):
    r"""Show sky credibility from the input healpix map and plot the contours created with `contours_from_gw_bayestar`

    Args:
        file_name (`str`):
            Fits file containing the HEALPix mask you would like to show.
        contours (`list`):
            contours created with `contours_from_gw_bayestar`. If `None`, the 50% credible-level contours are
            computed on the fly
        cmap:
            colormap used to display the sky credibility
        contours_color:
            color used to draw the contours
        show_figure (`bool`):
            if `True`, the figure is displayed
        save_figure (`str` or `None`):
            if not `None`, file name to which the figure will be saved
        matplotlib_backend:
            if not `None`, the matplotlib backend to be used for plotting

    Returns:

    """
    # Some checks
    assert isinstance(file_name, str), '{} is not a valid string'.format(file_name)
    if not checks.fits_file_is_valid(file_name):
        msgs.error('{} not a valid fits file'.format(file_name))

    if contours is None:
        msgs.warning("No contours defined, showing `credible_level=50` contours")
        plot_contours = contours_from_gw_bayestar(file_name, credible_level=50.)
    else:
        plot_contours = contours

    # ToDo find a more elegant solution for this.
    # ligo.skymap.tool.ligo_skymap_contour introduces problems with the matplotlib backend. This is a workaround to
    # get it running.
    importlib.reload(matplotlib)
    from matplotlib import pyplot as plt
    if matplotlib_backend is None:
        matplotlib.use(STARTING_MATPLOTLIB_BACKEND)
        try:
            # check if the script runs on a notebook.
            # https://stackoverflow.com/questions/23883394/detect-if-python-script-is-run-from-an-ipython-shell-or
            # -run-from
            # -the-command-li
            __IPYTHON__
        except NameError:
            # Try to get a working gui
            # https://stackoverflow.com/questions/39026653/programmatically-choose-correct-backend-for-matplotlib-on
            # -mac-os-x
            gui_env = matplotlib.rcsetup.all_backends
            for gui in gui_env:
                try:
                    matplotlib.use(gui, force=True)
                    break
                except Exception:
                    continue
        else:
            matplotlib.use('nbAgg')
    else:
        if matplotlib_backend in matplotlib.rcsetup.all_backends:
            matplotlib.use(matplotlib_backend)
        else:
            msgs.error('{} is not a valid `matplotlib` backend'.format(matplotlib_backend))

    # Read map and get object name
    map_data, map_header = healpy.read_map(file_name, h=True, verbose=False, dtype=None)
    object_name_list = [value for name, value in map_header if name == 'OBJECT']
    if len(object_name_list) > 0:
        object_name = object_name_list[0]
    else:
        object_name = 'GW event - {}'.format(file_name)

    # start the plot
    # ToDo fix matplotlib error
    """ 
    plt.figure(figsize=(10., 7.))
    ax = plt.axes([0.1, 0.1, 0.9, 0.9], projection='astro degrees mollweide')

    ax.grid(True)
    ax.imshow_hpx(map_data, cmap=cmap, visible=True, zorder=1)
    ax.text(0.15, 0.95, object_name, horizontalalignment='center', verticalalignment='center',
            transform=ax.transAxes, bbox=dict(facecolor='orange', alpha=0.8), fontsize=24)
    # Convert the contour vertex coordinates from world to pixels and draw the contours
    w = wcs.WCS(ax.header)

    # Split a contour if two neighboring pixels are more distant than 10 degrees in RA
    split_step = (10 * (ax.get_xlim()[1] - ax.get_xlim()[0]) / 360.)
    for contour_world in plot_contours:
        contour_pix = w.wcs_world2pix(contour_world, 0)
        x_contours, y_contours = _array_split(contour_pix, split_step)
        for x_contour, y_contour in zip(x_contours, y_contours):
            ax.plot(x_contour, y_contour, linewidth=2.5, color=contours_color, zorder=5)  # , marker='o')

    if save_figure is not None:
        plt.savefig(save_figure, dpi=200., format='pdf', bbox_inches='tight')

    if show_figure:
        plt.show()

    plt.close()
    """

    # Bringing back the previously used matplotlib backend
    matplotlib.use(STARTING_MATPLOTLIB_BACKEND)
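
A minimal usage sketch (hedged: the file names below are placeholders, and `contours_from_gw_bayestar` is the companion function referenced in the docstring; note that the plotting body is currently disabled by the ToDo block above, so only the map reading and backend handling run):

contours_90 = contours_from_gw_bayestar('bayestar.fits.gz', credible_level=90.)
show_contours_from_gw_bayestar('bayestar.fits.gz', contours=contours_90,
                               save_figure='skymap.pdf', show_figure=False)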
Exemple #30
0
def main(args):
    import numpy as np
    import os
    import shutil
    from astropy.coordinates import SkyCoord
    from astropy.coordinates import name_resolve
    from astropy import units as u
    from astropy.io import fits
    from ESOAsg.ancillary import cleaning_lists
    from ESOAsg.core import fitsfiles
    from ESOAsg import msgs

    # Cleaning input lists
    input_fits_files = cleaning_lists.make_list_of_fits_files(args.input_fits)
    # Make whitelight images
    if args.whitelight:
        make_whitelight_image = True
    else:
        make_whitelight_image = False
    # Creating output list
    if args.suffix is None:
        overwrite = True
        msgs.warning('No suffix provided: the code will overwrite the input files')
    else:
        overwrite = True
    suffix_string = cleaning_lists.make_string(args.suffix)
    output_fits_files = cleaning_lists.make_list_of_fits_files_and_append_suffix(
        input_fits_files, suffix=suffix_string)
    if make_whitelight_image:
        output_whitelight_files = cleaning_lists.make_list_of_fits_files_and_append_suffix(
            input_fits_files, suffix=suffix_string + '_WL')
    else:
        output_whitelight_files = [None] * len(input_fits_files)
    '''
    # reference
    if args.referenc is not None:
        reference = str(args.referenc[0])
    else:
        reference = str(' ')

    # fluxcal
    if args.fluxcal == 'ABSOLUTE':
        fluxcal = 'ABSOLUTE'
    elif args.fluxcal == 'UNCALIBRATED':
        fluxcal = 'UNCALIBRATED'
    else:
        msgs.error('Possible values for fluxcal are: `ABSOLUTE` or `UNCALIBRATED`')

    # abmaglim
    if args.abmaglim is not None:
        abmaglim = args.abmaglim
        assert isinstance(abmaglim, (int, np.float_)), 'ABMAGLIM must be a float'
        if abmaglim < 0:
            msgs.error('ABMAGLIM must be positive')
    else:
        abmaglim = np.float_(-1.)
    '''

    msgs.start()

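    # Loop over the input files: back up any pre-existing outputs, then fix each header in place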
    for fits_in, fits_out, image_out in zip(input_fits_files,
                                            output_fits_files,
                                            output_whitelight_files):
        if os.path.exists(fits_out):
            shutil.copy(fits_out, fits_out.replace('.fit', '_old.fit'))
            msgs.warning('{} already exists. Backup created.'.format(fits_out))
        if image_out is not None:
            if os.path.exists(image_out):
                shutil.copy(image_out, image_out.replace('.fit', '_old.fit'))
                msgs.warning(
                    '{} already exists. Backup created.'.format(image_out))

        full_hdul = fitsfiles.get_hdul(fits_in)
        try:
            instrument = full_hdul[0].header['HIERARCH ESO SEQ ARM']
        except KeyError:
            msgs.error(
                'Failed to read the keyword HIERARCH ESO SEQ ARM from the primary header'
            )
        else:
            if instrument in SUPPORTED_INSTRUMENT:
                msgs.info(
                    'The input file is from SPHERE/{}'.format(instrument))
            else:
                msgs.warning(
                    'Instrument SPHERE/{} not supported'.format(instrument))

        # ToDo
        # These needs to be transformed in objects
        if instrument.startswith('IFS'):
            msgs.work('Fixing header for SPHERE/{} file {}'.format(
                instrument, fits_in))

            # Create a copy of the file where there is a primary HDU and data are in the 'DATA' HDU
            msgs.work('Reshaping cube into PrimaryHEADER and Data Header')
            fitsfiles.new_fits_like(fits_in, [0],
                                    fits_out,
                                    overwrite=overwrite,
                                    fix_header=True)
            hdul = fitsfiles.get_hdul(fits_out, 'update', checksum=True)
            hdr0 = hdul[0].header
            hdr1 = hdul[1].header

            # Check for HISTORY
            # Primary Header
            if 'HISTORY' in hdr0.keys():
                history_cards_hdr0 = [
                    history_card_hdr0 for history_card_hdr0 in hdr0
                    if history_card_hdr0.startswith('HISTORY')
                ]
                history_values_hdr0 = [
                    hdr0[history_card_hdr0] for history_card_hdr0 in hdr0
                    if history_card_hdr0.startswith('HISTORY')
                ]
                for history_card_hdr0, history_value_hdr0 in zip(
                        history_cards_hdr0, history_values_hdr0):
                    msgs.work('Cleaning cards: {} = {}'.format(
                        history_card_hdr0, history_value_hdr0))
                del hdr0['HISTORY'][:]
            # Data Header
            if 'HISTORY' in hdr1.keys():
                history_values_hdr1 = hdr1['HISTORY'][:]
                for history_number in range(0, len(history_values_hdr1)):
                    clean_history = cleaning_lists.remove_non_ascii(
                        history_values_hdr1[history_number])
                    if len(clean_history) > 0:
                        hdr1['HISTORY'][history_number] = str(clean_history)
                    else:
                        hdr1['HISTORY'][history_number] = str(' ')

            # Update cards for headers:
            # Updating values with different CARD in the header
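            # Remap the WCS of axis 4 (the cube's spectral axis) onto axis 3 in the output header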
            cards_input = [
                'CRPIX4', 'CRVAL4', 'CTYPE4', 'CUNIT4', 'CD4_4', 'CD1_4',
                'CD2_4', 'CD4_1', 'CD4_2'
            ]
            cards_output = [
                'CRPIX3', 'CRVAL3', 'CTYPE3', 'CUNIT3', 'CD3_3', 'CD1_3',
                'CD2_3', 'CD3_1', 'CD3_2'
            ]
            fitsfiles.transfer_header_cards(hdr1,
                                            hdr1,
                                            cards_input,
                                            output_cards=cards_output,
                                            delete_card=True)
            # Remove not used values
            cards_to_be_removed_hdr1 = ['CD4_3', 'CD3_4']
            for card_to_be_removed_hdr1 in cards_to_be_removed_hdr1:
                hdr1.remove(card_to_be_removed_hdr1, ignore_missing=True)

            # Transfer cards from HDU1 to the PrimaryHDU
            not_to_be_transfer = [
                hdr1_card for hdr1_card in hdr1
                if hdr1_card.startswith('COMMENT') or hdr1_card.startswith(
                    'EXTNAME') or hdr1_card.startswith('BITPIX') or
                hdr1_card.startswith('NAXIS') or hdr1_card.startswith('CRPIX')
                or hdr1_card.startswith('CRVAL') or hdr1_card.startswith(
                    'CDELT') or hdr1_card.startswith('CTYPE')
                or hdr1_card.startswith('CD1_') or hdr1_card.startswith('CD2_')
                or hdr1_card.startswith('CD3_') or hdr1_card.startswith(
                    'CUNIT') or hdr1_card.startswith('CSYER') or hdr1_card.
                startswith('HDUCLAS') or hdr1_card.startswith('XTENSION')
                or hdr1_card.startswith('PCOUNT') or hdr1_card.startswith(
                    'GCOUNT') or hdr1_card.startswith('HDUDOC') or hdr1_card.
                startswith('HDUVER') or hdr1_card.startswith('HISTORY')
            ]
            cards_to_be_transfer = []
            for hdr1_card in hdr1:
                if hdr1_card not in not_to_be_transfer:
                    cards_to_be_transfer.append(hdr1_card)
            fitsfiles.transfer_header_cards(hdr1,
                                            hdr0,
                                            cards_to_be_transfer,
                                            with_comment=True,
                                            delete_card=True)

            # Try to guess coordinates
            if 'CRVAL1' not in hdr1.keys():
                msgs.warning('CRVAL position keywords not present')
                if 'OBJECT' in hdr0.keys():
                    try:
                        object_coordinate = SkyCoord.from_name(
                            str(hdr0['OBJECT']).strip())
                        ra_obj, dec_obj = object_coordinate.ra.degree, object_coordinate.dec.degree
                        if 'RA' in hdr0.keys() and 'DEC' in hdr0.keys():
                            pointing_coordinate = SkyCoord(float(hdr0['RA']),
                                                           float(hdr0['DEC']),
                                                           unit='deg')
                            msgs.work(
                                'Testing separation from the pointing position'
                            )
                            separation = object_coordinate.separation(
                                pointing_coordinate).arcsec
                            if separation < 120.:
                                msgs.info('Object - Pointing separation is {}'.
                                          format(separation))
                                msgs.info(
                                    'Updating CRVAL1 = {}'.format(ra_obj))
                                msgs.info(
                                    'Updating CRVAL2 = {}'.format(dec_obj))
                                hdr1['CRVAL1'] = ra_obj
                                hdr1['CRVAL2'] = dec_obj
                                msgs.work('Updating CUNIT')
                                hdr1['CUNIT1'] = 'deg'
                                hdr1['CUNIT2'] = 'deg'
                                msgs.work('Updating CTYPE')
                                hdr1['CTYPE1'] = 'RA---TAN'
                                hdr1['CTYPE2'] = 'DEC--TAN'
                                msgs.work('Updating CRPIX')
                                hdr1['CRPIX1'] = float(
                                    hdul[1].data.shape[2]) / 2.
                                hdr1['CRPIX2'] = float(
                                    hdul[1].data.shape[1]) / 2.
                                msgs.info('Updating CD1 and CD2')
                                hdr1['CD1_1'] = 2.06E-06
                                hdr1['CD2_2'] = 2.06E-06
                                hdr1['CD1_2'] = 0.
                                hdr1['CD2_1'] = 0.
                                msgs.work('Updating RA, DEC')
                                hdr0['RA'] = ra_obj
                                hdr0.comments[
                                    'RA'] = object_coordinate.ra.to_string(
                                        u.hour)
                                hdr0['DEC'] = dec_obj
                                hdr0.comments[
                                    'DEC'] = object_coordinate.dec.to_string(
                                        u.degree, alwayssign=True)
                            else:
                                msgs.warning(
                                    'Object - Pointing separation is {}'.
                                    format(separation))
                                msgs.warning(
                                    'This is suspicious, CRVAL not updated')
                    except name_resolve.NameResolveError:
                        msgs.warning('Object {} not recognized'.format(
                            str(hdr0['OBJECT']).strip()))
                        msgs.warning('CRVAL not updated')

            # Updating file prodcatg
            msgs.work('Updating PRODCATG to SCIENCE.CUBE.IFS')
            hdr0['PRODCATG'] = str('SCIENCE.CUBE.IFS')
            # Some more updates
            msgs.work('Setting NAXIS = 0 in primary header')
            hdr0['NAXIS'] = 0
            if 'OBSTECH' not in hdr0.keys():
                msgs.warning('OBSTECH missing')
                if 'ESO PRO TECH' in hdr0.keys():
                    msgs.info('Deriving OBSTECH from HIERARCH ESO PRO TECH')
                    msgs.work('Updating OBSTECH to {}'.format(
                        str(hdr0['HIERARCH ESO PRO TECH'])))
                    hdr0['OBSTECH'] = str(hdr0['HIERARCH ESO PRO TECH'])
            if 'EXPTIME' not in hdr0.keys():
                msgs.warning('EXPTIME missing')
                if 'ESO DET SEQ1 REALDIT' in hdr0.keys(
                ) and 'ESO DET NDIT' in hdr0.keys():
                    msgs.info('Deriving EXPTIME and TEXPTIME as REALDIT * NDIT')
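                    # EXPTIME [s] = single integration time (REALDIT) x number of integrations (NDIT)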
                    hdr0['EXPTIME'] = hdr0[
                        'HIERARCH ESO DET SEQ1 REALDIT'] * hdr0[
                            'HIERARCH ESO DET NDIT']
                    hdr0['TEXPTIME'] = hdr0[
                        'HIERARCH ESO DET SEQ1 REALDIT'] * hdr0[
                            'HIERARCH ESO DET NDIT']
                    msgs.work('Updating EXPTIME to {}'.format(
                        str(hdr0['EXPTIME'])))
                    msgs.work('Updating TEXPTIME to {}'.format(
                        str(hdr0['TEXPTIME'])))
            if 'WAVELMIN' not in hdr0.keys():
                msgs.warning('WAVELMIN missing')
                z_pixel = np.arange(int(hdul[1].data.shape[0]))
                z_wave = float(
                    hdr1['CRVAL3']) + (z_pixel * float(hdr1['CD3_3']))
                if str(hdr1['CUNIT3']).strip().upper() == 'MICRONS':
                    msgs.info('Deriving WAVELMIN and WAVELMAX from CRVAL3 and CD3_3')
                    z_wave = z_wave * 1000.  # convert to nanometers
                    hdr0['WAVELMIN'] = np.nanmin(z_wave)
                    hdr0['WAVELMAX'] = np.nanmax(z_wave)
                    msgs.work('Updating WAVELMIN to {}'.format(
                        str(hdr0['WAVELMIN'])))
                    msgs.work('Updating WAVELMAX to {}'.format(
                        str(hdr0['WAVELMAX'])))
                else:
                    msgs.warning(
                        'Unknown units {}. WAVELMIN and WAVELMAX not calculated'
                        .format(str(hdr1['CUNIT3'])))
            if 'SPEC_RES' not in hdr0.keys():
                msgs.warning('SPEC_RES missing')
                if 'WAVELMAX' in hdr0.keys():
                    msgs.info('Deriving SPEC_RES from WAVELMAX')
                    if (float(hdr0['WAVELMAX']) > 1300.) and (float(
                            hdr0['WAVELMAX']) < 1400.):
                        hdr0['SPEC_RES'] = 50.
                        msgs.work('Updating SPEC_RES to {}'.format(
                            str(hdr0['SPEC_RES'])))
                    elif (float(hdr0['WAVELMAX']) > 1600.) and (float(
                            hdr0['WAVELMAX']) < 1700.):
                        hdr0['SPEC_RES'] = 30.
                        msgs.work('Updating SPEC_RES to {}'.format(
                            str(hdr0['SPEC_RES'])))
                    else:
                        msgs.warning('WAVELMAX = {} is not in the expected '
                                     'range of possible values'.format(
                                         str(hdr0['WAVELMAX'])))
            if 'PROG_ID' not in hdr0.keys():
                msgs.warning('PROG_ID missing')
                if 'ESO OBS PROG ID' in hdr0.keys():
                    msgs.info('Deriving PROG_ID from HIERARCH ESO OBS PROG ID')
                    msgs.work('Updating PROG_ID to {}'.format(
                        str(hdr0['HIERARCH ESO OBS PROG ID'])))
                    hdr0['PROG_ID'] = str(hdr0['HIERARCH ESO OBS PROG ID'])
            if 'MJD-END' not in hdr0.keys():
                msgs.warning('MJD-END missing')
                if 'TEXPTIME' in hdr0.keys():
                    msgs.info('Deriving MJD-END from MJD-OBS and TEXPTIME')
                    texptime_sec = float(hdr0['TEXPTIME'])
                    texptime_day = texptime_sec / (60. * 60. * 24.)
                    mjdend = float(hdr0['MJD-OBS']) + texptime_day
                    fitsfiles.add_header_card(hdr0, 'MJD-END', mjdend,
                                              'End of observation')
                    msgs.work('MJD-OBS = {} and TEXPTIME = {} days'.format(
                        str(hdr0['MJD-OBS']), str(texptime_day)))
                    msgs.work('Updating MJD-END to {}'.format(
                        str(hdr0['MJD-END'])))

            # Remove not used values
            cards_to_be_removed_hdr0 = ['ERRDATA', 'QUALDATA', 'SCIDATA']
            for card_to_be_removed_hdr0 in cards_to_be_removed_hdr0:
                hdr0.remove(card_to_be_removed_hdr0, ignore_missing=True)
            cards_to_be_removed_hdr1 = ['HDUCLASS3']
            for card_to_be_removed_hdr1 in cards_to_be_removed_hdr1:
                hdr1.remove(card_to_be_removed_hdr1, ignore_missing=True)

            # Updating the FITS file definition comment line
            hdr0.add_comment(
                "  FITS (Flexible Image Transport System) format is defined in 'Astronomy"
                " and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H",
                after='EXTEND')
            if 'COMMENT' in hdr1.keys():
                comment_values_hdr1 = hdr1['COMMENT'][:]
                for comment_value_hdr1 in comment_values_hdr1:
                    msgs.work('Removing COMMENT card : {}'.format(
                        comment_value_hdr1))
                hdr1.remove('COMMENT', ignore_missing=True, remove_all=True)

            # Creating white light image keyword:
            if make_whitelight_image:
                fitsfiles.add_header_card(
                    hdr0, 'ASSON1',
                    image_out.split('/')[-1],
                    'ANCILLARY.IMAGE.WHITELIGHT filename')
                msgs.work('Updating ASSON1 to {}'.format(hdr0['ASSON1']))

            # Actually creating the white-light image
            if make_whitelight_image:
                msgs.info('Making white light image')
                image_hdu = fits.PrimaryHDU()
                image_hdul = fits.HDUList([image_hdu])
                if str(hdr1['CUNIT3']).strip().upper() == 'MICRONS':
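                    # 1 micron = 1.0e4 Angstrom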
                    to_ang = 10000.
                else:
                    msgs.error('Spectral unit: {} not recognized'.format(
                        hdr1['CUNIT3']))
                delta_wave_bin = hdr1['CD3_3']
                image_hdul.append(
                    fits.ImageHDU(
                        to_ang * delta_wave_bin *
                        np.nansum(hdul[1].data, axis=0, dtype=float)))
                image_hdr0 = image_hdul[0].header
                image_hdr1 = image_hdul[1].header
                card_for_image0 = [
                    'WAVELMIN', 'WAVELMAX', 'OBJECT', 'TELESCOP', 'INSTRUME',
                    'RADECSYS', 'RA', 'DEC', 'EQUINOX'
                ]
                fitsfiles.transfer_header_cards(hdr0,
                                                image_hdr0,
                                                card_for_image0,
                                                with_comment=True,
                                                delete_card=False)
                image_hdr0['PRODCATG'] = str('ANCILLARY.IMAGE.WHITELIGHT')

                card_for_image1 = [
                    'CRPIX1', 'CRPIX2', 'CRVAL1', 'CRVAL2', 'CUNIT1', 'CUNIT2',
                    'NAXIS1', 'NAXIS2', 'EXTNAME', 'CD1_1', 'CD1_2', 'CD2_1',
                    'CD2_2', 'CTYPE1', 'CTYPE2'
                ]
                fitsfiles.transfer_header_cards(hdr1,
                                                image_hdr1,
                                                card_for_image1,
                                                with_comment=True,
                                                delete_card=False)

            # Update checksum and datasum
            msgs.work('Updating checksum and datasum')
            hdul[0].add_checksum(override_datasum=False)
            hdul[1].add_datasum()
            hdul[1].add_checksum(override_datasum=True)
            hdul.flush(output_verify='fix')
            hdul.close()
            msgs.info('File {} produced.'.format(fits_out))
            if make_whitelight_image:
                image_hdul[0].add_datasum()
                image_hdul[1].add_datasum()
                image_hdul[0].add_checksum(override_datasum=True)
                image_hdul[1].add_checksum(override_datasum=True)
                image_hdul.writeto(image_out,
                                   overwrite=True,
                                   output_verify='fix')
                msgs.info('Image {} produced.'.format(image_out))

        elif instrument.startswith('IRDIS'):
            msgs.work('Fixing header for SPHERE/{} file {}'.format(
                instrument, fits_in))
            hdr = fitsfiles.header_from_fits_file(fits_in)
            if 'ESO DPR TECH' in hdr.keys():
                if str(hdr['ESO DPR TECH']).strip(
                ) == 'IMAGE,DUAL,CORONOGRAPHY':
                    msgs.work('Working with {} as observing technique'.format(
                        str(hdr['ESO DPR TECH']).strip()))
                elif 'DUAL' in str(hdr['ESO DPR TECH']).strip():
                    msgs.error('{} needs to be tested'.format(
                        str(hdr['ESO DPR TECH']).strip()))
                else:
                    msgs.error('Only DUAL imaging currently implemented')
            else:
                msgs.error('Cannot recognize the observing technique')

            # defining the two fits_out files:
            fits_out_index = [0, 1]
            fits_out_files = []
            for index in fits_out_index:
                fits_out_file = fits_out.replace('.fit',
                                                 '_' + str(index) + '.fit')
                if os.path.exists(fits_out_file):
                    shutil.copy(fits_out_file,
                                fits_out_file.replace('.fit', '_old.fit'))
                    msgs.warning(
                        '{} already exists. Backup created.'.format(fits_out_file))
                fitsfiles.new_fits_like(fits_in, [0],
                                        fits_out_file,
                                        overwrite=overwrite,
                                        fix_header=True,
                                        empty_primary_hdu=False)
                fits_out_files.append(fits_out_file)

            for index, fits_out_file in zip(fits_out_index, fits_out_files):
                hdul = fitsfiles.get_hdul(fits_out_file,
                                          'update',
                                          checksum=True)
                hdr0 = hdul[0].header
                hdul[0].data = hdul[0].data[index, :, :]
                # Check for HISTORY
                # Primary Header
                if 'HISTORY' in hdr0.keys():
                    history_cards_hdr0 = [
                        history_card_hdr0 for history_card_hdr0 in hdr0
                        if history_card_hdr0.startswith('HISTORY')
                    ]
                    history_values_hdr0 = [
                        hdr0[history_card_hdr0] for history_card_hdr0 in hdr0
                        if history_card_hdr0.startswith('HISTORY')
                    ]
                    for history_card_hdr0, history_value_hdr0 in zip(
                            history_cards_hdr0, history_values_hdr0):
                        msgs.work('Cleaning cards: {} = {}'.format(
                            history_card_hdr0, history_value_hdr0))
                    del hdr0['HISTORY'][:]
                # Try to guess coordinates
                if 'CRVAL1' not in hdr0.keys():
                    msgs.warning('CRVAL position keywords not present')
                    if 'OBJECT' in hdr0.keys():
                        try:
                            object_coordinate = SkyCoord.from_name(
                                str(hdr0['OBJECT']).strip())
                            ra_obj, dec_obj = object_coordinate.ra.degree, object_coordinate.dec.degree
                            if 'RA' in hdr0.keys() and 'DEC' in hdr0.keys():
                                pointing_coordinate = SkyCoord(
                                    float(hdr0['RA']),
                                    float(hdr0['DEC']),
                                    unit='deg')
                                msgs.work(
                                    'Testing separation from the pointing position'
                                )
                                separation = object_coordinate.separation(
                                    pointing_coordinate).arcsec
                                if separation < 120.:
                                    msgs.info(
                                        'Object - Pointing separation is {}'.
                                        format(separation))
                                    msgs.info(
                                        'Updating CRVAL1 = {}'.format(ra_obj))
                                    msgs.info(
                                        'Updating CRVAL2 = {}'.format(dec_obj))
                                    hdr0['CRVAL1'] = ra_obj
                                    hdr0['CRVAL2'] = dec_obj
                                    msgs.work('Updating CUNIT')
                                    hdr0['CUNIT1'] = 'deg'
                                    hdr0['CUNIT2'] = 'deg'
                                    msgs.work('Updating CTYPE')
                                    hdr0['CTYPE1'] = 'RA---TAN'
                                    hdr0['CTYPE2'] = 'DEC--TAN'
                                    msgs.work('Updating CRPIX')
                                    hdr0['CRPIX1'] = float(
                                        hdul[0].data.shape[1]) / 2.
                                    hdr0['CRPIX2'] = float(
                                        hdul[0].data.shape[0]) / 2.
                                    msgs.info('Updating CD1 and CD2')
                                    hdr0['CD1_1'] = hdr0[
                                        'PIXSCAL'] * 2.778E-4 / 1000.
                                    hdr0['CD2_2'] = hdr0[
                                        'PIXSCAL'] * 2.778E-4 / 1000.
                                    hdr0['CD1_2'] = 0.
                                    hdr0['CD2_1'] = 0.
                                    msgs.work('Updating RA, DEC')
                                    hdr0['RA'] = ra_obj
                                    hdr0.comments[
                                        'RA'] = object_coordinate.ra.to_string(
                                            u.hour)
                                    hdr0['DEC'] = dec_obj
                                    hdr0.comments[
                                        'DEC'] = object_coordinate.dec.to_string(
                                            u.degree, alwayssign=True)
                                else:
                                    msgs.warning(
                                        'Object - Pointing separation is {}'.
                                        format(separation))
                                    msgs.warning(
                                        'This is suspicious, CRVAL not updated'
                                    )
                        except name_resolve.NameResolveError:
                            msgs.warning('Object {} not recognized'.format(
                                str(hdr0['OBJECT']).strip()))
                            msgs.warning('CRVAL not updated')

                # Updating file prodcatg
                msgs.work('Updating PRODCATG to SCIENCE.IMAGE')
                hdr0['PRODCATG'] = str('SCIENCE.IMAGE')

                if 'PROG_ID' not in hdr0.keys():
                    msgs.warning('PROG_ID missing')
                    if 'ESO OBS PROG ID' in hdr0.keys():
                        msgs.info(
                            'Deriving PROG_ID from HIERARCH ESO OBS PROG ID')
                        msgs.work('Updating PROG_ID to {}'.format(
                            str(hdr0['HIERARCH ESO OBS PROG ID'])))
                        hdr0['PROG_ID'] = str(hdr0['HIERARCH ESO OBS PROG ID'])

                # Update checksum and datasum
                msgs.work('Updating checksum and datasum')
                hdul[0].add_checksum(override_datasum=False)
                hdul.flush(output_verify='fix')
                hdul.close()
        else:
            msgs.warning(
                'The instrument {} is not supported.\nThe file {} will not be processed'
                .format(instrument, fits_in))

    msgs.end()
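
A hedged sketch of how this entry point might be invoked (the `Namespace` attributes mirror the ones `main` reads above; the file name is a placeholder and the module-level `SUPPORTED_INSTRUMENT` list is assumed to be defined):

if __name__ == '__main__':
    from argparse import Namespace
    # Hypothetical arguments; in the real macro they come from a parse_arguments()-style helper
    args = Namespace(input_fits=['sphere_cube.fits'], suffix='_fixed', whitelight=True)
    main(args)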