def run_query(tap_service, query, type_of_query, maxrec=default.get_value('maxrec')):
    r"""Run a query to a TAP service and return the result as an `astropy.Table`

    If the job takes too much time to run, the code will move to an asynchronous query.

    Args:
        tap_service (pyvo.dal.tap.TAPService): TAP service that will be used for the query
        query (str): query to be run
        type_of_query (str): type of query to be run
        maxrec (int): maximum number of entries that a single query can return. Default is
            set by default.get_value('maxrec')

    Returns:
        astropy.table: result from the query to the TAP service

    """
    if type_of_query not in TAP_QUERY_TYPES:
        msgs.error('{} not a valid entry for the type of TAP query. Possibilities are: {}'.format(
            type_of_query, TAP_QUERY_TYPES))
    # Obtain the query result and convert it to an astropy table
    if query is not None:
        if type_of_query == 'sync':
            result_from_query = run_query_sync(tap_service, query, maxrec=maxrec)
        else:
            result_from_query = run_query_async(tap_service, query, maxrec=maxrec)
    else:
        msgs.warning('Empty query provided')
        result_from_query = None
    return result_from_query
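# A minimal usage sketch (the query is a hypothetical example; assumes `pyvo` is
# installed and the ESO TAP endpoint below is reachable):
#
#   import pyvo
#   tap_service = pyvo.dal.tap.TAPService('http://archive.eso.org/tap_obs')
#   result = run_query(tap_service, 'SELECT TOP 5 * FROM ivoa.ObsCore', 'sync')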
def _is_column_in_catalogues(column_name, collections=None, tables=None):
    r"""Check if a given column is present in the ESO archive

    Args:
        column_name (str): column to be tested
        collections (any): list of `str` containing the names of the collections (or a
            single `str`) from which the columns will be extracted
        tables (any): list of `str` (or a single `str`) containing the names of the tables
            from which the columns will be extracted

    Returns:
        bool: `True` if the column is present in the selected collections/tables. `False`
            and warning raised otherwise

    """
    is_at_eso = True
    table_all_columns = columns_info(collections=collections, tables=tables, verbose=False)
    all_column_list = table_all_columns['column_name'].data.data.tolist()
    if column_name not in all_column_list:
        msgs.warning('Column: {} not recognized. Possible values are:\n{}'.format(
            column_name, all_column_list))
        is_at_eso = False
    return is_at_eso
def check_value(value):  # written by Ema 05.03.2020
    r"""Guess the best type for a header value. This is based on `ast.literal_eval`

    Args:
        value (`str`): input string value

    Returns:
        value (`str`, `int`, `float`, `bool`): output value with (hopefully) the correct type

    """
    special_char = re.compile('[@_!#$%^&*()<>?/\\|}{~:]')
    if value is not None:
        if value == 'T':
            value = True
        elif value == 'F':
            value = False
        elif special_char.search(value) is not None:
            value = str(value)
        else:
            try:
                value = ast.literal_eval(value)
            except (ValueError, SyntaxError):
                # `literal_eval` raises SyntaxError for strings it cannot parse
                msgs.warning('Cannot recognize format')
    return value
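# Expected behaviour (a sketch; not exhaustive):
#
#   check_value('T')      # -> True
#   check_value('2148')   # -> 2148
#   check_value('1.0')    # -> 1.0
#   check_value("'ESO'")  # -> 'ESO'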
def make_list_of_int(args_input, length=None):
    r"""Clean an input list of ints

    Args:
        args_input (`list`): input list of values that will be checked (usually coming from
            `parse_arguments()` in a macro).
        length (`int`): if set to a value, the code will check that the length of
            `list_of_int` matches `length`. If not, the code will further check if
            `args_input` contains only one element. In this case the output list will
            contain this value `length` times. If this situation does not happen, an error
            is raised.

    Returns:
        list_of_int (`list`): list containing all the valid ints given in input

    """
    if length is not None:
        assert isinstance(length, int), '`length` must be an integer'
    list_of_int = []
    if not isinstance(args_input, list):
        args_input_values = [args_input]
    else:
        args_input_values = args_input
    for args_input_value in args_input_values:
        if isinstance(args_input_value, int):
            list_of_int.append(args_input_value)
        else:
            msgs.warning('{} excluded because not a valid int'.format(args_input_value))
    if len(list_of_int) == 0:
        msgs.error('No valid element present in the list')
    if length is not None:
        if len(list_of_int) != length:
            if len(list_of_int) == 1:
                list_of_int = [list_of_int[0]] * length
            else:
                msgs.error('List length: {} not matching {}'.format(len(list_of_int), length))
    return list_of_int
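# Expected behaviour (a sketch):
#
#   make_list_of_int([1, 2, 3])    # -> [1, 2, 3]
#   make_list_of_int(5, length=3)  # -> [5, 5, 5]
#   make_list_of_int([1, 'a', 3])  # warning on 'a' -> [1, 3]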
def check_disk_space(min_disk_space=float(default.get_value('min_disk_space'))) -> bool:
    r"""Check that there is enough space on the disk where the code is running

    Given a disk-space limit in GB, the macro returns `True` if the disk where the code is
    running has more free GB than the given limit.

    .. warning::
        The current implementation checks the disk where the code is running (i.e., from
        the directory: `./`). This may cause some troubles with shared disks.

    Args:
        min_disk_space (float): size of free space on disk required

    Returns:
        bool: `True` if there is enough space on disk

    """
    total, used, free = shutil.disk_usage("./")
    total = total / (1024.**3.)
    used = used / (1024.**3.)
    free = free / (1024.**3.)
    msgs.info('Your disk has:')
    msgs.info('Total: {:.2f} GB, Used: {:.2f} GB, Free: {:.2f} GB'.format(total, used, free))
    if free > min_disk_space:
        enough_space = True
    else:
        enough_space = False
        msgs.warning('Not enough space on disk')
    return enough_space
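# A minimal usage sketch (the 10. GB threshold is an arbitrary example value):
#
#   if check_disk_space(min_disk_space=10.):
#       # safe to proceed with, e.g., a large download
#       ...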
def get_values(self, check_cards=None):
    r"""Returns the values associated with the cards in a list.

    If `check_cards` is defined, only the subset of values selected will be returned.

    Args:
        check_cards (`np.array`): list of cards that needs to be returned.

    Returns:
        selected_cards (`np.array`): cards present in the list.
        selected_values (`np.array`): values associated with `check_cards`.
        missing_cards (`np.array`): subset of `check_cards` not present in the list. It is
            empty if all cards are present.

    """
    selected_cards, missing_cards = self.get_cards(check_cards=check_cards)
    if check_cards is not None:
        _check_cards = np.isin(self.cards, _convert_to_numpy_array(check_cards))
        if np.size(self.values) == 0:
            msgs.warning('No values present in the list.')
            selected_values = np.array([])
        else:
            if np.ndim(self.values) < 2:
                selected_values = np.copy(self.values[_check_cards])
            else:
                selected_values = np.copy(self.values[0, :][_check_cards])
                for index in np.arange(1, np.size(self.values[:, 0])):
                    selected_values = np.vstack(
                        (selected_values, np.copy(self.values[index, :][_check_cards])))
    else:
        selected_values = np.array([])
    return selected_cards, selected_values, missing_cards
def _is_table_at_eso(table_name):
    r"""Check if a given table is present at ESO

    Args:
        table_name (str): table to be tested.

    Returns:
        bool: `True` if the table is present in the ESO archive. `False` and warning raised
            otherwise.

    """
    is_at_eso = True
    table_all_catalogues = all_catalogues_info(verbose=False, all_versions=True)
    all_table_list = table_all_catalogues['table_name'].data.data.tolist()
    last_version_list = table_all_catalogues['last_version'].data.data.tolist()
    if table_name not in all_table_list:
        msgs.warning('Table: {} not recognized. Possible values are:\n{}'.format(
            table_name, all_table_list))
        is_at_eso = False
    else:
        if not last_version_list[all_table_list.index(table_name)]:
            msgs.warning('{} is not the most recent version of the queried catalogue'.format(
                table_name))
    return is_at_eso
def _is_table_at_eso(table_name):
    r"""Check if a given table is present at ESO

    Args:
        table_name (`str`): table to be tested.

    Returns:
        is_at_eso (`bool`): `True` if the table is present in tapcat. `False` and warning
            raised otherwise.

    """
    is_at_eso = True
    # Check for presence of `table_name` on the ESO archive
    eso_catalogues_all = all_catalogues(verbose=False, all_versions=True)
    eso_catalogues = eso_catalogues_all['table_name'].data.data.tolist()
    eso_version = eso_catalogues_all['last_version'].data.data.tolist()
    if table_name not in eso_catalogues:
        msgs.warning('Catalogue: {} not recognized. Possible values are:\n{}'.format(
            table_name, eso_catalogues))
        is_at_eso = False
    else:
        if not eso_version[eso_catalogues.index(table_name)]:
            msgs.warning('{} is not the most recent version of the queried catalogue'.format(
                table_name))
    return is_at_eso
def _are_columns_in_table(column_list, table_name):
    r"""Check if the given columns are present in a table

    Args:
        column_list (`list`): list of the columns that will be checked
        table_name (`str`): table to be tested.

    Returns:
        are_in_table (`list`): list of `bool`, where `True` means the column is present in
            the table. `False` and warning raised otherwise.

    """
    are_in_table = []
    first_error = True
    eso_column_list = columns_in_catalogue(
        table_name, verbose=False)['column_name'].data.data.tolist()
    for column_test in column_list:
        if column_test in eso_column_list:
            are_in_table.append(True)
        else:
            are_in_table.append(False)
            if first_error:
                msgs.warning('Column: {} not recognized. Possible values are:\n{}'.format(
                    column_test, eso_column_list))
                first_error = False
            else:
                msgs.warning('Column: {} not recognized'.format(column_test))
    return are_in_table
def make_list_of_fits_files(args_input, verify_fits=False) -> list:
    r"""Clean an input list of fits files

    Args:
        args_input (list): input list of fits files that will be checked (usually coming
            from `parse_arguments()` in a macro)
        verify_fits (bool): if set to `True`, it will verify that the fits file is
            compliant with the FITS standard

    Returns:
        list: list containing all the valid fits files given in input

    """
    list_of_fits_files = []
    if not isinstance(args_input, list):
        args_input_files: list = [args_input]
    else:
        args_input_files: list = args_input
    for args_input_file in args_input_files:
        if checks.fits_file_is_valid(args_input_file, overwrite=False, verify_fits=verify_fits):
            list_of_fits_files.append(args_input_file)
        else:
            msgs.warning('{} excluded because not a valid fits file'.format(args_input_file))
    if len(list_of_fits_files) == 0:
        msgs.error('No valid fits files present')
    return list_of_fits_files
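# A minimal usage sketch (file names are hypothetical):
#
#   good_files = make_list_of_fits_files(['image1.fits', 'notes.txt', 'image2.fits.fz'])
#   # warning on 'notes.txt' -> ['image1.fits', 'image2.fits.fz']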
def from_line_to_header_card(line):  # written by Ema 05.03.2020
    r"""Given a line of text from a header, it returns card, value (and comment, if present).

    This is a tool to read in headers that have been saved in ascii format. Typically a
    line will be in the form:
    `DATE    = '2003-07-25T05:41:32.569' / UT date when this file was written`
    the code will try to divide the line into:
    - card = DATE
    - value = '2003-07-25T05:41:32.569'
    - comment = UT date when this file was written
    Care is taken for cases in which values and/or comment contain characters like `=` or
    `/`. In the possible case that `line` could not be processed, None, None, None will be
    returned and a warning statement will be raised.

    Args:
        line (`str`): input string to be divided into card, value, comment

    Returns:
        card, value, comment (`str`, `int`, `float`): if there is no comment, a `None` will
            be returned.

    """
    # checks for line
    assert isinstance(line, str), '`line` needs to be a str'
    if '=' not in line:
        if 'END' in line and 'END\n' not in line:
            msgs.warning('The following line could not be interpreted as header:\n {}'.format(line))
        card, value, comment = None, None, None
    else:
        # taking as card everything that appears before the first occurrence of `=`
        card, line_leftover = re.split("=", line, maxsplit=1)
        card = card.strip()
        # now check how many occurrences of ` / ` are in line_leftover and split values
        # from comments
        if line_leftover.count(' / ') == 0:
            # good splitting and no comment present
            value, comment = line_leftover.strip(), None
        elif line_leftover.count(' / ') == 1:
            # good splitting
            value, comment = re.split(" / ", line_leftover, maxsplit=1)
            value, comment = value.strip(), comment.strip()
            if len(comment) == 0:
                comment = None
        else:
            # Troubles but fingers crossed
            value, comment = re.split(" / ", line_leftover, maxsplit=1)
            value, comment = value.strip(), comment.strip()
            msgs.warning('The following line should be double checked:\n {}'.format(line))
    return card, check_value(value), comment
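# Expected behaviour on the docstring example (a sketch):
#
#   line = "DATE    = '2003-07-25T05:41:32.569' / UT date when this file was written"
#   card, value, comment = from_line_to_header_card(line)
#   # card    -> 'DATE'
#   # value   -> "'2003-07-25T05:41:32.569'"  (the quotes survive: the colons trip the
#   #            special-character check in `check_value`, so the value stays a string)
#   # comment -> 'UT date when this file was written'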
def header_from_txt_file(txt_file):  # written by Ema 05.03.2020
    r"""Load a header from a text file into an `astropy` `hdu.header` object.

    The text file in input should contain the information in the standard header format:
        SIMPLE  = T / Standard FITS format
        BITPIX  = 16 / # of bits storing pix values
        NAXIS   = 2 / # of axes in frame
        NAXIS1  = 2148 / # pixels/axis
        NAXIS2  = 1365 / # pixels/axis
        ORIGIN  = 'ESO' / European Southern Observatory.
        DATE    = '2003-07-25T05:41:32.569' / UT date when this file was written
        CRVAL1  = 1.0 / value of ref pixel
        CRPIX1  = 1.0 / Ref. pixel of center of rotation
        CDELT1  = 2.0 / Binning factor
        etc..
    Cards will be read only if there is a value associated (i.e., if they are followed by a
    `=` sign). In case the file does not exist, an empty header will be returned and a
    warning statement will be raised.

    Args:
        txt_file (`str`): txt file name

    Returns:
        header_from_txt (`hdu.header`): a header object

    """
    # Checks for txt_file
    assert isinstance(txt_file, str), '`txt_file` needs to be a str'
    # creating hdu object
    hdu = fits.PrimaryHDU()
    header_from_txt = hdu.header
    if not os.path.isfile(txt_file):
        msgs.warning('File {} does not exist. Returning empty header'.format(txt_file))
    else:
        with open(txt_file, 'r') as txt_header:
            for line in txt_header:
                card, value, comment = from_line_to_header_card(line)
                if card is None:
                    if 'END' not in line and 'END\n' not in line:
                        msgs.warning(
                            'The following line will not be added to the header\n {}'.format(line))
                else:
                    add_header_card(header_from_txt, card, value, comment=comment)
    return header_from_txt
def fits_file_is_valid(fits_file, verify_fits=False, overwrite=False) -> bool:  # Written by Ema 05.03.2020
    r"""Check if a file exists and has a valid extension

    The option `verify_fits` checks the header of the fits file using `astropy.io.fits.verify`

    Args:
        fits_file (str): fits file you would like to check
        verify_fits (bool): if set to `True`, it will verify that the fits file is
            compliant with the FITS standard.
        overwrite (bool): if `True`, overwrite the input fits file with the header
            corrections from `verify_fits`

    Returns:
        bool: `True` if the file exists and is valid, `False` and warning raised if not.

    """
    is_fits = True
    # Checks if it is a string
    assert isinstance(fits_file, str), 'input `fits_file` needs to be a string'
    # Check for ending
    # ToDo
    # to be updated to: PERMITTED_FITS_ENDINGS
    if not fits_file.endswith('.fits') and not fits_file.endswith('.fits.fz') and \
            not fits_file.endswith('.fits.gz'):
        msgs.warning('File: {} does not end with `fits` or `fits.fz` or `fits.gz`'.format(fits_file))
        is_fits = False
    # Check for existence
    if not os.path.exists(fits_file):
        msgs.warning('File: {} does not exist'.format(fits_file))
        is_fits = False
    # Check for compliance with FITS standard
    if verify_fits:
        if overwrite:
            hdul = fits.open(fits_file, mode='update', checksum=False)
            if not check_checksums(hdul):
                is_fits = False
            hdul.flush(output_verify='fix+warn', verbose=True)
            hdul.writeto(fits_file, checksum=True, overwrite=True)
            msgs.info('File checked and rewritten')
        else:
            hdul = fits.open(fits_file, mode='readonly', checksum=True)
            if not check_checksums(hdul):
                is_fits = False
            hdul.verify('fix+warn')
        hdul.close()
    else:
        if overwrite:
            msgs.error('The option overwrite works only if verify_fits = True')
    return is_fits
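# A minimal usage sketch (the file name is hypothetical):
#
#   if fits_file_is_valid('spectrum.fits', verify_fits=True):
#       hdul = fits.open('spectrum.fits')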
def load_from_table(self, table, primary_header=None, copy_header=True, where_time='TIME',
                    where_time_bin='TIME_BIN', where_flux='FLUX', where_error='ERROR',
                    where_background='BACKGROUND', where_quality='QUAL'):
    r"""Given a table, put it in a LightCurves object

    Args:
        table: table from which the light curve will be loaded
        primary_header: primary header to be associated to the light curve
        copy_header: if `True`, copy the header of the table into the object
        where_time: name of the column containing the time
        where_time_bin: name of the column containing the time bin
        where_flux: name of the column containing the flux
        where_error: name of the column containing the error on the flux
        where_background: name of the column containing the background
        where_quality: name of the column containing the quality flag

    """
    if checks.table_is_valid(table):
        msgs.work('Reading input table')
        if primary_header is not None:
            if len(primary_header) > 0:
                self.primary_header = primary_header
            else:
                msgs.warning('Empty `primary_header` provided')
        if copy_header:
            if len(table.header) > 0:
                self.header = table.header
            else:
                msgs.warning('No header found in the table')
        if isinstance(table, fits.BinTableHDU):
            self._load_from_BinTableHDU(table, copy_header=copy_header, where_time=where_time,
                                        where_time_bin=where_time_bin, where_flux=where_flux,
                                        where_error=where_error,
                                        where_background=where_background,
                                        where_quality=where_quality)
        elif isinstance(table, fits.TableHDU):
            # ToDo implement TableHDU case
            msgs.error('To be implemented')
        else:
            msgs.error('Unknown table type')
def which_columns(self):
    r"""Return a list with all the names of the columns in the attribute `result_from_query`

    Returns:
        list_of_columns (`list`): list of all the names of the columns

    """
    if self.result_from_query is None:
        msgs.warning('`result_from_query` is empty. You may want to run the query first')
        list_of_columns = []
    else:
        list_of_columns = self.result_from_query.colnames
    return list_of_columns
def check_checksums(hdul) -> bool:
    r"""Test if the `datasum` and `checksum` keywords in a `HDUList` are present and up-to-date

    Args:
        hdul (:py:obj:`astropy.io.fits.hdu.hdulist.HDUList`): list of `astropy` HDUs to be
            checked

    Returns:
        bool: `True` if all the HDUs in the input HDUList have the correct `datasum` and
            `checksum`

    """
    is_good_checksum = True
    for hdu in hdul:
        checks_for_checksum = hdu.verify_checksum()
        checks_for_datasum = hdu.verify_datasum()
        if checks_for_checksum == 0:
            msgs.warning('Checksum not valid')
            is_good_checksum = False
        if checks_for_checksum == 2:
            msgs.warning('Checksum not present')
            is_good_checksum = False
        if checks_for_datasum == 0:
            msgs.warning('Datasum not valid')
            is_good_checksum = False
        if checks_for_datasum == 2:
            msgs.warning('Datasum not present')
            is_good_checksum = False
    return is_good_checksum
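# A minimal usage sketch (the file name is hypothetical; `astropy.io.fits` is assumed
# to be imported as `fits`):
#
#   with fits.open('spectrum.fits', checksum=True) as hdul:
#       all_good = check_checksums(hdul)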
def header_is_valid(header):
    r"""Check if a header is valid
    """
    is_header = True
    # Check if it is a fits.Header
    assert isinstance(header, fits.Header), \
        'The header is not an instance of `astropy.fits.io.header`'
    if len(header) == 0:
        msgs.warning('Empty Header')
        is_header = False
    return is_header
def prodcatg(self, prodcatg_type):
    self.__prodcatg = eso_prodcatg.ProdCatg(prodcatg_type=prodcatg_type)
    if self.is_primary is True:
        _prodcatg_value = self.get('PRODCATG', default=None)
        if _prodcatg_value is None:
            msgs.info('Added PRODCATG = {} to the header'.format(prodcatg_type))
            self.set('PRODCATG', prodcatg_type)
        elif _prodcatg_value != prodcatg_type:
            msgs.warning('Updating value of PRODCATG from {} to {}'.format(
                _prodcatg_value, prodcatg_type))
            self.set('PRODCATG', prodcatg_type)
        elif _prodcatg_value == prodcatg_type:
            msgs.info('PRODCATG = {}'.format(_prodcatg_value))
        else:
            msgs.error('Cannot set the value of PRODCATG')
def __init__(self, tap_service=None, query=None, type_of_query='sync', result_from_query=None,
             maxrec=None):
    self.tap_service = tap_service
    self.query = query
    self.result_from_query = result_from_query
    self.maxrec = maxrec
    if type_of_query not in tap_queries.TAP_QUERY_TYPES:
        msgs.warning('{} not a valid entry for the type of TAP query. Possibilities are: {}'.format(
            type_of_query, tap_queries.TAP_QUERY_TYPES))
        msgs.warning('The `type_of_query` attribute will be set to `sync`')
        self.type_of_query = 'sync'
    else:
        self.type_of_query = type_of_query
def print_query(query):
    r"""Print the query on the terminal

    In case the `query` is empty, a warning is raised

    Args:
        query (str): string containing the query

    Returns:
        None

    """
    if query is None:
        msgs.warning('The query is empty')
    else:
        msgs.info('The query is:')
        msgs.info('{}'.format(query))
    return
def columns_info(collections=None, tables=None, verbose=False):
    r"""Load a query that gets the names (and corresponding ucd) of the columns present in a collection

    If `collections` and `tables` are `None` the query for the columns of all collections
    and tables in the ESO archive is returned.

    .. note::
        The query is built by setting either `collections` or `tables` as input. Particular
        attention should be given if both `collections` and `tables` are not `None`. Given
        that the connector between the two conditions is an `AND`, this may give rise to
        unexpected behaviour

    Args:
        collections (any, optional): list of `str` containing the names of the collections
            (or a single `str`) from which the columns will be extracted
        tables (any, optional): list of `str` containing the names of the tables (or a
            single `str`) from which the columns will be extracted
        verbose (bool): if set to `True` additional info will be displayed

    Returns:
        astropy.table: table of all columns present in a table/collection. Information is
            stored in `table_name`, `column_name`, `ucd`, `datatype`, `description`, and
            `unit`

    """
    # test on collections
    clean_collections = _is_collection_list_at_eso(collections)
    # test on tables
    clean_tables = _is_table_list_at_eso(tables)
    # instantiate ESOCatalogues
    query_all_columns_info = query_catalogues.ESOCatalogues(
        query=tap_queries.create_query_all_columns(collections=clean_collections,
                                                   tables=clean_tables))
    # raise warning
    if (collections is not None) and (tables is not None):
        msgs.warning('Setting conditions for both `collections` and `tables`. Please check '
                     'that this is the wanted behaviour:')
    # Print query
    if verbose or ((collections is not None) and (tables is not None)):
        query_all_columns_info.print_query()
    # Obtaining query results
    query_all_columns_info.run_query(to_string=True)
    all_columns_table = query_all_columns_info.get_result_from_query()
    return all_columns_table
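# A minimal usage sketch (the collection name is a hypothetical example; assumes the
# ESO archive is reachable):
#
#   vvv_columns = columns_info(collections='VVV', verbose=True)
#   print(vvv_columns['column_name'])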
def _is_column_in_obscore(column_name):
    r"""Check if a given column is present in `ivoa.ObsCore`

    Args:
        column_name (str): column to be tested

    Returns:
        bool: `True` if the column is present in `ivoa.ObsCore`. `False` and warning raised
            otherwise

    """
    is_in_obscore = True
    table_all_columns = columns_info(verbose=False)
    all_column_list = table_all_columns['column_name'].data.data.tolist()
    if column_name not in all_column_list:
        msgs.warning('Column: {} not recognized. Possible values are:\n{}'.format(
            column_name, all_column_list))
        is_in_obscore = False
    return is_in_obscore
def get_cards(self, check_cards=None):
    r"""Returns the `cards` present in a list object.

    If `check_cards` is not `None`, it checks that such cards are contained in the list and
    returns them. If not present, an empty `np.array` is returned.

    Args:
        check_cards (`np.array`, `None`): list of cards that needs to be checked.

    Returns:
        selected_cards (`np.array`): cards present in the list.
        missing_cards (`np.array`): subset of `check_cards` not present in the list. It is
            empty if all cards are present.

    """
    if check_cards is not None:
        _check_cards = np.isin(self.cards, _convert_to_numpy_array(check_cards))
        selected_cards = np.copy(self.cards[_check_cards])
        if np.size(selected_cards) < 1:
            msgs.warning('None of the cards in input are present in the list.')
            selected_cards = np.array([])
            missing_cards = np.array([])
        else:
            msgs.work('There are {} occurrences of the {} cards in input.'.format(
                np.size(selected_cards), np.size(check_cards)))
            if np.all(np.isin(check_cards, selected_cards)):
                msgs.work('All cards in input are present in the list.')
                missing_cards = np.array([])
            else:
                msgs.work('Not all the cards in input are present in the list.')
                missing_cards = check_cards[np.logical_not(np.isin(check_cards, selected_cards))]
                msgs.work('The missing cards are:')
                for missing_card in missing_cards:
                    msgs.work(' - {}'.format(missing_card))
    else:
        selected_cards = np.copy(self.cards)
        missing_cards = np.array([])
    return selected_cards, missing_cards
def _is_collection_at_eso(collection):
    r"""Check if a given collection is present in the ESO archive

    Args:
        collection (str): collection to be tested

    Returns:
        bool: `True` if the collection is present in the ESO archive, `False` and warning
            raised otherwise

    """
    is_at_eso = True
    table_all_catalogues = all_catalogues_info(verbose=False, all_versions=False)
    all_collections_list = np.unique(table_all_catalogues['collection'].data.data).tolist()
    if collection not in all_collections_list:
        msgs.warning('Collection: {} not recognized. Possible values are:\n{}'.format(
            collection, all_collections_list))
        is_at_eso = False
    return is_at_eso
def image2d_is_valid(image2d) -> bool:  # Written by Ema 12.03.2020
    r"""Check if a 2D image is valid

    Args:
        image2d (obj:`numpy.ndarray`): image that you would like to check

    Returns:
        bool: `True` if a valid 2D image, `False` and error raised if not.

    """
    is_image2d = True
    # Checks if it is a numpy array
    assert isinstance(image2d, np.ndarray), 'The image is not a `numpy array`'
    # Check for dimensions
    if not image2d.ndim == 2:
        msgs.warning('The image is not two dimensional (N. of dimensions: {})'.format(
            image2d.ndim))
        is_image2d = False
    return is_image2d
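# Expected behaviour (a sketch):
#
#   image2d_is_valid(np.zeros((100, 100)))  # -> True
#   image2d_is_valid(np.zeros(100))         # warning -> False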
def download_tess_data(target_name):
    r"""Run a query to MAST to obtain the TESS data of an object.

    Data is saved in the directories: `./mastDownload/TESS/<target_name>_*/*.fits`

    Args:
        target_name (str): name of the target: e.g., '231663901'

    Returns:
        bool: it returns `True` if data are retrieved and `False` otherwise.

    """
    tess_obs = Observations.query_criteria(target_name=target_name, obs_collection='TESS')
    if len(tess_obs) == 0:
        msgs.warning('No TESS data for target {}'.format(target_name))
        return False
    for idx in np.arange(0, len(tess_obs)):
        tess_prods = Observations.get_product_list(tess_obs[idx])
        tess_fits_prods = Observations.filter_products(tess_prods, extension='fits',
                                                       mrp_only=False)
        Observations.download_products(tess_fits_prods, mrp_only=False, cache=False)
    return True
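# A minimal usage sketch (the TIC id is the one quoted in the docstring; assumes
# `astroquery` is installed and MAST is reachable):
#
#   if download_tess_data('231663901'):
#       # fits files are now under ./mastDownload/TESS/
#       ...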
def download_kepler_data(target_name):
    r"""Run a query to MAST to obtain the Kepler data of an object

    The data is saved in the directories: `./mastDownload/Kepler/<target_name>_*/*.fits`

    Args:
        target_name (str): name of the target: e.g., 'kplr011446443'

    Returns:
        bool: it returns `True` if data are retrieved and `False` otherwise.

    """
    kepler_obs = Observations.query_criteria(target_name=target_name, obs_collection='Kepler')
    if len(kepler_obs) == 0:
        msgs.warning('No Kepler data for target {}'.format(target_name))
        return False
    for idx in np.arange(0, len(kepler_obs)):
        kepler_prods = Observations.get_product_list(kepler_obs[idx])
        kepler_fits_prods = Observations.filter_products(kepler_prods, extension='fits',
                                                         mrp_only=False)
        Observations.download_products(kepler_fits_prods, mrp_only=False, cache=False)
    return True
def run_query_sync(tap_service, query, maxrec=default.get_value('maxrec')):
    r"""Run a synchronous query to TAP service and return the result as an `astropy.Table`

    If the synchronous query fails, the code automatically tries to run the same query
    asynchronously

    Args:
        tap_service (pyvo.dal.tap.TAPService): TAP service that will be used for the query
        query (str): query to be run
        maxrec (int): maximum number of entries that a single query can return. Default is
            set by default.get_value('maxrec')

    Returns:
        astropy.table: result from the query to the TAP service

    """
    try:
        result_from_query = tap_service.search(query=query, maxrec=maxrec).to_table()
    except (ValueError, DALQueryError, DALFormatError):
        msgs.warning('The query timed out. Trying `async` instead')
        result_from_query = run_query_async(tap_service=tap_service, query=query, maxrec=maxrec)
    return result_from_query
def connection_to_website(url, timeout=1.) -> bool:  # written by Ema 05.03.2020
    r"""Check that there is an active connection to a website

    Args:
        url (str): link to the website you want to check
        timeout (float): timeout waiting for the website to respond

    Returns:
        bool: `True` if there is an active connection, `False` and error raised if not.

    """
    # Checks for url
    assert isinstance(url, str), 'The url needs to be a string'
    if url.startswith('www'):
        url_clean = 'http://' + url
        msgs.warning('Modifying url to: {}'.format(url_clean))
    else:
        url_clean = url
    request = urllib.request.Request(url_clean)
    try:
        urllib.request.urlopen(request, timeout=timeout)
    except urllib.error.HTTPError as err:
        msgs.warning('HTTP Error: {}'.format(err.code))
        is_active = False
    except urllib.error.URLError as err:
        msgs.warning('URL Error: {}'.format(err.reason))
        is_active = False
    else:
        is_active = True
    return is_active
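# A minimal usage sketch (the URL is just an example):
#
#   if connection_to_website('http://archive.eso.org', timeout=5.):
#       # safe to run remote queries
#       ...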
def set_last_version(self, update=True):
    r"""Set the `last_version` column in the `result_from_query` attribute

    `last_version` is a column of `bool`, where `False` means that there is a newer version
    of the catalogue. This works only if `result_from_query` contains the columns `version`
    and `title`. In case the `last_version` column is already present, a warning is raised.

    Args:
        update (bool): in case the `last_version` column is already present, the code will
            update the values only if `update` is set to `True`

    Returns:
        None

    """
    # Require that title and version are present in result_from_query
    for check_column in ['title', 'version']:
        if check_column not in self.which_columns():
            msgs.warning('{} column not present, `last_version` will not be created'.format(
                check_column))
            return
    # Check whether last_version is already present in result_from_query
    if 'last_version' in self.which_columns():
        if update:
            msgs.warning('`last_version` column already present and it will be updated')
            # remove the stale column so it can be recomputed below
            self.result_from_query.remove_column('last_version')
        else:
            msgs.warning('`last_version` column already present and it will not be updated')
            return
    # Get last versions
    unique_titles = np.unique(self.result_from_query['title'].data).tolist()
    last_version = np.zeros_like(self.result_from_query['version'].data, dtype=bool)
    for unique_title in unique_titles:
        most_recent_version = np.nanmax(
            self.result_from_query['version'].data[
                (self.result_from_query['title'].data == unique_title)])
        last_version[(self.result_from_query['title'].data == unique_title) &
                     (self.result_from_query['version'].data == most_recent_version)] = True
    self.result_from_query.add_column(
        MaskedColumn(data=last_version, name='last_version', dtype=bool,
                     description='True if this is the latest version of the catalog'))
    return