def get_lytaf_event_types(lytaf_path=None, print_event_types=True):
    """
    Prints the different event types in each of the LYTAF databases.

    Parameters
    ----------
    lytaf_path : `str`
        Deprecated, has no effect.  Previously the path where LYTAF files
        were stored; the files are now managed by the data cache.

    print_event_types : `bool`
        If True, prints the artifacts in each lytaf database to screen.

    Returns
    -------
    all_event_types : `list`
        List of all events types in all lytaf databases.
    """
    # lytaf_path no longer has any effect; warn callers that still pass it.
    if lytaf_path:
        warn('lytaf_path is deprecated, has no effect and will be removed in SunPy 2.1.',
             SunpyDeprecationWarning)
    suffixes = ["lyra", "manual", "ppt", "science"]
    all_event_types = []
    # For each database file extract the event types and print them.
    if print_event_types:
        print("\nLYTAF Event Types\n-----------------\n")
    for suffix in suffixes:
        dbname = f"annotation_{suffix}.db"
        # Check database file exists, else download it.
        lytaf_path = cache.download(urljoin(LYTAF_REMOTE_PATH, dbname))
        # Open SQLITE3 LYTAF file and read the event types.
        connection = sqlite3.connect(str(lytaf_path))
        try:
            cursor = connection.cursor()
            cursor.execute("select type from eventType;")
            event_types = cursor.fetchall()
        finally:
            # Fix: the connection was previously never closed (resource leak,
            # one leaked handle per database file).
            connection.close()
        all_event_types.append(event_types)
        if print_event_types:
            print("----------------\n{} database\n----------------".format(suffix))
            for event_type in event_types:
                print(str(event_type[0]))
            print(" ")
    # Unpack event types in all_event_types into single list
    all_event_types = [event_type[0]
                       for event_types in all_event_types
                       for event_type in event_types]
    return all_event_types
def get_lytaf_event_types(print_event_types=True):
    """
    Prints the different event types in each of the LYTAF databases.

    Parameters
    ----------
    print_event_types : `bool`
        If True, prints the artifacts in each lytaf database to screen.

    Returns
    -------
    all_event_types : `list`
        List of all events types in all lytaf databases.
    """
    suffixes = ["lyra", "manual", "ppt", "science"]
    all_event_types = []
    # For each database file extract the event types and print them.
    if print_event_types:
        print("\nLYTAF Event Types\n-----------------\n")
    for suffix in suffixes:
        dbname = f"annotation_{suffix}.db"
        # Check database file exists, else download it.
        lytaf_path = cache.download(urljoin(LYTAF_REMOTE_PATH, dbname))
        # Open SQLITE3 LYTAF file and read the event types.
        connection = sqlite3.connect(str(lytaf_path))
        try:
            cursor = connection.cursor()
            cursor.execute("select type from eventType;")
            event_types = cursor.fetchall()
        finally:
            # Fix: the connection was previously never closed (resource leak,
            # one leaked handle per database file).
            connection.close()
        all_event_types.append(event_types)
        if print_event_types:
            print("----------------\n{} database\n----------------".format(suffix))
            for event_type in event_types:
                print(str(event_type[0]))
            print(" ")
    # Unpack event types in all_event_types into single list
    all_event_types = [event_type[0]
                       for event_types in all_event_types
                       for event_type in event_types]
    return all_event_types
def get_lytaf_events(start_time, end_time, lytaf_path=None,
                     combine_files=("lyra", "manual", "ppt", "science"),
                     csvfile=None, force_use_local_lytaf=False):
    """
    Extracts combined lytaf file for given time range.

    Given a time range defined by start_time and end_time, this function
    extracts the segments of each LYRA annotation file and combines them.

    Parameters
    ----------
    start_time : `astropy.time.Time` or `str`
        Start time of period for which annotation file is required.

    end_time : `astropy.time.Time` or `str`
        End time of period for which annotation file is required.

    lytaf_path : `str`
        Deprecated, has no effect.  LYTAF files are now managed by the
        data cache.

    combine_files : `tuple` of strings
        States which LYRA annotation files are to be combined.
        Default is all four, i.e. lyra, manual, ppt, science.
        See Notes section for an explanation of each.

    csvfile : `str`
        If given, the returned record array is also written out as a
        semicolon-delimited CSV file at this path.  Default=None, i.e.
        no file is written.

    force_use_local_lytaf : `bool`
        Ensures current local version of lytaf files are not replaced by
        up-to-date online versions even if current local lytaf files do not
        cover entire input time range etc.  Default=False

    Returns
    -------
    lytaf : `numpy.recarray`
        Containing the various parameters stored in the LYTAF files.

    Notes
    -----
    There are four LYRA annotation files which mark different types of events
    or artifacts in the data.  They are named annotation_suffix.db where
    suffix is a variable equalling either lyra, manual, ppt, or science.

    annotation_lyra.db : contains entries regarding possible effects to
    the data due to normal operation of LYRA instrument.

    annotation_manual.db : contains entries regarding possible effects
    to the data due to unusual or manually logged events.

    annotation_ppt.db : contains entries regarding possible effects to
    the data due to pointing or positioning of PROBA2.

    annotation_science.db : contains events in the data scientifically
    interesting, e.g. GOES flares.

    References
    ----------
    Further documentation: http://proba2.oma.be/data/TARDIS

    Examples
    --------
    Get all events in the LYTAF files for January 2014

    >>> from sunpy.instr.lyra import get_lytaf_events
    >>> lytaf = get_lytaf_events('2014-01-01', '2014-02-01')  # doctest: +SKIP
    """
    # Check inputs
    # lytaf_path no longer has any effect; warn callers that still pass it.
    # (Fix: the warning message previously misspelled it as 'laytaf_path'.)
    if lytaf_path:
        warn('lytaf_path is deprecated, has no effect and will be removed in SunPy 2.1.',
             SunpyDeprecationWarning)
    # Parse start_time and end_time
    start_time = parse_time(start_time)
    end_time = parse_time(end_time)
    # Check combine_files contains correct inputs
    if not all(suffix in ["lyra", "manual", "ppt", "science"]
               for suffix in combine_files):
        raise ValueError("Elements in combine_files must be strings equalling "
                         "'lyra', 'manual', 'ppt', or 'science'.")
    # Remove any duplicates from combine_files input and sort for a
    # deterministic processing order.
    combine_files = sorted(set(combine_files))
    # Convert input times to UNIX timestamp format since this is the
    # time format in the annotation files.
    start_time_uts = (start_time - Time('1970-1-1')).sec
    end_time_uts = (end_time - Time('1970-1-1')).sec

    # dtype of the record array which will hold the information from
    # the annotation files.
    lytaf_dtype = [("insertion_time", object),
                   ("begin_time", object),
                   ("reference_time", object),
                   ("end_time", object),
                   ("event_type", object),
                   ("event_definition", object)]
    # Rows are collected in a plain list and converted to an array once at
    # the end; the previous implementation np.append-ed row by row, which
    # copies the whole array each time (O(n^2)).
    records = []

    # Access annotation files
    for suffix in combine_files:
        # Check database files are present, else download them.
        dbname = f"annotation_{suffix}.db"
        lytaf_path = cache.download(urljoin(LYTAF_REMOTE_PATH, dbname))
        # Open SQLITE3 annotation file.
        connection = sqlite3.connect(str(lytaf_path))
        # Create cursor to manipulate data in annotation file.
        cursor = connection.cursor()
        # Check if lytaf file spans the start and end times defined by
        # user.  If not, download newest version.
        # First get start time of first event and end time of last
        # event in lytaf.
        cursor.execute("select begin_time from event order by begin_time asc "
                       "limit 1;")
        db_first_begin_time = cursor.fetchone()[0]
        db_first_begin_time = datetime.datetime.fromtimestamp(db_first_begin_time)
        cursor.execute("select end_time from event order by end_time desc "
                       "limit 1;")
        db_last_end_time = cursor.fetchone()[0]
        db_last_end_time = datetime.datetime.fromtimestamp(db_last_end_time)
        # If lytaf does not include entire input time range...
        if not force_use_local_lytaf:
            if end_time > db_last_end_time or start_time < db_first_begin_time:
                # ...close lytaf file...
                cursor.close()
                connection.close()
                # ...Download latest lytaf file...
                lytaf_path = cache.download(urljoin(LYTAF_REMOTE_PATH, dbname),
                                            redownload=True)
                # ...and open new version of lytaf database.
                connection = sqlite3.connect(str(lytaf_path))
                cursor = connection.cursor()
        # Select and extract the data from event table within file within
        # given time range.  Parameterized SQL replaces the previous
        # str.format interpolation (safer and handles quoting correctly).
        cursor.execute("select insertion_time, begin_time, reference_time, "
                       "end_time, eventType_id from event where end_time >= ? "
                       "and begin_time <= ?", (start_time_uts, end_time_uts))
        event_rows = cursor.fetchall()
        # Select the event types from eventType table and build a lookup
        # from id -> (type, definition).
        cursor.row_factory = sqlite3.Row
        cursor.execute("select * from eventType")
        event_type_map = {row["id"]: (row["type"], row["definition"])
                          for row in cursor.fetchall()}
        # Enter desired information into the record list.
        for event_row in event_rows:
            event_type, event_definition = event_type_map[event_row[4]]
            records.append((
                Time(datetime.datetime.utcfromtimestamp(event_row[0]), format='datetime'),
                Time(datetime.datetime.utcfromtimestamp(event_row[1]), format='datetime'),
                Time(datetime.datetime.utcfromtimestamp(event_row[2]), format='datetime'),
                Time(datetime.datetime.utcfromtimestamp(event_row[3]), format='datetime'),
                event_type,
                event_definition))
        # Close file
        cursor.close()
        connection.close()
    # Build the record array once from the collected rows.
    lytaf = np.array(records, dtype=lytaf_dtype)
    # Sort lytaf in ascending order of begin time
    lytaf.sort(order="begin_time")

    # If csvfile kwarg is set, write out lytaf to csv file.
    if csvfile:
        # newline='' is required by the csv module so the writer controls
        # line endings itself (avoids blank rows on Windows).
        with open(csvfile, 'w', newline='') as openfile:
            csvwriter = csv.writer(openfile, delimiter=';')
            # Write header.
            csvwriter.writerow(lytaf.dtype.names)
            # Write data, formatting the four time columns as ISO strings.
            for row in lytaf:
                csvwriter.writerow([row[0].strftime("%Y-%m-%dT%H:%M:%S"),
                                    row[1].strftime("%Y-%m-%dT%H:%M:%S"),
                                    row[2].strftime("%Y-%m-%dT%H:%M:%S"),
                                    row[3].strftime("%Y-%m-%dT%H:%M:%S"),
                                    row[4],
                                    row[5]])

    return lytaf
def _parse_url(self, arg, **kwargs):
    """
    Download the resource referenced by *arg* and read it.

    Parameters
    ----------
    arg
        A request-like object exposing a ``full_url`` attribute.

    Returns
    -------
    The data-header pairs produced by ``self._read_file`` for the
    downloaded file.
    """
    downloaded = cache.download(arg.full_url)
    return self._read_file(str(downloaded.absolute()), **kwargs)
def _parse_args(self, *args, **kwargs):
    """
    Parses an args list for data-header pairs.

    args can contain any mixture of the following entries:

    * tuples of data,header
    * data, header not in a tuple
    * data, wcs object in a tuple
    * data, wcs object not in a tuple
    * filename, as a str or pathlib.Path, which will be read
    * directory, as a str or pathlib.Path, from which all files will be read
    * glob, from which all files will be read
    * url, which will be downloaded and read
    * lists containing any of the above.

    Returns
    -------
    tuple
        ``(data_header_pairs, already_maps)`` — the collected
        (data, header) pairs and any ready-made map objects that were
        passed through unchanged.

    Raises
    ------
    ValueError
        If a path-like argument matches no files, or an argument matches
        none of the supported cases.

    Example
    -------
    self._parse_args(data, header,
                     (data, header),
                     ['file1', 'file2', 'file3'],
                     'file4',
                     'directory1',
                     '*.fits')
    """
    data_header_pairs = list()
    already_maps = list()

    # Account for nested lists of items
    args = expand_list(args)

    # For each of the arguments, handle each of the cases.
    # A while loop (rather than a for) is used because a data argument
    # consumes the NEXT argument as its header, requiring a manual extra
    # index increment.
    i = 0
    while i < len(args):
        arg = args[i]

        # Data-header or data-WCS pair
        if isinstance(arg, SUPPORTED_ARRAY_TYPES):
            # The element after the data is taken as its header/WCS.
            arg_header = args[i+1]
            if isinstance(arg_header, WCS):
                arg_header = args[i+1].to_header()

            if self._validate_meta(arg_header):
                pair = (args[i], OrderedDict(arg_header))
                data_header_pairs.append(pair)
                i += 1    # an extra increment to account for the data-header pairing

        # A database Entry
        elif isinstance(arg, DatabaseEntryType):
            data_header_pairs += self._read_file(arg.path, **kwargs)

        # Already a Map: passed through untouched.
        elif isinstance(arg, GenericMap):
            already_maps.append(arg)

        # URL: downloaded (via the cache) then read like a local file.
        elif isinstance(arg, str) and _is_url(arg):
            url = arg
            path = str(cache.download(url).absolute())
            pairs = self._read_file(path, **kwargs)
            data_header_pairs += pairs

        # File system path (file or directory or glob)
        elif _possibly_a_path(arg):
            path = pathlib.Path(arg).expanduser()
            if _is_file(path):
                pairs = self._read_file(path, **kwargs)
                data_header_pairs += pairs
            elif _is_dir(path):
                # Sorted so the resulting order is deterministic.
                for afile in sorted(path.glob('*')):
                    data_header_pairs += self._read_file(afile, **kwargs)
            elif glob.glob(os.path.expanduser(arg)):
                for afile in sorted(glob.glob(os.path.expanduser(arg))):
                    data_header_pairs += self._read_file(afile,
                                                         **kwargs)
            else:
                raise ValueError(f'Did not find any files at {arg}')

        else:
            raise ValueError(f"Invalid input: {arg}")

        i += 1

    # TODO:
    # In the end, if there are already maps it should be put in the same
    # order as the input, currently they are not.
    return data_header_pairs, already_maps