def __init__(self,
             info_update_time: Union[str, datetime],
             start_time: Union[str, datetime],
             end_time: Union[str, datetime],
             num_files: int,
             size_in_mb: int):
    self._info_update_time = to_datetime(info_update_time, default=None)
    self._start_time = to_datetime(start_time, default=None)
    self._end_time = to_datetime(end_time, default=None)
    self._num_files = num_files
    self._size_in_mb = size_in_mb
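# Usage sketch (illustrative, not part of the original source): the constructor
# above accepts either datetime objects or ISO-8601 strings and normalises both
# via to_datetime(). The enclosing class is not shown in this excerpt, so the
# name ``FileSetInfo`` below is an assumption.
def _example_fileset_info():
    # String and datetime inputs both end up as datetime attributes
    # (or None if they cannot be parsed, because default=None is passed).
    info = FileSetInfo(info_update_time='2016-05-26T12:00:00',
                       start_time='1995-01-01',
                       end_time=datetime(2015, 12, 31),
                       num_files=7670,
                       size_in_mb=42000)
    return info._start_time, info._end_time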
def resolve_base_paths(self, time_range: TimeRange = (None, None)) -> List[str]:
    """Return a list of all paths between the given times.

    For all dates, including the first and the last time, the wildcard in
    the pattern is resolved for the date.

    Parameters
    ----------
    time_range : a tuple of datetime or str, optional
        The *time_range*, if given, limits the dataset in time.
        The first date of the time range can be None if the file set has a
        *start_time*; in this case the *start_time* is used.
        The last date of the time range can be None if the file set has an
        *end_time*; in this case the *end_time* is used.
    """
    date1 = to_datetime(time_range[0],
                        default=self._fileset_info.start_time
                        if self._fileset_info else None)
    date2 = to_datetime(time_range[1],
                        default=self._fileset_info.end_time
                        if self._fileset_info else None)
    if date1 is None:
        raise ValueError("illegal time_range: can't determine start of interval")
    if date2 is None:
        raise ValueError("illegal time_range: can't determine end of interval")
    if date1 > date2:
        raise ValueError("start time '%s' is after end time '%s'" % (date1, date2))
    return [self._resolve_base_path(date1 + timedelta(days=i))
            for i in range((date2 - date1).days + 1)]
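# Illustrative call sketch (not from the original source): resolve_base_paths()
# takes a (start, end) pair of datetimes or date strings; a None entry falls
# back to the file set's own start_time/end_time. ``fileset_source`` is a
# placeholder for an instance of the enclosing (not shown) data source class.
def _example_resolve_base_paths(fileset_source):
    # The path pattern is resolved once per day, inclusive of both ends,
    # so a three-day range yields three base paths.
    return fileset_source.resolve_base_paths(('2007-01-01', '2007-01-03'))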
def temporal_agg(source: str,
                 start_date: str = None,
                 end_date: str = None,
                 var: VarNamesLike.TYPE = None,
                 level: str = 'mon',
                 method: str = 'mean',
                 save_data: bool = False,
                 monitor: Monitor = Monitor.NONE) -> (xr.Dataset, str):
    """
    Perform temporal aggregation of the given data source to the given level
    using the given method for the given time range. Only full time periods
    of the given time range will be aggregated.

    Depending on the given time range, data size, as well as internet
    connection quality, this operation can potentially take a very long time
    to finish.

    Careful consideration is needed when choosing the var parameter to create
    meaningful output; the appropriate choice is unique to each data source.

    The aggregation result is saved into the local data store for later reuse.

    :param source: Data source to aggregate
    :param start_date: Start date of aggregation. If not given, the data
           source start date is used instead.
    :param end_date: End date of aggregation. If not given, the data source
           end date is used instead.
    :param var: If given, only these dataset variables will be preserved in
           the result.
    :param level: Aggregation level
    :param method: Aggregation method
    :param save_data: Whether to save data downloaded during this operation.
           This can potentially be a lot of data.
    :param monitor: A progress monitor to use
    :return: The local data source identifier for the aggregated data
    """
    # Raise an error while the operation is not yet implemented
    raise ValueError("Operation is not implemented.")

    var = VarNamesLike.convert(var)

    # Select the appropriate data source
    data_store_list = DATA_STORE_REGISTRY.get_data_stores()
    data_sources = query_data_sources(data_store_list, name=source)
    if len(data_sources) == 0:
        raise ValueError("No data_source found for the given query "
                         "term {}".format(source))
    elif len(data_sources) > 1:
        raise ValueError("{} data_sources found for the given query "
                         "term {}".format(len(data_sources), source))

    data_source = data_sources[0]
    source_info = data_source.cache_info

    # We have to do this to have temporal coverage info in meta_info
    data_source._init_file_list()

    # Check if the data source temporal resolution is known
    known_res = ('day', '8-days', 'mon', 'yr')
    fq = data_source.meta_info['time_frequency']
    if (not fq) or (fq not in known_res):
        raise ValueError("The given data source features unknown time "
                         "resolution: {}".format(fq))

    # Check if the operation supports the desired aggregation step
    valid_steps = list()
    valid_steps.append(('day', 'mon'))
    if (fq, level) not in valid_steps:
        raise ValueError("Currently the operation does not support aggregation"
                         " from {} to {}".format(fq, level))

    # Determine start and end dates
    if not start_date:
        start_date = data_source.meta_info['temporal_coverage_start']
    start_date = to_datetime(start_date)
    # If start_date is not the start of the month, move it to the 1st of the
    # next month
    if start_date.day != 1:
        try:
            start_date = datetime(start_date.year, start_date.month + 1, 1)
        except ValueError:
            # We have tried to set the month to 13
            start_date = datetime(start_date.year + 1, 1, 1)

    if not end_date:
        end_date = data_source.meta_info['temporal_coverage_end']
    end_date = to_datetime(end_date)
    # If end_date is not the end of the month, move it to the last day of the
    # previous month
    if not _is_end_of_month(end_date):
        try:
            end_date = datetime(end_date.year, end_date.month - 1, 27)
        except ValueError:
            # We have tried to set the month to 0
            end_date = datetime(end_date.year - 1, 12, 31)
        end_date = _end_of_month(end_date.year, end_date.month)

    # Determine the count of processing periods
    n_periods = ((end_date.year - start_date.year + 1) * 12
                 + end_date.month - start_date.month - 11)
    # e.g. 2000-04-01, 2000-06-30 -> 12 + 2 - 11 = 3

    if n_periods < 1:
        raise ValueError("The given time range does not contain any full "
                         "calendar months to do aggregation with.")

    # Set up the monitor
    total_work = 100
    with monitor.starting('Aggregate', total_work=total_work):
        monitor.progress(work=0)
        step = total_work * 0.9 / n_periods

        # Process the data source period by period
        tmin = start_date
        while tmin < end_date:
            tmax = _end_of_month(tmin.year, tmin.month)

            # Determine if the data for the given period are already downloaded.
            # If at least one file of the given time range is present, we
            # don't delete the data for this period; we do the syncing anyway.
            was_already_downloaded = False
            dt_range = to_datetime_range(tmin, tmax)
            for date in source_info:
                if dt_range[0] <= date <= dt_range[1]:
                    was_already_downloaded = True
                    # One is enough
                    break

            worked = monitor._worked
            data_source.sync(dt_range, monitor=monitor.child(work=step * 0.9))
            if worked == monitor._worked:
                monitor.progress(work=step * 0.9)

            ds = data_source.open_dataset(dt_range)

            # Filter the dataset
            ds = select_var(ds, var)

            # Do the aggregation

            # Save the dataset for this period into the local data store

            # Close and delete the files if needed
            ds.close()
            # Delete data for the current period, if it should be deleted and
            # it was not already downloaded.
            if (not save_data) and (not was_already_downloaded):
                data_source.delete_local(dt_range)

            monitor.progress(work=step * 0.1)

            # tmin for the next iteration
            try:
                tmin = datetime(tmin.year, tmin.month + 1, 1)
            except ValueError:
                # Couldn't add a month -> end of year
                tmin = datetime(tmin.year + 1, 1, 1)

        monitor.progress(work=step * 0.1)

    # Return the local data source id
    return None
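# The month helpers _is_end_of_month() and _end_of_month() used by temporal_agg()
# are not part of this excerpt. A minimal sketch of the behaviour the code above
# relies on, built on the standard library, could look like the following; the
# original module presumably ships its own implementations.
import calendar


def _end_of_month(year: int, month: int) -> datetime:
    # Last calendar day of the given month, as a datetime at midnight,
    # e.g. _end_of_month(2000, 2) -> datetime(2000, 2, 29).
    return datetime(year, month, calendar.monthrange(year, month)[1])


def _is_end_of_month(date: datetime) -> bool:
    # True if the date falls on the last day of its month,
    # e.g. 2000-02-29 -> True, 2000-02-28 -> False.
    return date.day == calendar.monthrange(date.year, date.month)[1]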