def test_filter_with_datetime_in_place_list(self):
    """Filtering with a list of datetime bounds (in_place=True) keeps only event 2."""
    lower = strptime_to_utc_datetime('2009-07-01 00:00:00.0')
    upper = strptime_to_utc_datetime('2010-07-01 00:00:00.0')
    statements = [f'datetime > {lower}', f'datetime < {upper}']
    # work on a copy so the shared fixture catalog is not mutated
    catalog = copy.deepcopy(self.test_cat1)
    catalog = catalog.filter(statements, in_place=True)
    expected = numpy.array([b'2'], dtype='S256')
    numpy.testing.assert_equal(expected, catalog.get_event_ids())
def _parse_datetime_to_zmap(date, time):
    """ Helping function to return datetime in zmap format.

        Args:
            date: string record from .ndk file
            time: string record from .ndk file

        Returns:
            out: dictionary with keys 'year', 'month', 'day', 'hour',
                 'minute', 'second'
    """
    # ndk records can carry a leap-second-style ":60.0"; normalize the
    # seconds field and carry the overflow into the minute afterwards.
    needs_minute_bump = ":60.0" in time
    if needs_minute_bump:
        time = time.replace(":60.0", ":0.0")
    try:
        moment = strptime_to_utc_datetime(date + " " + time,
                                          format="%Y/%m/%d %H:%M:%S.%f")
    except (TypeError, ValueError):
        msg = ("Could not parse date/time string '%s' and '%s' to a valid "
               "time" % (date, time))
        raise RuntimeError(msg)
    if needs_minute_bump:
        moment += datetime.timedelta(minutes=1)
    return {
        'year': moment.year,
        'month': moment.month,
        'day': moment.day,
        'hour': moment.hour,
        'minute': moment.minute,
        'second': moment.second,
    }
def parse_filename(filename):
    """Split a forecast filename into its name and UTC start time.

    Expects unix-style paths whose basename (up to the first '.') has the
    form <name>_<timestamp>, timestamp formatted %Y-%m-%dT%H-%M-%S-%f.
    """
    # this works for unix
    stem = str(os.path.basename(filename.rstrip('/')).split('.')[0])
    parts = stem.split('_')
    start_time = strptime_to_utc_datetime(parts[1],
                                          format="%Y-%m-%dT%H-%M-%S-%f")
    return (parts[0], start_time)
def _none_or_datetime(value): if isinstance(value, datetime.datetime): return value if value is not None: format = parse_string_format(value) value = strptime_to_utc_datetime(value, format=format) return value
def test_filter_with_datetime(self):
    """A single upper-bound datetime filter (in_place=False) keeps events 1 and 2."""
    upper = strptime_to_utc_datetime('2010-07-01 00:00:00.0')
    statement = f'datetime < {upper}'
    # work on a copy so the shared fixture catalog is not mutated
    catalog = copy.deepcopy(self.test_cat1)
    result = catalog.filter(statement, in_place=False)
    expected = numpy.array([b'1', b'2'], dtype='S256').T
    numpy.testing.assert_equal(expected, result.get_event_ids())
def load_catalog_forecast(fname, catalog_loader=None, format='native',
                          type='ascii', **kwargs):
    """ General function to handle loading catalog forecasts.

        Currently, just a simple wrapper, but can contain more complex logic
        in the future.

        Args:
            fname (str): pathname to the forecast file or directory containing the forecast files
            catalog_loader (func): callable that can load catalogs, see load_stochastic_event_sets above.
            format (str): either 'native' or 'csep'. if 'csep', will attempt to be returned into csep catalog format.
                          used to convert between observed_catalog type.
            type (str): either 'ucerf3' or 'csep', determines the catalog format of the forecast. if loader is
                        provided, then this parameter is ignored.
            **kwargs: other keyword arguments passed to the :class:`csep.core.forecasts.CatalogForecast`.

        Returns:
            :class:`csep.core.forecasts.CatalogForecast`
    """
    # sanity checks
    if not os.path.exists(fname):
        raise FileNotFoundError(
            f"Could not locate file {fname}. Unable to load forecast.")
    # sanity checks
    if catalog_loader is not None and not callable(catalog_loader):
        raise AttributeError(
            "Loader must be callable. Unable to load forecast.")
    # factory methods for loading different types of catalogs
    catalog_loader_mapping = {
        'ascii': catalogs.CSEPCatalog.load_ascii_catalogs,
        'ucerf3': catalogs.UCERF3Catalog.load_catalogs
    }
    if catalog_loader is None:
        catalog_loader = catalog_loader_mapping[type]
    # try and parse information from filename and send to forecast constructor
    if format == 'native' and type == 'ascii':
        try:
            basename = str(os.path.basename(fname.rstrip('/')).split('.')[0])
            split_fname = basename.split('_')
            name = split_fname[0]
            start_time = strptime_to_utc_datetime(
                split_fname[1], format="%Y-%m-%dT%H-%M-%S-%f")
            # update kwargs
            _ = kwargs.setdefault('name', name)
            _ = kwargs.setdefault('start_time', start_time)
        except (IndexError, ValueError, TypeError):
            # Best-effort metadata only: filenames not matching the
            # <name>_<timestamp> convention simply contribute no kwargs.
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer silently swallowed.
            pass
    # create observed_catalog forecast
    return CatalogForecast(filename=fname, loader=catalog_loader,
                           catalog_format=format, catalog_type=type,
                           **kwargs)
# # Most of the core functionality can be imported from the top-level :mod:`csep` package. Utilities are available from the # :mod:`csep.utils` subpackage. import csep from csep.utils import datasets, time_utils #################################################################################################################################### # Define forecast properties # -------------------------- # # We choose a :ref:`time-independent-forecast` to show how to evaluate a grid-based earthquake forecast using PyCSEP. Note, # the start and end date should be chosen based on the creation of the forecast. This is important for time-independent forecasts # because they can be rescale to any arbitrary time period. start_date = time_utils.strptime_to_utc_datetime('2006-11-12 00:00:00.0') end_date = time_utils.strptime_to_utc_datetime('2011-11-12 00:00:00.0') #################################################################################################################################### # Load forecast # ------------- # # For this example, we provide the example forecast data set along with the main repository. The filepath is relative # to the root directory of the package. You can specify any file location for your forecasts. forecast = csep.load_gridded_forecast(datasets.helmstetter_mainshock_fname, start_date=start_date, end_date=end_date, name='helmstetter_mainshock') ####################################################################################################################################
def test_strptime_to_utc_datetime(self):
    """Parsing a '.760' timestring yields a UTC-aware datetime with 760000 microseconds."""
    timestring = '1984-04-24 21:15:18.760'
    # note, the microseconds. .760 = 760000 microseconds
    expected = datetime.datetime(1984, 4, 24, 21, 15, 18, 760000,
                                 tzinfo=datetime.timezone.utc)
    self.assertEqual(strptime_to_utc_datetime(timestring), expected)
# # Most of the core functionality can be imported from the top-level :mod:`csep` package. Utilities are available from the # :mod:`csep.utils` subpackage. import csep from csep.core import regions, catalog_evaluations from csep.utils import datasets, time_utils #################################################################################################################################### # Define start and end times of forecast # -------------------------------------- # # Forecasts should define a time horizon in which they are valid. The choice is flexible for catalog-based forecasts, because # the catalogs can be filtered to accommodate multiple end-times. Conceptually, these should be separate forecasts. start_time = time_utils.strptime_to_utc_datetime("1992-06-28 11:57:34.14") end_time = time_utils.strptime_to_utc_datetime("1992-07-28 11:57:34.14") #################################################################################################################################### # Define spatial and magnitude regions # ------------------------------------ # # Before we can compute the bin-wise rates we need to define a spatial region and a set of magnitude bin edges. The magnitude # bin edges # are the lower bound (inclusive) except for the last bin, which is treated as extending to infinity. We can # bind these # to the forecast object. This can also be done by passing them as keyword arguments # into :func:`csep.load_catalog_forecast`. # Magnitude bins properties min_mw = 4.95 max_mw = 8.95 dmw = 0.1