Ejemplo n.º 1
0
    def test_catalog_binning_and_filtering_acceptance(self):
        """Check gridded space-magnitude counts against direct catalog filtering.

        For every magnitude bin, the cumulative and incremental event counts
        taken from the gridded data set must equal the event counts obtained
        by filtering the catalog on the same magnitude thresholds.
        """
        # create space-magnitude region: California RELM grid with 0.1-unit
        # magnitude bins spanning [4.5, 10.05)
        region = regions.create_space_magnitude_region(
            regions.california_relm_region(),
            regions.magnitude_bins(4.5, 10.05, 0.1)
        )

        # read catalog, keeping only events at or above the lowest bin edge
        # (no placeholder needed, so a plain string literal is used)
        comcat = csep.load_catalog(comcat_path(), region=region).filter("magnitude >= 4.5")

        # create gridded data set from the binned catalog counts
        d = forecasts.MarkedGriddedDataSet(
            data=comcat.spatial_magnitude_counts(),
            region=comcat.region,
            magnitudes=comcat.region.magnitudes
        )

        for idm, m_min in enumerate(d.magnitudes):
            # catalog filtered cumulatively: all events with magnitude >= m_min
            c = comcat.filter([f'magnitude >= {m_min}'], in_place=False)
            # catalog filtered incrementally: events within [m_min, m_min + 0.1)
            c_int = comcat.filter([f'magnitude >= {m_min}', f'magnitude < {m_min + 0.1}'], in_place=False)
            # cumulative count from the gridded data set (this bin and above)
            gs = d.data[:, idm:].sum()
            # incremental count from the gridded data set (this bin only)
            gs_int = d.data[:, idm].sum()
            # event count from filtered catalog and events in binned data should be the same
            numpy.testing.assert_equal(gs, c.event_count)
            numpy.testing.assert_equal(gs_int, c_int.event_count)
Ejemplo n.º 2
0
def test_JmaCsvCatalog_loading():
    """Load the JMA CSV test catalog and sanity-check its accessors.

    Verifies the expected event count, checks that every per-event accessor
    (magnitudes, depths, coordinates, times) returns one value per event,
    and cross-checks the datetimes against the stored origin_time epochs.
    """
    datadir = get_datadir()
    csv_file = os.path.join(datadir, 'test.csv')

    test_catalog = csep.load_catalog(csv_file, type='jma-csv')

    assert len(test_catalog.catalog) == 22284, 'invalid number of events in observed_catalog object'

    _dummy = test_catalog.get_magnitudes()
    assert len(_dummy) == len(test_catalog.catalog)

    _dummy = test_catalog.get_depths()
    assert len(_dummy) == len(test_catalog.catalog)

    _dummy = test_catalog.get_longitudes()
    assert len(_dummy) == len(test_catalog.catalog)

    _dummy = test_catalog.get_latitudes()
    assert len(_dummy) == len(test_catalog.catalog)

    _dummy = test_catalog.get_epoch_times()
    assert len(_dummy) == len(test_catalog.catalog)

    _dummy = test_catalog.get_datetimes()
    assert len(_dummy) == len(test_catalog.catalog)

    # assert (d[0].timestamp() * 1000.) == c.observed_catalog['timestamp'][0]

    # Convert the datetimes back to epoch milliseconds; round() guards
    # against floating-point jitter in datetime.timestamp().
    # NOTE: previously this allocated an *uninitialized* int64 ndarray and
    # called .fill(numpy.nan), which is invalid for integer dtypes (NaN
    # cannot be cast to int and raises on modern NumPy); build the array
    # directly from the converted values instead.
    _datetimes = numpy.array(
        [round(1000. * _val.timestamp()) for _val in _dummy], dtype='<i8')

    numpy.testing.assert_allclose(_datetimes, test_catalog.catalog['origin_time'],
                                  err_msg='timestamp mismatch',
                                  verbose=True, rtol=0, atol=0)
Ejemplo n.º 3
0
    def setUp(self):
        """Load the example ComCat catalog fixture shared by tests in this case.

        ``comcat_example_catalog_fname`` is presumably a module-level path
        constant defined elsewhere in this file — confirm against imports.
        """
        self.test_catalog = csep.load_catalog(comcat_example_catalog_fname)
def process_ucerf3_forecast(config):
    """ Post-processing script for ucerf3-forecasts

    Program will perform N, M, and S tests and write out evaluation results.

    Args:
        config (dict): contents of configuration needed to run the job

    Side effects:
        Mutates ``config`` in place with resolved paths and time-horizon
        epochs, writes spatial counts and test results into
        ``config['output_dir']``, and terminates the process via
        ``sys.exit(-1)`` if no forecast file can be found.
    """
    # Get directory of forecast file from simulation manifest
    forecast_dir = get_forecast_filepath(config['simulation_list'],
                                         config['job_idx'])
    config.update({'forecast_dir': forecast_dir})
    print(f"Working on forecast in {config['forecast_dir']}.")

    # Search for forecast files: prefer the gzip-compressed results,
    # fall back to the uncompressed binary, abort if neither exists.
    forecast_path = os.path.join(forecast_dir, 'results_complete.bin.gz')
    if not os.path.exists(forecast_path):
        print(
            f"Did not find a forecast at {forecast_path}. Looking for uncompressed version.",
            flush=True)
        forecast_path = os.path.join(forecast_dir, 'results_complete.bin')
        if not os.path.exists(forecast_path):
            # plain string literal: no placeholder, so no f-prefix needed
            print("Unable to find uncompressed forecast. Aborting.",
                  flush=True)
            sys.exit(-1)
    config['forecast_path'] = forecast_path
    print(f"Found forecast file at {config['forecast_path']}.")

    # Create output directory
    mkdirs(config['output_dir'])

    # Initialize processing tasks from the UCERF3-ETAS run configuration
    print(f"Processing forecast at {forecast_path}.", flush=True)
    config_path = os.path.join(config['forecast_dir'], 'config.json')
    with open(config_path) as json_file:
        u3etas_config = json.load(json_file)

    # Time horizon of the forecast (epoch milliseconds)
    start_epoch = u3etas_config['startTimeMillis']
    end_epoch = start_epoch + config['forecast_duration_millis']
    config['start_epoch'] = start_epoch
    config['end_epoch'] = end_epoch

    # Create region information from configuration file
    region_config = config['region_information']
    region = create_space_magnitude_region(region_config['name'],
                                           region_config['min_mw'],
                                           region_config['max_mw'],
                                           region_config['dmw'])
    min_magnitude = region.magnitudes[0]

    # Set up filters for forecast and catalogs
    filters = [
        f'origin_time >= {start_epoch}', f'origin_time < {end_epoch}',
        f'magnitude >= {min_magnitude}'
    ]

    # Forecast, note: filters are applied when iterating through the forecast
    forecast_basename = os.path.basename(config['forecast_dir'])
    forecast = load_catalog_forecast(forecast_path,
                                     type='ucerf3',
                                     name=f'ucerf3-{forecast_basename}',
                                     region=region,
                                     filters=filters,
                                     filter_spatial=True,
                                     apply_filters=True)

    # Sanity check to ensure that forecasts are filtered properly: every
    # non-empty synthetic catalog's minimum magnitude should respect the
    # magnitude filter above.
    min_mws = []
    for catalog in forecast:
        if catalog.event_count > 0:
            min_mws.append(catalog.get_magnitudes().min())
    print(
        f"Overall minimum magnitude of catalogs in forecast: {np.min(min_mws)}"
    )

    # Compute expected rates for spatial test and magnitude test
    _ = forecast.get_expected_rates()
    sc = forecast.expected_rates.spatial_counts()
    sc_path = os.path.join(
        config['output_dir'],
        create_output_filepath(config['forecast_dir'],
                               'spatial_counts_arr-f8.bin'))
    with open(sc_path, 'wb') as sc_file:
        print(f"Writing spatial counts to {sc_path}")
        sc.tofile(sc_file)

    # Prepare evaluation catalog with the same region and filters
    eval_catalog = load_catalog(config['catalog_path'],
                                region=region,
                                filters=filters,
                                name='comcat',
                                apply_filters=True)

    # Compute and store number test
    print("Computing number-test on forecast.")
    ntest_result = catalog_evaluations.number_test(forecast, eval_catalog)
    ntest_path = os.path.join(
        config['output_dir'],
        create_output_filepath(config['forecast_dir'], 'ntest_result.json'))
    try:
        write_json(ntest_result, ntest_path)
        config['ntest_path'] = ntest_path
        print(f"Writing outputs to {config['ntest_path']}.")
    except IOError:
        print("Unable to write n-test result.")

    # Compute and store magnitude test
    print("Computing magnitude-test on forecast.")
    mtest_result = catalog_evaluations.magnitude_test(forecast, eval_catalog)
    mtest_path = os.path.join(
        config['output_dir'],
        create_output_filepath(config['forecast_dir'], 'mtest_result.json'))
    try:
        write_json(mtest_result, mtest_path)
        config['mtest_path'] = mtest_path
        print(f"Writing outputs to {config['mtest_path']}.")
    except IOError:
        print("Unable to write m-test result.")

    # Compute and store spatial test. The broader except clause is kept:
    # spatial-test results can fail JSON serialization (TypeError/ValueError)
    # in addition to plain I/O errors.
    print("Computing spatial test on forecast.")
    stest_path = os.path.join(
        config['output_dir'],
        create_output_filepath(config['forecast_dir'], 'stest_result.json'))
    stest_result = catalog_evaluations.spatial_test(forecast, eval_catalog)
    try:
        write_json(stest_result, stest_path)
        config['stest_path'] = stest_path
        # report the output path like the n-test and m-test branches do
        print(f"Writing outputs to {config['stest_path']}.")
    except (IOError, TypeError, ValueError):
        print("Unable to write s-test result.")

    # Write calculation configuration so the run can be reproduced/audited
    config_path = os.path.join(
        config['output_dir'],
        create_output_filepath(config['forecast_dir'], 'meta.json'))
    print(f"Saving run-time configuration to {config_path}.")
    with open(config_path, 'w') as f:
        json.dump(config, f, indent=4, separators=(',', ': '))
Ejemplo n.º 5
0
    len(u3catalogs_nofaults), (t1 - t0)))
# Collect event counts (magnitude > 3.95) for each no-faults UCERF3 catalog.
# NOTE(review): `u3catalogs_nofaults` and `nofaults_numbers` are defined
# earlier in the file, outside this excerpt.
for u3catalog_nofaults in u3catalogs_nofaults:
    # progress report every 500 catalogs, keyed on the catalog's id
    if u3catalog_nofaults.catalog_id % 500 == 0:
        print('Loaded {} catalogs'.format(u3catalog_nofaults.catalog_id))
    nofaults_numbers.append(
        u3catalog_nofaults.filter('magnitude > 3.95').get_number_of_events())

# Comcat Synthetics
# Download the observed ComCat catalog for the same one-year window over a
# California bounding box, then apply the same magnitude filter as above.
epoch_time = 709732655000  # forecast start time in epoch milliseconds
duration_in_years = 1.0
t0 = time.time()
comcat = load_catalog(type='comcat',
                      format='native',
                      start_epoch=epoch_time,
                      duration_in_years=1.0,
                      min_magnitude=2.55,
                      min_latitude=31.50,
                      max_latitude=43.00,
                      min_longitude=-125.40,
                      max_longitude=-113.10,
                      name='Comcat').filter('magnitude > 3.95')
comcat_count = comcat.get_number_of_events()
t1 = time.time()

# Statements about Comcat Downloads
print("Fetched Comcat catalog in {} seconds.\n".format(t1 - t0))
print("Downloaded Comcat Catalog with following parameters")
print("Start Date: {}\nEnd Date: {}".format(str(comcat.start_time),
                                            str(comcat.end_time)))
print("Min Latitude: {} and Max Latitude: {}".format(comcat.min_latitude,
                                                     comcat.max_latitude))
print("Min Longitude: {} and Max Longitude: {}".format(comcat.min_longitude,