Example #1
from geopandas import GeoDataFrame

# get_total_obs_eqs is assumed to be importable from the enclosing package


def subdivide_observed_eqs(bin_gdf: GeoDataFrame, subcat_n_years: int):

    # collate earthquakes from bins
    obs_eqs = get_total_obs_eqs(bin_gdf, prospective=False)

    # divide earthquakes into groups, starting with the first observed year.
    # this could be changed to account for years with no events bounding the
    # catalog, but that would mean refactoring of the input yaml.
    obs_eqs.sort(key=lambda x: x.time)

    first_year = obs_eqs[0].time.year
    interval_start = first_year
    last_year = obs_eqs[-1].time.year

    n_eqs = []
    while (interval_start + subcat_n_years) <= last_year:
        interval_end = interval_start + subcat_n_years
        n_eqs.append(
            len([
                eq for eq in obs_eqs
                if (interval_start <= eq.time.year <= interval_end)
            ]))
        # intervals are inclusive on both ends, so step past interval_end
        interval_start += subcat_n_years + 1

    return n_eqs
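
A minimal usage sketch; the values are illustrative, and `bin_gdf` is assumed to be a GeoDataFrame of SpacemagBin objects built by the package's data-loading helpers:

# hypothetical usage: count observed earthquakes in consecutive windows,
# each spanning subcat_n_years + 1 calendar years (inclusive endpoints)
counts = subdivide_observed_eqs(bin_gdf, subcat_n_years=10)
print(counts)  # e.g., [3, 5, 2]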
Example #2
from itertools import chain

import numpy as np
from geopandas import GeoDataFrame
from numpy import unique

# get_total_obs_eqs, get_model_annual_eq_rate, s_test_gdf_series, and
# get_n_eqs_from_mfd are assumed to be importable from the enclosing package


def s_test_function(
    bin_gdf: GeoDataFrame,
    t_yrs: float,
    n_iters: int,
    likelihood_fn: str,
    prospective: bool = False,
    critical_pct: float = 0.25,
    not_modeled_likelihood: float = 0.0,
    append_results: bool = False,
):
    # observed vs. model-predicted earthquake counts over the investigation time
    N_obs = len(get_total_obs_eqs(bin_gdf, prospective=prospective))
    N_pred = get_model_annual_eq_rate(bin_gdf) * t_yrs
    N_norm = N_obs / N_pred

    bin_like_cfg = {
        "investigation_time": t_yrs,
        "likelihood_fn": likelihood_fn,
        "not_modeled_likelihood": not_modeled_likelihood,
        "n_iters": n_iters,
    }
    bin_likes = s_test_gdf_series(bin_gdf, bin_like_cfg, N_norm)

    # per-bin observed likelihoods, stochastic likelihoods (n_iters x n_bins),
    # and any bins flagged during the per-bin likelihood calculation
    obs_likes = np.array([bl[0] for bl in bin_likes])
    stoch_likes = np.vstack([bl[1] for bl in bin_likes]).T
    bad_bins = list(unique(list(chain(*[bl[2] for bl in bin_likes]))))

    obs_like_total = sum(obs_likes)
    stoch_like_totals = np.sum(stoch_likes, axis=1)

    if append_results:
        # record per-bin S-test percentiles on the GeoDataFrame
        bin_pcts = []
        for i, obs_like in enumerate(obs_likes):
            stoch_like = stoch_likes[:, i]
            bin_pct = len(stoch_like[stoch_like <= obs_like]) / n_iters
            bin_pcts.append(bin_pct)
        bin_gdf["S_bin_pct"] = bin_pcts

        bin_gdf["N_model"] = bin_gdf.SpacemagBin.apply(
            lambda x: get_n_eqs_from_mfd(x.get_rupture_mfd()) * t_yrs)

        bin_gdf["N_obs"] = bin_gdf.SpacemagBin.apply(
            lambda x: get_n_eqs_from_mfd(x.observed_earthquakes))

    # fraction of stochastic catalogs with a total likelihood at or below the
    # observed total; the test passes if this percentile meets critical_pct
    pctile = (len(stoch_like_totals[stoch_like_totals <= obs_like_total]) /
              n_iters)

    test_pass = pctile >= critical_pct
    test_res = "Pass" if test_pass else "Fail"

    test_result = {
        "critical_pct": critical_pct,
        "percentile": pctile,
        "test_pass": bool(test_pass),
        "test_res": test_res,
        "bad_bins": bad_bins,
    }

    return test_result
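
A hypothetical invocation; the `likelihood_fn` value and the other parameter values are illustrative assumptions, not documented options:

# illustrative call; "poisson" is an assumed likelihood_fn name
s_test_res = s_test_function(
    bin_gdf,
    t_yrs=40.0,
    n_iters=1000,
    likelihood_fn="poisson",
    critical_pct=0.25,
    append_results=True,
)
print(s_test_res["test_res"], round(s_test_res["percentile"], 3))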
Example #3
def test_get_total_obs_eqs(self):
    obs_eqs = get_total_obs_eqs(self.bin_gdf)
    # just test some aspects instead of instantiating Earthquakes
    for eq in obs_eqs:
        assert eq.magnitude in [7.4, 7.54, 6.07, 5.95, 6.26, 6.44]
    assert len(obs_eqs) == 6
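
For context, a sketch of the unittest scaffolding such a method would sit in; the class name and fixture construction below are hypothetical:

import unittest

class TestObsEqs(unittest.TestCase):  # hypothetical class name
    def setUp(self):
        # build a small bin_gdf fixture; the real test suite loads
        # project-specific test data here
        self.bin_gdf = build_test_bin_gdf()  # hypothetical helper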