Example #1
    def test_mag_greater_8pt5(self):
        gmpe = SadighEtAl1997()

        sctx = SitesContext()
        rctx = RuptureContext()
        dctx = DistancesContext()

        rctx.rake = 0.0
        dctx.rrup = numpy.array([0., 1.])
        sctx.vs30 = numpy.array([800., 800.])

        rctx.mag = 9.0
        mean_rock_9, _ = gmpe.get_mean_and_stddevs(sctx, rctx, dctx, PGA(),
                                                   [StdDev.TOTAL])
        rctx.mag = 8.5
        mean_rock_8pt5, _ = gmpe.get_mean_and_stddevs(sctx, rctx, dctx, PGA(),
                                                      [StdDev.TOTAL])
        numpy.testing.assert_allclose(mean_rock_9, mean_rock_8pt5)

        sctx.vs30 = numpy.array([300., 300.])
        rctx.mag = 9.0
        mean_soil_9, _ = gmpe.get_mean_and_stddevs(sctx, rctx, dctx, PGA(),
                                                   [StdDev.TOTAL])
        rctx.mag = 8.5
        mean_soil_8pt5, _ = gmpe.get_mean_and_stddevs(sctx, rctx, dctx, PGA(),
                                                      [StdDev.TOTAL])
        numpy.testing.assert_allclose(mean_soil_9, mean_soil_8pt5)
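
The snippets on this page are shown without their imports. Below is a minimal standalone sketch of the same three-context call pattern with illustrative scenario values; the import paths are assumptions (older hazardlib releases expose the context classes from openquake.hazardlib.gsim.base rather than openquake.hazardlib.contexts), and it assumes a hazardlib version that still accepts the (sctx, rctx, dctx) signature used in these tests.

import numpy
from openquake.hazardlib.contexts import (
    SitesContext, RuptureContext, DistancesContext)
from openquake.hazardlib.gsim.sadigh_1997 import SadighEtAl1997
from openquake.hazardlib.imt import PGA
from openquake.hazardlib.const import StdDev

gmpe = SadighEtAl1997()
sctx = SitesContext()
sctx.sids = numpy.array([0, 1])  # site ids; newer hazardlib releases expect them
sctx.vs30 = numpy.array([800.0, 800.0])
rctx = RuptureContext()
rctx.mag = 7.0
rctx.rake = 0.0
dctx = DistancesContext()
dctx.rrup = numpy.array([10.0, 50.0])
mean, [total_std] = gmpe.get_mean_and_stddevs(
    sctx, rctx, dctx, PGA(), [StdDev.TOTAL])
print(numpy.exp(mean))  # median PGA in g at the two sites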
Example #2
    def test_mag_greater_8pt5(self):
        gmpe = SadighEtAl1997()

        sctx = SitesContext()
        rctx = RuptureContext()
        dctx = DistancesContext()

        rctx.rake = 0.0
        dctx.rrup = numpy.array([0., 1.])
        sctx.vs30 = numpy.array([800., 800.])

        rctx.mag = 9.0
        mean_rock_9, _ = gmpe.get_mean_and_stddevs(
            sctx, rctx, dctx, PGA(), [StdDev.TOTAL]
        )
        rctx.mag = 8.5
        mean_rock_8pt5, _ = gmpe.get_mean_and_stddevs(
            sctx, rctx, dctx, PGA(), [StdDev.TOTAL]
        )
        numpy.testing.assert_allclose(mean_rock_9, mean_rock_8pt5)

        sctx.vs30 = numpy.array([300., 300.])
        rctx.mag = 9.0
        mean_soil_9, _ = gmpe.get_mean_and_stddevs(
            sctx, rctx, dctx, PGA(), [StdDev.TOTAL]
        )
        rctx.mag = 8.5
        mean_soil_8pt5, _ = gmpe.get_mean_and_stddevs(
            sctx, rctx, dctx, PGA(), [StdDev.TOTAL]
        )
        numpy.testing.assert_allclose(mean_soil_9, mean_soil_8pt5)
Example #3
    def test_mag_dist_outside_range(self):
        sctx = SitesContext()
        rctx = RuptureContext()
        dctx = DistancesContext()

        # rupture with Mw = 3 (Mblg=2.9434938048208452) at rhypo = 1 must give
        # same mean as rupture with Mw = 4.4 (Mblg=4.8927897867183798) at
        # rhypo = 10
        rctx.mag = 2.9434938048208452
        dctx.rhypo = numpy.array([1])
        mean_mw3_d1, _ = self.GSIM_CLASS().get_mean_and_stddevs(
            sctx, rctx, dctx, SA(0.1, 5), [StdDev.TOTAL])

        rctx.mag = 4.8927897867183798
        dctx.rhypo = numpy.array([10])
        mean_mw4pt4_d10, _ = self.GSIM_CLASS().get_mean_and_stddevs(
            sctx, rctx, dctx, SA(0.1, 5), [StdDev.TOTAL])

        self.assertAlmostEqual(float(mean_mw3_d1), float(mean_mw4pt4_d10))

        # rupture with Mw = 9 (Mblg = 8.2093636421088814) at rhypo = 1500 km
        # must give same mean as rupture with Mw = 8.2
        # (Mblg = 7.752253535347597) at rhypo = 1000
        rctx.mag = 8.2093636421088814
        dctx.rhypo = numpy.array([1500.])
        mean_mw9_d1500, _ = self.GSIM_CLASS().get_mean_and_stddevs(
            sctx, rctx, dctx, SA(0.1, 5), [StdDev.TOTAL])

        rctx.mag = 7.752253535347597
        dctx.rhypo = numpy.array([1000.])
        mean_mw8pt2_d1000, _ = self.GSIM_CLASS().get_mean_and_stddevs(
            sctx, rctx, dctx, SA(0.1, 5), [StdDev.TOTAL])

        self.assertAlmostEqual(mean_mw9_d1500, mean_mw8pt2_d1000)
Example #4
 def test_get_mean_table(self, idx=0):
     """
     Test the retrieval of the mean amplification tables for a given
     magnitude and IMT
     """
     rctx = RuptureContext()
     rctx.mag = 6.0
     # PGA
     expected_table = np.ones([10, 2])
     expected_table[:, self.IDX] *= 1.5
     np.testing.assert_array_almost_equal(
         self.amp_table.get_mean_table(imt_module.PGA(), rctx),
         expected_table)
     # SA
     expected_table[:, self.IDX] = 2.0 * np.ones(10)
     np.testing.assert_array_almost_equal(
         self.amp_table.get_mean_table(imt_module.SA(0.5), rctx),
         expected_table)
     # SA (period interpolation)
     interpolator = interp1d(np.log10(self.amp_table.periods),
                             np.log10(np.array([1.5, 2.0, 0.5])))
     period = 0.3
     expected_table[:, self.IDX] = (
         10.0 ** interpolator(np.log10(period))) * np.ones(10)
     np.testing.assert_array_almost_equal(
         self.amp_table.get_mean_table(imt_module.SA(period), rctx),
         expected_table)
Example #5
 def test_get_mean_and_stddevs(self):
     """
     Tests mean and standard deviations without amplification
     """
     gsim = GMPETable(gmpe_table=self.TABLE_FILE)
     rctx = RuptureContext()
     rctx.mag = 6.0
     dctx = DistancesContext()
     # Test values at the given distances and those outside range
     dctx.rjb = np.array([0.5, 1.0, 10.0, 100.0, 500.0])
     sctx = SitesContext()
     stddevs = [const.StdDev.TOTAL]
     expected_mean = np.array([2.0, 2.0, 1.0, 0.5, 1.0E-20])
     # PGA
     mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
                                             imt_module.PGA(),
                                             stddevs)
     np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
     np.testing.assert_array_almost_equal(sigma[0], 0.5 * np.ones(5), 5)
     # SA
     mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
                                             imt_module.SA(1.0),
                                             stddevs)
     np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
     np.testing.assert_array_almost_equal(sigma[0], 0.8 * np.ones(5), 5)
     # PGV
     mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
                                             imt_module.PGV(),
                                             stddevs)
     np.testing.assert_array_almost_equal(np.exp(mean),
                                          10. * expected_mean,
                                          5)
     np.testing.assert_array_almost_equal(sigma[0], 0.5 * np.ones(5), 5)
Example #6
 def test_get_mean_and_stddevs_good(self):
     """
     Tests the full execution of the GMPE tables for valid data
     """
     gsim = GMPETable(gmpe_table=self.TABLE_FILE)
     rctx = RuptureContext()
     rctx.mag = 6.0
     dctx = DistancesContext()
     # Test values at the given distances and those outside range
     dctx.rjb = np.array([0.5, 1.0, 10.0, 100.0, 500.0])
     sctx = SitesContext()
     sctx.vs30 = 1000. * np.ones(5)
     stddevs = [const.StdDev.TOTAL]
     expected_mean = np.array([2.0, 2.0, 1.0, 0.5, 1.0E-20])
     expected_sigma = 0.25 * np.ones(5)
     # PGA
     mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
                                             imt_module.PGA(), stddevs)
     np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
     np.testing.assert_array_almost_equal(sigma[0], expected_sigma, 5)
     # SA
     mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
                                             imt_module.SA(1.0), stddevs)
     np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
     np.testing.assert_array_almost_equal(sigma[0], 0.4 * np.ones(5), 5)
     # PGV
     mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
                                             imt_module.PGV(), stddevs)
     np.testing.assert_array_almost_equal(np.exp(mean), 10. * expected_mean,
                                          5)
     np.testing.assert_array_almost_equal(sigma[0], expected_sigma, 5)
Example #7
 def test_get_mean_table(self, idx=0):
     """
     Test the retrieval of the mean amplification tables for a given
     magnitude and IMT
     """
     ctx = RuptureContext()
     ctx.mag = 6.0
     # PGA
     expected_table = np.ones([10, 2])
     expected_table[:, self.IDX] *= 1.5
     np.testing.assert_array_almost_equal(
         self.amp_table.get_mean_table(imt_module.PGA(), ctx),
         expected_table)
     # SA
     expected_table[:, self.IDX] = 2.0 * np.ones(10)
     np.testing.assert_array_almost_equal(
         self.amp_table.get_mean_table(imt_module.SA(0.5), ctx),
         expected_table)
     # SA (period interpolation)
     interpolator = interp1d(np.log10(self.amp_table.periods),
                             np.log10(np.array([1.5, 2.0, 0.5])))
     period = 0.3
     expected_table[:, self.IDX] = (10.0**interpolator(
         np.log10(period))) * np.ones(10)
     np.testing.assert_array_almost_equal(
         self.amp_table.get_mean_table(imt_module.SA(period), ctx),
         expected_table)
Example #8
 def test_get_amplification_factors(self):
     """
     Tests the amplification tables
     """
     ctx = RuptureContext()
     ctx.rake = 45.0
     ctx.mag = 6.0
     # Takes distances at the values found in the table (not checking
     # distance interpolation)
     ctx.rjb = np.copy(self.amp_table.distances[:, 0, 0])
     # Test Vs30 is 700.0 m/s midpoint between the 400 m/s and 1000 m/s
     # specified in the table
     stddevs = [const.StdDev.TOTAL]
     expected_mean = np.ones_like(ctx.rjb)
     # Check PGA and PGV
     mean_amp, sigma_amp = self.amp_table.get_amplification_factors(
         imt_module.PGA(), ctx, ctx.rjb, stddevs)
     np.testing.assert_array_almost_equal(
         mean_amp,
         midpoint(1.0, 1.5) * expected_mean)
     np.testing.assert_array_almost_equal(sigma_amp[0], 0.9 * expected_mean)
     mean_amp, sigma_amp = self.amp_table.get_amplification_factors(
         imt_module.PGV(), ctx, ctx.rjb, stddevs)
     np.testing.assert_array_almost_equal(
         mean_amp,
         midpoint(1.0, 0.5) * expected_mean)
     np.testing.assert_array_almost_equal(sigma_amp[0], 0.9 * expected_mean)
     # Sa (0.5)
     mean_amp, sigma_amp = self.amp_table.get_amplification_factors(
         imt_module.SA(0.5), ctx, ctx.rjb, stddevs)
     np.testing.assert_array_almost_equal(
         mean_amp,
         midpoint(1.0, 2.0) * expected_mean)
     np.testing.assert_array_almost_equal(sigma_amp[0], 0.9 * expected_mean)
Example #9
 def test_get_mean_and_stddevs_good_amplified(self):
     """
     Tests the full execution of the GMPE tables for valid data with
     amplification
     """
     gsim = GMPETable(gmpe_table=self.TABLE_FILE)
     ctx = RuptureContext()
     ctx.mag = 6.0
     # Test values at the given distances and those outside range
     ctx.rjb = np.array([0.5, 1.0, 10.0, 100.0, 500.0])
     ctx.sids = np.arange(5)
     ctx.vs30 = 100. * np.ones(5)
     stddevs = [const.StdDev.TOTAL]
     expected_mean = np.array([20., 20., 10., 5., 1.0E-19])
     expected_sigma = 0.25 * np.ones(5)
     # PGA
     mean, sigma = gsim.get_mean_and_stddevs(ctx, ctx, ctx,
                                             imt_module.PGA(), stddevs)
     np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
     np.testing.assert_array_almost_equal(sigma[0], expected_sigma, 5)
     # SA
     mean, sigma = gsim.get_mean_and_stddevs(ctx, ctx, ctx,
                                             imt_module.SA(1.0), stddevs)
     np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
     np.testing.assert_array_almost_equal(sigma[0], 0.4 * np.ones(5), 5)
Example #10
 def test_get_mean_and_stddevs(self):
     """
     Tests mean and standard deviations without amplification
     """
     gsim = GMPETable(gmpe_table=self.TABLE_FILE)
     ctx = RuptureContext()
     ctx.mag = 6.0
     # Test values at the given distances and those outside range
     ctx.rjb = np.array([0.5, 1.0, 10.0, 100.0, 500.0])
     ctx.sids = np.arange(5)
     stddevs = [const.StdDev.TOTAL]
     expected_mean = np.array([2.0, 2.0, 1.0, 0.5, 1.0E-20])
     # PGA
     mean, sigma = gsim.get_mean_and_stddevs(ctx, ctx, ctx,
                                             imt_module.PGA(), stddevs)
     np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
     np.testing.assert_array_almost_equal(sigma[0], 0.5 * np.ones(5), 5)
     # SA
     mean, sigma = gsim.get_mean_and_stddevs(ctx, ctx, ctx,
                                             imt_module.SA(1.0), stddevs)
     np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
     np.testing.assert_array_almost_equal(sigma[0], 0.8 * np.ones(5), 5)
     # PGV
     mean, sigma = gsim.get_mean_and_stddevs(ctx, ctx, ctx,
                                             imt_module.PGV(), stddevs)
     np.testing.assert_array_almost_equal(np.exp(mean), 10. * expected_mean,
                                          5)
     np.testing.assert_array_almost_equal(sigma[0], 0.5 * np.ones(5), 5)
Example #11
def calculate_total_std(gsim_list, imts, vs30):
    std_total = {}
    std_inter = {}
    std_intra = {}
    for gsim in gsim_list:
        rctx = RuptureContext()
        # The calculator needs these inputs but they are not used
        # in the std calculation
        rctx.mag = 5
        rctx.rake = 0
        rctx.hypo_depth = 0
        dctx = DistancesContext()
        dctx.rjb = np.copy(np.array([1]))  # I do not care about the distance
        dctx.rrup = np.copy(np.array([1]))  # I do not care about the distance
        sctx = SitesContext()
        sctx.vs30 = vs30 * np.ones_like(np.array([0]))
        for imt in imts:
            gm_table, [
                gm_stddev_inter, gm_stddev_intra
            ] = (gsim.get_mean_and_stddevs(
                sctx, rctx, dctx, imt,
                [const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT]))
            std_total[gsim, imt] = (np.sqrt(gm_stddev_inter[0]**2 +
                                            gm_stddev_intra[0]**2))
            std_inter[gsim, imt] = gm_stddev_inter[0]
            std_intra[gsim, imt] = gm_stddev_intra[0]
    return (std_total, std_inter, std_intra)
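
A hypothetical call of calculate_total_std, assuming openquake.hazardlib is installed, the helper's own imports (numpy as np, const and the three context classes) are in scope, and the installed version accepts the three-context signature used above; the GMPE and IMT choices are illustrative only.

from openquake.hazardlib.gsim.boore_atkinson_2008 import BooreAtkinson2008
from openquake.hazardlib.imt import PGA, SA

ba08 = BooreAtkinson2008()
std_total, std_inter, std_intra = calculate_total_std(
    [ba08], [PGA(), SA(1.0)], vs30=760.0)
# the returned dictionaries are keyed by (gsim instance, imt)
print(std_total[ba08, PGA()], std_inter[ba08, PGA()], std_intra[ba08, PGA()])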
Example #12
 def test_get_mean_and_stddevs_good(self):
     """
     Tests the full execution of the GMPE tables for valid data
     """
     gsim = GMPETable(gmpe_table=self.TABLE_FILE)
     rctx = RuptureContext()
     rctx.mag = 6.0
     rctx.rake = 90.0
     dctx = DistancesContext()
     # Test values at the given distances and those outside range
     dctx.rjb = np.array([0.5, 1.0, 10.0, 100.0, 500.0])
     sctx = SitesContext()
     stddevs = [const.StdDev.TOTAL]
     expected_mean = np.array([20.0, 20.0, 10.0, 5.0, 1.0E-19])
     # PGA
     mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
                                             imt_module.PGA(),
                                             stddevs)
     np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
     np.testing.assert_array_almost_equal(sigma[0], 0.25 * np.ones(5), 5)
     # SA
     mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx,
                                             imt_module.SA(1.0),
                                             stddevs)
     np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5)
     np.testing.assert_array_almost_equal(sigma[0], 0.4 * np.ones(5), 5)
Example #13
 def test_recarray_conversion(self):
     # automatic recarray conversion for backward compatibility
     imt = PGA()
     gsim = AbrahamsonGulerce2020SInter()
     ctx = RuptureContext()
     ctx.mag = 5.
     ctx.sids = [0, 1]
     ctx.vs30 = [760., 760.]
     ctx.rrup = [100., 110.]
     mean, _stddevs = gsim.get_mean_and_stddevs(ctx, ctx, ctx, imt, [])
     numpy.testing.assert_allclose(mean, [-5.81116004, -6.00192455])
Example #14
    def test_dist_not_in_increasing_order(self):
        ctx = RuptureContext()
        ctx.mag = 5.
        ctx.sids = [0, 1]
        ctx.rhypo = numpy.array([150, 100])
        mean_150_100, _ = self.GSIM_CLASS().get_mean_and_stddevs(
            ctx, ctx, ctx, SA(0.1, 5), [StdDev.TOTAL])

        ctx.rhypo = numpy.array([100, 150])
        mean_100_150, _ = self.GSIM_CLASS().get_mean_and_stddevs(
            ctx, ctx, ctx, SA(0.1, 5), [StdDev.TOTAL])
        self.assertAlmostEqual(mean_150_100[1], mean_100_150[0])
        self.assertAlmostEqual(mean_150_100[0], mean_100_150[1])
Example #15
    def test_mag_dist_outside_range(self):
        sctx = SitesContext()
        rctx = RuptureContext()
        dctx = DistancesContext()

        # rupture with Mw = 3 (Mblg=2.9434938048208452) at rhypo = 1 must give
        # same mean as rupture with Mw = 4.4 (Mblg=4.8927897867183798) at
        # rhypo = 10
        rctx.mag = 2.9434938048208452
        dctx.rhypo = numpy.array([1])
        mean_mw3_d1, _ = self.GSIM_CLASS().get_mean_and_stddevs(
            sctx, rctx, dctx, SA(0.1, 5), [StdDev.TOTAL]
        )

        rctx.mag = 4.8927897867183798
        dctx.rhypo = numpy.array([10])
        mean_mw4pt4_d10, _ = self.GSIM_CLASS().get_mean_and_stddevs(
            sctx, rctx, dctx, SA(0.1, 5), [StdDev.TOTAL]
        )

        self.assertAlmostEqual(float(mean_mw3_d1), float(mean_mw4pt4_d10))

        # rupture with Mw = 9 (Mblg = 8.2093636421088814) at rhypo = 1500 km
        # must give same mean as rupture with Mw = 8.2
        # (Mblg = 7.752253535347597) at rhypo = 1000
        rctx.mag = 8.2093636421088814
        dctx.rhypo = numpy.array([1500.])
        mean_mw9_d1500, _ = self.GSIM_CLASS().get_mean_and_stddevs(
            sctx, rctx, dctx, SA(0.1, 5), [StdDev.TOTAL]
        )

        rctx.mag = 7.752253535347597
        dctx.rhypo = numpy.array([1000.])
        mean_mw8pt2_d1000, _ = self.GSIM_CLASS().get_mean_and_stddevs(
            sctx, rctx, dctx, SA(0.1, 5), [StdDev.TOTAL]
        )

        self.assertAlmostEqual(mean_mw9_d1500, mean_mw8pt2_d1000)
Example #16
    def test_dist_not_in_increasing_order(self):
        sctx = SitesContext()
        rctx = RuptureContext()
        dctx = DistancesContext()

        rctx.mag = 5.
        dctx.rhypo = numpy.array([150, 100])
        mean_150_100, _ = self.GSIM_CLASS().get_mean_and_stddevs(
            sctx, rctx, dctx, SA(0.1, 5), [StdDev.TOTAL])

        dctx.rhypo = numpy.array([100, 150])
        mean_100_150, _ = self.GSIM_CLASS().get_mean_and_stddevs(
            sctx, rctx, dctx, SA(0.1, 5), [StdDev.TOTAL])
        self.assertAlmostEqual(mean_150_100[1], mean_100_150[0])
        self.assertAlmostEqual(mean_150_100[0], mean_100_150[1])
Example #17
 def test_zero_distance(self):
     # test the calculation in case of zero rrup distance (for rrup=0
     # the equations have a singularity). In this case the
     # method should return values equal to the ones obtained by
     # replacing 0 values with 1
     ctx = RuptureContext()
     ctx.sids = [0, 1]
     ctx.vs30 = numpy.array([500.0, 2500.0])
     ctx.mag = 5.0
     ctx.rrup = numpy.array([0.0, 0.2])
     mean_0, stds_0 = self.GSIM_CLASS().get_mean_and_stddevs(
         ctx, ctx, ctx, PGA(), [StdDev.TOTAL])
     ctx.rrup = numpy.array([1.0, 0.2])
     mean_01, stds_01 = self.GSIM_CLASS().get_mean_and_stddevs(
         ctx, ctx, ctx, PGA(), [StdDev.TOTAL])
     numpy.testing.assert_array_equal(mean_0, mean_01)
     numpy.testing.assert_array_equal(stds_0, stds_01)
Example #18
def evaluate_model(site_params, rup_params, df, npts, azimuth, moveout, mod,
                   imt):
    sx = SitesContext()
    rx = RuptureContext()
    dx = DistancesContext()

    # TODO: some site parameters can be pulled from the dataframe so we don't
    # have to use the defaults (vs30, azimuth, etc.)
    if not moveout:
        npts = df.shape[0]
    for param in site_params.keys():
        setattr(sx, param, np.full(npts, site_params[param]))

    rx.__dict__.update(rup_params)
    rx.mag = df['EarthquakeMagnitude'].iloc[0]
    rx.hypo_depth = df['EarthquakeDepth'].iloc[0]

    if moveout:
        dx.rjb = np.linspace(0, df['JoynerBooreDistance'].max(), npts)
        dx.rrup = np.sqrt(dx.rjb**2 + df['EarthquakeDepth'].iloc[0]**2)
        dx.rhypo = dx.rrup
        dx.repi = dx.rjb
    else:
        dx.rjb = df['JoynerBooreDistance']
        dx.rrup = df['RuptureDistance']
        dx.rhypo = df['HypocentralDistance']
        dx.repi = df['EpicentralDistance']

    # TODO: some of these distances can be pulled from the dataframe
    dx.ry0 = dx.rjb
    dx.rx = np.full_like(dx.rjb, -1)
    dx.azimuth = np.full(npts, azimuth)
    dx.rcdpp = dx.rjb
    dx.rvolc = dx.rjb

    try:
        mean, sd = MODELS_DICT[mod]().get_mean_and_stddevs(
            sx, rx, dx,
            manage_imts(imt)[0], [StdDev.TOTAL])
        mean = convert_units(mean, imt)
        if moveout:
            return mean, dx
        else:
            return mean, sd[0]
    except Exception:
        return
Example #19
    def test_dist_not_in_increasing_order(self):
        sctx = SitesContext()
        rctx = RuptureContext()
        dctx = DistancesContext()

        rctx.mag = 5.
        dctx.rhypo = numpy.array([150, 100])
        mean_150_100, _ = self.GSIM_CLASS().get_mean_and_stddevs(
            sctx, rctx, dctx, SA(0.1, 5), [StdDev.TOTAL]
        )

        dctx.rhypo = numpy.array([100, 150])
        mean_100_150, _ = self.GSIM_CLASS().get_mean_and_stddevs(
            sctx, rctx, dctx, SA(0.1, 5), [StdDev.TOTAL]
        )
        self.assertAlmostEqual(mean_150_100[1], mean_100_150[0])
        self.assertAlmostEqual(mean_150_100[0], mean_100_150[1])
Example #20
 def test_get_sigma_table(self):
     """
     Test the retrieval of the standard deviation modification tables
     for a given magnitude and IMT
     """
     ctx = RuptureContext()
     ctx.mag = 6.0
     # PGA
     expected_table = np.ones([10, 2])
     expected_table[:, self.IDX] *= 0.8
     stddevs = [const.StdDev.TOTAL]
     pga_table = self.amp_table.get_sigma_tables(imt_module.PGA(), ctx,
                                                 stddevs)[0]
     np.testing.assert_array_almost_equal(pga_table, expected_table)
     # SA (for coverage)
     sa_table = self.amp_table.get_sigma_tables(imt_module.SA(0.3), ctx,
                                                stddevs)[0]
     np.testing.assert_array_almost_equal(sa_table, expected_table)
Example #21
 def test_rhypo_smaller_than_15(self):
     # test the calculation in case of rhypo distances less than 15 km
     # (for rhypo=0 the distance term has a singularity). In this case the
     # method should return values equal to the ones obtained by clipping
     # distances at 15 km.
     ctx = RuptureContext()
     ctx.sids = [0, 1, 2]
     ctx.vs30 = numpy.array([800.0, 800.0, 800.0])
     ctx.mag = 5.0
     ctx.rake = 0
     ctx.rhypo = numpy.array([0.0, 10.0, 16.0])
     ctx.rhypo.flags.writeable = False
     mean_0, stds_0 = self.GSIM_CLASS().get_mean_and_stddevs(
         ctx, ctx, ctx, PGA(), [StdDev.TOTAL])
     ctx.rhypo = numpy.array([15.0, 15.0, 16.0])
     mean_15, stds_15 = self.GSIM_CLASS().get_mean_and_stddevs(
         ctx, ctx, ctx, PGA(), [StdDev.TOTAL])
     numpy.testing.assert_array_equal(mean_0, mean_15)
     numpy.testing.assert_array_equal(stds_0, stds_15)
Example #22
 def test_get_mean_stddevs_unsupported_stddev(self):
     """
     Tests the execution of the GMPE with an unsupported standard deviation
     type
     """
     gsim = GMPETable(gmpe_table=self.TABLE_FILE)
     rctx = RuptureContext()
     rctx.mag = 6.0
     dctx = DistancesContext()
     # Test values at the given distances and those outside range
     dctx.rjb = np.array([0.5, 1.0, 10.0, 100.0, 500.0])
     sctx = SitesContext()
     sctx.vs30 = 1000. * np.ones(5)
     stddevs = [const.StdDev.TOTAL, const.StdDev.INTER_EVENT]
     with self.assertRaises(ValueError) as ve:
         gsim.get_mean_and_stddevs(sctx, rctx, dctx, imt_module.PGA(),
                                   stddevs)
     self.assertEqual(str(ve.exception),
                      "Standard Deviation type Inter event not supported")
Example #24
 def test_get_amplification_factors(self):
     """
     Tests the amplification tables
     """
     rctx = RuptureContext()
     rctx.mag = 6.0
     dctx = DistancesContext()
     # Takes distances at the values found in the table (not checking
     # distance interpolation)
     dctx.rjb = np.copy(self.amp_table.distances[:, 0, 0])
     # Test Vs30 is 700.0 m/s midpoint between the 400 m/s and 1000 m/s
     # specified in the table
     sctx = SitesContext()
     sctx.vs30 = 700.0 * np.ones_like(dctx.rjb)
     stddevs = [const.StdDev.TOTAL]
     expected_mean = np.ones_like(dctx.rjb)
     expected_sigma = np.ones_like(dctx.rjb)
     # Check PGA and PGV
     mean_amp, sigma_amp = self.amp_table.get_amplification_factors(
         imt_module.PGA(), sctx, rctx, dctx.rjb, stddevs)
     np.testing.assert_array_almost_equal(
         mean_amp,
         midpoint(1.0, 1.5) * expected_mean)
     np.testing.assert_array_almost_equal(
         sigma_amp[0],
         0.9 * expected_mean)
     mean_amp, sigma_amp = self.amp_table.get_amplification_factors(
         imt_module.PGV(), sctx, rctx, dctx.rjb, stddevs)
     np.testing.assert_array_almost_equal(
         mean_amp,
         midpoint(1.0, 0.5) * expected_mean)
     np.testing.assert_array_almost_equal(
         sigma_amp[0],
         0.9 * expected_mean)
     # Sa (0.5)
     mean_amp, sigma_amp = self.amp_table.get_amplification_factors(
         imt_module.SA(0.5), sctx, rctx, dctx.rjb, stddevs)
     np.testing.assert_array_almost_equal(
         mean_amp,
         midpoint(1.0, 2.0) * expected_mean)
     np.testing.assert_array_almost_equal(
         sigma_amp[0],
         0.9 * expected_mean)
Example #25
    def check_gmpe_adjustments(self, adj_gmpe_set, original_gmpe):
        """
        Takes a set of three adjusted GMPEs representing the "low", "middle"
        and "high" stress drop adjustments for Germany and compares them
        against the original "target" GMPE for a variety of magnitudes
        and styles of faulting.
        """
        low_gsim, mid_gsim, high_gsim = adj_gmpe_set
        tot_std = [const.StdDev.TOTAL]
        for imt in self.imts:
            for mag in self.mags:
                for rake in self.rakes:
                    rctx = RuptureContext()
                    rctx.mag = mag
                    rctx.rake = rake
                    rctx.hypo_depth = 10.
                    # Get "original" values
                    mean = original_gmpe.get_mean_and_stddevs(self.sctx, rctx,
                                                              self.dctx, imt,
                                                              tot_std)[0]
                    mean = np.exp(mean)
                    # Get "low" adjustments (0.75 times the original)
                    low_mean = low_gsim.get_mean_and_stddevs(self.sctx, rctx,
                                                             self.dctx, imt,
                                                             tot_std)[0]
                    np.testing.assert_array_almost_equal(
                        np.exp(low_mean) / mean, 0.75 * np.ones_like(low_mean))

                    # Get "middle" adjustments (1.25 times the original)
                    mid_mean = mid_gsim.get_mean_and_stddevs(self.sctx, rctx,
                                                             self.dctx, imt,
                                                             tot_std)[0]
                    np.testing.assert_array_almost_equal(
                        np.exp(mid_mean) / mean, 1.25 * np.ones_like(mid_mean))

                    # Get "high" adjustments (1.5 times the original)
                    high_mean = high_gsim.get_mean_and_stddevs(self.sctx, rctx,
                                                               self.dctx, imt,
                                                               tot_std)[0]
                    np.testing.assert_array_almost_equal(
                        np.exp(high_mean) / mean,
                        1.5 * np.ones_like(high_mean))
Example #26
 def test_zero_distance(self):
     # test the calculation in case of zero rrup distance (for rrup=0
     # the slab correction term has a singularity). In this case the
     # method should return values equal to the ones obtained by
     # replacing 0 values with 0.1
     ctx = RuptureContext()
     ctx.sids = [0, 1]
     ctx.vs30 = numpy.array([800.0, 800.0])
     ctx.mag = 5.0
     ctx.rake = 0.0
     ctx.hypo_depth = 0.0
     ctx.rrup = numpy.array([0.0, 0.2])
     ctx.occurrence_rate = .0001
     mean_0, stds_0 = self.GSIM_CLASS().get_mean_and_stddevs(
         ctx, ctx, ctx, PGA(), [StdDev.TOTAL])
     ctx.rrup = numpy.array([0.1, 0.2])
     mean_01, stds_01 = self.GSIM_CLASS().get_mean_and_stddevs(
         ctx, ctx, ctx, PGA(), [StdDev.TOTAL])
     numpy.testing.assert_array_equal(mean_0, mean_01)
     numpy.testing.assert_array_equal(stds_0, stds_01)
Example #27
 def test_rhypo_smaller_than_15(self):
     # test the calculation in case of rhypo distances less than 15 km
     # (for rhypo=0 the distance term has a singularity). In this case the
     # method should return values equal to the ones obtained by clipping
     # distances at 15 km.
     sctx = SitesContext()
     sctx.vs30 = numpy.array([800.0, 800.0, 800.0])
     rctx = RuptureContext()
     rctx.mag = 5.0
     rctx.rake = 0
     dctx = DistancesContext()
     dctx.rhypo = numpy.array([0.0, 10.0, 16.0])
     dctx.rhypo.flags.writeable = False
     mean_0, stds_0 = self.GSIM_CLASS().get_mean_and_stddevs(
         sctx, rctx, dctx, PGA(), [StdDev.TOTAL])
     setattr(dctx, 'rhypo', numpy.array([15.0, 15.0, 16.0]))
     mean_15, stds_15 = self.GSIM_CLASS().get_mean_and_stddevs(
         sctx, rctx, dctx, PGA(), [StdDev.TOTAL])
     numpy.testing.assert_array_equal(mean_0, mean_15)
     numpy.testing.assert_array_equal(stds_0, stds_15)
Example #28
 def get_response_spectrum(self, magnitude, distance, periods, rake=90, vs30=800, damping=0.05):
     """
     """
     responses = np.zeros((len(periods),))
     p_damping = damping * 100
     rup = RuptureContext()
     rup.mag = magnitude
     rup.rake = rake
     dists = DistancesContext()
     dists.rjb = np.array([distance])
     sites = SitesContext()
     sites.vs30 = np.array([vs30])
     stddev_types = [StdDev.TOTAL]
     for i, period in enumerate(periods):
         if period == 0:
             imt = _PGA()
         else:
             imt = _SA(period, p_damping)
         responses[i] = np.exp(self._gmpe.get_mean_and_stddevs(sites, rup, dists, imt, stddev_types)[0][0])
     return ResponseSpectrum(periods, responses, unit='g', damping=damping)
Example #29
 def test_get_sigma_table(self):
     """
     Test the retrieval of the standard deviation modification tables
     for a given magnitude and IMT
     """
     rctx = RuptureContext()
     rctx.mag = 6.0
     # PGA
     expected_table = np.ones([10, 2])
     expected_table[:, self.IDX] *= 0.8
     stddevs = ["Total"]
     pga_table = self.amp_table.get_sigma_tables(imt_module.PGA(),
                                                 rctx,
                                                 stddevs)[0]
     np.testing.assert_array_almost_equal(pga_table, expected_table)
     # SA (for coverage)
     sa_table = self.amp_table.get_sigma_tables(imt_module.SA(0.3),
                                                rctx,
                                                stddevs)[0]
     np.testing.assert_array_almost_equal(sa_table, expected_table)
Example #30
    def test_equality(self):
        sctx1 = SitesContext()
        sctx1.vs30 = numpy.array([500., 600., 700.])
        sctx1.vs30measured = True
        sctx1.z1pt0 = numpy.array([40., 50., 60.])
        sctx1.z2pt5 = numpy.array([1, 2, 3])

        sctx2 = SitesContext()
        sctx2.vs30 = numpy.array([500., 600., 700.])
        sctx2.vs30measured = True
        sctx2.z1pt0 = numpy.array([40., 50., 60.])
        sctx2.z2pt5 = numpy.array([1, 2, 3])

        self.assertTrue(sctx1 == sctx2)

        sctx2 = SitesContext()
        sctx2.vs30 = numpy.array([500., 600.])
        sctx2.vs30measured = True
        sctx2.z1pt0 = numpy.array([40., 50., 60.])
        sctx2.z2pt5 = numpy.array([1, 2, 3])

        self.assertTrue(sctx1 != sctx2)

        sctx2 = SitesContext()
        sctx2.vs30 = numpy.array([500., 600., 700.])
        sctx2.vs30measured = False
        sctx2.z1pt0 = numpy.array([40., 50., 60.])
        sctx2.z2pt5 = numpy.array([1, 2, 3])

        self.assertTrue(sctx1 != sctx2)

        sctx2 = SitesContext()
        sctx2.vs30 = numpy.array([500., 600., 700.])
        sctx2.vs30measured = True
        sctx2.z1pt0 = numpy.array([40., 50., 60.])

        self.assertTrue(sctx1 != sctx2)

        rctx = RuptureContext()
        rctx.mag = 5.
        self.assertTrue(sctx1 != rctx)
Example #32
    def check_gmpe_adjustments(self, adj_gmpe_set, original_gmpe):
        """
        Takes a set of three adjusted GMPEs representing the "low", "middle"
        and "high" stress drop adjustments for Germany and compares them
        against the original "target" GMPE for a variety of magnitudes
        and styles of faulting.
        """
        low_gsim, mid_gsim, high_gsim = adj_gmpe_set
        tot_std = [const.StdDev.TOTAL]
        for imt in self.imts:
            for mag in self.mags:
                for rake in self.rakes:
                    rctx = RuptureContext()
                    rctx.mag = mag
                    rctx.rake = rake
                    rctx.hypo_depth = 10.
                    rctx.width = 0.0001
                    # Get "original" values
                    mean = original_gmpe.get_mean_and_stddevs(
                        self.sctx, rctx, self.dctx, imt, tot_std)[0]
                    mean = np.exp(mean)
                    # Get "low" adjustments (0.75 times the original)
                    low_mean = low_gsim.get_mean_and_stddevs(
                        self.sctx, rctx, self.dctx, imt, tot_std)[0]
                    np.testing.assert_array_almost_equal(
                        np.exp(low_mean) / mean, 0.75 * np.ones_like(low_mean))

                    # Get "middle" adjustments (1.25 times the original)
                    mid_mean = mid_gsim.get_mean_and_stddevs(
                        self.sctx, rctx, self.dctx, imt, tot_std)[0]
                    np.testing.assert_array_almost_equal(
                        np.exp(mid_mean) / mean, 1.25 * np.ones_like(mid_mean))

                    # Get "high" adjustments (1.5 times the original)
                    high_mean = high_gsim.get_mean_and_stddevs(
                        self.sctx, rctx, self.dctx, imt, tot_std)[0]
                    np.testing.assert_array_almost_equal(
                        np.exp(high_mean) / mean,
                        1.5 * np.ones_like(high_mean))
Example #33
def build_gmpe_table(matrixMagsMin, matrixMagsMax, matrixMagsStep,
                     matrixDistsMin, matrixDistsMax, matrixDistsStep,
                     imt_filtering, limitIM, gsim_list, limit_max_mag,
                     limit_min_mag):
    # Define the magnitude range of interest, 5.0 - 9.0 every 0.1
    mags = np.arange(matrixMagsMin, matrixMagsMax, matrixMagsStep)
    # Define the distance range of interest, 0.0 - 300.0 every 1 km
    dists = np.arange(matrixDistsMin, matrixDistsMax, matrixDistsStep)
    # Define the Vs30 of interest (a single value of 180.0 m/s here)
    vs30s = np.arange(180.0, 181., 1.)
    gm_table = np.zeros([len(dists), len(mags), len(vs30s)])
    stddevs = [const.StdDev.TOTAL]
    gsim_tables = []
    for gsim in gsim_list:
        for i, mag in enumerate(mags):
            for j, vs30 in enumerate(vs30s):
                # The RuptureContext object holds all of the
                # rupture related attributes (e.g. mag, rake, ztor, hypo_depth)
                rctx = RuptureContext()
                rctx.mag = mag
                rctx.rake = 0.0
                rctx.hypo_depth = 10
                # The DistancesContext object holds all of the distance
                # calculations (e.g. rjb, rrup, rx, ry0)
                # OQ GMPEs are vectorised by distance - so this needs
                # to be an array
                dctx = DistancesContext()
                dctx.rjb = np.copy(dists)
                dctx.rrup = np.copy(dists)
                # dctx.rhypo = np.copy(dists)
                # The SitesContext object holds all of the site
                # attributes - also an array
                sctx = SitesContext()
                # The attributes of the site array must be of the
                # same size as the distances
                sctx.vs30 = vs30 * np.ones_like(dists)
                # GMPE produces 2 outputs, the means (well their
                # natural logarithm) and standard deviations
                gm_table[:, i, j], gm_stddevs = gsim.get_mean_and_stddevs(
                                        sctx, rctx, dctx, imt_filtering,
                                        stddevs)
        gm_table_exp = np.exp(gm_table)
        gsim_tables.append(gm_table_exp)

    if len(gsim_list) == 1:
        gm_table_final = gsim_tables[0]
    else:
        gm_table_final = np.maximum(gsim_tables[0], gsim_tables[1])
    # These "if" exclude all ruptures above and below the limit magnitude
    if limit_max_mag < matrixMagsMax:
        indexMag = int((limit_max_mag - matrixMagsMin) / matrixMagsStep)
        list_mag_to_exclude = np.arange(indexMag+1, len(mags))
        gm_table_final[:, list_mag_to_exclude, 0] = 0.001
    
    if limit_min_mag > matrixMagsMin:
        indexMinMag = int((limit_min_mag - matrixMagsMin) / matrixMagsStep)
        list_min_mag_to_exclude = np.arange(0, indexMinMag)
        gm_table_final[:, list_min_mag_to_exclude, 0] = 0.001

    gm_mask = gm_table_final >= limitIM
    GMPEmatrix = gm_mask[:, :, 0]
    return GMPEmatrix
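
A hypothetical invocation of build_gmpe_table, again assuming the hazardlib imports used in the body (np, const and the context classes) plus a concrete GMPE, and a version that accepts the three-context signature; the ranges, GMPE and threshold below are illustrative only, and the threshold is a median PGA in g (the hazardlib convention for PGA/SA).

from openquake.hazardlib.gsim.akkar_bommer_2010 import AkkarBommer2010
from openquake.hazardlib.imt import PGA

exceedance_matrix = build_gmpe_table(
    matrixMagsMin=5.0, matrixMagsMax=9.0, matrixMagsStep=0.1,
    matrixDistsMin=0.0, matrixDistsMax=300.0, matrixDistsStep=1.0,
    imt_filtering=PGA(), limitIM=0.05, gsim_list=[AkkarBommer2010()],
    limit_max_mag=8.0, limit_min_mag=5.5)
# boolean array of shape (n_distances, n_magnitudes): True where the median
# PGA reaches the 0.05 g threshold
print(exceedance_matrix.shape)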
Example #34
eventids = [
    'usp000a1b0', 'usp000d6vk', 'usp000fg9t', 'usp000g9h6', 'us2000gge9',
    'us1000etmq', 'us2000dwh6', 'nc30228270', 'nc72282711', 'ci14383980',
    'ci14607652', 'usp0009eq0'
]

# Calculate mean Arias intensity and the standard deviations.
Trav_Ia_mean = []
interSD = []
intraSD = []

for event in eventids:
    partial_df = df.loc[df['USGS_eventID'] == event]

    sx.vs30 = np.array(partial_df['Vs30(m/s)'])
    dx.rrup = np.array(partial_df['rrup'])
    rx.mag = np.array(partial_df['magnitude'])[0]
    rx.rake = np.array(partial_df['rake_angle'])[0]
    Ia_mean, sd = trav2003.get_mean_and_stddevs(sx, rx, dx, imt, sd_types)

    Trav_Ia_mean.append(np.exp(Ia_mean))
    interSD.append(sd[0])
    intraSD.append(sd[1])

# Flatten lists.
flat_Ia = [val for sublist in Trav_Ia_mean for val in sublist]
flat_interSD = [val for sublist in interSD for val in sublist]
flat_intraSD = [val for sublist in intraSD for val in sublist]

# Calculate residuals.
Ia_obs = np.array(df['Ia_arith(m/s)'])
logIa_obs = np.log(Ia_obs)
Example #35
def signal_end(st, event_time, event_lon, event_lat, event_mag,
               method=None, vmin=None, floor=None,
               model=None, epsilon=2.0):
    """
    Estimate end of signal by using a model of the 5-95% significant
    duration, and adding this value to the "signal_split" time. This probably
    only works well when the split is estimated with a p-wave picker since
    the velocity method often ends up with split times that are well before
    the signal actually starts.

    Args:
        st (StationStream):
            Stream of data.
        event_time (UTCDateTime):
            Event origin time.
        event_mag (float):
            Event magnitude.
        event_lon (float):
            Event longitude.
        event_lat (float):
            Event latitude.
        method (str):
            Method for estimating signal end time. Either 'velocity'
            or 'model'.
        vmin (float):
            Velocity (km/s) for estimating end of signal. Only used if
            method="velocity".
        floor (float):
            Minimum duration (sec) applied along with vmin.
        model (str):
            Short name of duration model to use. Must be defined in the
            gmprocess/data/modules.yml file.
        epsilon (float):
            Number of standard deviations; if epsilon is 1.0, then the signal
            window duration is the mean Ds + 1 standard deviation. Only used
            for method="model".

    Returns:
        trace with stats dict updated to include a
        stats['processing_parameters']['signal_end'] dictionary.

    """
    # Load openquake stuff if method="model"
    if method == "model":
        mod_file = pkg_resources.resource_filename(
            'gmprocess', os.path.join('data', 'modules.yml'))
        with open(mod_file, 'r') as f:
            mods = yaml.safe_load(f)

        # Import module
        cname, mpath = mods['modules'][model]
        dmodel = getattr(import_module(mpath), cname)()

        # Set some "conservative" inputs (in that they will tend to give
        # larger durations).
        sctx = SitesContext()
        sctx.vs30 = np.array([180.0])
        sctx.z1pt0 = np.array([0.51])
        rctx = RuptureContext()
        rctx.mag = event_mag
        rctx.rake = -90.0
        dur_imt = imt.from_string('RSD595')
        stddev_types = [const.StdDev.INTRA_EVENT]

    for tr in st:
        if not tr.hasParameter('signal_split'):
            continue
        if method == "velocity":
            if vmin is None:
                raise ValueError('Must specify vmin if method is "velocity".')
            if floor is None:
                raise ValueError('Must specify floor if method is "velocity".')
            epi_dist = gps2dist_azimuth(
                lat1=event_lat,
                lon1=event_lon,
                lat2=tr.stats['coordinates']['latitude'],
                lon2=tr.stats['coordinates']['longitude'])[0] / 1000.0
            end_time = event_time + max(floor, epi_dist / vmin)
        elif method == "model":
            if model is None:
                raise ValueError('Must specify model if method is "model".')
            epi_dist = gps2dist_azimuth(
                lat1=event_lat,
                lon1=event_lon,
                lat2=tr.stats['coordinates']['latitude'],
                lon2=tr.stats['coordinates']['longitude'])[0] / 1000.0
            dctx = DistancesContext()
            # Repi >= Rrup, so substitution here should be conservative
            # (leading to larger durations).
            dctx.rrup = np.array([epi_dist])
            lnmu, lnstd = dmodel.get_mean_and_stddevs(
                sctx, rctx, dctx, dur_imt, stddev_types)
            duration = np.exp(lnmu + epsilon * lnstd[0])
            # Get split time
            split_time = tr.getParameter('signal_split')['split_time']
            end_time = split_time + float(duration)
        else:
            raise ValueError('method must be either "velocity" or "model".')
        # Update trace params
        end_params = {
            'end_time': end_time,
            'method': method,
            'vsplit': vmin,
            'floor': floor,
            'model': model,
            'epsilon': epsilon
        }
        tr.setParameter('signal_end', end_params)

    return st
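
The "model" branch of signal_end can be exercised on its own. The sketch below uses the Afshari & Stewart (2016) significant-duration GSIM shipped with hazardlib as a stand-in for whichever model modules.yml actually maps to; the model choice, import paths and scenario numbers are assumptions for illustration, and it presumes a hazardlib version that accepts the three-context call.

import numpy as np
from openquake.hazardlib import const, imt
from openquake.hazardlib.contexts import (
    SitesContext, RuptureContext, DistancesContext)
from openquake.hazardlib.gsim.afshari_stewart_2016 import AfshariStewart2016

dmodel = AfshariStewart2016()
sctx = SitesContext()
sctx.vs30 = np.array([180.0])   # same "conservative" site values as above
sctx.z1pt0 = np.array([0.51])
rctx = RuptureContext()
rctx.mag = 6.5                  # illustrative magnitude
rctx.rake = -90.0
dctx = DistancesContext()
dctx.rrup = np.array([40.0])    # illustrative distance in km
lnmu, lnstd = dmodel.get_mean_and_stddevs(
    sctx, rctx, dctx, imt.from_string('RSD595'),
    [const.StdDev.INTRA_EVENT])
duration = np.exp(lnmu + 2.0 * lnstd[0])  # mean + 2 sigma, as with epsilon=2.0
print(duration[0])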
Example #36
def test_scr_rlme():
    old_gmpe = set_gmpe('stable_continental_nshmp2014_rlme')
    spec_file = pkg_resources.resource_filename(
        'scenarios', os.path.join('data', 'configspec.conf'))
    validator = get_custom_validator()
    config = ConfigObj(os.path.join(os.path.expanduser('~'), 'scenarios.conf'),
                       configspec=spec_file)
    tmp = pkg_resources.resource_filename(
        'scenarios', os.path.join('..', 'data', 'gmpe_sets.conf'))
    config.merge(ConfigObj(tmp, configspec=spec_file))
    tmp = pkg_resources.resource_filename(
        'scenarios', os.path.join('..', 'data', 'modules.conf'))
    config.merge(ConfigObj(tmp, configspec=spec_file))
    results = config.validate(validator)
    if results != True:
        config_error(config, results)

    # MultiGMPE from config
    config = config.dict()
    gmpe = MultiGMPE.from_config(config)

    # Input stuff
    IMT = imt.SA(1.0)
    rctx = RuptureContext()
    dctx = DistancesContext()
    sctx = SitesContext()

    rctx.rake = 0.0
    rctx.dip = 90.0
    rctx.ztor = 0.0
    rctx.mag = 8.0
    rctx.width = 10.0
    rctx.hypo_depth = 8.0

    dctx.rjb = np.logspace(1, np.log10(800), 100)
    dctx.rrup = dctx.rjb
    dctx.rhypo = dctx.rjb
    dctx.rx = dctx.rjb
    dctx.ry0 = dctx.rjb

    sctx.vs30 = np.ones_like(dctx.rjb) * 275.0
    sctx.vs30measured = np.full_like(dctx.rjb, False, dtype='bool')
    sctx = MultiGMPE.set_sites_depth_parameters(sctx, gmpe)

    # Evaluate
    conf_lmean, dummy = gmpe.get_mean_and_stddevs(sctx, rctx, dctx, IMT,
                                                  [const.StdDev.TOTAL])

    target_lmean = np.array([
        0.10556736, 0.0839267, 0.06189444, 0.03945984, 0.01661264, -0.006657,
        -0.03035844, -0.05450058, -0.07909179, -0.10413995, -0.1296524,
        -0.15563655, -0.1821091, -0.20909381, -0.23661405, -0.26469259,
        -0.29335086, -0.32257956, -0.35232905, -0.38254639, -0.41317807,
        -0.44417017, -0.47549552, -0.5071888, -0.53929293, -0.57185042,
        -0.60490345, -0.63848027, -0.67255251, -0.70707712, -0.74201096,
        -0.77731091, -0.81293906, -0.84889737, -0.88520644, -0.92188724,
        -0.95899471, -0.99699613, -1.03583184, -1.07530664, -1.11531737,
        -1.15576129, -1.19653696, -1.23757689, -1.2772327, -1.2915098,
        -1.30576498, -1.32001713, -1.33429606, -1.3486727, -1.36322545,
        -1.37803346, -1.39317668, -1.40677752, -1.42081409, -1.43538898,
        -1.45056417, -1.46640223, -1.48327111, -1.50656497, -1.53368548,
        -1.56645985, -1.59991327, -1.63399401, -1.66867278, -1.7039438,
        -1.73980246, -1.77624473, -1.81326727, -1.85087166, -1.889066,
        -1.92784814, -1.96721442, -2.0071855, -2.04779304, -2.08909259,
        -2.13114448, -2.17401045, -2.21775376, -2.26243406, -2.30808979,
        -2.35475487, -2.40246494, -2.4512575, -2.50117075, -2.55223495,
        -2.60447754, -2.65792811, -2.71261851, -2.61732716, -2.67007323,
        -2.72399057, -2.77918054, -2.83574666, -2.89379416, -2.95340501,
        -3.01462691, -3.07750731, -3.14209631, -3.20844679
    ])

    np.testing.assert_allclose(conf_lmean, target_lmean, atol=1e-6)

    # Redo for 3 sec so some GMPEs are filtered out
    IMT = imt.SA(3.0)
    gmpe = MultiGMPE.from_config(config, filter_imt=IMT)
    conf_lmean, dummy = gmpe.get_mean_and_stddevs(sctx, rctx, dctx, IMT,
                                                  [const.StdDev.TOTAL])

    target_lmean = np.array([
        -1.26636973, -1.289514, -1.31300386, -1.33683936, -1.36102084,
        -1.38554902, -1.41042497, -1.43565015, -1.46122642, -1.48715602,
        -1.51344154, -1.54008586, -1.56709215, -1.59446375, -1.62220409,
        -1.65031664, -1.6788048, -1.70767178, -1.7369205, -1.76655351,
        -1.79657287, -1.82698005, -1.85777587, -1.88896039, -1.92053288,
        -1.95249175, -1.98483453, -2.01755788, -2.05065755, -2.08412844,
        -2.11796463, -2.15215943, -2.18670547, -2.22159473, -2.25681869,
        -2.29236835, -2.32823441, -2.36453464, -2.40140834, -2.43883442,
        -2.47679132, -2.51525752, -2.55421156, -2.59363211, -2.63112832,
        -2.63336521, -2.63582817, -2.6385319, -2.64147962, -2.64466761,
        -2.64809268, -2.65175214, -2.6556438, -2.65976592, -2.66411721,
        -2.66869673, -2.67350386, -2.67853821, -2.68413311, -2.69604497,
        -2.7124745, -2.73590549, -2.75964098, -2.78367044, -2.80798539,
        -2.8325853, -2.85746998, -2.88263948, -2.90809408, -2.93383429,
        -2.95986073, -2.98617306, -3.01275705, -3.03961495, -3.06675608,
        -3.09419043, -3.12192861, -3.14998191, -3.17836228, -3.20708239,
        -3.23615561, -3.26559604, -3.29541858, -3.32563888, -3.35627343,
        -3.38733956, -3.41885548, -3.4508403, -3.48331409, -3.56476842,
        -3.59987076, -3.63573296, -3.67238872, -3.70987332, -3.74822369,
        -3.78747847, -3.82767809, -3.86886488, -3.91108308, -3.95437899
    ])

    np.testing.assert_allclose(conf_lmean, target_lmean, atol=1e-6)

    # Clean up
    set_gmpe(old_gmpe)
Example #37
def get_extent(rupture=None, config=None):
    """
    Method to compute map extent from rupture. There are numerous methods for
    getting the extent:
        - It can be specified directly in the config file,
        - it can be hard coded for specific magnitude ranges in the config
          file, or
        - it can be based on the MultiGMPE for the event.

    All methods except the first require a rupture object.

    If no config is provided then a rupture is required and the extent is
    based on a generic set of GMPEs for active or stable tectonic regions.

    Args:
        rupture (Rupture): A ShakeMap Rupture instance.
        config (ConfigObj): ShakeMap config object.

    Returns:
        tuple: lonmin, lonmax, latmin, latmax rounded to the nearest
        arc-minute.

    """

    # -------------------------------------------------------------------------
    # Check to see what parameters are specified in the extent config
    # -------------------------------------------------------------------------
    spans = {}
    bounds = []
    if config is not None:
        if 'extent' in config:
            if 'magnitude_spans' in config['extent']:
                if len(config['extent']['magnitude_spans']):
                    if isinstance(config['extent']['magnitude_spans'], dict):
                        spans = config['extent']['magnitude_spans']
            if 'bounds' in config['extent']:
                if 'extent' in config['extent']['bounds']:
                    if config['extent']['bounds']['extent'][0] != -999.0:
                        bounds = config['extent']['bounds']['extent']

    # -------------------------------------------------------------------------
    # Simplest option: extent was specified in the config, use that and exit.
    # -------------------------------------------------------------------------
    if len(bounds):
        xmin, ymin, xmax, ymax = bounds
        return (xmin, xmax, ymin, ymax)

    if not rupture or not isinstance(rupture, Rupture):
        raise TypeError('get_extent() requires a rupture object if the extent '
                        'is not specified in the config object.')

    # Find the central point
    origin = rupture.getOrigin()
    if isinstance(rupture, (QuadRupture, EdgeRupture)):
        # For an extended rupture, it is the midpoint between the extent of the
        # vertices
        lats = rupture.lats
        lons = rupture.lons

        # Remove nans
        lons = lons[~np.isnan(lons)]
        lats = lats[~np.isnan(lats)]

        clat = 0.5 * (np.nanmax(lats) + np.nanmin(lats))
        clon = 0.5 * (np.nanmax(lons) + np.nanmin(lons))
    else:
        # For a point source, it is just the epicenter
        clat = origin.lat
        clon = origin.lon

    mag = origin.mag

    # -------------------------------------------------------------------------
    # Second simplest option: spans are hardcoded based on magnitude
    # -------------------------------------------------------------------------
    if len(spans):
        xmin = None
        xmax = None
        ymin = None
        ymax = None
        for spankey, span in spans.items():
            if mag > span[0] and mag <= span[1]:
                ymin = clat - span[2] / 2
                ymax = clat + span[2] / 2
                xmin = clon - span[3] / 2
                xmax = clon + span[3] / 2
                break
        if xmin is not None:
            return (xmin, xmax, ymin, ymax)

    # -------------------------------------------------------------------------
    # Use MultiGMPE to get spans
    # -------------------------------------------------------------------------
    if config is not None:
        gmpe = MultiGMPE.from_config(config)
        gmice = get_object_from_config('gmice', 'modeling', config)
    else:
        # Put in some default values for conf
        config = {
            'extent': {
                'mmi': {
                    'threshold': 4.5,
                    'mindist': 100,
                    'maxdist': 1000
                }
            }
        }

        # Generic GMPEs choices based only on active vs stable
        # as defaults...
        stable = is_stable(origin.lon, origin.lat)
        if not stable:
            ASK14 = AbrahamsonEtAl2014()
            CB14 = CampbellBozorgnia2014()
            CY14 = ChiouYoungs2014()
            gmpes = [ASK14, CB14, CY14]
            site_gmpes = None
            weights = [1 / 3.0, 1 / 3.0, 1 / 3.0]
            gmice = WGRW12()
        else:
            Fea96 = FrankelEtAl1996MwNSHMP2008()
            Tea97 = ToroEtAl1997MwNSHMP2008()
            Sea02 = SilvaEtAl2002MwNSHMP2008()
            C03 = Campbell2003MwNSHMP2008()
            TP05 = TavakoliPezeshk2005MwNSHMP2008()
            AB06p = AtkinsonBoore2006Modified2011()
            Pea11 = PezeshkEtAl2011()
            Atk08p = Atkinson2008prime()
            Sea01 = SomervilleEtAl2001NSHMP2008()
            gmpes = [
                Fea96, Tea97, Sea02, C03, TP05, AB06p, Pea11, Atk08p, Sea01
            ]
            site_gmpes = [AB06p]
            weights = [0.16, 0.0, 0.0, 0.17, 0.17, 0.3, 0.2, 0.0, 0.0]
            gmice = AK07()

        gmpe = MultiGMPE.from_list(gmpes,
                                   weights,
                                   default_gmpes_for_site=site_gmpes)

    min_mmi = config['extent']['mmi']['threshold']
    default_imt = imt.SA(1.0)
    sd_types = [const.StdDev.TOTAL]

    # Distance context
    dx = DistancesContext()
    # This imposes minimum/maximum distances taken from the config
    # (extent -> mmi -> mindist/maxdist; 100 and 1000 km by default)
    d_min = config['extent']['mmi']['mindist']
    d_max = config['extent']['mmi']['maxdist']
    dx.rjb = np.logspace(np.log10(d_min), np.log10(d_max), 2000)
    # Details don't matter for this; assuming vertical surface rupturing fault
    # with epicenter at the surface.
    dx.rrup = dx.rjb
    dx.rhypo = dx.rjb
    dx.repi = dx.rjb
    dx.rx = np.zeros_like(dx.rjb)
    dx.ry0 = np.zeros_like(dx.rjb)
    dx.rvolc = np.zeros_like(dx.rjb)

    # Sites context
    sx = SitesContext()
    # Set to soft soil conditions
    sx.vs30 = np.full_like(dx.rjb, 180)
    sx = MultiGMPE.set_sites_depth_parameters(sx, gmpe)
    sx.vs30measured = np.full_like(sx.vs30, False, dtype=bool)
    sx = Sites._addDepthParameters(sx)
    sx.backarc = np.full_like(sx.vs30, False, dtype=bool)

    # Rupture context
    rx = RuptureContext()
    rx.mag = origin.mag
    rx.rake = 0.0
    # From WC94...
    rx.width = 10**(-0.76 + 0.27 * rx.mag)
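    # (Strike-slip width regression from WC94; e.g. Mw 7.0 gives
    #  10**(-0.76 + 0.27 * 7.0) = 10**1.13, roughly 13.5 km.)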
    rx.dip = 90.0
    rx.ztor = origin.depth
    rx.hypo_depth = origin.depth

    gmpe_imt_mean, _ = gmpe.get_mean_and_stddevs(sx, rx, dx, default_imt,
                                                 sd_types)

    # Convert to MMI
    gmpe_to_mmi, _ = gmice.getMIfromGM(gmpe_imt_mean, default_imt)

    # Farthest distance at which the predicted MMI still exceeds the
    # threshold; this is used as the buffer around the rupture.
    dists_exceed_mmi = dx.rjb[gmpe_to_mmi > min_mmi]
    if len(dists_exceed_mmi):
        mindist_km = np.max(dists_exceed_mmi)
    else:
        mindist_km = d_min

    # Get a projection
    proj = OrthographicProjection(clon - 4, clon + 4, clat + 4, clat - 4)
    if isinstance(rupture, (QuadRupture, EdgeRupture)):
        ruptx, rupty = proj(lons, lats)
    else:
        ruptx, rupty = proj(clon, clat)

    xmin = np.nanmin(ruptx) - mindist_km
    ymin = np.nanmin(rupty) - mindist_km
    xmax = np.nanmax(ruptx) + mindist_km
    ymax = np.nanmax(rupty) + mindist_km

    # Put a limit on range of aspect ratio
    dx = xmax - xmin
    dy = ymax - ymin
    ar = dy / dx
    if ar > 1.2:
        # Inflate x
        dx_target = dy / 1.2
        ddx = dx_target - dx
        xmax = xmax + ddx / 2
        xmin = xmin - ddx / 2
    if ar < 0.83:
        # Inflate y
        dy_target = dx * 0.83
        ddy = dy_target - dy
        ymax = ymax + ddy / 2
        ymin = ymin - ddy / 2

    lonmin, latmin = proj(np.array([xmin]), np.array([ymin]), reverse=True)
    lonmax, latmax = proj(np.array([xmax]), np.array([ymax]), reverse=True)

    #
    # Round coordinates to the nearest minute -- that should make the
    # output grid register with common grid resolutions (60c, 30c,
    # 15c, 7.5c)
    #
    logging.debug("Extent: %f, %f, %f, %f" % (lonmin, lonmax, latmin, latmax))
    return _round_coord(lonmin[0]), _round_coord(lonmax[0]), \
        _round_coord(latmin[0]), _round_coord(latmax[0])
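
A minimal sketch of the rounding helper implied by the comment above. The real _round_coord is defined elsewhere in the module, so the body below is only an assumption consistent with "round to the nearest minute", not the actual implementation:

import numpy as np

def _round_coord(coord):
    # Round a coordinate (degrees) to the nearest arc-minute (1/60 degree).
    return np.round(coord * 60.0) / 60.0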
Example #38
def _get_extent_from_multigmpe(rupture, config=None):
    """
    Use MultiGMPE to determine extent
    """
    (clon, clat) = _rupture_center(rupture)
    origin = rupture.getOrigin()
    if config is not None:
        gmpe = MultiGMPE.from_config(config)
        gmice = get_object_from_config('gmice', 'modeling', config)
        if imt.SA in gmice.DEFINED_FOR_INTENSITY_MEASURE_TYPES:
            default_imt = imt.SA(1.0)
        elif imt.PGV in gmice.DEFINED_FOR_INTENSITY_MEASURE_TYPES:
            default_imt = imt.PGV()
        else:
            default_imt = imt.PGA()
    else:
        # Put in some default values for conf
        config = {
            'extent': {
                'mmi': {
                    'threshold': 4.5,
                    'mindist': 100,
                    'maxdist': 1000
                }
            }
        }

        # Generic GMPEs choices based only on active vs stable
        # as defaults...
        stable = is_stable(origin.lon, origin.lat)
        if not stable:
            ASK14 = AbrahamsonEtAl2014()
            CB14 = CampbellBozorgnia2014()
            CY14 = ChiouYoungs2014()
            gmpes = [ASK14, CB14, CY14]
            site_gmpes = None
            weights = [1/3.0, 1/3.0, 1/3.0]
            gmice = WGRW12()
        else:
            Fea96 = FrankelEtAl1996MwNSHMP2008()
            Tea97 = ToroEtAl1997MwNSHMP2008()
            Sea02 = SilvaEtAl2002MwNSHMP2008()
            C03 = Campbell2003MwNSHMP2008()
            TP05 = TavakoliPezeshk2005MwNSHMP2008()
            AB06p = AtkinsonBoore2006Modified2011()
            Pea11 = PezeshkEtAl2011()
            Atk08p = Atkinson2008prime()
            Sea01 = SomervilleEtAl2001NSHMP2008()
            gmpes = [Fea96, Tea97, Sea02, C03,
                     TP05, AB06p, Pea11, Atk08p, Sea01]
            site_gmpes = [AB06p]
            weights = [0.16, 0.0, 0.0, 0.17, 0.17, 0.3, 0.2, 0.0, 0.0]
            gmice = AK07()

        gmpe = MultiGMPE.from_list(
            gmpes, weights, default_gmpes_for_site=site_gmpes)
        default_imt = imt.SA(1.0)

    min_mmi = config['extent']['mmi']['threshold']
    sd_types = [const.StdDev.TOTAL]

    # Distance context
    dx = DistancesContext()
    # The minimum and maximum distances come from the config
    # ('mindist'/'maxdist'; 100 and 1000 km by default).
    d_min = config['extent']['mmi']['mindist']
    d_max = config['extent']['mmi']['maxdist']
    dx.rjb = np.logspace(np.log10(d_min), np.log10(d_max), 2000)
    # Details don't matter for this; assuming vertical surface rupturing fault
    # with epicenter at the surface.
    dx.rrup = dx.rjb
    dx.rhypo = dx.rjb
    dx.repi = dx.rjb
    dx.rx = np.zeros_like(dx.rjb)
    dx.ry0 = np.zeros_like(dx.rjb)
    dx.rvolc = np.zeros_like(dx.rjb)

    # Sites context
    sx = SitesContext()
    # Set to soft soil conditions
    sx.vs30 = np.full_like(dx.rjb, 180)
    sx = MultiGMPE.set_sites_depth_parameters(sx, gmpe)
    sx.vs30measured = np.full_like(sx.vs30, False, dtype=bool)
    sx = Sites._addDepthParameters(sx)
    sx.backarc = np.full_like(sx.vs30, False, dtype=bool)

    # Rupture context
    rx = RuptureContext()
    rx.mag = origin.mag
    rx.rake = 0.0
    # From WC94...
    rx.width = 10**(-0.76 + 0.27*rx.mag)
    rx.dip = 90.0
    rx.ztor = origin.depth
    rx.hypo_depth = origin.depth

    gmpe_imt_mean, _ = gmpe.get_mean_and_stddevs(
        sx, rx, dx, default_imt, sd_types)

    # Convert to MMI
    gmpe_to_mmi, _ = gmice.getMIfromGM(gmpe_imt_mean, default_imt)

    # Farthest distance at which the predicted MMI still exceeds the
    # threshold; this is used as the buffer around the rupture.
    dists_exceed_mmi = dx.rjb[gmpe_to_mmi > min_mmi]
    if len(dists_exceed_mmi):
        mindist_km = np.max(dists_exceed_mmi)
    else:
        mindist_km = d_min

    # Get a projection
    proj = OrthographicProjection(clon - 4, clon + 4, clat + 4, clat - 4)
    if isinstance(rupture, (QuadRupture, EdgeRupture)):
        ruptx, rupty = proj(
            rupture.lons[~np.isnan(rupture.lons)],
            rupture.lats[~np.isnan(rupture.lats)]
        )
    else:
        ruptx, rupty = proj(clon, clat)

    xmin = np.nanmin(ruptx) - mindist_km
    ymin = np.nanmin(rupty) - mindist_km
    xmax = np.nanmax(ruptx) + mindist_km
    ymax = np.nanmax(rupty) + mindist_km

    # Put a limit on range of aspect ratio
    dx = xmax - xmin
    dy = ymax - ymin
    ar = dy / dx
    if ar > 1.2:
        # Inflate x
        dx_target = dy / 1.2
        ddx = dx_target - dx
        xmax = xmax + ddx / 2
        xmin = xmin - ddx / 2
    if ar < 0.83:
        # Inflate y
        dy_target = dx * 0.83
        ddy = dy_target - dy
        ymax = ymax + ddy / 2
        ymin = ymin - ddy / 2

    lonmin, latmin = proj(np.array([xmin]), np.array([ymin]), reverse=True)
    lonmax, latmax = proj(np.array([xmax]), np.array([ymax]), reverse=True)

    #
    # Round coordinates to the nearest minute -- that should make the
    # output grid register with common grid resolutions (60c, 30c,
    # 15c, 7.5c)
    #
    logging.debug("Extent: %f, %f, %f, %f" %
                  (lonmin, lonmax, latmin, latmax))
    return _round_coord(lonmin[0]), _round_coord(lonmax[0]), \
        _round_coord(latmin[0]), _round_coord(latmax[0])
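
The distance-threshold step above can be exercised in isolation. A self-contained sketch with synthetic numbers (the helper name and the values are made up for illustration):

import numpy as np

def max_distance_above_threshold(rjb, mmi, threshold, d_min):
    # Largest distance at which the predicted MMI still exceeds the
    # threshold; fall back to d_min if it is never exceeded.
    exceed = rjb[mmi > threshold]
    return float(np.max(exceed)) if len(exceed) else float(d_min)

rjb = np.array([10.0, 50.0, 200.0, 800.0])
mmi = np.array([7.2, 5.8, 4.7, 3.1])
print(max_distance_above_threshold(rjb, mmi, 4.5, 100.0))  # -> 200.0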
Example #39
def signal_end(st,
               event_time,
               event_lon,
               event_lat,
               event_mag,
               method=None,
               vmin=None,
               floor=None,
               model=None,
               epsilon=2.0):
    """
    Estimate end of signal by using a model of the 5-95% significant
    duration, and adding this value to the "signal_split" time. This probably
    only works well when the split is estimated with a p-wave picker since
    the velocity method often ends up with split times that are well before
    signal actually starts.

    Args:
        st (StationStream):
            Stream of data.
        event_time (UTCDateTime):
            Event origin time.
        event_lon (float):
            Event longitude.
        event_lat (float):
            Event latitude.
        event_mag (float):
            Event magnitude.
        method (str):
            Method for estimating signal end time. Either 'velocity'
            or 'model'.
        vmin (float):
            Velocity (km/s) for estimating end of signal. Only used if
            method="velocity".
        floor (float):
            Minimum duration (sec) applied along with vmin.
        model (str):
            Short name of duration model to use. Must be defined in the
            gmprocess/data/modules.yml file.
        epsilon (float):
            Number of standard deviations; if epsilon is 1.0, then the signal
            window duration is the mean Ds + 1 standard deviation. Only used
            for method="model".

    Returns:
        StationStream: Stream in which each trace that has a 'signal_split'
        parameter gets a 'signal_end' parameter set via tr.setParameter.

    """
    # Load openquake stuff if method="model"
    if method == "model":
        mod_file = pkg_resources.resource_filename(
            'gmprocess', os.path.join('data', 'modules.yml'))
        with open(mod_file, 'r') as f:
            mods = yaml.load(f, Loader=yaml.FullLoader)

        # Import module
        cname, mpath = mods['modules'][model]
        dmodel = getattr(import_module(mpath), cname)()

        # Set some "conservative" inputs (in that they will tend to give
        # larger durations).
        sctx = SitesContext()
        sctx.vs30 = np.array([180.0])
        sctx.z1pt0 = np.array([0.51])
        rctx = RuptureContext()
        rctx.mag = event_mag
        rctx.rake = -90.0
        dur_imt = imt.from_string('RSD595')
        stddev_types = [const.StdDev.INTRA_EVENT]

    for tr in st:
        if not tr.hasParameter('signal_split'):
            continue
        if method == "velocity":
            if vmin is None:
                raise ValueError('Must specify vmin if method is "velocity".')
            if floor is None:
                raise ValueError('Must specify floor if method is "velocity".')
            epi_dist = gps2dist_azimuth(
                lat1=event_lat,
                lon1=event_lon,
                lat2=tr.stats['coordinates']['latitude'],
                lon2=tr.stats['coordinates']['longitude'])[0] / 1000.0
            end_time = event_time + max(floor, epi_dist / vmin)
        elif method == "model":
            if model is None:
                raise ValueError('Must specify model if method is "model".')
            epi_dist = gps2dist_azimuth(
                lat1=event_lat,
                lon1=event_lon,
                lat2=tr.stats['coordinates']['latitude'],
                lon2=tr.stats['coordinates']['longitude'])[0] / 1000.0
            dctx = DistancesContext()
            # Repi >= Rrup, so substitution here should be conservative
            # (leading to larger durations).
            dctx.rrup = np.array([epi_dist])
            lnmu, lnstd = dmodel.get_mean_and_stddevs(sctx, rctx, dctx,
                                                      dur_imt, stddev_types)
            duration = np.exp(lnmu + epsilon * lnstd[0])
            # Get split time
            split_time = tr.getParameter('signal_split')['split_time']
            end_time = split_time + float(duration)
        else:
            raise ValueError('method must be either "velocity" or "model".')
        # Update trace params
        end_params = {
            'end_time': end_time,
            'method': method,
            'vsplit': vmin,
            'floor': floor,
            'model': model,
            'epsilon': epsilon
        }
        tr.setParameter('signal_end', end_params)

    return st
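
Once the duration model has been evaluated, the "model" branch reduces to simple arithmetic. An illustrative calculation with assumed numbers (lnmu and lnstd would normally come from dmodel.get_mean_and_stddevs):

import numpy as np

lnmu = np.array([3.2])      # assumed ln of the mean 5-95% duration (s)
lnstd = [np.array([0.55])]  # assumed standard deviation(s) of ln duration
epsilon = 2.0
duration = np.exp(lnmu + epsilon * lnstd[0])  # exp(4.3), about 74 s
split_time = 0.0            # placeholder; in the function this is a UTCDateTime
end_time = split_time + float(duration)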
Example #40
def trim_multiple_events(
    st,
    origin,
    catalog,
    travel_time_df,
    pga_factor,
    pct_window_reject,
    gmpe,
    site_parameters,
    rupture_parameters,
):
    """
    Uses a catalog (list of ScalarEvents) to handle cases where a trace might
    contain signals from multiple events. The catalog should contain events
    down to a low enough magnitude in relation to the events of interest.
    Overall, the algorithm is as follows:

    1) For each earthquake in the catalog, get the P-wave travel time
       and estimated PGA at this station.

    2) Compute the PGA (of the as-recorded horizontal channels).

    3) Select the P-wave arrival times across all events for this record
       that are (a) within the signal window, and (b) whose predicted PGA
       (from step #1) is greater than pga_factor times the recorded PGA
       from step #2.

    4) If any P-wave arrival times match the above criteria, and any of
       them fall within the first pct_window_reject*100% of the signal
       window, reject the record. Otherwise, trim the record so that its
       end time does not include any of the arrivals selected in step #3.

    Args:
        st (StationStream):
            Stream of data.
        origin (ScalarEvent):
            ScalarEvent object associated with the StationStream.
        catalog (list):
            List of ScalarEvent objects.
        travel_time_df (DataFrame):
            A pandas DataFrame that contains the travel time information
            (obtained from
             gmprocess.waveform_processing.phase.create_travel_time_dataframe).
            The columns in the DataFrame are the station ids and the indices
            are the earthquake ids.
        pga_factor (float):
            A decimal factor used to determine whether the predicted PGA
            from an event arrival is significant enough that it should be
            considered for removal.
        pct_window_reject (float):
           A decimal from 0.0 to 1.0 used to determine if an arrival should
            be trimmed from the record, or if the entire record should be
            rejected. If the arrival falls within the first
            pct_window_reject * 100% of the signal window, then the entire
            record will be rejected. Otherwise, the record will be trimmed
            appropriately.
        gmpe (str):
            Short name of the GMPE to use. Must be defined in the modules file.
        site_parameters (dict):
            Dictionary of site parameters to input to the GMPE.
        rupture_parameters:
            Dictionary of rupture parameters to input to the GMPE.

    Returns:
        StationStream: Processed stream.

    """

    if not st.passed:
        return st

    # Check that we know the signal split for each trace in the stream
    for tr in st:
        if not tr.hasParameter("signal_split"):
            return st

    signal_window_starttime = st[0].getParameter("signal_split")["split_time"]

    arrivals = travel_time_df[st[0].stats.network + "." + st[0].stats.station]
    arrivals = arrivals.sort_values()

    # Filter by any arrival times that appear in the signal window
    arrivals = arrivals[(arrivals > signal_window_starttime)
                        & (arrivals < st[0].stats.endtime)]

    # Make sure we remove the arrival that corresponds to the event of interest
    if origin.id in arrivals.index:
        arrivals.drop(index=origin.id, inplace=True)

    if arrivals.empty:
        return st

    # Calculate the recorded PGA for this record
    stasum = StationSummary.from_stream(st, ["ROTD(50.0)"], ["PGA"])
    recorded_pga = stasum.get_pgm("PGA", "ROTD(50.0)")

    # Load the GMPE model
    gmpe = load_model(gmpe)

    # Generic context
    rctx = RuptureContext()

    # Make sure that site parameter values are converted to numpy arrays
    site_parameters_copy = site_parameters.copy()
    for k, v in site_parameters_copy.items():
        site_parameters_copy[k] = np.array([site_parameters_copy[k]])
    rctx.__dict__.update(site_parameters_copy)

    # Filter by arrivals that have significant expected PGA using GMPE
    is_significant = []
    for eqid, arrival_time in arrivals.items():
        event = next(event for event in catalog if event.id == eqid)

        # Set rupture parameters
        rctx.__dict__.update(rupture_parameters)
        rctx.mag = event.magnitude

        # TODO: distances should be calculated when we refactor to be
        # able to import distance calculations
        rctx.repi = np.array([
            gps2dist_azimuth(
                st[0].stats.coordinates.latitude,
                st[0].stats.coordinates.longitude,
                event.latitude,
                event.longitude,
            )[0] / 1000
        ])
        rctx.rjb = rctx.repi
        rctx.rhypo = np.sqrt(rctx.repi**2 + event.depth_km**2)
        rctx.rrup = rctx.rhypo
        rctx.sids = np.array(range(np.size(rctx.rrup)))
        pga, sd = gmpe.get_mean_and_stddevs(rctx, rctx, rctx, imt.PGA(), [])

        # Convert from ln(g) to %g
        predicted_pga = 100 * np.exp(pga[0])
        if predicted_pga > (pga_factor * recorded_pga):
            is_significant.append(True)
        else:
            is_significant.append(False)

    significant_arrivals = arrivals[is_significant]
    if significant_arrivals.empty:
        return st

    # Check if any of the significant arrivals occur within the first
    # pct_window_reject fraction of the signal window; if so, reject the
    # record
    signal_length = st[0].stats.endtime - signal_window_starttime
    cutoff_time = signal_window_starttime + pct_window_reject * (signal_length)
    if (significant_arrivals < cutoff_time).any():
        for tr in st:
            tr.fail("A significant arrival from another event occurs within "
                    "the first %s percent of the signal window" %
                    (100 * pct_window_reject))

    # Otherwise, trim the stream at the first significant arrival
    else:
        for tr in st:
            signal_end = tr.getParameter("signal_end")
            signal_end["end_time"] = significant_arrivals[0]
            signal_end["method"] = "Trimming before right another event"
            tr.setParameter("signal_end", signal_end)
        cut(st)

    return st
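
A worked illustration of the step-3 significance criterion, using fabricated numbers:

import numpy as np

recorded_pga = 12.0  # %g from the as-recorded horizontal channels (assumed)
pga_factor = 0.2     # user-supplied decimal factor (assumed)
ln_pga_g = -3.5      # ln(PGA in g) predicted by the GMPE for another event (assumed)
predicted_pga = 100 * np.exp(ln_pga_g)            # about 3.0 %g
print(predicted_pga > pga_factor * recorded_pga)  # True -> arrival is significant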
Example #41
def signal_end(
    st,
    event_time,
    event_lon,
    event_lat,
    event_mag,
    method=None,
    vmin=None,
    floor=None,
    model=None,
    epsilon=2.0,
):
    """
    Estimate end of signal by using a model of the 5-95% significant
    duration, and adding this value to the "signal_split" time. This probably
    only works well when the split is estimated with a p-wave picker since
    the velocity method often ends up with split times that are well before
    signal actually starts.

    Args:
        st (StationStream):
            Stream of data.
        event_time (UTCDateTime):
            Event origin time.
        event_lon (float):
            Event longitude.
        event_lat (float):
            Event latitude.
        event_mag (float):
            Event magnitude.
        method (str):
            Method for estimating signal end time. Either 'velocity'
            or 'model'.
        vmin (float):
            Velocity (km/s) for estimating end of signal. Only used if
            method="velocity".
        floor (float):
            Minimum duration (sec) applied along with vmin.
        model (str):
            Short name of duration model to use. Must be defined in the
            gmprocess/data/modules.yml file.
        epsilon (float):
            Number of standard deviations; if epsilon is 1.0, then the signal
            window duration is the mean Ds + 1 standard deviation. Only used
            for method="model".

    Returns:
        StationStream: Stream in which each trace that has a 'signal_split'
        parameter gets a 'signal_end' parameter set via tr.setParameter.

    """
    # Load openquake stuff if method="model"
    if method == "model":
        dmodel = load_model(model)

        # Set some "conservative" inputs (in that they will tend to give
        # larger durations).
        rctx = RuptureContext()
        rctx.mag = event_mag
        rctx.rake = -90.0
        rctx.vs30 = np.array([180.0])
        rctx.z1pt0 = np.array([0.51])
        dur_imt = imt.from_string("RSD595")
        stddev_types = [const.StdDev.TOTAL]

    for tr in st:
        if not tr.hasParameter("signal_split"):
            continue
        if method == "velocity":
            if vmin is None:
                raise ValueError('Must specify vmin if method is "velocity".')
            if floor is None:
                raise ValueError('Must specify floor if method is "velocity".')
            epi_dist = (gps2dist_azimuth(
                lat1=event_lat,
                lon1=event_lon,
                lat2=tr.stats["coordinates"]["latitude"],
                lon2=tr.stats["coordinates"]["longitude"],
            )[0] / 1000.0)
            end_time = event_time + max(floor, epi_dist / vmin)
        elif method == "model":
            if model is None:
                raise ValueError('Must specify model if method is "model".')
            epi_dist = (gps2dist_azimuth(
                lat1=event_lat,
                lon1=event_lon,
                lat2=tr.stats["coordinates"]["latitude"],
                lon2=tr.stats["coordinates"]["longitude"],
            )[0] / 1000.0)
            # Repi >= Rrup, so substitution here should be conservative
            # (leading to larger durations).
            rctx.rrup = np.array([epi_dist])
            rctx.sids = np.array(range(np.size(rctx.rrup)))
            lnmu, lnstd = dmodel.get_mean_and_stddevs(rctx, rctx, rctx,
                                                      dur_imt, stddev_types)
            duration = np.exp(lnmu + epsilon * lnstd[0])
            # Get split time
            split_time = tr.getParameter("signal_split")["split_time"]
            end_time = split_time + float(duration)
        else:
            raise ValueError('method must be either "velocity" or "model".')
        # Update trace params
        end_params = {
            "end_time": end_time,
            "method": method,
            "vsplit": vmin,
            "floor": floor,
            "model": model,
            "epsilon": epsilon,
        }
        tr.setParameter("signal_end", end_params)

    return st
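
The "velocity" branch is likewise plain arithmetic. An illustrative calculation with assumed values (origin time, distance, and thresholds are made up):

from obspy import UTCDateTime

event_time = UTCDateTime("2021-01-01T00:00:00")  # assumed origin time
epi_dist = 150.0  # epicentral distance in km (assumed)
vmin = 1.0        # km/s
floor = 120.0     # seconds
end_time = event_time + max(floor, epi_dist / vmin)  # origin time + 150 s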
Example #42
    ## Make the rupture and distance contexts:
    i_rctx = RuptureContext()
    i_dctx = DistancesContext()
    i_sctx = SitesContext()

    # Zhao & Travasarou wants rrup, but dont' have that - so set to rhypo;
    # BSSA14 wants rjb but don't have it, so set to repi:
    i_dctx.rrup = rhypo[recording_i]
    i_dctx.rjb = repi[recording_i]

    ## Site:
    i_sctx.vs30 = np.array([vs30_data[recording_i]])

    ## Rupture - USE THE PGD MAGNITUDE!
    i_rctx.rake = rake[recording_i]
    i_rctx.mag = mw_pgd[recording_i]
    i_rctx.hypo_depth = hypodepth[recording_i]

    ## Get the Zhao predictions (in g), Travasarou (unitless), and BSSA (cm/s)
    i_median_zhao2006, i_sd_zhao2006 = zhao2006.get_mean_and_stddevs(
        i_sctx, i_rctx, i_dctx, imt_pga, [const.StdDev.TOTAL])
    i_median_travasarou, i_sd_travasarou = travasarou.get_mean_and_stddevs(
        i_sctx, i_rctx, i_dctx, imt_arias, [const.StdDev.TOTAL])
    i_median_bssa14, i_sd_bssa14 = bssa14.get_mean_and_stddevs(
        i_sctx, i_rctx, i_dctx, imt_pgv, [const.StdDev.TOTAL])

    ## Convert BSSA from cm/s to m/s, and keep in linear space:
    i_median_bssa14 = np.exp(i_median_bssa14) * 1e-2

    ## Convert Zhao from g to m/s/s, and keep in linear space:
    i_median_zhao2006 = np.exp(i_median_zhao2006) * g
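
The unit handling above can be checked with a tiny standalone calculation (the input values are assumed for illustration):

import numpy as np

g = 9.81                            # gravitational acceleration in m/s**2 (assumed)
ln_pga_g = np.array([-3.0])         # example ln(PGA) in g from a GMPE
pga_ms2 = np.exp(ln_pga_g) * g      # PGA in m/s**2, about 0.49
ln_pgv_cms = np.array([1.5])        # example ln(PGV) in cm/s
pgv_ms = np.exp(ln_pgv_cms) * 1e-2  # PGV in m/s, about 0.045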
Example #43

## This all works..... ##

ASK14 = AbrahamsonEtAl2014()

IMT = imt.PGA()
rctx = RuptureContext()
dctx = DistancesContext()
sctx = SitesContext()
sctx_rock = SitesContext()

rctx.rake = 0.0
rctx.dip = 90.0
rctx.ztor = 7.13
rctx.mag = 3.0
#rctx.mag = np.linspace(0.1,5.)
rctx.width = 10.0
rctx.hypo_depth = 8.0

#dctx.rrup = np.logspace(1,np.log10(200),100)
# A single rupture distance of 10 km:
dctx.rrup = np.logspace(np.log10(10.0), np.log10(10.0), 1)


# Assuming average ztor, get rjb:
dctx.rjb = np.sqrt(dctx.rrup**2 - rctx.ztor**2)
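# (For a vertical fault whose top edge is at depth ztor, the closest rupture
#  point to a surface site satisfies rrup**2 = rjb**2 + ztor**2, which gives
#  the expression above.)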
dctx.rhypo = dctx.rrup
dctx.rx = dctx.rjb
dctx.ry0 = dctx.rx

sctx.vs30 = np.ones_like(dctx.rrup) * 760.0