Example no. 1 (score: 0)
    def _b_ml(self, catalogue, config, cmag, ctime, ref_mag, dmag):
        """
        Estimate Gutenberg-Richter a- and b-values by applying the Aki
        maximum-likelihood estimator to each completeness window and then
        averaging the per-window estimates.

        :param catalogue: catalogue object exposing ``end_year`` and a
            ``data`` dict with ``'year'`` and ``'magnitude'`` arrays
        :param dict config: must contain ``'Average Type'`` and
            ``'reference_magnitude'``
        :param cmag: completeness magnitude for each completeness-table row
        :param ctime: completeness start year for each row (rows sharing a
            start year form one window)
        :param ref_mag: unused here -- kept for interface compatibility
        :param float dmag: magnitude bin width
        :returns: tuple ``(bval, sigma_b, aval, sigma_a - aval)`` or, when a
            reference magnitude is configured,
            ``(bval, sigma_b, rate, sigma_rate)``
        """
        end_year = float(catalogue.end_year)
        catalogue = catalogue.data
        ival = 0
        mag_eq_tolerance = 1E-5
        aki_ml = AkiMaxLikelihood()

        while ival < np.shape(ctime)[0]:
            # Rows of the completeness table with (nearly) the same start
            # year form a single window; use its smallest completeness
            # magnitude as the cut-off
            id0 = np.abs(ctime - ctime[ival]) < mag_eq_tolerance
            m_c = np.min(cmag[id0])

            # NOTE(review): debug print left in place to preserve observable
            # behaviour -- consider switching to logging
            print('--- ctime', ctime[ival], ' m_c', m_c)

            # Find events later than cut-off year, and with magnitude
            # greater than or equal to the corresponding completeness
            # magnitude. m_c - mag_eq_tolerance is required to correct
            # floating point differences.
            id1 = np.logical_and(
                catalogue['year'] >= ctime[ival], catalogue['magnitude'] >=
                (m_c - mag_eq_tolerance))
            # Get a- and b- value for the selected events
            temp_rec_table = recurrence_table(catalogue['magnitude'][id1],
                                              dmag, catalogue['year'][id1],
                                              end_year - ctime[ival] + 1)

            bval, sigma_b = aki_ml._aki_ml(temp_rec_table[:, 0],
                                           temp_rec_table[:, 1], dmag, m_c)

            if ival == 0:
                gr_pars = np.array([np.hstack([bval, sigma_b])])
                neq = np.sum(id1)  # Number of events
            else:
                gr_pars = np.vstack([gr_pars, np.hstack([bval, sigma_b])])
                neq = np.hstack([neq, np.sum(id1)])
            # Skip past every row belonging to the current window
            ival = ival + np.sum(id0)

        # Get average GR parameters
        bval, sigma_b = self._average_parameters(gr_pars, neq,
                                                 config['Average Type'])
        # np.float was removed in NumPy 1.20+; the builtin float is the
        # documented replacement
        aval = self._calculate_a_value(bval, float(np.sum(neq)), cmag,
                                       ctime, catalogue['magnitude'], end_year,
                                       dmag)
        sigma_a = self._calculate_a_value(bval + sigma_b,
                                          float(np.sum(neq)), cmag, ctime,
                                          catalogue['magnitude'], end_year,
                                          dmag)
        if not config['reference_magnitude']:
            return bval, sigma_b, aval, sigma_a - aval
        else:
            # Convert the a-value into an occurrence rate at the reference
            # magnitude; sigma_rate is the rate offset implied by sigma_a
            rate = 10. ** (aval - bval * config['reference_magnitude'])
            sigma_rate = 10. ** (sigma_a -
                                 bval * config['reference_magnitude']) - rate
            return bval, sigma_b, rate, sigma_rate
Example no. 2 (score: 0)
    def calculate(self, catalogue, config, completeness=None):
        """
        Main function to calculate the a- and b-value.

        Splits the catalogue into completeness windows, estimates a b-value
        per window with the Aki maximum-likelihood method, combines them via
        a weighted harmonic mean, and derives the corresponding a-value.

        :param catalogue: catalogue object exposing ``end_year`` and a
            ``data`` dict with ``'year'`` and ``'magnitude'`` arrays
        :param dict config: configuration; ``'reference_magnitude'`` controls
            whether a log10 a-value or an occurrence rate is returned
        :param completeness: optional completeness table passed through to
            :func:`input_checks`
        :returns: tuple ``(bval, sigma_b, aval, sigma_a)``
        """
        # Input checks
        cmag, ctime, ref_mag, dmag, config = input_checks(catalogue,
                                                          config,
                                                          completeness)
        tolerance = 1E-7
        number_intervals = np.shape(ctime)[0]
        b_est = np.zeros(number_intervals, dtype=float)
        neq = np.zeros(number_intervals, dtype=float)
        nyr = np.zeros(number_intervals, dtype=float)

        for ival in range(number_intervals):
            # Rows sharing a start year form one window; take its minimum
            # completeness magnitude
            id0 = np.abs(ctime - ctime[ival]) < tolerance
            m_c = np.min(cmag[id0])
            if ival == 0:
                # Most recent window: from its start year to the catalogue end
                id1 = np.logical_and(
                    catalogue.data['year'] >= (ctime[ival] - tolerance),
                    catalogue.data['magnitude'] >= (m_c - tolerance))
                nyr[ival] = float(catalogue.end_year) - ctime[ival] + 1.
            elif ival == number_intervals - 1:
                # Oldest window: everything before the previous window starts
                id1 = np.logical_and(
                    catalogue.data['year'] < (ctime[ival - 1] - tolerance),
                    catalogue.data['magnitude'] >= (m_c - tolerance))
                nyr[ival] = ctime[ival - 1] - ctime[ival]
            else:
                # Interior window: bounded by this and the previous start year
                id1 = np.logical_and(
                    catalogue.data['year'] >= (ctime[ival] - tolerance),
                    catalogue.data['year'] < (ctime[ival - 1] - tolerance))
                # NOTE(review): strict '>' here differs from the '>=' used in
                # the other branches -- preserved as-is, but verify intent
                id1 = np.logical_and(
                    id1, catalogue.data['magnitude'] > (m_c - tolerance))
                nyr[ival] = ctime[ival - 1] - ctime[ival]
            neq[ival] = np.sum(id1)
            # Get a- and b- value for the selected events
            temp_rec_table = recurrence_table(catalogue.data['magnitude'][id1],
                                              dmag,
                                              catalogue.data['year'][id1])

            aki_ml = AkiMaxLikelihood()
            b_est[ival] = aki_ml._aki_ml(temp_rec_table[:, 0],
                                         temp_rec_table[:, 1],
                                         dmag, m_c)[0]
        # np.float was removed in NumPy 1.20+; use the builtin float
        total_neq = float(np.sum(neq))
        bval = self._harmonic_mean(b_est, neq)
        sigma_b = bval / np.sqrt(total_neq)
        aval = self._calculate_a_value(bval, total_neq, nyr, cmag, ref_mag)
        sigma_a = self._calculate_a_value(bval + sigma_b, total_neq, nyr,
                                          cmag, ref_mag)

        if not config['reference_magnitude']:
            aval = np.log10(aval)
            sigma_a = np.log10(sigma_a) - aval
        else:
            sigma_a = sigma_a - aval
        return bval, sigma_b, aval, sigma_a
class AkiMaximumLikelihoodTestCase(unittest.TestCase):

    def setUp(self):
        """
        Build the minimal synthetic data set used by both regression tests.
        """
        # Test A: synthetic Gutenberg-Richter counts for b=1, N(m=4.0)=10.0
        self.dmag = 0.1
        edges = np.arange(4.0, 7.01, 0.1)
        self.mval = edges[0:-1] + self.dmag / 2.0
        self.bval = 1.0
        self.numobs = np.flipud(
            np.diff(np.flipud(10.0**(-self.bval * edges + 8.0))))
        # Test B: artificial catalogue drawn from the distribution above
        counts = np.around(self.numobs)
        total = int(np.sum(self.numobs))
        mags = np.zeros(total)
        start = 0
        for centre, count in zip(self.mval, counts):
            stop = int(start + count)
            mags[start:stop] = centre + 0.01
            start = stop
        years = np.ones(total) * 1999
        self.catalogue = Catalogue.make_from_dict({
            'magnitude': mags,
            'year': years
        })
        # Seismicity occurrence calculator under test
        self.aki_ml = AkiMaxLikelihood()

    def test_aki_maximum_likelihood_A(self):
        """
        The b-value recovered from the binned counts must match the b-value
        used to generate them.
        """
        b_fit, b_sigma = self.aki_ml._aki_ml(self.mval, self.numobs)
        self.assertAlmostEqual(self.bval, b_fit, 2)

    def test_aki_maximum_likelihood_B(self):
        """
        The b-value recovered from the synthetic catalogue must match the
        b-value used to generate it.
        """
        b_fit, b_sigma = self.aki_ml.calculate(self.catalogue)
        self.assertAlmostEqual(self.bval, b_fit, 2)
class AkiMaximumLikelihoodTestCase(unittest.TestCase):
    """Regression tests for the Aki maximum-likelihood b-value estimator."""

    def setUp(self):
        """
        This generates a minimum data-set to be used for the regression.
        """
        # Test A: Generates a data set assuming b=1 and N(m=4.0)=10.0 events
        self.dmag = 0.1
        # Bin edges from 4.0 to 7.0 in 0.1 steps; mval holds the bin centres
        mext = np.arange(4.0, 7.01, 0.1)
        self.mval = mext[0:-1] + self.dmag / 2.0
        self.bval = 1.0
        # Incremental counts per bin from the cumulative GR relation
        # log10 N(>=m) = 8.0 - b*m (flipud/diff/flipud differences the
        # cumulative counts from the high-magnitude end)
        self.numobs = np.flipud(
            np.diff(np.flipud(10.0 ** (-self.bval * mext + 8.0))))
        # Test B: Generate a completely artificial catalogue using the
        # Gutenberg-Richter distribution defined above
        numobs = np.around(self.numobs)
        size = int(np.sum(self.numobs))
        magnitude = np.zeros(size)
        lidx = 0
        # Fill each bin's slice with its centre magnitude (+0.01 to keep
        # events strictly inside the bin)
        for mag, nobs in zip(self.mval, numobs):
            uidx = int(lidx + nobs)
            magnitude[lidx:uidx] = mag + 0.01
            lidx = uidx
        # All events assigned to a single year
        year = np.ones(size) * 1999
        self.catalogue = Catalogue.make_from_dict(
            {'magnitude': magnitude, 'year': year})
        # Create the seismicity occurrence calculator
        self.aki_ml = AkiMaxLikelihood()

    def test_aki_maximum_likelihood_A(self):
        """
        Tests that the computed b value corresponds to the same value
        used to generate the test data set
        """
        bval, sigma_b = self.aki_ml._aki_ml(self.mval, self.numobs)
        self.assertAlmostEqual(self.bval, bval, 2)

    def test_aki_maximum_likelihood_B(self):
        """
        Tests that the computed b value corresponds to the same value
        used to generate the test data set
        """
        bval, sigma_b = self.aki_ml.calculate(self.catalogue)
        self.assertAlmostEqual(self.bval, bval, 2)
Example no. 5 (score: 0)
    def _b_ml(self, catalogue, config, cmag, ctime, ref_mag, dmag):
        """
        Estimate Gutenberg-Richter a- and b-values by applying the Aki
        maximum-likelihood estimator per completeness window and averaging
        the per-window estimates.

        :param catalogue: catalogue object exposing ``end_year`` and a
            ``data`` dict with ``'year'`` and ``'magnitude'`` arrays
        :param dict config: must contain ``'Average Type'`` and
            ``'reference_magnitude'``
        :param cmag: completeness magnitude for each completeness-table row
        :param ctime: completeness start year for each row (rows sharing a
            start year form one window)
        :param ref_mag: unused here -- kept for interface compatibility
        :param float dmag: magnitude bin width
        :returns: tuple ``(bval, sigma_b, aval, sigma_a - aval)`` or, when a
            reference magnitude is configured,
            ``(bval, sigma_b, rate, sigma_rate)``
        """
        end_year = float(catalogue.end_year)
        catalogue = catalogue.data
        ival = 0
        mag_eq_tolerance = 1E-5
        aki_ml = AkiMaxLikelihood()

        while ival < np.shape(ctime)[0]:
            # Rows of the completeness table with (nearly) identical start
            # years form one window; use its minimum completeness magnitude
            id0 = np.abs(ctime - ctime[ival]) < mag_eq_tolerance
            m_c = np.min(cmag[id0])

            # NOTE(review): debug print left in place to preserve observable
            # behaviour -- consider switching to logging
            print('--- ctime', ctime[ival], ' m_c', m_c)

            # Find events later than cut-off year, and with magnitude
            # greater than or equal to the corresponding completeness
            # magnitude. m_c - mag_eq_tolerance is required to correct
            # floating point differences.
            id1 = np.logical_and(
                catalogue['year'] >= ctime[ival],
                catalogue['magnitude'] >= (m_c - mag_eq_tolerance))
            # Get a- and b- value for the selected events
            temp_rec_table = recurrence_table(catalogue['magnitude'][id1],
                                              dmag,
                                              catalogue['year'][id1],
                                              end_year - ctime[ival] + 1)

            bval, sigma_b = aki_ml._aki_ml(temp_rec_table[:, 0],
                                           temp_rec_table[:, 1], dmag, m_c)

            if ival == 0:
                gr_pars = np.array([np.hstack([bval, sigma_b])])
                neq = np.sum(id1)  # Number of events
            else:
                gr_pars = np.vstack([gr_pars, np.hstack([bval, sigma_b])])
                neq = np.hstack([neq, np.sum(id1)])
            # Skip past every row belonging to the current window
            ival = ival + np.sum(id0)

        # Get average GR parameters
        bval, sigma_b = self._average_parameters(
            gr_pars, neq, config['Average Type'])
        # np.float was removed in NumPy 1.20+; the builtin float is the
        # documented replacement
        aval = self._calculate_a_value(bval,
                                       float(np.sum(neq)),
                                       cmag,
                                       ctime,
                                       catalogue['magnitude'],
                                       end_year,
                                       dmag)
        sigma_a = self._calculate_a_value(bval + sigma_b,
                                          float(np.sum(neq)),
                                          cmag,
                                          ctime,
                                          catalogue['magnitude'],
                                          end_year,
                                          dmag)
        if not config['reference_magnitude']:
            return bval, sigma_b, aval, sigma_a - aval
        else:
            # Convert the a-value to a rate at the reference magnitude;
            # sigma_rate is the rate offset implied by sigma_a
            rate = 10. ** (aval - bval * config['reference_magnitude'])
            sigma_rate = 10. ** (sigma_a -
                                 bval * config['reference_magnitude']) - rate
            return bval, sigma_b, rate, sigma_rate