Code example #1
    def setUp(self):
        """
        This generates a minimum data-set to be used for the regression.
        """
        # Test A: Generates a data set assuming b=1 and N(m=4.0)=10.0 events
        self.dmag = 0.1
        mext = np.arange(4.0, 7.01, 0.1)
        self.mval = mext[0:-1] + self.dmag / 2.0
        self.bval = 1.0
        self.numobs = np.flipud(
            np.diff(np.flipud(10.0 ** (-self.bval * mext + 8.0))))
        # Test B: Generate a completely artificial catalogue using the
        # Gutenberg-Richter distribution defined above
        numobs = np.around(self.numobs)
        size = int(np.sum(self.numobs))
        magnitude = np.zeros(size)
        lidx = 0
        for mag, nobs in zip(self.mval, numobs):
            uidx = int(lidx+nobs)
            magnitude[lidx:uidx] = mag + 0.01
            lidx = uidx
        year = np.ones(size) * 1999
        self.catalogue = Catalogue.make_from_dict(
            {'magnitude': magnitude, 'year': year})
        # Create the seismicity occurrence calculator
        self.aki_ml = AkiMaxLikelihood()
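
Note: the fixture above builds a binned Gutenberg-Richter data set (log10 N(m) = 8.0 - 1.0 * m, bins of 0.1 magnitude units) and feeds it to the Aki maximum-likelihood estimator. As a rough guide to what AkiMaxLikelihood._aki_ml is expected to return for such input, the following stand-alone sketch applies the Aki (1965) estimator with Utsu's bin-width correction; the helper name aki_ml_b and the simple b/sqrt(N) uncertainty are illustrative assumptions, not the hmtk implementation.

import numpy as np

def aki_ml_b(mval, numobs, dmag=0.1):
    # Sketch of the Aki (1965) maximum-likelihood b-value for binned data.
    # mval   : bin-centre magnitudes
    # numobs : number of events per bin
    # dmag   : bin width; Utsu's correction shifts the minimum by dmag / 2
    neq = np.sum(numobs)
    m_min = np.min(mval)
    m_ave = np.sum(mval * numobs) / neq          # event-weighted mean magnitude
    bval = np.log10(np.e) / (m_ave - m_min + dmag / 2.0)
    sigma_b = bval / np.sqrt(neq)                # simple Aki-style uncertainty
    return bval, sigma_b

Applied to the fixture above (b = 1, bins of 0.1 from M 4.0 to 7.0), this returns a b-value close to 1.0, which is what the tests later assert.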
Code example #2
File: kijko_smit.py  Project: francescovisini/hmtk
    def calculate(self, catalogue, config, completeness=None):
        """
        Main function to calculate the a- and b-value
        """
        # Input checks
        cmag, ctime, ref_mag, dmag, config = input_checks(catalogue,
                                                          config,
                                                          completeness)
        ival = 0
        tolerance = 1E-7
        number_intervals = np.shape(ctime)[0]
        b_est = np.zeros(number_intervals, dtype=float)
        neq = np.zeros(number_intervals, dtype=float)
        nyr = np.zeros(number_intervals, dtype=float)

        for ival in range(0, number_intervals):
            id0 = np.abs(ctime - ctime[ival]) < tolerance
            m_c = np.min(cmag[id0])
            if ival == 0:
                id1 = np.logical_and(
                    catalogue.data['year'] >= (ctime[ival] - tolerance),
                    catalogue.data['magnitude'] >= (m_c - tolerance))
                nyr[ival] = float(catalogue.end_year) - ctime[ival] + 1.
            elif ival == number_intervals - 1:
                id1 = np.logical_and(
                    catalogue.data['year'] < (ctime[ival - 1] - tolerance),
                    catalogue.data['magnitude'] >= (m_c - tolerance))
                nyr[ival] = ctime[ival - 1] - ctime[ival]
            else:
                id1 = np.logical_and(
                    catalogue.data['year'] >= (ctime[ival] - tolerance),
                    catalogue.data['year'] < (ctime[ival - 1] - tolerance))
                id1 = np.logical_and(id1,
                    catalogue.data['magnitude'] > (m_c - tolerance))
                nyr[ival] = ctime[ival - 1] - ctime[ival]
            neq[ival] = np.sum(id1)
            #print ival, m_c, ctime, neq, np.where(id1)[0]
            # Get a- and b- value for the selected events
            temp_rec_table = recurrence_table(catalogue.data['magnitude'][id1],
                                              dmag,
                                              catalogue.data['year'][id1])

            aki_ml = AkiMaxLikelihood()
            b_est[ival] = aki_ml._aki_ml(temp_rec_table[:, 0],
                                         temp_rec_table[:, 1],
                                         dmag, m_c)[0]
            ival += 1
        total_neq = np.float(np.sum(neq))
        bval = self._harmonic_mean(b_est, neq)
        sigma_b = bval / np.sqrt(total_neq)
        aval = self._calculate_a_value(bval, total_neq, nyr, cmag, ref_mag)
        sigma_a = self._calculate_a_value(bval + sigma_b, total_neq, nyr,
                                          cmag, ref_mag)

        if not config['reference_magnitude']:
            aval = np.log10(aval)
            sigma_a = np.log10(sigma_a) - aval
        else:
            sigma_a = sigma_a - aval
        return bval, sigma_b, aval, sigma_a
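
The per-interval b-values computed above are combined by self._harmonic_mean, weighted by the number of events in each completeness interval. A minimal sketch of that combination, assuming the Kijko & Smit (2012) weighted harmonic mean that the surrounding code suggests:

import numpy as np

def harmonic_mean_b(b_est, neq):
    # Weighted harmonic mean of interval b-values: b = N_total / sum_i(n_i / b_i),
    # where n_i is the event count in completeness interval i.
    b_est = np.asarray(b_est, dtype=float)
    neq = np.asarray(neq, dtype=float)
    return np.sum(neq) / np.sum(neq / b_est)

For example, two intervals with b-values 0.9 and 1.1 and 100 events each combine to roughly 0.99; the overall uncertainty is then taken as b / sqrt(N_total), as in the code above.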
Code example #3
File: kijko_smit.py  Project: matley/hmtk
    def calculate(self, catalogue, config, completeness=None):
        '''Main function to calculate the a- and b-value'''
        # Input checks
        cmag, ctime, ref_mag, dmag = input_checks(catalogue, config,
                                                  completeness)
        ival = 0
        mag_eq_tolerance = 1E-5
        number_intervals = np.shape(ctime)[0]
        b_est = np.zeros(number_intervals, dtype=float)
        neq = np.zeros(number_intervals, dtype=float)
        nyr = np.zeros(number_intervals, dtype=float)

        for ival in range(0, number_intervals):
            id0 = np.abs(ctime - ctime[ival]) < mag_eq_tolerance
            m_c = np.min(cmag[id0])
            if ival == number_intervals - 1:
                id1 = np.logical_and(
                    catalogue['year'] >= ctime[ival], catalogue['magnitude'] >=
                    (m_c - mag_eq_tolerance))
            else:
                id1 = np.logical_and(catalogue['year'] >= ctime[ival],
                                     catalogue['year'] < ctime[ival + 1])
                id1 = np.logical_and(
                    id1, catalogue['magnitude'] >= (m_c - mag_eq_tolerance))

#        while ival < number_intervals:
#            id0 = np.abs(ctime - ctime[ival]) < mag_eq_tolerance
#            m_c = np.min(cmag[id0])
#            # Find events later than cut-off year, and with magnitude
#            # greater than or equal to the corresponding completeness magnitude.
#            # m_c - mag_eq_tolerance is required to correct floating point
#            # differences.
#            id1 = np.logical_and(catalogue['year'] >= ctime[ival],
#                catalogue['magnitude'] >= (m_c - mag_eq_tolerance))
            nyr[ival] = np.float(
                np.max(catalogue['year'][id1]) -
                np.min(catalogue['year'][id1]) + 1)
            neq[ival] = np.sum(id1)
            # Get a- and b- value for the selected events
            temp_rec_table = recurrence_table(catalogue['magnitude'][id1],
                                              dmag, catalogue['year'][id1])

            aki_ml = AkiMaxLikelihood()
            b_est[ival] = aki_ml._aki_ml(temp_rec_table[:, 0],
                                         temp_rec_table[:, 1], dmag, m_c)[0]
            ival += 1

        total_neq = np.float(np.sum(neq))
        bval = self._harmonic_mean(b_est, neq)
        sigma_b = bval / np.sqrt(total_neq)
        aval = self._calculate_a_value(bval, total_neq, nyr, cmag, ref_mag)
        sigma_a = self._calculate_a_value(bval + sigma_b, total_neq, nyr, cmag,
                                          ref_mag)

        if 'reference_magnitude' not in config:
            aval = np.log10(aval)
            sigma_a = np.log10(sigma_a) - aval
        else:
            sigma_a = sigma_a - aval
        return bval, sigma_b, aval, sigma_a
Code example #4
    def _b_ml(self, catalogue, config, cmag, ctime, ref_mag, dmag):
        end_year = float(catalogue.end_year)
        catalogue = catalogue.data
        ival = 0
        mag_eq_tolerance = 1E-5
        aki_ml = AkiMaxLikelihood()

        while ival < np.shape(ctime)[0]:

            id0 = np.abs(ctime - ctime[ival]) < mag_eq_tolerance
            m_c = np.min(cmag[id0])

            print('--- ctime', ctime[ival], ' m_c', m_c)

            # Find events later than cut-off year, and with magnitude
            # greater than or equal to the corresponding completeness
            # magnitude. m_c - mag_eq_tolerance is required to correct
            # floating point differences.
            id1 = np.logical_and(
                catalogue['year'] >= ctime[ival], catalogue['magnitude'] >=
                (m_c - mag_eq_tolerance))
            # Get a- and b- value for the selected events
            temp_rec_table = recurrence_table(catalogue['magnitude'][id1],
                                              dmag, catalogue['year'][id1],
                                              end_year - ctime[ival] + 1)

            bval, sigma_b = aki_ml._aki_ml(temp_rec_table[:, 0],
                                           temp_rec_table[:, 1], dmag, m_c)

            if ival == 0:
                gr_pars = np.array([np.hstack([bval, sigma_b])])
                neq = np.sum(id1)  # Number of events
            else:
                gr_pars = np.vstack([gr_pars, np.hstack([bval, sigma_b])])
                neq = np.hstack([neq, np.sum(id1)])
            ival = ival + np.sum(id0)

        # Get average GR parameters
        bval, sigma_b = self._average_parameters(gr_pars, neq,
                                                 config['Average Type'])
        aval = self._calculate_a_value(bval, np.float(np.sum(neq)), cmag,
                                       ctime, catalogue['magnitude'], end_year,
                                       dmag)
        sigma_a = self._calculate_a_value(bval + sigma_b,
                                          np.float(np.sum(neq)), cmag, ctime,
                                          catalogue['magnitude'], end_year,
                                          dmag)
        if not config['reference_magnitude']:
            return bval,\
                   sigma_b,\
                   aval,\
                   sigma_a - aval
        else:
            rate = 10.**(aval - bval * config['reference_magnitude'])
            sigma_rate = 10.**(sigma_a -
                               bval * config['reference_magnitude']) - rate
            return bval,\
                   sigma_b,\
                   rate,\
                   sigma_rate
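
When config['reference_magnitude'] is set, the method reports an occurrence rate at that magnitude rather than the log10 a-value, using the usual Gutenberg-Richter evaluation rate = 10 ** (a - b * M_ref). A small illustrative sketch (the uncertainty handling in the real code differs and is not reproduced here):

def gr_rate_at_reference(aval, bval, ref_mag):
    # Annual rate of events with M >= ref_mag under log10 N(M) = a - b * M
    return 10.0 ** (aval - bval * ref_mag)

# e.g. a = 4.5, b = 1.0 gives about 0.0316 events/yr with M >= 6.0
print(gr_rate_at_reference(4.5, 1.0, 6.0))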
Code example #5
File: b_maximum_likelihood.py  Project: matley/hmtk
    def _b_ml(self, catalogue, config, cmag, ctime, ref_mag, dmag, end_year):
        """
        """

        ival = 0
        mag_eq_tolerance = 1E-5
        aki_ml = AkiMaxLikelihood()

        while ival < np.shape(ctime)[0]:

            id0 = np.abs(ctime - ctime[ival]) < mag_eq_tolerance
            m_c = np.min(cmag[id0])

            print('--- ctime', ctime[ival], ' m_c', m_c)

            # Find events later than cut-off year, and with magnitude
            # greater than or equal to the corresponding completeness magnitude.
            # m_c - mag_eq_tolerance is required to correct floating point
            # differences.
            id1 = np.logical_and(catalogue['year'] >= ctime[ival],
                catalogue['magnitude'] >= (m_c - mag_eq_tolerance))
            nyr = np.float(np.max(catalogue['year'][id1])) - ctime[ival] + 1.
            # Get a- and b- value for the selected events
            temp_rec_table = recurrence_table(catalogue['magnitude'][id1], 
                                              dmag, 
                                              catalogue['year'][id1],
                                              end_year-ctime[ival]+1)
            
            bval, sigma_b = aki_ml._aki_ml(temp_rec_table[:, 0],
                                             temp_rec_table[:, 1], dmag, m_c)

            aval = np.log10(np.float(np.sum(id1)) / nyr) + bval * m_c
            sigma_a = np.abs(np.log10(np.float(np.sum(id1)) / nyr) +
                (bval + sigma_b) * ref_mag - aval)

            # Calculate reference rate
            rate = 10.0 ** (aval - bval * ref_mag)
            sigrate = 10.0 ** ((aval + sigma_a) - (bval * ref_mag) -
                np.log10(rate))
            if ival == 0:
                gr_pars = np.array([np.hstack([bval, sigma_b, rate, sigrate])])
                neq = np.sum(id1)  # Number of events
            else:
                gr_pars = np.vstack([gr_pars, np.hstack([bval, sigma_b, rate,
                                                         sigrate])])
                neq = np.hstack([neq, np.sum(id1)])
            ival = ival + np.sum(id0)

        # Get average GR parameters
        bval, sigma_b, aval, sigma_a = self._average_parameters(gr_pars, neq, 
                config['Average Type'])

        if 'reference_magnitude' not in config:
            d_aval = aval - sigma_a
            aval = np.log10(aval)
            sigma_a = aval - np.log10(d_aval)

        return bval, sigma_b, aval, sigma_a
Code example #6
File: kijko_smit.py  Project: lcui24/hmtk
    def calculate(self, catalogue, config, completeness=None):
        """
        Main function to calculate the a- and b-value
        """
        # Input checks
        cmag, ctime, ref_mag, dmag, config = input_checks(
            catalogue, config, completeness)
        ival = 0
        tolerance = 1E-7
        number_intervals = np.shape(ctime)[0]
        b_est = np.zeros(number_intervals, dtype=float)
        neq = np.zeros(number_intervals, dtype=float)
        nyr = np.zeros(number_intervals, dtype=float)

        for ival in range(0, number_intervals):
            id0 = np.abs(ctime - ctime[ival]) < tolerance
            m_c = np.min(cmag[id0])
            if ival == 0:
                id1 = np.logical_and(
                    catalogue.data['year'] >= (ctime[ival] - tolerance),
                    catalogue.data['magnitude'] >= (m_c - tolerance))
                nyr[ival] = float(catalogue.end_year) - ctime[ival] + 1.
            elif ival == number_intervals - 1:
                id1 = np.logical_and(
                    catalogue.data['year'] < (ctime[ival - 1] - tolerance),
                    catalogue.data['magnitude'] >= (m_c - tolerance))
                nyr[ival] = ctime[ival - 1] - ctime[ival]
            else:
                id1 = np.logical_and(
                    catalogue.data['year'] >= (ctime[ival] - tolerance),
                    catalogue.data['year'] < (ctime[ival - 1] - tolerance))
                id1 = np.logical_and(
                    id1, catalogue.data['magnitude'] > (m_c - tolerance))
                nyr[ival] = ctime[ival - 1] - ctime[ival]
            neq[ival] = np.sum(id1)
            #print ival, m_c, ctime, neq, np.where(id1)[0]
            # Get a- and b- value for the selected events
            temp_rec_table = recurrence_table(catalogue.data['magnitude'][id1],
                                              dmag,
                                              catalogue.data['year'][id1])

            aki_ml = AkiMaxLikelihood()
            b_est[ival] = aki_ml._aki_ml(temp_rec_table[:, 0],
                                         temp_rec_table[:, 1], dmag, m_c)[0]
            ival += 1
        total_neq = np.float(np.sum(neq))
        bval = self._harmonic_mean(b_est, neq)
        sigma_b = bval / np.sqrt(total_neq)
        aval = self._calculate_a_value(bval, total_neq, nyr, cmag, ref_mag)
        sigma_a = self._calculate_a_value(bval + sigma_b, total_neq, nyr, cmag,
                                          ref_mag)

        if not config['reference_magnitude']:
            aval = np.log10(aval)
            sigma_a = np.log10(sigma_a) - aval
        else:
            sigma_a = sigma_a - aval
        return bval, sigma_b, aval, sigma_a
Code example #7
    def setUp(self):
        """
        This generates a minimum data-set to be used for the regression.
        """
        # Test A: Generates a data set assuming b=1 and N(m=4.0)=10.0 events
        self.dmag = 0.1
        mext = np.arange(4.0, 7.01, 0.1)
        self.mval = mext[0:-1] + self.dmag / 2.0
        self.bval = 1.0
        self.numobs = np.flipud(
            np.diff(np.flipud(10.0 ** (-self.bval * mext + 8.0))))
        # Test B: Generate a completely artificial catalogue using the
        # Gutenberg-Richter distribution defined above
        numobs = np.around(self.numobs)
        size = int(np.sum(self.numobs))
        magnitude = np.zeros(size)
        lidx = 0
        for mag, nobs in zip(self.mval, numobs):
            uidx = int(lidx+nobs)
            magnitude[lidx:uidx] = mag + 0.01
            lidx = uidx
        year = np.ones(size) * 1999
        self.catalogue = Catalogue.make_from_dict(
            {'magnitude': magnitude, 'year': year})
        # Create the seismicity occurrence calculator
        self.aki_ml = AkiMaxLikelihood()
Code example #8
File: kijko_smith.py  Project: atalayayele/hmtk
    def calculate(self, catalogue, config, completeness=None):
        '''Main function to calculate the a- and b-value'''
        # Input checks
        cmag, ctime, ref_mag, dmag = input_checks(catalogue, config, 
                                                   completeness)
        ival = 0
        mag_eq_tolerance = 1E-5
        number_intervals = np.shape(ctime)[0]
        b_est = np.zeros(number_intervals, dtype=float)
        neq = np.zeros(number_intervals, dtype=float)
        nyr = np.zeros(number_intervals, dtype=float)
        while ival < number_intervals:
            id0 = np.abs(ctime - ctime[ival]) < mag_eq_tolerance
            m_c = np.min(cmag[id0])
            # Find events later than cut-off year, and with magnitude
            # greater than or equal to the corresponding completeness magnitude.
            # m_c - mag_eq_tolerance is required to correct floating point
            # differences.
            id1 = np.logical_and(catalogue['year'] >= ctime[ival],
                catalogue['magnitude'] >= (m_c - mag_eq_tolerance))
            nyr[ival] = np.float(np.max(catalogue['year'][id1]) -
                                 np.min(catalogue['year'][id1]) + 1)
            neq[ival] = np.sum(id1)
            # Get a- and b- value for the selected events
            temp_rec_table = recurrence_table(catalogue['magnitude'][id1], 
                                              dmag, 
                                              catalogue['year'][id1])

            aki_ml = AkiMaxLikelihood()
            b_est[ival] = aki_ml._aki_ml(temp_rec_table[:, 0],
                                         temp_rec_table[:, 1],
                                         dmag, m_c)[0]
            ival += 1

        total_neq = np.float(np.sum(neq))
        bval = self._harmonic_mean(b_est, neq)
        sigma_b = bval / np.sqrt(total_neq)
        aval = self._calculate_a_value(bval, total_neq, nyr, cmag, ref_mag)
        sigma_a = self._calculate_a_value(bval + sigma_b, total_neq, nyr, 
                                          cmag, ref_mag)

        if 'reference_magnitude' not in config:
            aval = np.log10(aval)
            sigma_a = np.log10(sigma_a) - aval
        else: 
            sigma_a = sigma_a - aval
        return bval, sigma_b, aval, sigma_a 
Code example #9
class AkiMaximumLikelihoodTestCase(unittest.TestCase):
    def setUp(self):
        """
        This generates a minimum data-set to be used for the regression.
        """
        # Test A: Generates a data set assuming b=1 and N(m=4.0)=10.0 events
        self.dmag = 0.1
        mext = np.arange(4.0, 7.01, 0.1)
        self.mval = mext[0:-1] + self.dmag / 2.0
        self.bval = 1.0
        self.numobs = np.flipud(
            np.diff(np.flipud(10.0**(-self.bval * mext + 8.0))))
        # Test B: Generate a completely artificial catalogue using the
        # Gutenberg-Richter distribution defined above
        numobs = np.around(self.numobs)
        size = int(np.sum(self.numobs))
        magnitude = np.zeros(size)
        lidx = 0
        for mag, nobs in zip(self.mval, numobs):
            uidx = int(lidx + nobs)
            magnitude[lidx:uidx] = mag + 0.01
            lidx = uidx
        year = np.ones(size) * 1999
        self.catalogue = Catalogue.make_from_dict({
            'magnitude': magnitude,
            'year': year
        })
        # Create the seismicity occurrence calculator
        self.aki_ml = AkiMaxLikelihood()

    def test_aki_maximum_likelihood_A(self):
        """
        Tests that the computed b value corresponds to the same value
        used to generate the test data set
        """
        bval, sigma_b = self.aki_ml._aki_ml(self.mval, self.numobs)
        self.assertAlmostEqual(self.bval, bval, 2)

    def test_aki_maximum_likelihood_B(self):
        """
        Tests that the computed b value corresponds to the same value
        used to generate the test data set
        """
        bval, sigma_b = self.aki_ml.calculate(self.catalogue)
        self.assertAlmostEqual(self.bval, bval, 2)
Code example #10
class AkiMaximumLikelihoodTestCase(unittest.TestCase):

    def setUp(self):
        """
        This generates a minimum data-set to be used for the regression.
        """
        # Test A: Generates a data set assuming b=1 and N(m=4.0)=10.0 events
        self.dmag = 0.1
        mext = np.arange(4.0, 7.01, 0.1)
        self.mval = mext[0:-1] + self.dmag / 2.0
        self.bval = 1.0
        self.numobs = np.flipud(
            np.diff(np.flipud(10.0 ** (-self.bval * mext + 8.0))))
        # Test B: Generate a completely artificial catalogue using the
        # Gutenberg-Richter distribution defined above
        numobs = np.around(self.numobs)
        size = int(np.sum(self.numobs))
        magnitude = np.zeros(size)
        lidx = 0
        for mag, nobs in zip(self.mval, numobs):
            uidx = int(lidx+nobs)
            magnitude[lidx:uidx] = mag + 0.01
            lidx = uidx
        year = np.ones(size) * 1999
        self.catalogue = Catalogue.make_from_dict(
            {'magnitude': magnitude, 'year': year})
        # Create the seismicity occurrence calculator
        self.aki_ml = AkiMaxLikelihood()

    def test_aki_maximum_likelihood_A(self):
        """
        Tests that the computed b value corresponds to the same value
        used to generate the test data set
        """
        bval, sigma_b = self.aki_ml._aki_ml(self.mval, self.numobs)
        self.assertAlmostEqual(self.bval, bval, 2)

    def test_aki_maximum_likelihood_B(self):
        """
        Tests that the computed b value corresponds to the same value
        used to generate the test data set
        """
        bval, sigma_b = self.aki_ml.calculate(self.catalogue)
        self.assertAlmostEqual(self.bval, bval, 2)
Code example #11
File: utils_test.py  Project: g-weatherill/hmtk
class TestSyntheticCatalogues(unittest.TestCase):
    '''
    Tests the synthetic catalogue functions
    '''
    def setUp(self):
        '''
        '''
        self.occur = AkiMaxLikelihood()

    def test_generate_magnitudes(self):
        '''
        Tests the hmtk.seismicity.occurence.utils function
        generate_trunc_gr_magnitudes
        '''
        bvals = []
        # Generate set of synthetic catalogues
        for _ in range(0, 100):
            mags = rec_utils.generate_trunc_gr_magnitudes(1.0, 4.0, 8.0, 1000)
            cat = Catalogue.make_from_dict(
                {'magnitude': mags,
                 'year': np.zeros(len(mags), dtype=int)})
            bvals.append(self.occur.calculate(cat)[0])
        bvals = np.array(bvals)
        self.assertAlmostEqual(np.mean(bvals), 1.0, 1)

    def test_generate_synthetic_catalogues(self):
        '''
        Tests the hmtk.seismicity.occurence.utils function
        generate_synthetic_magnitudes
        '''
        bvals = []
        # Generate set of synthetic catalogues
        for i in range(0, 100):
            cat1 = rec_utils.generate_synthetic_magnitudes(
                4.5, 1.0, 4.0, 8.0, 1000)
            bvals.append(
                self.occur.calculate(Catalogue.make_from_dict(cat1))[0])
        bvals = np.array(bvals)
        self.assertAlmostEqual(np.mean(bvals), 1.0, 1)
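
These tests draw magnitudes from a truncated Gutenberg-Richter distribution via rec_utils.generate_trunc_gr_magnitudes(bval, mmin, mmax, nsamples). A minimal inverse-transform sketch of that kind of sampler, assuming a doubly truncated exponential form; the function below is illustrative and not taken from the hmtk source:

import numpy as np

def sample_trunc_gr(bval, mmin, mmax, nsamples, seed=None):
    # Inverse-transform sampling of the doubly truncated exponential
    # (Gutenberg-Richter) magnitude distribution, with beta = b * ln(10).
    rng = np.random.default_rng(seed)
    beta = bval * np.log(10.0)
    u = rng.uniform(size=nsamples)
    # CDF: F(m) = (1 - exp(-beta * (m - mmin))) / (1 - exp(-beta * (mmax - mmin)))
    return mmin - np.log(1.0 - u * (1.0 - np.exp(-beta * (mmax - mmin)))) / beta

With b = 1, mmin = 4, mmax = 8 and 1000 samples, feeding such magnitudes through AkiMaxLikelihood().calculate recovers a mean b-value near 1.0, which is what the assertion above checks.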
Code example #12
File: utils_test.py  Project: g-weatherill/hmtk
class TestSyntheticCatalogues(unittest.TestCase):
    '''
    Tests the synthetic catalogue functions
    '''
    def setUp(self):
        '''
        '''
        self.occur = AkiMaxLikelihood()

    def test_generate_magnitudes(self):
        '''
        Tests the hmtk.seismicity.occurence.utils function
        generate_trunc_gr_magnitudes
        '''
        bvals = []
        # Generate set of synthetic catalogues
        for _ in range(0, 100):
            mags = rec_utils.generate_trunc_gr_magnitudes(1.0, 4.0, 8.0, 1000)
            cat = Catalogue.make_from_dict(
                {'magnitude': mags,
                 'year': np.zeros(len(mags), dtype=int)})
            bvals.append(self.occur.calculate(cat)[0])
        bvals = np.array(bvals)
        self.assertAlmostEqual(np.mean(bvals), 1.0, 1)

    def test_generate_synthetic_catalogues(self):
        '''
        Tests the hmtk.seismicity.occurence.utils function
        generate_synthetic_magnitudes
        '''
        bvals = []
        # Generate set of synthetic catalogues
        for i in range(0, 100):
            cat1 = rec_utils.generate_synthetic_magnitudes(4.5, 1.0, 4.0, 8.0,
                                                           1000)
            bvals.append(self.occur.calculate(
                Catalogue.make_from_dict(cat1))[0])
        bvals = np.array(bvals)
        self.assertAlmostEqual(np.mean(bvals), 1.0, 1)
Code example #13
File: utils_test.py  Project: g-weatherill/hmtk
    def setUp(self):
        '''
        '''
        self.occur = AkiMaxLikelihood()
Code example #14
    def setUp(self):
        '''
        '''
        self.occur = AkiMaxLikelihood()
Code example #15
    def _b_ml(self, catalogue, config, cmag, ctime, ref_mag, dmag):
        end_year = float(catalogue.end_year)
        catalogue = catalogue.data
        ival = 0
        mag_eq_tolerance = 1E-5
        aki_ml = AkiMaxLikelihood()

        while ival < np.shape(ctime)[0]:

            id0 = np.abs(ctime - ctime[ival]) < mag_eq_tolerance
            m_c = np.min(cmag[id0])

            print('--- ctime', ctime[ival], ' m_c', m_c)

            # Find events later than cut-off year, and with magnitude
            # greater than or equal to the corresponding completeness
            # magnitude. m_c - mag_eq_tolerance is required to correct
            # floating point differences.
            id1 = np.logical_and(
                catalogue['year'] >= ctime[ival],
                catalogue['magnitude'] >= (m_c - mag_eq_tolerance))
            nyr = np.float(np.max(catalogue['year'][id1])) - ctime[ival] + 1.
            # Get a- and b- value for the selected events
            temp_rec_table = recurrence_table(catalogue['magnitude'][id1],
                                              dmag,
                                              catalogue['year'][id1],
                                              end_year-ctime[ival]+1)

            bval, sigma_b = aki_ml._aki_ml(temp_rec_table[:, 0],
                                           temp_rec_table[:, 1], dmag, m_c)

            if ival == 0:
                gr_pars = np.array([np.hstack([bval, sigma_b])])
                neq = np.sum(id1)  # Number of events
            else:
                gr_pars = np.vstack([gr_pars, np.hstack([bval, sigma_b])])
                neq = np.hstack([neq, np.sum(id1)])
            ival = ival + np.sum(id0)

        # Get average GR parameters
        bval, sigma_b = self._average_parameters(gr_pars, neq,
                config['Average Type'])
        aval = self._calculate_a_value(bval,
                                       np.float(np.sum(neq)),
                                       cmag,
                                       ctime,
                                       catalogue['magnitude'],
                                       end_year,
                                       dmag)
        sigma_a = self._calculate_a_value(bval + sigma_b,
                                          np.float(np.sum(neq)),
                                          cmag,
                                          ctime,
                                          catalogue['magnitude'],
                                          end_year,
                                          dmag)
        if not config['reference_magnitude']:
            return bval,\
                   sigma_b,\
                   aval,\
                   sigma_a - aval
        else:
            rate = 10. ** (aval - bval * config['reference_magnitude'])
            sigma_rate =  10. ** (sigma_a - 
                bval * config['reference_magnitude']) - rate
            return bval,\
                   sigma_b,\
                   rate,\
                   sigma_rate