Beispiel #1
0
    def setUp(self):
        """
        Loads two test catalogues (A and B) and the corresponding expected
        recurrence tables from the test data directory.
        """
        # Read initial dataset
        filename = os.path.join(self.BASE_DATA_PATH,
                                'completeness_test_cat.csv')
        test_data = np.genfromtxt(filename, delimiter=',', skip_header=1)
        # Create the catalogue A (column 3 = year, column 17 = magnitude)
        self.catalogueA = Catalogue.make_from_dict(
            {'year': test_data[:,3], 'magnitude': test_data[:,17]})

        # Read initial dataset
        filename = os.path.join(self.BASE_DATA_PATH,
                                'recurrence_test_cat_B.csv')
        test_data = np.genfromtxt(filename, delimiter=',', skip_header=1)
        # Create the catalogue B (original comment wrongly said "A")
        self.catalogueB = Catalogue.make_from_dict(
            {'year': test_data[:,3], 'magnitude': test_data[:,17]})

        # Read the verification table A
        filename = os.path.join(self.BASE_DATA_PATH,
                                'recurrence_table_test_A.csv')
        self.true_tableA = np.genfromtxt(filename, delimiter = ',')

        # Read the verification table B (original comment wrongly said "A")
        filename = os.path.join(self.BASE_DATA_PATH,
                                'recurrence_table_test_B.csv')
        self.true_tableB = np.genfromtxt(filename, delimiter = ',')
 def setUp(self):
     """
     This generates a minimum data-set to be used for the regression.
     """
     # Test A: Generates a data set assuming b=1 and N(m=4.0)=10.0 events
     self.dmag = 0.1
     mext = np.arange(4.0, 7.01, 0.1)
     self.mval = mext[0:-1] + self.dmag / 2.0
     self.bval = 1.0
     self.numobs = np.flipud(
         np.diff(np.flipud(10.0 ** (-self.bval * mext + 8.0))))
     # Test B: Generate a completely artificial catalogue using the
     # Gutenberg-Richter distribution defined above
     numobs = np.around(self.numobs)
     # BUG FIX: size the magnitude array from the *rounded* counts so the
     # bin-filling loop below fills the array exactly (the original used
     # the unrounded sum, which can leave trailing zero-magnitude events
     # or truncate the last bin when rounding changes the total).
     size = int(np.sum(numobs))
     magnitude = np.zeros(size)
     lidx = 0
     for mag, nobs in zip(self.mval, numobs):
         uidx = int(lidx + nobs)
         # Nudge magnitudes just above the bin edge
         magnitude[lidx:uidx] = mag + 0.01
         lidx = uidx
     year = np.ones(size) * 1999
     self.catalogue = Catalogue.make_from_dict(
         {'magnitude': magnitude, 'year': year})
     # Create the seismicity occurrence calculator
     self.aki_ml = AkiMaxLikelihood()
Beispiel #3
0
 def test_input_checks_sets_magnitude_interval(self):
     """input_checks must pick up 'magnitude_interval' from the config."""
     dummy_completeness = 0.0
     cat = Catalogue.make_from_dict({'year': [1900]})
     outputs = rec_utils.input_checks(
         cat, {'magnitude_interval': 0.1}, dummy_completeness)
     # Fourth element of the returned tuple is dmag
     self.assertEqual(0.1, outputs[3])
Beispiel #4
0
 def setUp(self):
     """
     Sets up the test catalogue to be used for the Weichert algorithm
     """
     cat_file = os.path.join(BASE_DATA_PATH, "synthetic_test_cat1.csv")
     raw_data = np.genfromtxt(cat_file, delimiter=",")
     # Columns: 0 eventID, 1 year, 2 decimal time, 3 longitude,
     # 4 latitude, 5 magnitude, 6 depth
     # (removed the unused local `neq = raw_data.shape[0]`)
     self.catalogue = Catalogue.make_from_dict({
         "eventID": raw_data[:, 0].astype(int),
         "year": raw_data[:, 1].astype(int),
         "dtime": raw_data[:, 2],
         "longitude": raw_data[:, 3],
         "latitude": raw_data[:, 4],
         "magnitude": raw_data[:, 5],
         "depth": raw_data[:, 6]
     })
     self.config = {"reference_magnitude": 3.0}
     # Completeness table rows: [year of completeness, minimum magnitude]
     self.completeness = np.array([[1990., 3.0], [1975., 4.0], [1960., 5.0],
                                   [1930., 6.0], [1910., 7.0]])
Beispiel #5
0
 def test_input_checks_use_reference_magnitude(self):
     """input_checks must return the configured reference magnitude."""
     catalogue = Catalogue.make_from_dict({'year': [1900]})
     results = rec_utils.input_checks(
         catalogue, {'reference_magnitude': 3.0}, 0.0)
     # Third element of the returned tuple is ref_mag
     self.assertEqual(3.0, results[2])
 def test_kijko_smit_set_reference_magnitude(self):
     """Kijko-Smit calculation runs with a zero reference magnitude."""
     compl = np.array([[1900, 1.0]])
     cat = Catalogue.make_from_dict({
         'magnitude': np.array([5.0, 6.0]),
         'year': np.array([2000, 2000])})
     self.ks_ml.calculate(cat, {'reference_magnitude': 0.0}, compl)
 def setUp(self):
     """
     This generates a minimum data-set to be used for the regression.
     """
     # Test A: Generates a data set assuming b=1 and N(m=4.0)=10.0 events
     self.dmag = 0.1
     mext = np.arange(4.0, 7.01, 0.1)
     self.mval = mext[0:-1] + self.dmag / 2.0
     self.bval = 1.0
     self.numobs = np.flipud(
         np.diff(np.flipud(10.0 ** (-self.bval * mext + 8.0))))
     # Test B: Generate a completely artificial catalogue using the
     # Gutenberg-Richter distribution defined above
     numobs = np.around(self.numobs)
     # BUG FIX: size the magnitude array from the *rounded* counts so the
     # bin-filling loop below fills the array exactly (the original used
     # the unrounded sum, which can leave trailing zero-magnitude events
     # or truncate the last bin when rounding changes the total).
     size = int(np.sum(numobs))
     magnitude = np.zeros(size)
     lidx = 0
     for mag, nobs in zip(self.mval, numobs):
         uidx = int(lidx + nobs)
         # Nudge magnitudes just above the bin edge
         magnitude[lidx:uidx] = mag + 0.01
         lidx = uidx
     year = np.ones(size) * 1999
     self.catalogue = Catalogue.make_from_dict(
         {'magnitude': magnitude, 'year': year})
     # Create the seismicity occurrence calculator
     self.aki_ml = AkiMaxLikelihood()
Beispiel #8
0
 def test_input_checks_use_reference_magnitude(self):
     """The configured reference magnitude must be passed through."""
     cat = Catalogue.make_from_dict({'year': [1900]})
     cfg = {'reference_magnitude': 3.0}
     _, _, ref_mag, _, _ = rec_utils.input_checks(cat, cfg, 0.0)
     self.assertEqual(3.0, ref_mag)
Beispiel #9
0
 def test_input_checks_sets_magnitude_interval(self):
     """The configured magnitude interval must be passed through."""
     cat = Catalogue.make_from_dict({'year': [1900]})
     cfg = {'magnitude_interval': 0.1}
     _, _, _, dmag, _ = rec_utils.input_checks(cat, cfg, 0.0)
     self.assertEqual(0.1, dmag)
Beispiel #10
0
 def test_input_checks_simple_input(self):
     """input_checks accepts a plain nested-list completeness table."""
     cat = Catalogue.make_from_dict(
         {'magnitude': [5.0, 6.0], 'year': [2000, 2000]})
     rec_utils.input_checks(cat, {}, [[1900, 2.0]])
Beispiel #11
0
 def test_kijko_smit_set_reference_magnitude(self):
     """Kijko-Smit runs when reference_magnitude is set to zero."""
     catalogue = Catalogue.make_from_dict(
         {'magnitude': np.array([5.0, 6.0]),
          'year': np.array([2000, 2000])})
     self.ks_ml.calculate(
         catalogue, {'reference_magnitude': 0.0}, np.array([[1900, 1.0]]))
Beispiel #12
0
def build_catalogue_from_file(filename):
    """
    Creates a "minimal" catalogue from a raw csv file.

    :param str filename:
        Path to a headerless CSV whose columns are, in order:
        eventID, year, decimal time, longitude, latitude, magnitude, depth
    :returns:
        A Catalogue built from those columns
    """
    raw_data = np.genfromtxt(filename, delimiter=",")
    # Removed the unused local `neq = raw_data.shape[0]`
    return Catalogue.make_from_dict({"eventID": raw_data[:, 0].astype(int),
                                     "year": raw_data[:, 1].astype(int),
                                     "dtime": raw_data[:, 2],
                                     "longitude": raw_data[:, 3],
                                     "latitude": raw_data[:, 4],
                                     "magnitude": raw_data[:, 5],
                                     "depth": raw_data[:, 6]})
Beispiel #13
0
 def test_generate_synthetic_catalogues(self):
     '''
     Tests the hmtk.seismicity.occurence.utils function
     generate_synthetic_magnitudes
     '''
     estimates = []
     # Draw 100 synthetic catalogues; each should recover b close to 1.0
     for _ in range(100):
         synth = rec_utils.generate_synthetic_magnitudes(
             4.5, 1.0, 4.0, 8.0, 1000)
         catalogue = Catalogue.make_from_dict(synth)
         estimates.append(self.occur.calculate(catalogue)[0])
     self.assertAlmostEqual(np.mean(np.array(estimates)), 1.0, 1)
Beispiel #14
0
 def test_generate_synthetic_catalogues(self):
     '''
     Tests the hmtk.seismicity.occurence.utils function
     generate_synthetic_magnitudes
     '''
     results = []
     # Build 100 synthetic catalogues and collect the b-value estimates
     for _ in range(100):
         data = rec_utils.generate_synthetic_magnitudes(
             4.5, 1.0, 4.0, 8.0, 1000)
         cat = Catalogue.make_from_dict(data)
         results.append(self.occur.calculate(cat)[0])
     # The mean estimate should recover the input b-value of 1.0
     self.assertAlmostEqual(np.mean(np.array(results)), 1.0, 1)
Beispiel #15
0
 def test_generate_magnitudes(self):
     '''
     Tests the hmtk.seismicity.occurence.utils function
     generate_trunc_gr_magnitudes
     '''
     collected = []
     # Each trial draws 1000 magnitudes from a truncated GR distribution
     for _ in range(100):
         mags = rec_utils.generate_trunc_gr_magnitudes(1.0, 4.0, 8.0, 1000)
         years = np.zeros(len(mags), dtype=int)
         cat = Catalogue.make_from_dict({'magnitude': mags, 'year': years})
         collected.append(self.occur.calculate(cat)[0])
     self.assertAlmostEqual(np.mean(np.array(collected)), 1.0, 1)
Beispiel #16
0
    def setUp(self):
        """
        Loads two test catalogues (A and B) and the corresponding expected
        recurrence tables from the test data directory.
        """
        # Read initial dataset
        filename = os.path.join(self.BASE_DATA_PATH,
                                'completeness_test_cat.csv')
        test_data = np.genfromtxt(filename, delimiter=',', skip_header=1)
        # Create the catalogue A (column 3 = year, column 17 = magnitude)
        self.catalogueA = Catalogue.make_from_dict({
            'year':
            test_data[:, 3],
            'magnitude':
            test_data[:, 17]
        })

        # Read initial dataset
        filename = os.path.join(self.BASE_DATA_PATH,
                                'recurrence_test_cat_B.csv')
        test_data = np.genfromtxt(filename, delimiter=',', skip_header=1)
        # Create the catalogue B (original comment wrongly said "A")
        self.catalogueB = Catalogue.make_from_dict({
            'year':
            test_data[:, 3],
            'magnitude':
            test_data[:, 17]
        })

        # Read the verification table A
        filename = os.path.join(self.BASE_DATA_PATH,
                                'recurrence_table_test_A.csv')
        self.true_tableA = np.genfromtxt(filename, delimiter=',')

        # Read the verification table B (original comment wrongly said "A")
        filename = os.path.join(self.BASE_DATA_PATH,
                                'recurrence_table_test_B.csv')
        self.true_tableB = np.genfromtxt(filename, delimiter=',')
Beispiel #17
0
    def setUp(self):
        """
        This generates a catalogue to be used for the regression.
        """
        # Generates a data set assuming b=1
        self.dmag = 0.1
        mext = np.arange(4.0, 7.01, 0.1)
        self.mval = mext[0:-1] + self.dmag / 2.0
        self.bval = 1.0
        numobs = np.flipud(np.diff(np.flipud(10.0**(-self.bval * mext + 7.0))))

        # Scale counts per magnitude range to mimic the completeness windows
        numobs[0:6] *= 10
        numobs[6:13] *= 20
        numobs[13:22] *= 50
        numobs[22:] *= 100

        # Completeness: row 0 = start year, row 1 = completeness magnitude
        compl = np.array([[1900, 1950, 1980, 1990], [6.34, 5.44, 4.74, 3.0]])
        self.compl = compl.transpose()
        # (removed leftover debug print statements)

        numobs = np.around(numobs)

        size = int(np.sum(numobs))
        magnitude = np.zeros(size)
        # The original's `np.zeros(...) * 1999` was a no-op on zeros; every
        # entry is overwritten in the loop below anyway.
        year = np.zeros(size)

        lidx = 0
        for mag, nobs in zip(self.mval, numobs):
            uidx = int(lidx + nobs)
            magnitude[lidx:uidx] = mag + 0.01
            # Earliest complete year for this magnitude bin
            year_low = compl[0, np.min(np.nonzero(compl[1, :] < mag)[0])]
            # Uniformly distribute event years between year_low and 2000
            year[lidx:uidx] = (year_low + np.random.rand(uidx - lidx) *
                               (2000 - year_low))
            lidx = uidx

        self.catalogue = Catalogue.make_from_dict({
            'magnitude': magnitude,
            'year': year
        })
        self.b_ml = BMaxLikelihood()
        self.config = {'Average Type': 'Weighted'}
    def setUp(self):
        """
        This generates a catalogue to be used for the regression.
        """
        # Generates a data set assuming b=1
        self.dmag = 0.1
        mext = np.arange(4.0, 7.01, 0.1)
        self.mval = mext[0:-1] + self.dmag / 2.0
        self.bval = 1.0
        numobs = np.flipud(np.diff(np.flipud(10.0**(-self.bval * mext + 7.0))))

        # Scale counts per magnitude range to mimic the completeness windows
        numobs[0:6] *= 10
        numobs[6:13] *= 20
        numobs[13:22] *= 50
        numobs[22:] *= 100

        # Completeness: row 0 = start year, row 1 = completeness magnitude
        compl = np.array([[1900, 1950, 1980, 1990], [6.34, 5.44, 4.74, 3.0]])
        self.compl = compl.transpose()
        # (removed leftover Python 2 `print` statements, which are a
        # syntax error under Python 3)

        numobs = np.around(numobs)

        # BUG FIX: np.zeros requires an integer size; np.sum returns a
        # float and raises a TypeError on modern numpy.  Also dropped the
        # no-op multiplication of zeros by 1999.
        size = int(np.sum(numobs))
        magnitude = np.zeros(size)
        year = np.zeros(size)

        lidx = 0
        for mag, nobs in zip(self.mval, numobs):
            uidx = int(lidx + nobs)
            magnitude[lidx:uidx] = mag + 0.01
            # Earliest complete year for this magnitude bin
            year_low = compl[0, np.min(np.nonzero(compl[1, :] < mag)[0])]
            # Uniformly distribute event years between year_low and 2000
            year[lidx:uidx] = (year_low + np.random.rand(uidx - lidx) *
                               (2000 - year_low))
            lidx = uidx

        self.catalogue = Catalogue.make_from_dict(
            {'magnitude': magnitude, 'year': year})
        self.b_ml = BMaxLikelihood()
        self.config = {'Average Type': 'Weighted'}
Beispiel #19
0
 def setUp(self):
     """
     Loads the synthetic test catalogue used by the Weichert tests.
     """
     cat_file = os.path.join(BASE_DATA_PATH, "synthetic_test_cat1.csv")
     raw_data = np.genfromtxt(cat_file, delimiter=",")
     # Columns: 0 eventID, 1 year, 2 decimal time, 3 longitude,
     # 4 latitude, 5 magnitude, 6 depth
     # (removed the unused local `neq = raw_data.shape[0]`)
     self.catalogue = Catalogue.make_from_dict({
         "eventID": raw_data[:, 0].astype(int),
         "year": raw_data[:, 1].astype(int),
         "dtime": raw_data[:, 2],
         "longitude": raw_data[:, 3],
         "latitude": raw_data[:, 4],
         "magnitude": raw_data[:, 5],
         "depth": raw_data[:, 6]})
     self.config = {"reference_magnitude": 3.0}
     # Completeness table rows: [year of completeness, minimum magnitude]
     self.completeness = np.array([[1990., 3.0],
                                   [1975., 4.0],
                                   [1960., 5.0],
                                   [1930., 6.0],
                                   [1910., 7.0]])
Beispiel #20
0
 def test_generate_magnitudes(self):
     '''
     Tests the hmtk.seismicity.occurence.utils function
     generate_trunc_gr_magnitudes
     '''
     b_estimates = []
     # 100 trials, each with 1000 truncated GR magnitude draws
     for _ in range(100):
         mags = rec_utils.generate_trunc_gr_magnitudes(1.0, 4.0, 8.0, 1000)
         catalogue = Catalogue.make_from_dict(
             {'magnitude': mags,
              'year': np.zeros(len(mags), dtype=int)})
         b_estimates.append(self.occur.calculate(catalogue)[0])
     # The mean estimate should recover the input b-value of 1.0
     self.assertAlmostEqual(np.mean(np.array(b_estimates)), 1.0, 1)
Beispiel #21
0
 def setUp(self):
     """
     This generates a catalogue to be used for the regression.
     """
     # Generates a data set assuming b=1
     self.dmag = 0.1
     mext = np.arange(4.0, 7.01, 0.1)
     self.mval = mext[0:-1] + self.dmag / 2.0
     self.bval = 1.0
     numobs = np.flipud(np.diff(np.flipud(10.0**(-self.bval * mext + 7.0))))
     # Compute the number of observations in the different magnitude
     # intervals (according to completeness)
     numobs[0:6] *= 10
     numobs[6:13] *= 20
     numobs[13:22] *= 50
     numobs[22:] *= 100
     # Define completeness window
     # (row 0 = start year, row 1 = completeness magnitude)
     compl = np.array([[1900, 1950, 1980, 1990], [6.34, 5.44, 4.74, 3.0]])
     self.compl = np.flipud(compl.transpose())
     # Compute the number of observations (i.e. earthquakes) in each
     # magnitude bin
     numobs = np.around(numobs)
     # BUG FIX: np.zeros requires an integer size; np.sum returns a float
     # and raises a TypeError on modern numpy.  Also dropped the no-op
     # multiplication of zeros by 1999 (every entry is overwritten below).
     size = int(np.sum(numobs))
     magnitude = np.zeros(size)
     year = np.zeros(size)
     # Generate the catalogue
     lidx = 0
     for mag, nobs in zip(self.mval, numobs):
         uidx = int(lidx + nobs)
         magnitude[lidx:uidx] = mag + 0.01
         # Earliest complete year for this magnitude bin
         year_low = compl[0, np.min(np.nonzero(compl[1, :] < mag)[0])]
         # Uniformly distribute event years between year_low and 2000
         year[lidx:uidx] = (year_low + np.random.rand(uidx - lidx) *
                            (2000 - year_low))
         lidx = uidx
     # Fix the parameters that later will be used for the testing
     self.catalogue = Catalogue.make_from_dict({
         'magnitude': magnitude,
         'year': year
     })
     self.wei = Weichert()
     self.config = {'Average Type': 'Weighted'}
Beispiel #22
0
 def setUp(self):
     """
     This generates a catalogue to be used for the regression.
     """
     # Generates a data set assuming b=1
     self.dmag = 0.1
     mext = np.arange(4.0, 7.01, 0.1)
     self.mval = mext[0:-1] + self.dmag / 2.0
     self.bval = 1.0
     numobs = np.flipud(np.diff(np.flipud(10.0**(-self.bval * mext + 7.0))))
     # Compute the number of observations in the different magnitude
     # intervals (according to completeness)
     numobs[0:6] *= 10
     numobs[6:13] *= 20
     numobs[13:22] *= 50
     numobs[22:] *= 100
     # Define completeness window
     # (row 0 = start year, row 1 = completeness magnitude)
     compl = np.array([[1900, 1950, 1980, 1990], [6.34, 5.44, 4.74, 3.0]])
     self.compl = np.flipud(compl.transpose())
     # Compute the number of observations (i.e. earthquakes) in each
     # magnitude bin
     numobs = np.around(numobs)
     # BUG FIX: np.zeros requires an integer size; np.sum returns a float
     # and raises a TypeError on modern numpy.  Also dropped the no-op
     # multiplication of zeros by 1999 (every entry is overwritten below).
     size = int(np.sum(numobs))
     magnitude = np.zeros(size)
     year = np.zeros(size)
     # Generate the catalogue
     lidx = 0
     for mag, nobs in zip(self.mval, numobs):
         uidx = int(lidx + nobs)
         magnitude[lidx:uidx] = mag + 0.01
         # Earliest complete year for this magnitude bin
         year_low = compl[0, np.min(np.nonzero(compl[1, :] < mag)[0])]
         # Uniformly distribute event years between year_low and 2000
         year[lidx:uidx] = (year_low + np.random.rand(uidx - lidx) *
                            (2000 - year_low))
         lidx = uidx
     # Fix the parameters that later will be used for the testing
     self.catalogue = Catalogue.make_from_dict(
         {'magnitude': magnitude, 'year': year})
     self.wei = Weichert()
     self.config = {'Average Type': 'Weighted'}
Beispiel #23
0
    def update_selection(self):
        """
        Applies the selectors currently listed in the selection editor to
        the catalogue, updates the map selection and summary label.

        :returns: a catalogue holding the selection
        """
        # `initial` keeps the untouched catalogue; `catalogue` accumulates
        # the result of applying the selectors in turn
        initial = catalogue = self.catalogue_model.catalogue

        if not self.selection_editor.selectorList.count():
            # No selectors configured: clear any selection on the map
            self.catalogue_map.select([])
        else:
            for i in range(self.selection_editor.selectorList.count()):
                selector = self.selection_editor.selectorList.item(i)
                if (not self.intersect_with_selection and not
                    isinstance(selector, Invert)):
                    # Union mode: apply the selector to the full catalogue
                    # and merge with the events selected so far
                    union_data = selector.apply(initial, initial).data
                    if initial != catalogue:
                        # NOTE(review): only 'eventID' and 'year' are
                        # merged here — presumably sufficient for the map
                        # selection; other data columns are not combined.
                        union_data['eventID'] = numpy.append(
                            union_data['eventID'], catalogue.data['eventID'])
                        union_data['year'] = numpy.append(
                            union_data['year'], catalogue.data['year'])
                    catalogue = Catalogue.make_from_dict(union_data)
                else:
                    # Intersection (or Invert selector): chain the selector
                    # on the current result
                    catalogue = selector.apply(catalogue, initial)
            self.catalogue_map.select(catalogue.data['eventID'])

        # Summarise how many features ended up selected on the map layer
        features_num = len(
            self.catalogue_map.catalogue_layer.selectedFeatures())
        if not features_num:
            self.selection_editor.selectorSummaryLabel.setText(
                "No event selected")
        elif features_num == initial.get_number_events():
            self.selection_editor.selectorSummaryLabel.setText(
                "All events selected")
        else:
            self.selection_editor.selectorSummaryLabel.setText(
                "%d events selected" % features_num)

        return catalogue
Beispiel #24
0
 def test_input_checks_simple_input(self):
     """A nested-list completeness table is accepted without error."""
     catalogue = Catalogue.make_from_dict({
         'magnitude': [5.0, 6.0],
         'year': [2000, 2000]})
     rec_utils.input_checks(catalogue, {}, [[1900, 2.0]])
Beispiel #25
0
 def test_input_checks_use_a_float_for_completeness(self):
     """input_checks must tolerate a scalar completeness value."""
     cat = Catalogue.make_from_dict({'year': [1900]})
     rec_utils.input_checks(cat, {}, 0.0)
Beispiel #26
0
 def test_input_checks_use_a_float_for_completeness(self):
     """A scalar completeness value must not raise in input_checks."""
     catalogue = Catalogue.make_from_dict({'year': [1900]})
     empty_config = {}
     rec_utils.input_checks(catalogue, empty_config, 0.0)