def test_point_in_tectonic_region(self):
    # Basic check to ensure that a point is correctly identified as being
    # inside the regional polygon
    # Setup Model
    polygon = {'long_lims': [-1.0, 1.0],
               'lat_lims': [-1.0, 1.0],
               'area': 1.0,
               'region_type': 'XXX'}
    self.model = GeodeticStrain()
    self.model.data = {
        'longitude': np.array([-1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5]),
        'latitude': np.array([-1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5]),
        'region': np.zeros(7, dtype='|S3'),
        'area': np.zeros(7, dtype=float),
        'exx': np.zeros(7, dtype=float)}
    self.reader = KreemerRegionalisation('a filename')
    self.reader.strain = self.model
    self.reader._point_in_tectonic_region(polygon)
    expected_region = [b'', b'XXX', b'XXX', b'XXX', b'XXX', b'', b'']
    for iloc in range(0, 7):
        self.assertEqual(expected_region[iloc],
                         self.reader.strain.data['region'][iloc])
    np.testing.assert_array_almost_equal(
        self.reader.strain.data['area'],
        np.array([0., 1., 1., 1., 1., 0., 0.]))
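# Illustrative sketch (not the library implementation): judging from the
# expected output above, the lower polygon bound (-1.0) is treated as inside
# while the upper bound (+1.0) is treated as outside, i.e. the check appears
# to be lower-inclusive and upper-exclusive. A minimal NumPy mask with that
# assumed behaviour:
import numpy as np

def _in_box_sketch(lon, lat, long_lims, lat_lims):
    # Lower bound inclusive, upper bound exclusive (inferred from the test)
    return ((lon >= long_lims[0]) & (lon < long_lims[1]) &
            (lat >= lat_lims[0]) & (lat < lat_lims[1]))

# _in_box_sketch(np.array([-1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5]),
#                np.array([-1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5]),
#                [-1.0, 1.0], [-1.0, 1.0])
# -> array([False, True, True, True, True, False, False])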
def setUp(self):
    '''
    '''
    self.writer = None
    self.model = GeodeticStrain()
    self.model.data = dict([('longitude', np.array([30., 30., 30.])),
                            ('latitude', np.array([30., 30., 30.])),
                            ('exx', np.array([1., 2., 3.])),
                            ('eyy', np.array([1., 2., 3.])),
                            ('exy', np.array([1., 2., 3.]))])
    self.filename = None
def test_full_regionalisation_workflow(self):
    # Tests the function to apply the full Kreemer regionalisation workflow
    # using a simple two-polygon case
    self.reader = KreemerRegionalisation(KREEMER_2REG_FILE)
    self.model = GeodeticStrain()
    self.model.data = {'longitude': np.array([179.7, -179.7, 10.0]),
                       'latitude': np.array([-65.7, -65.7, 10.0]),
                       'exx': 1E-9 * np.ones(3),
                       'eyy': 1E-9 * np.ones(3),
                       'exy': 1E-9 * np.ones(3)}
    self.model = self.reader.get_regionalisation(self.model)
    np.testing.assert_array_equal(self.model.data['region'],
                                  np.array([b'R', b'C', b'IPL']))
    np.testing.assert_array_equal(self.model.data['area'],
                                  np.array([1., 5., np.nan]))
class TestShift(unittest.TestCase):
    '''
    Test suite for the class openquake.hmtk.strain.shift.Shift
    '''
    def setUp(self):
        self.model = None
        self.strain_model = GeodeticStrain()

    def test_basic_instantiation(self):
        # Tests the basic instantiation of the SHIFT class
        # Instantiation with float
        self.model = Shift(5.0)
        np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                             np.array([5.0]))
        self.assertEqual(self.model.number_magnitudes, 1)
        # Instantiation with a numpy array
        self.model = Shift(np.arange(5., 8., 0.5))
        np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                             np.arange(5., 8., 0.5))
        self.assertEqual(self.model.number_magnitudes, 6)
        # Instantiation with list
        self.model = Shift([5., 6., 7., 8.])
        np.testing.assert_array_almost_equal(self.model.target_magnitudes,
                                             np.array([5., 6., 7., 8.]))
        self.assertEqual(self.model.number_magnitudes, 4)
        # Otherwise raise an error
        with self.assertRaises(ValueError) as ae:
            self.model = Shift(None)
        self.assertEqual(str(ae.exception),
                         'Minimum magnitudes must be float, list or array')
        # Check regionalisation - assuming defaults
        self.model = Shift(5.0)
        for region in self.model.regionalisation.keys():
            self.assertDictEqual(BIRD_GLOBAL_PARAMETERS[region],
                                 self.model.regionalisation[region])
        np.testing.assert_array_almost_equal(np.log10(self.model.base_rate),
                                             np.array([-20.74610902]))

    def test_reclassify_with_bird_data(self):
        # Tests the re-classification from the Kreemer classification
        # (C, O, S, R and IPL) to the Bird & Liu (2007) classification:
        # Region type                   Kreemer code    Bird code
        # Intraplate                    IPL             IPL
        # Subduction                    S               SUB
        # Oceanic                       O               OCB
        # Continental Transform         C               CTF
        # Continental Convergent        C               CCB
        # Continental Rift              C               CRB
        # Ridge (e1h & e2h > 0.)        R               OSRnor (normal spreading)
        # Ridge (e1h == 0.)             R               OSRnor
        # Ridge (e1h * e2h < 0 and
        #        e1h + e2h >= 0)        R               OSRnor/OTFmed
        # Ridge (e1h * e2h < 0 and
        #        e1h + e2h < 0)         R               OCB/OTFmed
        # Ridge (any other)             R               OCB
        self.model = Shift(5.0)
        self.strain_model.data = {
            #                IPL  SUB  OCB  CCB   CRB  CTF   CTF  OSRn OSRn  OSR1  OSR2  OCB
            'err': np.array([0.,  0.,  0.,  1.0, -1.0, 0.1, -0.1, 0.0, 0.0,  0.0,  0.0,  0.0]),
            'e1h': np.array([0.,  0.,  0.,  0.0, -1.0, 0.0, -1.0, 1.0, 0.0, -1.0, -1.0, -1.0]),
            'e2h': np.array([0.,  0.,  0.,  1.0,  0.0, 1.0,  0.0, 1.0, 0.0,  2.0,  0.5, -1.0]),
            'region': np.array(['IPL', 'S', 'O', 'C', 'C', 'C', 'C',
                                'R', 'R', 'R', 'R', 'R'], dtype='a13')}
        self.model.strain = self.strain_model
        expected_regions = [b'IPL', b'SUB', b'OCB', b'CCB', b'CRB', b'CTF',
                            b'CTF', b'OSRnor', b'OSRnor', b'OSR_special_1',
                            b'OSR_special_2', b'OCB']
        # Apply Bird Classification
        self.model._reclassify_Bird_regions_with_data()
        self.assertListEqual(expected_regions,
                             self.model.strain.data['region'].tolist())

    def test_continuum_seismicity(self):
        # Tests the function openquake.hmtk.strain.shift.Shift.
        # continuum_seismicity - the Python implementation of the subroutine
        # Continuum_Seismicity from the Fortran 90 code GSRM.f90
        self.strain_model = GeodeticStrain()
        # Define a simple strain model
        test_data = {'longitude': np.zeros(3, dtype=float),
                     'latitude': np.zeros(3, dtype=float),
                     'exx': np.array([1E-9, 1E-8, 1E-7]),
                     'eyy': np.array([5E-10, 5E-9, 5E-8]),
                     'exy': np.array([2E-9, 2E-8, 2E-7])}
        self.strain_model.get_secondary_strain_data(test_data)
        self.model = Shift([5.66, 6.66])
        threshold_moment = moment_function(np.array([5.66, 6.66]))
        expected_rate = np.array([[-14.43624419, -22.48168502],
                                  [-13.43624419, -21.48168502],
                                  [-12.43624419, -20.48168502]])
        np.testing.assert_array_almost_equal(
            expected_rate,
            np.log10(self.model.continuum_seismicity(
                threshold_moment,
                self.strain_model.data['e1h'],
                self.strain_model.data['e2h'],
                self.strain_model.data['err'],
                BIRD_GLOBAL_PARAMETERS['OSRnor'])))

    def test_calculate_activity_rate(self):
        # Tests for the calculation of the activity rate. At this point
        # this is really a circular test - an independent test would be
        # helpful in future!
        parser0 = ReadStrainCsv(STRAIN_FILE)
        self.strain_model = parser0.read_data()
        self.model = Shift([5.0])
        self.model.calculate_activity_rate(self.strain_model)
        expected_rate = np.array([[5.66232696e-14],
                                  [5.66232696e-14],
                                  [5.66232696e-14],
                                  [5.66232696e-14],
                                  [2.73091764e-12],
                                  [2.80389274e-12],
                                  [2.88207458e-12],
                                  [6.11293721e-12],
                                  [8.19834427e-12],
                                  [6.55082175e-12],
                                  [7.90822653e-11],
                                  [7.85391610e-11],
                                  [8.12633607e-11],
                                  [7.66785657e-11],
                                  [4.07359524e-11],
                                  [2.16914046e-10],
                                  [4.74341943e-10],
                                  [1.99907599e-10],
                                  [3.55861556e-11],
                                  [1.69536101e-10],
                                  [1.69884622e-10],
                                  [1.70233341e-10],
                                  [5.06642764e-10]])
        np.testing.assert_array_almost_equal(
            np.log10(expected_rate),
            np.log10(self.model.strain.seismicity_rate))
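# Illustrative end-to-end sketch of the SHIFT workflow exercised by the tests
# above. File names are placeholders, and ReadStrainCsv and
# KreemerRegionalisation are assumed to be imported as in the test modules;
# only the calls already shown in the tests are used here.
from openquake.hmtk.strain.shift import Shift

strain = ReadStrainCsv('my_strain_model.csv').read_data(scaling_factor=1E-9)
strain = KreemerRegionalisation('kreemer_polygons.txt').get_regionalisation(strain)
model = Shift([5.0, 6.0, 7.0])
model.calculate_activity_rate(strain)
rates = model.strain.seismicity_rate  # rate per observation per target magnitude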
class TestGeodeticStrain(unittest.TestCase):
    '''
    Tests the :class: openquake.hmtk.strain.geodetic_strain.GeodeticStrain
    '''
    def setUp(self):
        '''
        '''
        self.data = {}
        self.model = GeodeticStrain()

    def test_instantiation(self):
        '''
        Tests the basic instantiation of the class
        '''
        expected_dict = {'data': None,
                         'regions': None,
                         'seismicity_rate': None,
                         'regionalisation': None,
                         'target_magnitudes': None,
                         'data_variables': []}
        self.assertDictEqual(expected_dict, self.model.__dict__)

    def test_secondary_strain_data_input_error(self):
        '''
        Tests to ensure the correct errors are raised when
        i) strain data is missing
        ii) strain data lacks a critical attribute
        '''
        # No strain data
        with self.assertRaises(ValueError) as ae:
            self.model.get_secondary_strain_data()
        self.assertEqual(str(ae.exception),
                         'Strain data not input or incorrectly formatted')
        # Strain data missing critical attribute - e.g. exy
        self.data = {'longitude': np.array([10., 20., 30.]),
                     'latitude': np.array([10., 20., 30.]),
                     'exx': np.array([1E-9, 20E-9, 25E-9]),
                     'eyy': np.array([1E-9, 20E-9, 25E-9])}
        with self.assertRaises(ValueError) as ae:
            self.model.get_secondary_strain_data(self.data)
        self.assertEqual(str(ae.exception),
                         'Essential strain information exy missing!')

    def test_secondary_strain_data_with_input(self):
        '''
        Tests the correct calculation of
        i) the second invariant
        ii) err
        iii) dilatation
        iv) e1h & e2h
        '''
        self.data = {'longitude': np.array([10., 20., 30.]),
                     'latitude': np.array([10., 20., 30.]),
                     'exx': 1E-9 * np.array([100., 10.0, 1.0]),
                     'eyy': 1E-9 * np.array([50., 5.0, 0.5]),
                     'exy': 1E-9 * np.array([10., 1.0, 0.1])}
        self.model.get_secondary_strain_data(self.data)
        # Check that all expected keys are present
        expected_keys = ['longitude', 'latitude', 'exx', 'eyy', 'exy',
                         '2nd_inv', 'dilatation', 'err', 'e1h', 'e2h']
        for key in expected_keys:
            self.assertTrue(key in self.model.data.keys())
        # Test second invariant
        np.testing.assert_array_almost_equal(
            np.log10(self.model.data['2nd_inv']),
            np.array([-6.94809814, -7.94809814, -8.94809814]))
        # Test dilatation
        np.testing.assert_array_almost_equal(
            np.log10(self.model.data['dilatation']),
            np.array([-6.82390874, -7.82390874, -8.82390874]))
        # Test err
        np.testing.assert_array_almost_equal(
            self.model.data['dilatation'] + self.model.data['err'],
            np.zeros(3, dtype=float),
            14)
        # Test e1h
        np.testing.assert_array_almost_equal(
            np.log10(self.model.data['e1h']),
            np.array([-7.31808815, -8.31808815, -9.31808815]))
        # Test e2h
        np.testing.assert_array_almost_equal(
            np.log10(self.model.data['e2h']),
            np.array([-6.99171577, -7.99171577, -8.99171577]))

    def test_get_number_observations(self):
        '''
        Tests the count of the number of observations
        '''
        self.data = {'longitude': np.array([10., 20., 30.]),
                     'latitude': np.array([10., 20., 30.]),
                     'exx': np.array([1E-9, 20E-9, 25E-9]),
                     'eyy': np.array([1E-9, 20E-9, 25E-9]),
                     'exy': np.array([1E-9, 20E-9, 25E-9])}
        self.model = GeodeticStrain()
        # Test when no data is input (should equal 0)
        self.assertEqual(self.model.get_number_observations(), 0)
        # Test with data
        self.model.data = self.data
        self.assertEqual(self.model.get_number_observations(), 3)
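# Illustrative sketch (not the library code): the expected values in
# test_secondary_strain_data_with_input are consistent with the following
# standard definitions of the derived quantities, which reproduce the tested
# numbers for exx=1E-7, eyy=5E-8, exy=1E-8.
import numpy as np

def secondary_strain_sketch(exx, eyy, exy):
    # Second invariant of the horizontal strain rate tensor
    second_inv = np.sqrt(exx ** 2 + eyy ** 2 + 2.0 * (exy ** 2))
    # Dilatation, and the vertical strain rate that closes the tensor
    # (hence the test that dilatation + err == 0)
    dilatation = exx + eyy
    err = -dilatation
    # Principal horizontal strain rates (eigenvalues of the 2x2 tensor)
    centre = 0.5 * (exx + eyy)
    radius = np.sqrt((0.5 * (exx - eyy)) ** 2 + exy ** 2)
    e1h, e2h = centre - radius, centre + radius
    return second_inv, dilatation, err, e1h, e2h

# log10 of the returned values for the first test row are approximately
# -6.948 (2nd_inv), -6.824 (dilatation), -7.318 (e1h) and -6.992 (e2h),
# matching the expected arrays above.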
class ReadStrainCsv(object):
    '''
    :class:`openquake.hmtk.parsers.strain_csv_parser.ReadStrainCsv` reads a
    strain model (defined by :class:
    `openquake.hmtk.strain.geodetic_strain.GeodeticStrain`) from a headed
    csv file

    :param str filename:
        Name of strain file in csv format

    :param strain:
        Container for the strain data as instance of :class:
        `openquake.hmtk.strain.geodetic_strain.GeodeticStrain`
    '''
    def __init__(self, strain_file):
        '''
        '''
        self.filename = strain_file
        self.strain = GeodeticStrain()

    def read_data(self, scaling_factor=1E-9, strain_headers=None):
        '''
        Reads the data from the csv file

        :param float scaling_factor:
            Scaling factor used for all strain values (default 1E-9 for
            nanostrain)

        :param list strain_headers:
            List of the variables in the file that correspond to strain
            parameters

        :returns:
            strain - Strain model as an instance of the :class:
            openquake.hmtk.strain.geodetic_strain.GeodeticStrain
        '''
        if strain_headers:
            self.strain.data_variables = strain_headers
        else:
            self.strain.data_variables = STRAIN_VARIABLES

        datafile = open(self.filename, 'r')
        reader = csv.DictReader(datafile)

        self.strain.data = dict([(name, []) for name in reader.fieldnames])
        for row in reader:
            for name in row.keys():
                if 'region' in name.lower():
                    self.strain.data[name].append(row[name])
                elif name in self.strain.data_variables:
                    self.strain.data[name].append(
                        scaling_factor * float(row[name]))
                else:
                    self.strain.data[name].append(float(row[name]))

        for key in self.strain.data.keys():
            if 'region' in key:
                self.strain.data[key] = np.array(self.strain.data[key],
                                                 dtype='S13')
            else:
                self.strain.data[key] = np.array(self.strain.data[key])

        self._check_invalid_longitudes()

        if 'region' not in self.strain.data:
            print('No tectonic regionalisation found in input file!')
        self.strain.data_variables = self.strain.data.keys()

        # Update data with secondary data (i.e. 2nd invariant, e1h, e2h etc.)
        self.strain.get_secondary_strain_data()
        return self.strain

    def _check_invalid_longitudes(self):
        '''
        Checks to ensure that all longitudes are in the range -180. to 180.
        '''
        idlon = self.strain.data['longitude'] > 180.
        if np.any(idlon):
            self.strain.data['longitude'][idlon] = \
                self.strain.data['longitude'][idlon] - 360.
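# Brief usage sketch for the parser above. The file name is a placeholder;
# the expected header uses the column names exercised by the tests
# (longitude, latitude, exx, eyy, exy and, optionally, region), with strain
# values given in nanostrain and rescaled by scaling_factor on read.
reader = ReadStrainCsv('strain_example.csv')
strain = reader.read_data(scaling_factor=1E-9)
# strain.data now holds numpy arrays, longitudes > 180 are wrapped into the
# range -180 to 180, and the secondary quantities (2nd_inv, dilatation, err,
# e1h, e2h) have been appended by get_secondary_strain_data().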