Example No. 1
    def setUp(self):
        filename = os.path.join(BASE_DATA_PATH, 'completeness_test_cat.csv')
        parser0 = CsvCatalogueParser(filename)
        self.catalogue = parser0.read_file()

        self.config = {'algorithm': None, 'number_bootstraps': None}
        self.model = CumulativeMoment()
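In context, this fixture is normally followed by a test that calls the estimator on the parsed catalogue. A minimal standalone sketch, assuming the openquake.hmtk module paths implied by the class names and a get_mmax(catalogue, config) method returning the maximum magnitude and its uncertainty; BASE_DATA_PATH is a placeholder:

# Hedged sketch: running the CumulativeMoment estimator configured above.
# Module paths and the (mmax, mmax_sigma) return pair are assumptions based
# on the openquake.hmtk API; BASE_DATA_PATH is a placeholder.
import os
from openquake.hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser
from openquake.hmtk.seismicity.max_magnitude.cumulative_moment_release import \
    CumulativeMoment

BASE_DATA_PATH = './data'  # placeholder
catalogue = CsvCatalogueParser(
    os.path.join(BASE_DATA_PATH, 'completeness_test_cat.csv')).read_file()
config = {'algorithm': None, 'number_bootstraps': 100}
mmax, mmax_sigma = CumulativeMoment().get_mmax(catalogue, config)
print('Mmax = %.2f +/- %.2f' % (mmax, mmax_sigma))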
Example No. 2
    def setUp(self):
        """
        Read the sample catalogue
        """
        flnme = 'gardner_knopoff_test_catalogue.csv'
        filename = os.path.join(self.BASE_DATA_PATH, flnme)
        parser = CsvCatalogueParser(filename)
        self.cat = parser.read_file()
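The catalogue read here feeds the Gardner-Knopoff declustering step implied by the file name. A hedged standalone sketch of that step, assuming the openquake.hmtk GardnerKnopoffType1 class with a decluster(catalogue, config) method returning a cluster-index vector and a flag vector in which 0 marks mainshocks; the data path is a placeholder:

# Hedged sketch of the declustering step implied by the fixture above.
# The config keys, the GardnerKnopoffWindow object and the return values
# are assumptions based on the openquake.hmtk API; the path is a placeholder.
import os
from openquake.hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser
from openquake.hmtk.seismicity.declusterer.dec_gardner_knopoff import \
    GardnerKnopoffType1
from openquake.hmtk.seismicity.declusterer.distance_time_windows import \
    GardnerKnopoffWindow

BASE_DATA_PATH = './data'  # placeholder
cat = CsvCatalogueParser(
    os.path.join(BASE_DATA_PATH, 'gardner_knopoff_test_catalogue.csv')).read_file()
decluster_config = {'time_distance_window': GardnerKnopoffWindow(),
                    'fs_time_prop': 1.0}
vcl, flag = GardnerKnopoffType1().decluster(cat, decluster_config)
print('%d of %d events flagged as mainshocks' % ((flag == 0).sum(), len(flag)))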
Example No. 3
    def setUp(self):
        """
        Read the sample catalogue
        """
        flnme = 'afteran_test_catalogue.csv'
        filename = os.path.join(self.BASE_DATA_PATH, flnme)
        parser = CsvCatalogueParser(filename)
        self.cat = parser.read_file()
        self.dec = Afteran()
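The AFTERAN fixture is exercised the same way; the main difference is a fixed time window (in days) in the configuration. A short hedged sketch of the test body that would follow, where self.cat and self.dec are the attributes set above and the window object and 60-day value are illustrative assumptions, not values from the test:

# Hedged sketch: declustering with the Afteran instance created above.
# 'time_window' = 60.0 days and the window object are illustrative only.
from openquake.hmtk.seismicity.declusterer.distance_time_windows import \
    GardnerKnopoffWindow

afteran_config = {'time_distance_window': GardnerKnopoffWindow(),
                  'time_window': 60.0}
vcl, flag = self.dec.decluster(self.cat, afteran_config)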
Example No. 4
    def setUp(self):
        filename = os.path.join(BASE_DATA_PATH, 'completeness_test_cat.csv')
        parser0 = CsvCatalogueParser(filename)
        self.catalogue = parser0.read_file()
        self.config = {'maximum_iterations': 1000,
                       'number_earthquakes': 100,
                       'number_samples': 51,
                       'tolerance': 0.05}
        self.model = KijkoNonParametricGaussian()
Example No. 5
    def setUp(self):
        filename = os.path.join(BASE_DATA_PATH, 'completeness_test_cat.csv')
        parser0 = CsvCatalogueParser(filename)
        self.catalogue = parser0.read_file()
        self.config = {'b-value': 1.0,
                       'sigma-b': 0.05,
                       'input_mmin': 5.0,
                       'input_mmax': None,
                       'input_mmax_uncertainty': None,
                       'tolerance': 0.001,
                       'maximum_iterations': 1000}
        self.model = KijkoSellevolBayes()
Example No. 6
    def setUp(self):
        '''
        Set up test class
        '''
        filename = os.path.join(BASE_DATA_PATH, 'completeness_test_cat.csv')
        parser0 = CsvCatalogueParser(filename)
        self.catalogue = parser0.read_file()
        self.config = {'b-value': 1.0,
                       'input_mmin': 5.0,
                       'input_mmax': None,
                       'tolerance': 0.001,
                       'maximum_iterations': 1000}
        self.model = KijkoSellevolFixedb()
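Examples No. 4 to No. 6 differ only in their configuration dictionaries; each maximum-magnitude estimator is then driven through the same call. A hedged comparison sketch, assuming each class exposes get_mmax(catalogue, config) -> (mmax, mmax_sigma) and that the module paths follow the openquake.hmtk layout:

# Hedged sketch comparing the estimators configured in Examples No. 4-6.
# get_mmax(catalogue, config) -> (mmax, mmax_sigma) is assumed for each class;
# BASE_DATA_PATH is a placeholder.
import os
from openquake.hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser
from openquake.hmtk.seismicity.max_magnitude.kijko_nonparametric_gaussian import \
    KijkoNonParametricGaussian
from openquake.hmtk.seismicity.max_magnitude.kijko_sellevol_bayes import \
    KijkoSellevolBayes
from openquake.hmtk.seismicity.max_magnitude.kijko_sellevol_fixed_b import \
    KijkoSellevolFixedb

BASE_DATA_PATH = './data'  # placeholder
catalogue = CsvCatalogueParser(
    os.path.join(BASE_DATA_PATH, 'completeness_test_cat.csv')).read_file()

estimators = [
    (KijkoNonParametricGaussian(), {'maximum_iterations': 1000,
                                    'number_earthquakes': 100,
                                    'number_samples': 51,
                                    'tolerance': 0.05}),
    (KijkoSellevolBayes(), {'b-value': 1.0, 'sigma-b': 0.05,
                            'input_mmin': 5.0, 'input_mmax': None,
                            'input_mmax_uncertainty': None,
                            'tolerance': 0.001, 'maximum_iterations': 1000}),
    (KijkoSellevolFixedb(), {'b-value': 1.0, 'input_mmin': 5.0,
                             'input_mmax': None, 'tolerance': 0.001,
                             'maximum_iterations': 1000}),
]
for model, config in estimators:
    mmax, mmax_sigma = model.get_mmax(catalogue, config)
    print('%s: Mmax = %.2f +/- %.2f'
          % (model.__class__.__name__, mmax, mmax_sigma))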
Example No. 7
    def test_complete_stepp_analysis_basic(self):
        '''
        Basic test of the entire completeness analysis using a synthetic
        test catalogue with in-built completeness periods
        '''
        parser0 = CsvCatalogueParser(INPUT_FILE_1)
        self.catalogue = parser0.read_file()

        self.config = {
            'magnitude_bin': 0.5,
            'time_bin': 5.0,
            'increment_lock': True,
            'filename': None
        }

        expected_completeness_table = np.array([[1990., 4.0], [1962., 4.5],
                                                [1959., 5.0], [1906., 5.5],
                                                [1906., 6.0], [1904., 6.5],
                                                [1904., 7.0]])

        np.testing.assert_array_almost_equal(
            expected_completeness_table,
            self.process.completeness(self.catalogue, self.config))
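The excerpt does not show what self.process is; given the configuration keys it is plausibly an instance of the Stepp (1971) completeness algorithm. A hedged standalone sketch under that assumption, with the elided INPUT_FILE_1 replaced by a placeholder path:

# Hedged sketch: the completeness analysis exercised by the test above.
# Stepp1971 and its completeness(catalogue, config) method, returning rows of
# [completeness year, magnitude threshold], are assumptions; the path is a
# placeholder.
from openquake.hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser
from openquake.hmtk.seismicity.completeness.comp_stepp_1971 import Stepp1971

INPUT_FILE_1 = './data/completeness_test_cat.csv'  # placeholder
catalogue = CsvCatalogueParser(INPUT_FILE_1).read_file()
config = {'magnitude_bin': 0.5, 'time_bin': 5.0,
          'increment_lock': True, 'filename': None}
completeness_table = Stepp1971().completeness(catalogue, config)
print(completeness_table)  # one [year, magnitude] row per completeness bin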
Example No. 8
        np.arange(map_config['min_lon'], map_config['max_lon'], 5))
    plt.colorbar(label='log10(Smoothed rate per cell)')
    plt.legend()
    figname = smoother_filename[:-4] + '_smoothed_rates_map.png'
    plt.savefig(figname)


# Set up parallel processing
proc = pypar.size()  # Number of processors as specified by mpirun
myid = pypar.rank()  # Id of this process (myid in [0, proc-1])
node = pypar.get_processor_name()  # Host name on which current process is running
print('I am proc %d of %d on node %s' % (myid, proc, node))
t0 = pypar.time()

parser = CsvCatalogueParser(catalogue_filename)  # From .csv to hmtk

# Read and process the catalogue content in a variable called "catalogue"
catalogue = parser.read_file(start_year=1965, end_year=2016)

# How many events in the catalogue?
print "The catalogue contains %g events" % catalogue.get_number_events()

# What is the geographical extent of the catalogue?
bbox = catalogue.get_bounding_box()
print "Catalogue ranges from %.4f E to %.4f E Longitude and %.4f N to %.4f N Latitude\n" % bbox

catalogue.sort_catalogue_chronologically()
catalogue.data['magnitude']
index = catalogue.data['magnitude'] > 1.5
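The boolean index built on the last line is normally applied back to the catalogue, exactly as done with purge_catalogue in Example No. 9 below; a short sketch:

# Sketch: apply the magnitude selection built above.
# purge_catalogue(index) keeps the events where index is True, mirroring the
# usage shown in Example No. 9.
catalogue.purge_catalogue(index)
print('%d events remain with magnitude > 1.5' % catalogue.get_number_events())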
Example No. 9
from openquake.hazardlib.sourcewriter import obj_to_node
from openquake.baselib.node import Node
from openquake.hazardlib import nrml
from openquake.hazardlib.geo.point import Point
from openquake.hazardlib.mfd import TruncatedGRMFD
from openquake.hazardlib.geo.nodalplane import NodalPlane
from openquake.hazardlib.nrml import SourceModelParser, write, NAMESPACE
from openquake.hazardlib.pmf import PMF
print "Everything Imported OK!"

bvalue = float(sys.argv[1])
print('b value', bvalue)

ifile = "../../catalogue/data/AUSTCAT_V0.12_hmtk_declustered.csv"
#ifile = "../../catalogue/data/AUSTCAT_V0.12_hmtk_mx_orig.csv"
parser = CsvCatalogueParser(ifile)
catalogue = parser.read_file(start_year=1965, end_year=2010)
# How many events in the catalogue?
print "The catalogue contains %g events" % catalogue.get_number_events()
neq = len(catalogue.data['magnitude'])
print "The catalogue contains %g events" % neq
# What is the geographical extent of the catalogue?
bbox = catalogue.get_bounding_box()
print "Catalogue ranges from %.4f E to %.4f E Longitude and %.4f N to %.4f N Latitude\n" % bbox

catalogue.sort_catalogue_chronologically()
index = np.logical_and(catalogue.data["magnitude"] > 1.5,
                       catalogue.data["depth"] >= 0.0)
catalogue.purge_catalogue(index)
catalogue.get_number_events()
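The hazardlib imports at the top of this example point at the next step: turning the catalogue statistics into source-model objects. A hedged sketch of building a truncated Gutenberg-Richter MFD and a nodal-plane PMF with those classes; every numeric value except bvalue is an illustrative placeholder:

# Hedged sketch: building hazardlib MFD and nodal-plane objects with the
# classes imported above. The a-value, magnitude bounds, bin width and
# nodal-plane geometry are illustrative placeholders.
from openquake.hazardlib.mfd import TruncatedGRMFD
from openquake.hazardlib.geo.nodalplane import NodalPlane
from openquake.hazardlib.pmf import PMF

a_value = 3.0  # placeholder Gutenberg-Richter a-value
mfd = TruncatedGRMFD(min_mag=4.5, max_mag=7.5, bin_width=0.1,
                     a_val=a_value, b_val=bvalue)  # bvalue parsed from sys.argv above
nodal_plane_dist = PMF([(0.5, NodalPlane(strike=0.0, dip=30.0, rake=90.0)),
                        (0.5, NodalPlane(strike=90.0, dip=30.0, rake=90.0))])
print(mfd.get_annual_occurrence_rates()[:3])  # first few (magnitude, rate) pairs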
Example No. 10
from openquake.hazardlib.mfd import TruncatedGRMFD
from openquake.hazardlib.geo.nodalplane import NodalPlane
from openquake.hazardlib.pmf import PMF
#from nrml.models import HypocentralDepth
print("Everything Imported OK!")


bvalue = float(sys.argv[1])
catalogue_filename = sys.argv[2]
print('b value', bvalue)

#Importing catalogue
#catalogue_filename = "../../catalogue/data/AUSTCAT_V0.12_hmtk_declustered.csv"
#catalogue_filename = "../../catalogue/data/AUSTCAT_V0.12_hmtk_mx_orig.csv"
parser = CsvCatalogueParser(catalogue_filename)

# Read and process the catalogue content in a variable called "catalogue"
catalogue = parser.read_file(start_year=1788, end_year=2010)

# How many events in the catalogue?
print "The catalogue contains %g events" % catalogue.get_number_events()

# What is the geographical extent of the catalogue?
bbox = catalogue.get_bounding_box()
print "Catalogue ranges from %.4f E to %.4f E Longitude and %.4f N to %.4f N Latitude\n" % bbox



catalogue.sort_catalogue_chronologically()