Example #1
def setUp(self):
     """
     Read a sample catalogue containing 8 events after instantiating
     the CsvCatalogueParser object.
     """
     filename = os.path.join(self.BASE_DATA_PATH, 'test_catalogue.csv') 
     parser = CsvCatalogueParser(filename)
     self.cat = parser.read_file()
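
For orientation: these setUp/test fragments are methods cut out of a larger test class. A minimal sketch of the scaffolding they assume (the class name and the data directory are assumptions, not shown in the source):

# Hypothetical scaffolding for the test fragments in these examples;
# BASE_DATA_PATH and the class name are assumed, not shown in the source.
import os
import unittest

from hmtk.parsers.catalogue.csv_catalogue_parser import (
    CsvCatalogueParser, CsvCatalogueWriter)


class CsvCatalogueParserTestCase(unittest.TestCase):  # assumed name
    # directory holding test_catalogue.csv; actual location is assumed
    BASE_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')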
Example #3
 def test_specifying_years(self):
     """
     Tests that when the catalogue is parsed with the specified start and
     end years, these are recognised as attributes of the catalogue
     """
     filename = os.path.join(self.BASE_DATA_PATH, 'test_catalogue.csv')
     parser = CsvCatalogueParser(filename)
     self.cat = parser.read_file(start_year=1000, end_year=1100)
     self.assertEqual(self.cat.start_year, 1000)
     self.assertEqual(self.cat.end_year, 1100)
Example #4
 def test_catalogue_writer_no_purging(self):
     '''
     Tests the writer without any purging
     '''
     # Write to file
     writer = CsvCatalogueWriter(self.output_filename)
     writer.write_file(self.catalogue)
     parser = CsvCatalogueParser(self.output_filename)
     cat2 = parser.read_file()
     self.check_catalogues_are_equal(self.catalogue, cat2)
Example #7
 def test_without_specifying_years(self):
     """
     Tests that when the catalogue is parsed without specifying the start
     and end years, they are taken from the minimum and maximum years in
     the catalogue
     """
     filename = os.path.join(self.BASE_DATA_PATH, 'test_catalogue.csv')
     parser = CsvCatalogueParser(filename)
     self.cat = parser.read_file()
     self.assertEqual(self.cat.start_year, np.min(self.cat.data['year']))
     self.assertEqual(self.cat.end_year, np.max(self.cat.data['year']))
Example #9
def decluster_iscgem_gk74(hmtk_csv):
    from hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser
    from writers import htmk2shp_isc
    from os import path
    
    # parse HMTK csv
    parser = CsvCatalogueParser(hmtk_csv)
    cat = parser.read_file()
    
    # write shapefile
    htmk2shp_isc(cat, path.join('shapefiles', 'ISC-GEM_V4_hmtk_full.shp'))
    
    decluster_GK74(cat, hmtk_csv)
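
decluster_GK74 is called above but not defined in this example; a plausible sketch based on the Gardner-Knopoff configuration that appears in Example #24 below (the output filename and fs_time_prop value are assumptions):

def decluster_GK74(catalogue, hmtk_csv):
    # sketch only: mirrors the GardnerKnopoffType1 usage in Example #24
    from hmtk.seismicity.declusterer.dec_gardner_knopoff import GardnerKnopoffType1
    from hmtk.seismicity.declusterer.distance_time_windows import GardnerKnopoffWindow
    from hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueWriter

    decluster_config = {'time_distance_window': GardnerKnopoffWindow(),
                        'fs_time_prop': 1.0}

    # decluster() returns a cluster index and a flag vector (0 = mainshock)
    vcl, flag_vector = GardnerKnopoffType1().decluster(catalogue, decluster_config)

    # keep mainshocks only and write a declustered copy next to the input
    catalogue.purge_catalogue(flag_vector == 0)
    writer = CsvCatalogueWriter(hmtk_csv.replace('.csv', '_declustered.csv'))
    writer.write_file(catalogue)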
Example #10
def parse_orig_hmtk_cat(hmtk_csv):
    # assumed imports for this fragment; isnan could equally come from numpy
    from math import isnan
    from datetime import datetime
    from hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser

    print 'parsing HMTK catalogue...'

    # parse HMTK csv using modified version of HMTK parser
    parser = CsvCatalogueParser(hmtk_csv)
    hmtkcat = parser.read_file()

    # get number of earthquakes
    neq = len(hmtkcat.data['magnitude'])

    # reformat HMTK dict to one expected for code below
    cat = []
    for i in range(0, neq):
        # first make datestr
        try:
            if not isnan(hmtkcat.data['second'][i]):
                datestr = str(hmtkcat.data['eventID'][i]) \
                          + str('%2.2f' % hmtkcat.data['second'][i])
            else:
                datestr = str(hmtkcat.data['eventID'][i]) + '00.00'

            evdt = datetime.strptime(datestr, '%Y%m%d%H%M%S.%f')

        # if ID not in date form, do it the hard way!
        except (ValueError, TypeError):
            if hmtkcat.data['day'][i] == 0:
                hmtkcat.data['day'][i] = 1

            if hmtkcat.data['month'][i] == 0:
                hmtkcat.data['month'][i] = 1

            datestr = ''.join((str(hmtkcat.data['year'][i]),
                               str('%02d' % hmtkcat.data['month'][i]),
                               str('%02d' % hmtkcat.data['day'][i]),
                               str('%02d' % hmtkcat.data['hour'][i]),
                               str('%02d' % hmtkcat.data['minute'][i])))
            evdt = datetime.strptime(datestr, '%Y%m%d%H%M')

        tdict = {'datetime':evdt, 'prefmag':hmtkcat.data['magnitude'][i], \
                 'lon':hmtkcat.data['longitude'][i], 'lat':hmtkcat.data['latitude'][i], \
                 'dep':hmtkcat.data['depth'][i], 'year':hmtkcat.data['year'][i], \
                 'month':hmtkcat.data['month'][i], 'fixdep':0, 'prefmagtype':'MW', \
                 'auth':hmtkcat.data['Agency'][i]}

        cat.append(tdict)

    return cat, neq
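
A hedged usage sketch: parse_orig_hmtk_cat returns a plain list of per-event dicts plus the event count (the CSV path below is a placeholder):

# hypothetical call; the catalogue path is a placeholder
cat, neq = parse_orig_hmtk_cat('ISC-GEM_V4_hmtk.csv')
print('%d events; first datetime: %s' % (neq, cat[0]['datetime']))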
Example #11
    def test_catalogue_writer_only_mag_table_purging(self):
        '''
        Tests the writer purging only according to the magnitude table
        '''
        # Write to file
        writer = CsvCatalogueWriter(self.output_filename)
        writer.write_file(self.catalogue, magnitude_table=self.magnitude_table)
        parser = CsvCatalogueParser(self.output_filename)
        cat2 = parser.read_file()

        expected_catalogue = Catalogue()
        expected_catalogue.data['eventID'] = np.array([1, 3, 5])
        expected_catalogue.data['magnitude'] = np.array([5.6, 4.8, 5.0])
        expected_catalogue.data['year'] = np.array([1960, 1970, 1990])
        expected_catalogue.data['ErrorStrike'] = np.array([np.nan, np.nan,
                                                           np.nan])
        self.check_catalogues_are_equal(expected_catalogue, cat2)
Example #12
    def test_catalogue_writer_only_flag_purging(self):
        '''
        Tests the writer purging only according to the flag
        '''
        # Write to file
        writer = CsvCatalogueWriter(self.output_filename)
        writer.write_file(self.catalogue, flag_vector=self.flag)
        parser = CsvCatalogueParser(self.output_filename)
        cat2 = parser.read_file()

        expected_catalogue = Catalogue()
        expected_catalogue.data['eventID'] = np.array([1, 2, 3, 4])
        expected_catalogue.data['magnitude'] = np.array([5.6, 5.4, 4.8, 4.3])
        expected_catalogue.data['year'] = np.array([1960, 1965, 1970, 1980])
        expected_catalogue.data['ErrorStrike'] = np.array([np.nan, np.nan, 
                                                           np.nan, np.nan])
        self.check_catalogues_are_equal(expected_catalogue, cat2)
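
The writer tests above and below rely on a setUp that is not shown. The five-event input catalogue can be recovered from the expected outputs; the flag vector and completeness table below are assumptions chosen to reproduce those expectations:

def setUp(self):
    # sketch: input catalogue reconstructed from the expected outputs above;
    # output_filename, flag semantics and the completeness table are assumed
    self.output_filename = 'test_output_catalogue.csv'
    self.catalogue = Catalogue()
    self.catalogue.data['eventID'] = np.array([1, 2, 3, 4, 5])
    self.catalogue.data['magnitude'] = np.array([5.6, 5.4, 4.8, 4.3, 5.0])
    self.catalogue.data['year'] = np.array([1960, 1965, 1970, 1980, 1990])
    # assumed: 1 = keep, 0 = purge, so flag purging drops event 5
    self.flag = np.array([1, 1, 1, 1, 0])
    # assumed completeness table [year, magnitude] that keeps events 1, 3, 5
    self.magnitude_table = np.array([[1970., 4.5], [1960., 5.5]])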
Example #13
    def test_catalogue_writer_only_flag_purging(self):
        '''
        Tests the writer purging only according to the flag
        '''
        # Write to file
        writer = CsvCatalogueWriter(self.output_filename)
        writer.write_file(self.catalogue, flag_vector=self.flag)
        parser = CsvCatalogueParser(self.output_filename)
        cat2 = parser.read_file()

        expected_catalogue = Catalogue()
        expected_catalogue.data['eventID'] = ['1', '2', '3', '4']
        expected_catalogue.data['magnitude'] = np.array([5.6, 5.4, 4.8, 4.3])
        expected_catalogue.data['year'] = np.array([1960, 1965, 1970, 1980])
        expected_catalogue.data['ErrorStrike'] = np.array(
            [np.nan, np.nan, np.nan, np.nan])
        self.check_catalogues_are_equal(expected_catalogue, cat2)
Example #14
    def test_catalogue_writer_only_mag_table_purging(self):
        '''
        Tests the writer purging only according to the magnitude table
        '''
        # Write to file
        writer = CsvCatalogueWriter(self.output_filename)
        writer.write_file(self.catalogue, magnitude_table=self.magnitude_table)
        parser = CsvCatalogueParser(self.output_filename)
        cat2 = parser.read_file()

        expected_catalogue = Catalogue()
        expected_catalogue.data['eventID'] = ['1', '3', '5']
        expected_catalogue.data['magnitude'] = np.array([5.6, 4.8, 5.0])
        expected_catalogue.data['year'] = np.array([1960, 1970, 1990])
        expected_catalogue.data['ErrorStrike'] = np.array(
            [np.nan, np.nan, np.nan])
        self.check_catalogues_are_equal(expected_catalogue, cat2)
Example #15
    def test_catalogue_writer_both_purging(self):
        '''
        Tests the writer purging according to both the magnitude table and
        the flag vector
        '''
        # Write to file
        writer = CsvCatalogueWriter(self.output_filename)
        writer.write_file(self.catalogue,
                          flag_vector=self.flag,
                          magnitude_table=self.magnitude_table)
        parser = CsvCatalogueParser(self.output_filename)
        cat2 = parser.read_file()

        expected_catalogue = Catalogue()
        expected_catalogue.data['eventID'] = np.array([1, 3])
        expected_catalogue.data['magnitude'] = np.array([5.6, 4.8])
        expected_catalogue.data['year'] = np.array([1960, 1970])
        expected_catalogue.data['ErrorStrike'] = np.array([np.nan, np.nan])
        self.check_catalogues_are_equal(expected_catalogue, cat2)
Example #16
    def test_catalogue_writer_both_purging(self):
        '''
        Tests the writer purging according to both the magnitude table and
        the flag vector
        '''
        # Write to file
        writer = CsvCatalogueWriter(self.output_filename)
        writer.write_file(self.catalogue,
                          flag_vector=self.flag,
                          magnitude_table=self.magnitude_table)
        parser = CsvCatalogueParser(self.output_filename)
        cat2 = parser.read_file()

        expected_catalogue = Catalogue()
        expected_catalogue.data['eventID'] = ['1', '3']
        expected_catalogue.data['magnitude'] = np.array([5.6, 4.8])
        expected_catalogue.data['year'] = np.array([1960, 1970])
        expected_catalogue.data['ErrorStrike'] = np.array([np.nan, np.nan])
        self.check_catalogues_are_equal(expected_catalogue, cat2)
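
The check_catalogues_are_equal helper used throughout these tests is also not shown; a minimal sketch of what it plausibly does (the compared keys are an assumption):

def check_catalogues_are_equal(self, catalogue1, catalogue2):
    # sketch: compare the keys the expected catalogues above populate
    np.testing.assert_array_equal(catalogue1.data['eventID'],
                                  catalogue2.data['eventID'])
    np.testing.assert_array_almost_equal(catalogue1.data['magnitude'],
                                         catalogue2.data['magnitude'])
    np.testing.assert_array_equal(catalogue1.data['year'],
                                  catalogue2.data['year'])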
Example #17
###
###    Catalogue
###

input_catalogue_file = 'data_input/hmtk_sa3'
#input_catalogue_file = 'data_input/hmtk_bsb2013'

###
###    Catalogue cache or read/cache
###

try:
    catalogue = pickle.load(open(input_catalogue_file + ".pkl", 'rb'))
except Exception:

    parser = CsvCatalogueParser(input_catalogue_file + ".csv")
    catalogue = parser.read_file()
    print 'Input complete: %s events in catalogue' % catalogue.get_number_events()
    print 'Catalogue Covers the Period: %s to %s' % (catalogue.start_year,
                                                     catalogue.end_year)

    # Sort catalogue chronologically
    catalogue.sort_catalogue_chronologically()
    print 'Catalogue sorted chronologically!'

    valid_magnitudes = np.logical_not(np.isnan(catalogue.data['magnitude']))
    catalogue.select_catalogue_events(valid_magnitudes)
    valid_magnitudes = catalogue.data['magnitude'] >= 3.0
    catalogue.select_catalogue_events(valid_magnitudes)
    #print catalogue.data['magnitude']
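
Example #17 breaks off before the cache is written back. Example #28 below shows the full pattern, so the except branch presumably ends by pickling the filtered catalogue:

    # write the cache so the pickle.load() above succeeds on the next run
    f = open(input_catalogue_file + ".pkl", 'wb')
    pickle.dump(catalogue, f)
    f.close()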
Example #18
# -*- coding: utf-8 -*-





# catalogue
import os
from hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser
BASE_PATH = 'data_input/'
TEST_CATALOGUE = 'hmtk_bsb2013_pp_decluster.csv'
_CATALOGUE = os.path.join(BASE_PATH,TEST_CATALOGUE)
parser = CsvCatalogueParser(_CATALOGUE)
catalogue = parser.read_file()
catalogue.sort_catalogue_chronologically()




import numpy as np


method = "frankel1995"
#method = "woo1996"
method = "helmstetter2012"
#method = "oq-dourado2014"

filename = "dourado_reproduction/final_result_dourado/brasil_8sources1_475_0.csv"
#filename = "data_output/poe_0.1_%s.csv"%(method)

Example #19
# -*- coding: utf-8 -*-

import numpy as np
from hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser

catalogue_file = "data_input/hmtk_bsb2013_pp_decluster.csv"

#from hmtk.seismicity.catalogue import Catalogue

# catalogue
parser = CsvCatalogueParser(catalogue_file)
catalogue = parser.read_file()

catalogue.sort_catalogue_chronologically()



method = "frankel1995"
#method = "woo1996"
#method = "helmstetter2012"
#method = "oq-dourado2014_b2"

filename = "data_output/poe_0.1_smooth_decluster_%s.csv"%(method)
#filename = "data_output/poe_0.1_%s.csv"%(method)
#filename = "data_output/poe_0.1_smooth_decluster_%s_cum.csv"%(method)
#filename = "data_output/bsb2013_helmstetter2012.csv"
filename = "/Users/pirchiner/dev/helmstetter/output/conan/rates_2_280.csv"

d = np.genfromtxt(fname=filename, 
                 #comments='#',
                  delimiter=',', 
Example #20
                    fontsize=12,
                    dashes=[2, 2],
                    color='0.5',
                    linewidth=0.75)

    return m


#############################################################
# parse catalogue & plot
#############################################################

sheef_full = path.join('2010SHEEF', 'SHEEF2010Mw2_hmtk.csv')
sheef_decl = path.join('2010SHEEF', 'SHEEF2010Mw2_hmtk_declustered.csv')

parser = CsvCatalogueParser(sheef_full)
cat_full = parser.read_file()

lonf = cat_full.data['longitude']
latf = cat_full.data['latitude']
magf = cat_full.data['magnitude']

###############################################################
# plt full catalogue
###############################################################
plt.subplot(121)

# map earthquakes that pass completeness
m = make_basemap(cnrs)

# get index of events
Example #21
from hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser, CsvCatalogueWriter

# Import Mmax Tools
from hmtk.seismicity.max_magnitude.kijko_nonparametric_gaussian import KijkoNonParametricGaussian
from hmtk.seismicity.max_magnitude.kijko_sellevol_bayes import KijkoSellevolBayes
from hmtk.seismicity.max_magnitude.kijko_sellevol_fixed_b import KijkoSellevolFixedb
from hmtk.seismicity.max_magnitude.cumulative_moment_release import CumulativeMoment

print 'Import OK!'


input_file = 'data_input/hmtk_bsb2013.csv'

parser = CsvCatalogueParser(input_file)
catalogue = parser.read_file()
print 'Input complete: %s events in catalogue' % catalogue.get_number_events()
print 'Catalogue Covers the Period: %s to %s' % (catalogue.start_year,
                                                 catalogue.end_year)

# Sort catalogue chronologically
catalogue.sort_catalogue_chronologically()
print 'Catalogue sorted chronologically!'

# Plot magnitude time density
from hmtk.plotting.seismicity.catalogue_plots import plot_magnitude_time_density
magnitude_bin = 0.2
time_bin = 10.0
plot_magnitude_time_density(catalogue, magnitude_bin, time_bin)
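
The Mmax classes imported above share a common estimator interface; a hedged sketch of invoking one of them (the number_bootstraps config key follows the hmtk demos and may differ across versions):

# sketch: cumulative-moment Mmax estimate on the parsed catalogue
mmax_config = {'number_bootstraps': 100}
mmax, mmax_sigma = CumulativeMoment().get_mmax(catalogue, mmax_config)
print('Mmax = %.2f +/- %.2f' % (mmax, mmax_sigma))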
Example #22
def get_hmtk_catalogue(filename):
    from hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser
    catalogue_parser = CsvCatalogueParser(filename)
    return catalogue_parser.read_file()
Example #23
# -*- coding: utf-8 -*-

import numpy as np
from hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser

catalogue_file = "data_input/hmtk_bsb2013_pp_decluster.csv"

#from hmtk.seismicity.catalogue import Catalogue

# catalogue
parser = CsvCatalogueParser(catalogue_file)
catalogue = parser.read_file()

catalogue.sort_catalogue_chronologically()



method = "frankel1995"
method = "woo1996"
#method = "helmstetter2012"
#method = "oq-dourado2014_b2"

filename = "data_output/poe_0.1_smooth_decluster_%s.csv"%(method)
filename = "data_output/poe_0.1_%s.csv"%(method)
#filename = "data_output/poe_0.1_smooth_decluster_%s_cum.csv"%(method)
filename = "data_output/bsb2013_helmstetter2012.csv"


d = np.genfromtxt(fname=filename, 
                 #comments='#',
                  delimiter=',', 
Example #24
got basics from here:

http://seiscode.iag.usp.br/gitlab/hazard/pshab_source_models/raw/646202c6c5a38426783b4851b188280a1441e032/notes/01_workflow_decluster.py
'''

#####################################################
# parse catalogues and prep declusterer
#####################################################

# reformat SHEEF
hmtkfile = sheef2hmtk_csv(path.join(
    '2010SHEEF', 'SHEEF2010_crust.gmtdat'))  # only need to do this once

# parse HMTK catalogue
inputsheef = path.join(hmtkfile)
parser = CsvCatalogueParser(inputsheef)
catalogue = parser.read_file()

decluster_config = {
    'time_distance_window': GardnerKnopoffWindow(),
    'fs_time_prop': 1.0
}

#####################################################
# decluster here
#####################################################

print 'Running GK declustering...'
decluster_method = GardnerKnopoffType1()

Example #25
#---------------------------------------------
plt.rcParams['pdf.fonttype'] = 42
mpl.style.use('classic')

##########################################################################################
# parse epicentres
##########################################################################################

# parse HMTK csv
'''
hmtk_csv = '/nas/gemd/ehp/georisk_earthquake/modelling/sandpits/tallen/NSHA2018/catalogue/data/NSHA18CAT_V0.1_hmtk_declustered.csv'
parser = CsvCatalogueParser(hmtk_csv)    
declcat = parser.read_file()
'''
hmtk_csv = '/nas/gemd/ehp/georisk_earthquake/modelling/sandpits/tallen/NSHA2018/catalogue/data/NSHA18CAT_V0.1_hmtk_declustered.csv'
parser = CsvCatalogueParser(hmtk_csv)
cat = parser.read_file()

##########################################################################################
#108/152/-44/-8
urcrnrlat = -8.
llcrnrlat = -46.
urcrnrlon = 157.
llcrnrlon = 109.
lon_0 = mean([llcrnrlon, urcrnrlon])
lat_1 = percentile([llcrnrlat, urcrnrlat], 25)
lat_2 = percentile([llcrnrlat, urcrnrlat], 75)

fig = plt.figure(figsize=(20, 12))
ax = fig.add_subplot(121)
plt.tick_params(labelsize=8)
Example #26
### 
###    Catalogue 
###

#input_catalogue_file = 'data_input/hmtk_sa3'
input_catalogue_file = 'data_input/hmtk_bsb2013'

### 
###    Catalogue cache or read/cache
###

try:
    catalogue = pickle.load(open(input_catalogue_file + ".pkl", 'rb'))
except Exception:

    parser = CsvCatalogueParser(input_catalogue_file + ".csv")
    catalogue = parser.read_file()
    print 'Input complete: %s events in catalogue' % catalogue.get_number_events()
    print 'Catalogue Covers the Period: %s to %s' % (catalogue.start_year, catalogue.end_year)

    # Sort catalogue chronologically
    catalogue.sort_catalogue_chronologically()
    print 'Catalogue sorted chronologically!'

    valid_magnitudes = np.logical_not(np.isnan(catalogue.data['magnitude']))
    catalogue.select_catalogue_events(valid_magnitudes)
    valid_magnitudes = catalogue.data['magnitude'] >= 3.0
    catalogue.select_catalogue_events(valid_magnitudes)
    #print catalogue.data['magnitude']
     
    valid_depths = np.logical_not(np.isnan(catalogue.data['depth']))
Example #27
###
###    Catalogue
###

#input_catalogue_file = 'data_input/hmtk_sa3'
input_catalogue_file = 'data_input/hmtk_bsb2013'

###
###    Catalogue cache or read/cache
###

try:
    catalogue = pickle.load(open(input_catalogue_file + ".pkl", 'rb'))
except Exception:

    parser = CsvCatalogueParser(input_catalogue_file + ".csv")
    catalogue = parser.read_file()
    print 'Input complete: %s events in catalogue' % catalogue.get_number_events()
    print 'Catalogue Covers the Period: %s to %s' % (catalogue.start_year,
                                                     catalogue.end_year)

    # Sort catalogue chronologically
    catalogue.sort_catalogue_chronologically()
    print 'Catalogue sorted chronologically!'

    valid_magnitudes = np.logical_not(np.isnan(catalogue.data['magnitude']))
    catalogue.select_catalogue_events(valid_magnitudes)
    valid_magnitudes = catalogue.data['magnitude'] >= 3.0
    catalogue.select_catalogue_events(valid_magnitudes)
    #print catalogue.data['magnitude']
Example #28
def read_catalog(input_catalogue_file, m_min=3.0):
    
    
    ### 
    ###    Catalogue cache or read/cache
    ###
    
    try:
        print '--Reading Catalog'
        print input_catalogue_file
        catalogue = pickle.load(open(input_catalogue_file + ".pkl", 'rb'))
        print 'Input complete: %s events in catalogue' % catalogue.get_number_events()
        print 'Catalogue Covers the Period: %s to %s' % (catalogue.start_year, catalogue.end_year)
    except Exception:
        print '--Reading Catalog'
        parser = CsvCatalogueParser(input_catalogue_file + ".csv")
        catalogue = parser.read_file()

        print 'Input complete: %s events in catalogue' % catalogue.get_number_events()
        print 'Catalogue Covers the Period: %s to %s' % (catalogue.start_year, catalogue.end_year)
    
        # Sort catalogue chronologically
        catalogue.sort_catalogue_chronologically()
        print 'Catalogue sorted chronologically!'
    
        print '--Removing nan magnitudes'
        valid_magnitudes = np.logical_not(np.isnan(catalogue.data['magnitude']))
        catalogue.select_catalogue_events(valid_magnitudes)
        print 'Input complete: %s events in catalogue' % catalogue.get_number_events()
        print 'Catalogue Covers the Period: %s to %s' % (catalogue.start_year, catalogue.end_year)

        print '--Removing magnitudes < %f'%m_min
        valid_magnitudes = catalogue.data['magnitude'] >= m_min
        catalogue.select_catalogue_events(valid_magnitudes)
        print 'Input complete: %s events in catalogue' % catalogue.get_number_events()
        print 'Catalogue Covers the Period: %s to %s' % (catalogue.start_year, catalogue.end_year)
         
        print '--Removing nan depths'
        valid_depths = np.logical_not(np.isnan(catalogue.data['depth']))
        catalogue.select_catalogue_events(valid_depths)
        print 'Input complete: %s events in catalogue' % catalogue.get_number_events()
        print 'Catalogue Covers the Period: %s to %s' % (catalogue.start_year, catalogue.end_year)
         
        print '--Removing 0 days'
        valid_days = catalogue.data['day'] != 0
        catalogue.select_catalogue_events(valid_days)
        print 'Input complete: %s events in catalogue' % catalogue.get_number_events()
        print 'Catalogue Covers the Period: %s to %s' % (catalogue.start_year, catalogue.end_year)
        
        # Cache
        # Set-up the file writer
        print '--Caching'
        output_file_name = input_catalogue_file + '.csv'
        #writer = CsvCatalogueWriter(output_file_name)
        #writer.write_file(catalogue)
        #exit()
         
        #print 'File %s written' % output_file_name
        f = open(input_catalogue_file + ".pkl", 'wb')
        pickle.dump(catalogue, f)
        f.close()

    return catalogue
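
A usage sketch for read_catalog, matching the inputs used elsewhere in these examples: the first call parses data_input/hmtk_bsb2013.csv and writes the .pkl cache; later calls load the pickle directly.

catalogue = read_catalog('data_input/hmtk_bsb2013', m_min=3.0)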
Example #29
    kagan_i0 = probs.get_i0()
    kagan_i1 = probs.get_i1()
    print "Poisson LLH = %.6f,  I0 = %.6f,   I1 = %.6f,   I' = %.6f" % (
        poiss_llh, kagan_i0, kagan_i1, kagan_i0 - kagan_i1)


SARA_COMP_TABLE = np.array([[1992., 4.5], [1974., 5.], [1964., 5.5],
                            [1954., 5.75], [1949., 6.], [1949., 6.5],
                            [1930., 7.0]])
SARA_CAT_FILE = "catalogue/sara_all_v07_harm_per123_dist_crustal_clean.csv"
SARA_DECLUST_CAT = "catalogue/sara_cat_shallow_declust.csv"
COMP_FILE = "sam_completeness_zones.hdf5"

if __name__ == "__main__":
    # Load in catalogue
    parser = CsvCatalogueParser(SARA_DECLUST_CAT)
    cat1 = parser.read_file()
    idx = cat1.data["magnitude"] >= 3.0
    cat1.purge_catalogue(idx)
    bbox = [-90.5, -30.0, 0.1, -60.5, 15.5, 0.1, 0., 100.0, 100.0]
    config = {
        "k": 5,
        "r_min": 0.0005 / 25.0,
        "bvalue": 1.0,
        "mmin": 3.0,
        "learning_start": 1930,
        "learning_end": 2003,
        "target_start": 2004,
        "target_end": 2013
    }
    # Run
Example #30
from hmtk.seismicity.smoothing.kernels.isotropic_gaussian import \
    IsotropicGaussian

BASE_PATH = 'data_input/'
#OUTPUT_FILE = 'data_output/hmtk_bsb2013_decluster_frankel1995.csv'
OUTPUT_FILE = '/Users/pirchiner/dev/pshab/data_output/hmtk_sa3_decluster_frankel1995.csv'

model_name = 'hmtk_bsb2013'
model_name = 'hmtk_sa3'
#TEST_CATALOGUE = 'hmtk_bsb2013_pp_decluster.csv'
TEST_CATALOGUE = 'hmtk_sa3_pp_decluster.csv'

_CATALOGUE = os.path.join(BASE_PATH, TEST_CATALOGUE)

# catalogue
parser = CsvCatalogueParser(_CATALOGUE)
catalogue = parser.read_file()

catalogue.sort_catalogue_chronologically()
#print catalogue.get_number_events()

#res, spc = 0.5, 100
res, spc = 1, 50
#res, spc = 0.2, 250

# model
#[xmin, xmax, spcx, ymin, ymax, spcy, zmin, spcz]
map_config = {
    'min_lon': -95.0,
    'max_lon': -25.0,
    'min_lat': -65.0,
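
Example #30 breaks off inside map_config. For orientation, a hedged sketch of how hmtk ties the grid and the IsotropicGaussian kernel into a smoothed-seismicity run (grid extents reuse the map limits above where given; the northern limit, depth bounds and config keys follow the hmtk demos and are assumptions):

# sketch: smoothed-seismicity run with the IsotropicGaussian kernel
from hmtk.seismicity.smoothing.smoothed_seismicity import SmoothedSeismicity

# [xmin, xmax, spcx, ymin, ymax, spcy, zmin, zmax, spcz]; ymax/zmax assumed
grid_limits = [-95.0, -25.0, res, -65.0, 15.0, res, 0., 100., 100.]
smoother = SmoothedSeismicity(grid_limits, use_3d=False, bvalue=1.0)

config = {'Length_Limit': 3., 'BandWidth': 50., 'increment': 0.1}
data = smoother.run_analysis(catalogue, config,
                             smoothing_kernel=IsotropicGaussian())
smoother.write_to_csv(OUTPUT_FILE)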
Example #31
from gmt_tools import cpt2colormap
from os import path, walk, system
#from obspy.imaging.beachball import Beach
from hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser, CsvCatalogueWriter
from misc_tools import remove_last_cmap_colour

plt.rcParams['pdf.fonttype'] = 42
mpl.style.use('classic')

##########################################################################################
# parse epicentres
##########################################################################################

# parse HMTK csv
hmtk_csv = '/nas/gemd/ehp/georisk_earthquake/modelling/sandpits/tallen/NSHA2018/catalogue/data/merged_NSHA18-ISCGEM_hmtk.csv'
parser = CsvCatalogueParser(hmtk_csv)
fullcat = parser.read_file()

hmtk_csv = '/nas/gemd/ehp/georisk_earthquake/modelling/sandpits/tallen/NSHA2018/catalogue/data/NSHA18CAT_V0.1_hmtk_declustered.csv'
parser = CsvCatalogueParser(hmtk_csv)
declcat = parser.read_file()

##########################################################################################
#108/152/-44/-8
urcrnrlat = -29.5
llcrnrlat = -33.3
urcrnrlon = 118.75
llcrnrlon = 115.25

lon_0 = mean([llcrnrlon, urcrnrlon])
lat_1 = percentile([llcrnrlat, urcrnrlat], 25)