Example #1
def decluster_GK74(catalogue):
    from copy import deepcopy
    from os import remove

    from hmtk.seismicity.declusterer.dec_gardner_knopoff import GardnerKnopoffType1
    from hmtk.seismicity.declusterer.distance_time_windows import GardnerKnopoffWindow
    from hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueWriter

    decluster_config = {
        'time_distance_window': GardnerKnopoffWindow(),
        'fs_time_prop': 1.0
    }

    #####################################################
    # decluster here
    #####################################################

    print('Running GK declustering...')
    decluster_method = GardnerKnopoffType1()

    #---------------------------------------------
    # declustering
    cluster_index_gk, cluster_flags_gk = decluster_method.decluster(
        catalogue, decluster_config)
    #---------------------------------------------

    # add the cluster index and cluster flag (mainshock vs fore/aftershock)
    # to the catalogue keys
    catalogue.data['cluster_index_gk'] = cluster_index_gk
    catalogue.data['cluster_flags_gk'] = cluster_flags_gk

    #####################################################
    # purge non-Poissonian events
    #####################################################

    # create a copy from the catalogue object to preserve it
    catalogue_gk = deepcopy(catalogue)

    catalogue_gk.purge_catalogue(
        cluster_flags_gk == 0)  # cluster_flags == 0: mainshocks

    print('Gardner-Knopoff\tbefore: %s after: %s' % (
        catalogue.get_number_events(), catalogue_gk.get_number_events()))

    #####################################################
    # write declustered catalogue
    #####################################################

    # set up the output file name (hmtk_csv is the input CSV path, assumed
    # to be defined at module level)
    declustered_catalog_file = hmtk_csv.split('.')[0] + '_declustered_GK74.csv'

    # if it exists, delete previous file
    try:
        remove(declustered_catalog_file)
    except OSError:
        print('%s does not exist' % declustered_catalog_file)

    # set-up writer
    writer = CsvCatalogueWriter(declustered_catalog_file)

    # write
    writer.write_file(catalogue_gk)
    #writer.write_file(catalogue_af)
    print('Declustered catalogue: ok\n')
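A minimal usage sketch for this function, assuming a hypothetical input file name (the parser import is the standard hmtk location):

from hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser

hmtk_csv = 'my_catalogue.csv'  # hypothetical path; also read by decluster_GK74
parser = CsvCatalogueParser(hmtk_csv)
catalogue = parser.read_file()
decluster_GK74(catalogue)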
Example #2
    def test_catalogue_writer_no_purging(self):
        '''
        Tests the writer without any purging
        '''
        # Write to file
        writer = CsvCatalogueWriter(self.output_filename)
        writer.write_file(self.catalogue)
        parser = CsvCatalogueParser(self.output_filename)
        cat2 = parser.read_file()
        self.check_catalogues_are_equal(self.catalogue, cat2)
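These round-trip tests rely on a comparison helper defined elsewhere in the test case. A hypothetical sketch of what such a helper can look like (the real test class defines its own version):

    def check_catalogues_are_equal(self, catalogue1, catalogue2):
        # hypothetical helper: compare every key the two catalogues share;
        # assert_array_equal treats NaNs in matching positions as equal
        for key in catalogue1.data:
            if key in catalogue2.data:
                np.testing.assert_array_equal(catalogue1.data[key],
                                              catalogue2.data[key])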
Example #4
    def test_catalogue_writer_only_mag_table_purging(self):
        '''
        Tests the writer only purging according to the magnitude table
        '''
        # Write to file
        writer = CsvCatalogueWriter(self.output_filename)
        writer.write_file(self.catalogue, magnitude_table=self.magnitude_table)
        parser = CsvCatalogueParser(self.output_filename)
        cat2 = parser.read_file()

        expected_catalogue = Catalogue()
        expected_catalogue.data['eventID'] = ['1', '3', '5']
        expected_catalogue.data['magnitude'] = np.array([5.6, 4.8, 5.0])
        expected_catalogue.data['year'] = np.array([1960, 1970, 1990])
        expected_catalogue.data['ErrorStrike'] = np.array(
            [np.nan, np.nan, np.nan])
        self.check_catalogues_are_equal(expected_catalogue, cat2)
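The magnitude_table argument is a completeness table, as in Example #12 below: each row gives the year from which the catalogue is considered complete above the paired magnitude, and write_file purges events falling in the incomplete period. A sketch with illustrative values:

import numpy as np

# rows of [completeness year, minimum complete magnitude]
magnitude_table = np.array([[1990., 4.0],
                            [1960., 5.0]])
writer.write_file(catalogue, magnitude_table=magnitude_table)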
Example #5
    def test_catalogue_writer_only_flag_purging(self):
        '''
        Tests the writer only purging according to the flag
        '''
        # Write to file
        writer = CsvCatalogueWriter(self.output_filename)
        writer.write_file(self.catalogue, flag_vector=self.flag)
        parser = CsvCatalogueParser(self.output_filename)
        cat2 = parser.read_file()

        expected_catalogue = Catalogue()
        expected_catalogue.data['eventID'] = ['1', '2', '3', '4']
        expected_catalogue.data['magnitude'] = np.array([5.6, 5.4, 4.8, 4.3])
        expected_catalogue.data['year'] = np.array([1960, 1965, 1970, 1980])
        expected_catalogue.data['ErrorStrike'] = np.array(
            [np.nan, np.nan, np.nan, np.nan])
        self.check_catalogues_are_equal(expected_catalogue, cat2)
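The flag_vector is a per-event integer array; following the declustering convention in Example #1, zero marks mainshocks to keep and non-zero marks events to purge. A sketch consistent with this test's expected output, where only the fifth event is dropped (the values are illustrative):

import numpy as np

flag = np.array([0, 0, 0, 0, 1])  # illustrative: purge only the fifth event
writer.write_file(catalogue, flag_vector=flag)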
Example #6
    def test_catalogue_writer_only_mag_table_purging(self):
        '''
        Tests the writer only purging according to the magnitude table
        '''
        # Write to file
        writer = CsvCatalogueWriter(self.output_filename)
        writer.write_file(self.catalogue, magnitude_table=self.magnitude_table)
        parser = CsvCatalogueParser(self.output_filename)
        cat2 = parser.read_file()

        expected_catalogue = Catalogue()
        expected_catalogue.data['eventID'] = np.array([1, 3, 5])
        expected_catalogue.data['magnitude'] = np.array([5.6, 4.8, 5.0])
        expected_catalogue.data['year'] = np.array([1960, 1970, 1990])
        expected_catalogue.data['ErrorStrike'] = np.array([np.nan, np.nan,
                                                           np.nan])
        self.check_catalogues_are_equal(expected_catalogue, cat2)
Example #7
    def test_catalogue_writer_only_flag_purging(self):
        '''
        Tests the writer only purging according to the flag
        '''
        # Write to file
        writer = CsvCatalogueWriter(self.output_filename)
        writer.write_file(self.catalogue, flag_vector=self.flag)
        parser = CsvCatalogueParser(self.output_filename)
        cat2 = parser.read_file()

        expected_catalogue = Catalogue()
        expected_catalogue.data['eventID'] = np.array([1, 2, 3, 4])
        expected_catalogue.data['magnitude'] = np.array([5.6, 5.4, 4.8, 4.3])
        expected_catalogue.data['year'] = np.array([1960, 1965, 1970, 1980])
        expected_catalogue.data['ErrorStrike'] = np.array([np.nan, np.nan, 
                                                           np.nan, np.nan])
        self.check_catalogues_are_equal(expected_catalogue, cat2)
Example #8
    def test_catalogue_writer_both_purging(self):
        '''
        Tests the writer only purging according to the magnitude table and
        the flag vector
        '''
        # Write to file
        writer = CsvCatalogueWriter(self.output_filename)
        writer.write_file(self.catalogue,
                          flag_vector=self.flag,
                          magnitude_table=self.magnitude_table)
        parser = CsvCatalogueParser(self.output_filename)
        cat2 = parser.read_file()

        expected_catalogue = Catalogue()
        expected_catalogue.data['eventID'] = ['1', '3']
        expected_catalogue.data['magnitude'] = np.array([5.6, 4.8])
        expected_catalogue.data['year'] = np.array([1960, 1970])
        expected_catalogue.data['ErrorStrike'] = np.array([np.nan, np.nan])
        self.check_catalogues_are_equal(expected_catalogue, cat2)
Example #9
    def test_catalogue_writer_both_purging(self):
        '''
        Tests the writer only purging according to the magnitude table and
        the flag vector
        '''
        # Write to file
        writer = CsvCatalogueWriter(self.output_filename)
        writer.write_file(self.catalogue,
                          flag_vector=self.flag,
                          magnitude_table=self.magnitude_table)
        parser = CsvCatalogueParser(self.output_filename)
        cat2 = parser.read_file()

        expected_catalogue = Catalogue()
        expected_catalogue.data['eventID'] = np.array([1, 3])
        expected_catalogue.data['magnitude'] = np.array([5.6, 4.8])
        expected_catalogue.data['year'] = np.array([1960, 1970])
        expected_catalogue.data['ErrorStrike'] = np.array([np.nan, np.nan])
        self.check_catalogues_are_equal(expected_catalogue, cat2)
Example #10
    # Sort catalogue chronologically
    catalogue.sort_catalogue_chronologically()
    print('Catalogue sorted chronologically!')

    valid_magnitudes = np.logical_not(np.isnan(catalogue.data['magnitude']))
    catalogue.select_catalogue_events(valid_magnitudes)
    valid_magnitudes = catalogue.data['magnitude'] >= 3.0
    catalogue.select_catalogue_events(valid_magnitudes)
    # print(catalogue.data['magnitude'])

    valid_depths = np.logical_not(np.isnan(catalogue.data['depth']))
    catalogue.select_catalogue_events(valid_depths)

    # Set-up the file writer
    output_file_name = 'data_input/hmtk_sa3.csv'
    writer = CsvCatalogueWriter(output_file_name)
    # Write the catalogue to file
    writer.write_file(catalogue)
    #exit()

    print('File %s written' % output_file_name)
    with open(input_catalogue_file + ".pkl", 'wb') as f:
        pickle.dump(catalogue, f)

# Set up the declustering algorithm
# Step 1 - set-up the tool
gardner_knopoff = GardnerKnopoffType1()

declust_config = {
    'time_distance_window': GardnerKnopoffWindow(),
    'fs_time_prop': 1.0
}
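The snippet breaks off just before the algorithm runs; following Example #1, the declustering call itself would look like:

cluster_index, flag_vector = gardner_knopoff.decluster(catalogue, declust_config)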
Example #11
def decluster_catalogue(catalogue, config):

    ###
    ###    Catalogue cache or read/cache
    ###

    # Set up the declustering algorithm
    # Step 1 - set-up the tool
    if config['decluster_method'] == 'afteran':
        decluster_method = Afteran()
    elif config['decluster_method'] == 'gardner_knopoff':
        decluster_method = GardnerKnopoffType1()
    else:
        print "invalid decluster_method configuration: use [afteran|gardner_knopoff]"    
        return None 
    
    
    print('Running declustering ...')
    cluster_vector, flag_vector = decluster_method.decluster(catalogue, config)
    print('done!')
    print('%s clusters found' % np.max(cluster_vector))
    print('%s non-Poissonian events identified' % np.sum(flag_vector != 0))

    
    if config['plot']:
        ### 
        ###    Map Config 
        ###
        
        map_dpi = 90
        add_geology = True
        add_sourcemodel = True
        savefig = False
        
        #map_title = 'Brazilian Seismic Zones'
        map_title = 'Clusters'
        #map_title = 'ISC-GEM Catalogue'
        #map_title = 'South-American Lithology'
        
        
        # Configure the limits of the map and the coastline resolution
        map_config = {'min_lon': -80.0, 'max_lon': -30.0, 'min_lat': -37.0, 'max_lat': 14.0, 'resolution':'l'}
        #map_config = {'min_lon': -72.0, 'max_lon': -68.0, 'min_lat': -22.0, 'max_lat': -18.0, 'resolution':'l'}
        #map_config = {'min_lon': -95.0, 'max_lon': -25.0, 'min_lat': -65.0, 'max_lat': 25.0, 'resolution':'l'}
        
        basemap = HMTKBaseMap(map_config, map_title, dpi=map_dpi)       
        #basemap.add_catalogue(catalogue, linewidth=0.2, alpha=0.1, overlay=True)
    
        idx = cluster_vector != 0
        x = catalogue.data['longitude'][idx]
        y = catalogue.data['latitude'][idx]
        c = cluster_vector[idx]
        
        basemap.add_colour_scaled_points(x, y, c, 
                                         overlay=True,
                                         shape='s', alpha=0.5, size=100, 
                                         linewidth=0.5, facecolor='none', 
                                         cmap=plt.cm.get_cmap('Paired'),
                                         )
    
        plt.show()

        if config['figname']:
            basemap.savemap(config['figname'])

    
    print('Original catalogue had %s events' % catalogue.get_number_events())
    catalogue.select_catalogue_events(flag_vector == 0)
    print('Purged catalogue now contains %s events' % catalogue.get_number_events())

    if config['filename']:
        writer = CsvCatalogueWriter(config['filename'])
        writer.write_file(catalogue)
    
    return catalogue
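A hedged sketch of a config dict accepted by this function, combining its own keys with the window parameters used in Example #1 (the output filename is hypothetical):

config = {
    'decluster_method': 'gardner_knopoff',
    'time_distance_window': GardnerKnopoffWindow(),
    'fs_time_prop': 1.0,
    'plot': False,
    'figname': None,
    'filename': 'declustered_catalogue.csv',  # hypothetical output path
}
declustered = decluster_catalogue(catalogue, config)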
Example #12
completeness = np.array([[1980., 3.0], [1985., 4.0], [1964., 5.0],
                         [1910., 6.5]])
plot_observed_recurrence(catalogue, completeness, 0.2, catalogue.end_year)

# Limit the catalogue to depths less than 50 km
valid_depth = catalogue.data['depth'] <= 50.
catalogue.select_catalogue_events(valid_depth)
plot_depth_histogram(catalogue, 2.0)

# Set-up the file writer
output_file_name = 'data_output/basic_demo_catalogue_1.csv'
writer = CsvCatalogueWriter(output_file_name)

# Write the catalogue to file
writer.write_file(catalogue)

print('File %s written' % output_file_name)

completeness = np.array([[1985., 4.0], [1964., 5.0], [1910., 6.5]])
# Set-up the exporter
output_file_name = 'data_output/basic_demo_catalogue_complete_1.csv'
writer = CsvCatalogueWriter(output_file_name)

# Write the catalogue to file, purging events from the incomplete period
writer.write_file(catalogue, magnitude_table=completeness)
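To verify the purge, the file can be read back with the parser used in the earlier test examples:

parser = CsvCatalogueParser(output_file_name)
complete_catalogue = parser.read_file()
print('%s events remain in the complete period' % complete_catalogue.get_number_events())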
Example #13
def decluster_SCR(method, cat, deblastOnly):

    # set flag for dependent events
    flagvector = np.zeros(len(cat.data['magnitude']), dtype=int)

    #########################################################################
    # call declustering
    #########################################################################
    # use "method" to decluster
    if not deblastOnly:
        # flag aftershocks
        doAftershocks = True
        flagvector_as = flag_dependent_events(cat, flagvector, doAftershocks,
                                              method)

        # flag foreshocks
        doAftershocks = False
        flagvector_asfs = flag_dependent_events(cat, flagvector_as,
                                                doAftershocks, method)

        # now find manually picked foreshocks/aftershocks (1 = fore/aftershock; 2 = blast/coal)
        idx = np.where(cat.data['flag'] >= 1)[0]
        flagvector_asfsman = flagvector_asfs
        flagvector_asfsman[idx] = 1

    # else remove coal & blast events only
    else:
        idx = np.where(cat.data['flag'] == 2)[0]
        flagvector_asfsman = flagvector
        flagvector_asfsman[idx] = 1

    #########################################################################
    # purge non-poissonian events
    #########################################################################

    # adding cluster flag to the catalog
    cat.data['cluster_flag'] = flagvector_asfsman

    # create a copy from the catalogue object to preserve it
    catalogue_l08 = deepcopy(cat)

    catalogue_l08.purge_catalogue(
        flagvector_asfsman == 0)  # cluster_flags == 0: mainshocks

    print('Leonard 2008\tbefore: %s after: %s' % (
        cat.get_number_events(), catalogue_l08.get_number_events()))

    #####################################################
    # write declustered catalogue
    #####################################################

    # set up the writer (hmtk_csv is the input CSV path, assumed to be
    # defined at module level)
    declustered_catalog_file = '.'.join(
        hmtk_csv.split('.')[0:2]) + '_declustered_test.csv'

    # if it exists, delete previous file
    try:
        remove(declustered_catalog_file)
    except OSError:
        print('%s does not exist' % declustered_catalog_file)

    # set-up writer
    writer = CsvCatalogueWriter(declustered_catalog_file)

    # write
    writer.write_file(catalogue_l08)

    print('Declustered catalogue: ok\n')

    return declustered_catalog_file
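A hedged sketch of driving this routine (flag_dependent_events and hmtk_csv come from the surrounding module; the method name is illustrative, and cat must carry the manual 'flag' column the function reads):

parser = CsvCatalogueParser(hmtk_csv)
cat = parser.read_file()
declustered_file = decluster_SCR('GardnerKnopoff', cat, deblastOnly=False)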
Example #14
    # Sort catalogue chronologically
    catalogue.sort_catalogue_chronologically()
    print('Catalogue sorted chronologically!')

    valid_magnitudes = np.logical_not(np.isnan(catalogue.data['magnitude']))
    catalogue.select_catalogue_events(valid_magnitudes)
    valid_magnitudes = catalogue.data['magnitude'] >= 3.0
    catalogue.select_catalogue_events(valid_magnitudes)
    # print(catalogue.data['magnitude'])
     
    valid_depths = np.logical_not(np.isnan(catalogue.data['depth']))
    catalogue.select_catalogue_events(valid_depths)
     
    # Set-up the file writer
    output_file_name = input_catalogue_file + ".csv"
    writer = CsvCatalogueWriter(output_file_name)
    # Write the catalogue to file
    writer.write_file(catalogue)
    #exit()
    
    print('File %s written' % output_file_name)
    with open(input_catalogue_file + ".pkl", 'wb') as f:
        pickle.dump(catalogue, f)

Example #15
X = catalogue.data['depth']

#####################################################
# write declustered catalogue
#####################################################

# set up the writer (inputsheef is the input CSV path, assumed to be
# defined earlier in the script)
declustered_catalog_file = inputsheef.replace('hmtk.csv',
                                              '') + 'hmtk_declustered.csv'

# if it exists, delete previous file
try:
    remove(declustered_catalog_file)
except OSError:
    print('no purged file...')

# set-up writer
writer = CsvCatalogueWriter(declustered_catalog_file)

# write
writer.write_file(catalogue_gk)
#writer.write_file(catalogue_af)
print('Declustered catalogue: ok\n')

# setup the writer
removed_catalog_file = inputsheef.replace('hmtk.csv', '') + 'hmtk_purged.csv'

# if it exists, delete previous file
try:
    remove(removed_catalog_file)
except OSError:
    print('no purged file...')
Example #16
    # Sort catalogue chronologically
    catalogue.sort_catalogue_chronologically()
    print "Catalogue sorted chronologically!"

    valid_magnitudes = np.logical_not(np.isnan(catalogue.data["magnitude"]))
    catalogue.select_catalogue_events(valid_magnitudes)
    valid_magnitudes = catalogue.data["magnitude"] >= 3.0
    catalogue.select_catalogue_events(valid_magnitudes)
    # print(catalogue.data["magnitude"])

    valid_depths = np.logical_not(np.isnan(catalogue.data["depth"]))
    catalogue.select_catalogue_events(valid_depths)

    # Set-up the file writer
    output_file_name = "data_input/hmtk_sa3.csv"
    writer = CsvCatalogueWriter(output_file_name)
    # Write the catalogue to file
    writer.write_file(catalogue)
    # exit()

    print "File %s written" % output_file_name
    f = open(input_catalogue_file + ".pkl", "wb")
    pickle.dump(catalogue, f)
    f.close()


###
###    Source Model
###