Example #1
def decluster_GK74(catalogue):
    from copy import deepcopy
    from os import remove

    from hmtk.seismicity.declusterer.dec_gardner_knopoff import GardnerKnopoffType1
    from hmtk.seismicity.declusterer.distance_time_windows import GardnerKnopoffWindow
    from hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueWriter

    decluster_config = {
        'time_distance_window': GardnerKnopoffWindow(),
        'fs_time_prop': 1.0
    }

    #####################################################
    # decluster here
    #####################################################

    print('Running GK declustering...')
    decluster_method = GardnerKnopoffType1()

    #---------------------------------------------
    # declustering
    cluster_index_gk, cluster_flags_gk = decluster_method.decluster(
        catalogue, decluster_config)
    #---------------------------------------------

    # Add the cluster indices and cluster flags (mainshock vs. fore/aftershock)
    # to the catalogue's data dictionary
    catalogue.data['cluster_index_gk'] = cluster_index_gk
    catalogue.data['cluster_flags_gk'] = cluster_flags_gk

    #####################################################
    # purge non-Poissonian events
    #####################################################

    # create a copy of the catalogue object to preserve the original
    catalogue_gk = deepcopy(catalogue)

    catalogue_gk.purge_catalogue(
        cluster_flags_gk == 0)  # flag 0 marks mainshocks

    print('Gardner-Knopoff\tbefore: %s after: %s' % (
        catalogue.get_number_events(), catalogue_gk.get_number_events()))

    #####################################################
    # write declustered catalogue
    #####################################################

    # build the output file name; hmtk_csv (the input catalogue CSV path)
    # is assumed to be defined in the enclosing scope
    declustered_catalog_file = hmtk_csv.split('.')[0] + '_declustered_GK74.csv'

    # if a previous output file exists, delete it
    try:
        remove(declustered_catalog_file)
    except OSError:
        print('%s does not exist' % declustered_catalog_file)

    # set-up writer
    writer = CsvCatalogueWriter(declustered_catalog_file)

    # write
    writer.write_file(catalogue_gk)
    print('Declustered catalogue: ok\n')
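
A minimal driver for decluster_GK74 could look like the sketch below; the file name is a placeholder, and hmtk_csv is the module-level variable the function reads when building its output path:

# sketch: load a catalogue with the hmtk CSV parser, then decluster it
from hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser

hmtk_csv = 'catalogue.csv'  # placeholder input path, also read by decluster_GK74
parser = CsvCatalogueParser(hmtk_csv)
catalogue = parser.read_file()
decluster_GK74(catalogue)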
Example #2
def test_dec_gardner_knopoff(self):
    """
    Tests the Gardner and Knopoff declustering algorithm
    """
    config = {
        'time_distance_window': GardnerKnopoffWindow(),
        'fs_time_prop': 1.0
    }
    # Instantiate the declusterer and process the sample catalogue
    dec = GardnerKnopoffType1()
    vcl, flagvector = dec.decluster(self.cat, config)
    print('vcl:', vcl)
    print('flagvector:', flagvector, self.cat.data['flag'])
    # the computed flags must match the expected flags stored in the catalogue
    self.assertTrue(np.allclose(flagvector, self.cat.data['flag']))
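
The test relies on a fixture catalogue whose expected flags are stored under the 'flag' key. A plausible setUp (a sketch, not the library's actual test harness; the file name is hypothetical) could build it with the standard CSV parser:

def setUp(self):
    # hypothetical fixture: parse a sample catalogue that carries the
    # expected declustering flags in a 'flag' column
    parser = CsvCatalogueParser('gardner_knopoff_test_catalogue.csv')
    self.cat = parser.read_file()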
Example #3
    # Set-up the file writer
    output_file_name = 'data_input/hmtk_sa3.csv'
    writer = CsvCatalogueWriter(output_file_name)
    # Write the catalogue to file
    writer.write_file(catalogue)

    print('File %s written' % output_file_name)
    # cache the parsed catalogue as a pickle; input_catalogue_file is assumed
    # to be defined earlier in the original script
    with open(input_catalogue_file + ".pkl", 'wb') as f:
        pickle.dump(catalogue, f)

# Set up the declustering algorithm
# Step 1 - set-up the tool
gardner_knopoff = GardnerKnopoffType1()

declust_config = {
    'time_distance_window': GardnerKnopoffWindow(),
    'fs_time_prop': 1.0
}
print(declust_config)

print('Running declustering ...')
vcl, flag_vector = gardner_knopoff.decluster(catalogue, declust_config)
print('done!')
print('%s clusters found' % np.max(vcl))
print('%s non-Poissonian events identified' % np.sum(flag_vector != 0))
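
Here vcl assigns each event a cluster index, with 0 meaning the event belongs to no cluster, so individual clusters can be inspected directly; a small sketch:

# sketch: find the largest cluster identified by the run above
counts = np.bincount(vcl.astype(int))
counts[0] = 0  # index 0 marks events outside any cluster
largest = np.argmax(counts)
print('largest cluster: %s with %s events' % (largest, counts[largest]))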

Example #4
def decluster_catalogue(catalogue, config):
    
    
    ### 
    ###    Catalogue cache or read/cache
    ###

    # Set up the declustering algorithm
    # Step 1 - set-up the tool
    if config['decluster_method'] == 'afteran':
        decluster_method = Afteran()
    elif config['decluster_method'] == 'gardner_knopoff':
        decluster_method = GardnerKnopoffType1()
    else:
        print "invalid decluster_method configuration: use [afteran|gardner_knopoff]"    
        return None 
    
    
    print('Running declustering ...')
    cluster_vector, flag_vector = decluster_method.decluster(catalogue, config)
    print('done!')
    print('%s clusters found' % np.max(cluster_vector))
    print('%s non-Poissonian events identified' % np.sum(flag_vector != 0))

    
    if config['plot']:
        ### 
        ###    Map Config 
        ###
        
        map_dpi = 90
        add_geology = True
        add_sourcemodel = True
        savefig = False
        
        #map_title = 'Brazilian Seismic Zones'
        map_title = 'Clusters'
        #map_title = 'ISC-GEM Catalogue'
        #map_title = 'South-American Lithology'
        
        
        # Configure the limits of the map and the coastline resolution
        map_config = {'min_lon': -80.0, 'max_lon': -30.0, 'min_lat': -37.0, 'max_lat': 14.0, 'resolution':'l'}
        #map_config = {'min_lon': -72.0, 'max_lon': -68.0, 'min_lat': -22.0, 'max_lat': -18.0, 'resolution':'l'}
        #map_config = {'min_lon': -95.0, 'max_lon': -25.0, 'min_lat': -65.0, 'max_lat': 25.0, 'resolution':'l'}
        
        basemap = HMTKBaseMap(map_config, map_title, dpi=map_dpi)       
        #basemap.add_catalogue(catalogue, linewidth=0.2, alpha=0.1, overlay=True)
    
        idx = cluster_vector != 0
        x = catalogue.data['longitude'][idx]
        y = catalogue.data['latitude'][idx]
        c = cluster_vector[idx]
        
        basemap.add_colour_scaled_points(x, y, c, 
                                         overlay=True,
                                         shape='s', alpha=0.5, size=100, 
                                         linewidth=0.5, facecolor='none', 
                                         cmap=plt.cm.get_cmap('Paired'),
                                         )
    
        plt.show()

        if config['figname']:
            basemap.savemap(config['figname'])

    
    print('Original catalogue had %s events' % catalogue.get_number_events())
    catalogue.select_catalogue_events(flag_vector == 0)
    print('Purged catalogue now contains %s events' % catalogue.get_number_events())

    if config['filename']:
        writer = CsvCatalogueWriter(config['filename'])
        writer.write_file(catalogue)
    
    return catalogue
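
The config dictionary drives the whole routine: 'decluster_method' selects the algorithm, the window parameters are passed straight through to decluster(), and 'plot', 'figname' and 'filename' control the optional outputs. A minimal invocation might look like this sketch (file names are placeholders):

decluster_cfg = {
    'decluster_method': 'gardner_knopoff',
    'time_distance_window': GardnerKnopoffWindow(),
    'fs_time_prop': 1.0,
    'plot': False,       # skip the cluster map
    'figname': None,
    'filename': 'catalogue_declustered_GK.csv',  # placeholder output path
}
declustered = decluster_catalogue(catalogue, decluster_cfg)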
# parse the HMTK catalogue; hmtkfile (the catalogue CSV path) is assumed
# to be defined earlier in the original script
inputsheef = path.join(hmtkfile)
parser = CsvCatalogueParser(inputsheef)
catalogue = parser.read_file()

decluster_config = {
    'time_distance_window': GardnerKnopoffWindow(),
    'fs_time_prop': 1.0
}

#####################################################
# decluster here
#####################################################

print('Running GK declustering...')
decluster_method = GardnerKnopoffType1()

#---------------------------------------------
# declustering
cluster_index_gk, cluster_flags_gk = decluster_method.decluster(
    catalogue, decluster_config)
#---------------------------------------------

# Add the cluster indices and cluster flags (mainshock vs. fore/aftershock)
# to the catalogue's data dictionary
catalogue.data['cluster_index_gk'] = cluster_index_gk
catalogue.data['cluster_flags_gk'] = cluster_flags_gk

#####################################################
# purge non-Poissonian events
#####################################################
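
By analogy with Example #1, the purge step would continue along these lines (a sketch):

# sketch: keep a pristine copy, then retain only the mainshocks
from copy import deepcopy

catalogue_gk = deepcopy(catalogue)
catalogue_gk.purge_catalogue(cluster_flags_gk == 0)  # flag 0 marks mainshocks
print('Gardner-Knopoff\tbefore: %s after: %s' % (
    catalogue.get_number_events(), catalogue_gk.get_number_events()))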