Example #1
def build_model(combined_output_dir, pt_source_model, fsm, source_model_name,
                fault_mesh_spacing):
    """Combine the fault and pt source model"""

    #   additive_pt_sources_filename =  pt_source_model[:-4] + '_pts.xml'
    print('Reading point sources')
    additive_pt_sources = read_pt_source(pt_source_model)
    # Apply additive approach
    if not os.path.exists(combined_output_dir):
        os.makedirs(combined_output_dir)
    print('Writing full additive model')
    outfile = os.path.join(combined_output_dir, source_model_name)
    fault_sources = read_simplefault_source(
        fsm, rupture_mesh_spacing=fault_mesh_spacing)
    write_combined_faults_points(additive_pt_sources,
                                 fault_sources,
                                 outfile,
                                 source_model_name,
                                 nrml_version='04')
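
# A minimal usage sketch for build_model; all paths and names below are
# hypothetical, for illustration only:
#
# build_model('combined_model', 'area_sources_collapsed_pts.xml',
#             'fault_source_model.xml', 'NFSM18_combined.xml',
#             fault_mesh_spacing=2)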
Example #2

# Truncated import in the original snippet; read_pt_source,
# read_simplefault_source and combine_pt_sources come from the project's
# utility module (the exact import path was not preserved).
from glob import glob

source_model_name = 'National_Fault_Source_Model_2018_Collapsed_AUS6'
#area_source_model = '../zones/2012_mw_ge_4.0/NSHA13/input/collapsed/NSHA13_collapsed.xml'
area_source_model = '../zones/2012_mw_ge_4.0/AUS6/input/collapsed/AUS6_collapsed.xml'
geom_pt_sources_filename = area_source_model[:-4] + '_pts_geom_weighted.xml'

tmp_pt_source_filenames = glob(
    geom_pt_sources_filename[:-4] + '_*.xml')
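# Note: str.rstrip('.xml') strips any trailing run of the characters
# '.', 'x', 'm' and 'l' rather than the literal '.xml' suffix (e.g.
# 'model.xml'.rstrip('.xml') gives 'mode'), so the extension is dropped
# with slicing ([:-4]) here and in the snippets below.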
num_files = len(tmp_pt_source_filenames) // 2
tmp_pt_source_filename_list = []
tmp_pt_source_list = []
# Now combine into one file
for j in range(num_files):
    tmp_pt_filename = geom_pt_sources_filename[:-4] + '_%03d.xml' % j
    tmp_pt_source_filename_list.append(tmp_pt_filename)
for tmp_pt_source_file in tmp_pt_source_filename_list:
    print('Reading %s' % tmp_pt_source_file)
    tmp_pt_source = read_pt_source(tmp_pt_source_file)
    tmp_pt_source_list.append(tmp_pt_source)
merged_filename = geom_pt_sources_filename[:-4] + '_merged_parallel.xml'
model_name = geom_pt_sources_filename[:-4]
combine_pt_sources(tmp_pt_source_list,
                   merged_filename,
                   model_name,
                   nrml_version='04',
                   id_location_flag=None)
Example #3
    # Alternative (commented-out) configuration for the adaptive model:
    # filename = 'source_model_Australia_Adaptive_K3_merged_inc_b_mmax_uncert_v1.xml'
    # name = filename[:-4]
    #
    # read list of files
    # pt_source_model_list = []
    # for point_source_model in point_source_list:
    #     print('Reading %s' % point_source_model)
    #     pt_model = read_pt_source(point_source_model)
    #     pt_source_model_list.append(pt_model)
    # combine_pt_sources(pt_source_model_list, filename, name, nrml_version='04',
    #                    id_location_flag='id')

    outfile_bestb = 'smoothed_frankel_50_3_mmin_3.0_merged_bestb.xml'
    outfile_upperb = 'smoothed_frankel_50_3_mmin_3.0_merged_upperb.xml'
    outfile_lowerb = 'smoothed_frankel_50_3_mmin_3.0_merged_lowerb.xml'
    point_source_list = [outfile_bestb, outfile_upperb, outfile_lowerb]
    filename = 'source_model_smoothed_frankel_50_3_mmin_3.0_merged_inc_b_mmax_uncert_v1.xml'
    name = filename[:-4]

    # read list of files
    pt_source_model_list = []
    for point_source_model in point_source_list:
        print('Reading %s' % point_source_model)
        pt_model = read_pt_source(point_source_model)
        pt_source_model_list.append(pt_model)
    combine_pt_sources(pt_source_model_list,
                       filename,
                       name,
                       nrml_version='04',
                       id_location_flag='id')
Example #4
from openquake.hazardlib.geo.nodalplane import NodalPlane
from openquake.hazardlib.pmf import PMF
from openquake.hazardlib.mfd.evenly_discretized import EvenlyDiscretizedMFD

infiles = [
    'Australia_Adaptive_K4_b1.198.xml', 'Australia_Adaptive_K4_b1.352.xml',
    'Australia_Adaptive_K4_b1.043.xml'
]
domains_shp = ('../zones/2012_mw_ge_4.0/NSHA13_Background/shapefiles/'
               'NSHA13_BACKGROUND_NSHA18_MFD.shp')

# Create test datasets
create_test_data = False
if create_test_data:
    for pt_file in infiles:
        pt_sources = read_pt_source(pt_file)
        pt_sources = pt_sources[:50]
        name = 'test'
        nodes = list(map(obj_to_node, sorted(pt_sources)))
        source_model = Node("sourceModel", {"name": name}, nodes=nodes)
        outfile = pt_file[:-4] + '_testdata.xml'
        with open(outfile, 'wb') as f:
            nrml.write([source_model], f, '%s', xmlns=NAMESPACE)
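# nrml.write serialises the Node list to NRML XML; NAMESPACE is presumably
# imported from openquake.hazardlib.nrml in the full script.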
lt = logic_tree.LogicTree(
    '../../shared/seismic_source_model_weights_rounded_p0.4.csv')
filedict_bestb = {
    'Non_cratonic': 'Australia_Adaptive_K4_b1.198_testdata.xml',
    'Cratonic': 'Australia_Adaptive_K4_b1.198_testdata.xml',
    'Extended': 'Australia_Adaptive_K4_b1.198_testdata.xml'
}
Example #5

myid = pypar.rank()  # id of this process (myid in [0, proc-1])
node = pypar.get_processor_name()  # host name of the current process
# 'proc' (the total process count, pypar.size()) is assumed to be defined
# earlier in the original script.
print('I am proc %d of %d on node %s' % (myid, proc, node))
#nruns = 320 # currently hard coded - need to improve this
t0 = pypar.time()

fault_mesh_spacing = 2  # fault source mesh spacing (km)
rupture_mesh_spacing = 2  # area source mesh spacing (km); previously 10
area_source_discretisation = 15  # previously 20
source_model_name = 'National_Fault_Source_Model_2018_Collapsed_DIMAUS_2018'
#area_source_model = '../zones/2018_mw/NSHA13/input/collapsed/NSHA13_collapsed.xml'
area_source_model = '../zones/2018_mw/DIMAUS/input/collapsed/DIMAUS_collapsed.xml'
geom_pt_sources_filename = area_source_model[:-4] + '_pts_geom_weighted.xml'
geom_pt_sources = read_pt_source(geom_pt_sources_filename)


def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]
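# For example:
#     list(chunks([1, 2, 3, 4, 5], 2))  ->  [[1, 2], [3, 4], [5]]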


# Split sources
list_length = len(geom_pt_sources) // (proc * 10)
print(list_length)
if (len(geom_pt_sources) % proc) > 0:
    list_length += 1
pt_list = list(chunks(geom_pt_sources, list_length))
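# e.g. with 1000 point sources and proc = 8: list_length = 1000 // 80 = 12
# (and 1000 % 8 == 0, so no increment), giving ceil(1000 / 12) = 84 chunks,
# i.e. roughly ten chunks per process.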
#print pt_list
Example #6
model_name = area_source_model.split('/')[-1][:-4] + '_geom_filter'
print('Writing %s' % model_name)
geom_pt_sources = weighted_pt_source(pt_source_list,
                                     total_geom_weight,
                                     model_name,
                                     geom_pt_sources_filename,
                                     nrml_version='04')
print('Exiting here')
sys.exit()
# Apply geometrical filtering
print('Applying geometrical filtering - this should be pre-calculated using run_geom_filter.sh!')
fsm = os.path.join(source_model_name, source_model_name + '_geom_filtered.xml')
fault_sources = read_simplefault_source(
    fsm, rupture_mesh_spacing=fault_mesh_spacing)
geom_filtered_pt_source_file = area_source_model[:-4] + '_pts_geom_filtered.xml'
geom_filtered_pt_sources = read_pt_source(geom_filtered_pt_source_file)
#pt2fault_distance(geom_pt_sources, fault_sources, min_distance=5.0,
#                  filename=geom_filtered_pt_source_file,
#                  buffer_distance = 100.,
#                  name=source_model_name)
outfile = os.path.join(source_model_name,
                       source_model_name + '_geom_filtered_zone.xml')
write_combined_faults_points(geom_filtered_pt_sources,
                             fault_sources,
                             outfile,
                             model_name,
                             nrml_version='04')

# Apply additive approach
print('Writing full additive model')
fsm = os.path.join(source_model_name, source_model_name + '_additive.xml')
Example #7
def combine_ss_models(filename_stem, domains_shp, params, lt, bval_key,
                      output_dir='./', nrml_version='04', weight=1.):
    """Combine smoothed seismicity models based on tectonic region types.

    :params filename_stem:
        string for the start of the xml filename for the source model,
        assuming generic components (non-generic parts, e.g. the b-value
        and completeness model, are inferred)
    :params domains_shp:
        shapefile defining tectonic domain regions
    :params params:
        list of dicts containing parameters derived from the shapefile
    :params bval_key:
        key for the dicts in params, as we are merging by
        b-values (best, lower, upper)
    :params lt:
        LogicTree object containing relevant values and weights for Mmax
    :params output_dir:
        directory in which the output nrml formatted file is written
    """

    dsf = shapefile.Reader(domains_shp)
    dom_shapes = dsf.shapes()
    # Get indices of relevant fields
    for i, f in enumerate(dsf.fields):
        if f[0] == 'CODE':
            code_index = i - 1
        if f[0] == 'TRT':
            trt_index = i - 1

    hypo_depth_dist_nc = PMF([(0.5, 10.0),
                              (0.25, 5.0),
                              (0.25, 15.0)])
    hypo_depth_dist_c = PMF([(0.5, 5.0),
                             (0.25, 2.5),
                             (0.25, 10.0)])
    hypo_depth_dist_ex = hypo_depth_dist_c
    hypo_depth_dict = {'Cratonic': hypo_depth_dist_c,
                       'Non_cratonic': hypo_depth_dist_nc,
                       'Extended': hypo_depth_dist_ex}
    # FIXME! - Temporary solution until nodal plane logic tree
    # info can be read directly from shapefile attributes
    nodal_plane_dist = PMF([(0.3, NodalPlane(0, 30, 90)),
                            (0.2, NodalPlane(90, 30, 90)),
                            (0.3, NodalPlane(180, 30, 90)),
                            (0.2, NodalPlane(270, 30, 90))])
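    # Each PMF entry is a (weight, value) pair and openquake requires the
    # weights to sum to 1.0 (here 0.3 + 0.2 + 0.3 + 0.2 = 1.0).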

    merged_pts = []
    pt_ids = []

    # Get mmax values and weights
    mmaxs = {}
    mmaxs_w = {}
    for dom in params:
        print('Processing source %s' % dom['CODE'])
        print(dom['TRT'])
        if dom['TRT'] == 'NCratonic' or dom['TRT'] == 'Extended':
            dom['TRT'] = 'Non_cratonic'
        # For the moment, only consider regions within Australia
        if dom['TRT'] == 'Active' or dom['TRT'] == 'Interface' or \
                dom['TRT'] == 'Oceanic' or \
                dom['TRT'] == 'Intraslab' or dom['CODE'] == 'NECS' or \
                dom['CODE'] == 'NWO':
            print('Source %s not on continental Australia, skipping' % dom['CODE'])
            continue
        elif dom['TRT'] == 'Cratonic':
            if dom['DOMAIN'] == 1:
                mmax_values, mmax_weights = lt.get_weights('Mmax', 'Archean')
            else:
                mmax_values, mmax_weights = lt.get_weights('Mmax', 'Proterozoic')
#        elif dom['TRT'] == 'Active':
#            print 'MMax logic tree not yet defined for active crust, using extended crust'
#            mmax_values, mmax_weights = lt.get_weights('Mmax', 'Extended')
        else:
            mmax_values, mmax_weights = lt.get_weights('Mmax', dom['TRT'])
        mmax_values = [float(i) for i in mmax_values]
        mmax_weights = [float(i) for i in mmax_weights]
        print(mmax_values)
        print(mmax_weights)
        mmaxs[dom['CODE']] = mmax_values
        mmaxs_w[dom['CODE']] = mmax_weights

        completeness_table = np.array([dom['COMPLETENESS'][0]])
        completeness_string = 'comp'
        for ym in completeness_table:
            completeness_string += '_%i_%.1f' % (ym[0], ym[1])
        mmin = dom['COMPLETENESS'][0][1]
        filename = "%s_b%.3f_mmin_%.1f_0.1%s.xml" % (
            filename_stem, dom[bval_key], mmin,
            completeness_string)
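        # e.g. dom[bval_key] = 1.043 and COMPLETENESS[0] = (1960, 3.0) give
        # completeness_string 'comp_1960_3.0' and a filename of the form
        # '<filename_stem>_b1.043_mmin_3.0_0.1comp_1960_3.0.xml'.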
        print('Parsing %s' % filename)
        # Only keep points within domain
        pts = read_pt_source(filename)
        for shape in dsf.shapeRecords():
            print(shape.record[code_index])
            if shape.record[code_index] == dom['CODE']:
                # Check for undefined depths (-999 values)
                if dom['DEP_BEST'] < 0:
                    print('Setting best depth to 10 km')
                    dom['DEP_BEST'] = 10
                if dom['DEP_UPPER'] < 0:
                    print('Setting upper depth to 5 km')
                    dom['DEP_UPPER'] = 5
                if dom['DEP_LOWER'] < 0:
                    print('Setting lower depth to 15 km')
                    dom['DEP_LOWER'] = 15
                hypo_depth_dist = PMF([(0.5, dom['DEP_BEST']),
                                       (0.25, dom['DEP_LOWER']),
                                       (0.25, dom['DEP_UPPER'])])
                # Define nodal planes as thrusts except for special cases
                str1 = dom['SHMAX'] + 90.
                str2 = dom['SHMAX'] + 270.
                str3 = dom['SHMAX'] + dom['SHMAX_SIG'] + 90.
                str4 = dom['SHMAX'] + dom['SHMAX_SIG'] + 270.
                str5 = dom['SHMAX'] - dom['SHMAX_SIG'] + 90.
                str6 = dom['SHMAX'] - dom['SHMAX_SIG'] + 270.
                strikes = [str1, str2, str3, str4, str5, str6]
                for i, strike in enumerate(strikes):
                    if strike >= 360:
                        strikes[i] = strike - 360
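                # e.g. SHMAX = 100 and SHMAX_SIG = 15 give strikes of
                # 190, 10 (370 - 360), 205, 25 (385 - 360), 175 and 355.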
                nodal_plane_dist = PMF([(0.34, NodalPlane(strikes[0], 30, 90)),
                                        (0.34, NodalPlane(strikes[1], 30, 90)),
                                        (0.08, NodalPlane(strikes[2], 30, 90)),
                                        (0.08, NodalPlane(strikes[3], 30, 90)),
                                        (0.08, NodalPlane(strikes[4], 30, 90)),
                                        (0.08, NodalPlane(strikes[5], 30, 90))])
                if dom['CODE'] == 'WARM' or dom['CODE'] == 'WAPM':
                    print('Define special case for WARM')
                    nodal_plane_dist = PMF([(0.75, NodalPlane(45, 90, 0)),
                                            (0.125, NodalPlane(strikes[0], 30, 90)),
                                            (0.125, NodalPlane(strikes[1], 30, 90))])
                if dom['CODE'] == 'FMLR':
                    print('Define special case for FMLR, 0.5 thrust, 0.5 SS')
                    nodal_plane_dist = PMF([(0.17, NodalPlane(strikes[0], 30, 90)),
                                            (0.17, NodalPlane(strikes[1], 30, 90)),
                                            (0.04, NodalPlane(strikes[2], 30, 90)),
                                            (0.04, NodalPlane(strikes[3], 30, 90)),
                                            (0.04, NodalPlane(strikes[4], 30, 90)),
                                            (0.04, NodalPlane(strikes[5], 30, 90)),
                                            (0.17, NodalPlane(strikes[0], 90, 0)),
                                            (0.17, NodalPlane(strikes[1], 90, 0)),
                                            (0.04, NodalPlane(strikes[2], 90, 0)),
                                            (0.04, NodalPlane(strikes[3], 90, 0)),
                                            (0.04, NodalPlane(strikes[4], 90, 0)),
                                            (0.04, NodalPlane(strikes[5], 90, 0))])
                dom_poly = Polygon(shape.shape.points)
                for pt in pts:
                    pt_loc = Point(pt.location.x, pt.location.y)
                    if pt_loc.within(dom_poly):
                        # pt.tectonic_region_type = dom['TRT']
                        pt.tectonic_region_type = dom['GMM_TRT']
                        # FIXME! update based on data extracted from shapefile
                        pt.nodal_plane_distribution = nodal_plane_dist
                        pt.hypocenter_distribution = hypo_depth_dist
                        pt.rupture_aspect_ratio = 2
                        mfd = pt.mfd
                        new_mfd = gr2inc_mmax(mfd, mmaxs[dom['CODE']],
                                              mmaxs_w[dom['CODE']], weight)
                        pt.mfd = new_mfd
                        if pt.source_id in pt_ids:
                            print('Point source %s already exists!' % pt.source_id)
                            print('Skipping this source for trt %s' % dom['TRT'])
                        else:
                            merged_pts.append(pt)
                            pt_ids.append(pt.source_id)
    outfile = "%s_%s.xml" % (
            filename_stem, bval_key)
    outfile = os.path.join(output_dir, outfile)
    name = outfile.rstrip('.xml')
    if nrml_version == '04':
        nodes = list(map(obj_to_node, sorted(merged_pts)))
        source_model = Node("sourceModel", {"name": name}, nodes=nodes)
        with open(outfile, 'wb') as f:
            nrml.write([source_model], f, '%s', xmlns=NAMESPACE)
    return outfile
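
# A hypothetical call, for illustration only (the filename stem and the
# bval_key field name below are made up, not from the original script):
#
# outfile = combine_ss_models('smoothed_frankel_50_3_mmin_3.0',
#                             domains_shp, params, lt, 'BVAL_BEST',
#                             output_dir='./merged', weight=0.6)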
Example #8
def combine_ss_models(filedict,
                      domains_shp,
                      lt,
                      outfile,
                      nrml_version='04',
                      weight=1.):
    """ Combine smoothed seismicity models based on tectonic region types
    :params filedict:
        dict of form filedict[trt] = filename specifying input file for that region
    :params domains_shp:
        shapefile defining tectonic domain regions
    :params lt:
        LogicTree object containing relevant values and weights for Mmax
    :params outfile:
        output nrml formatted file
    """
    print('Getting tectonic region type from %s' % domains_shp)
    driver = ogr.GetDriverByName("ESRI Shapefile")
    data_source = driver.Open(domains_shp, 0)
    dsf = data_source.GetLayer()
    trt_types = []
    for feature in dsf:
        trt_types.append(feature.GetField('TRT'))
    dsf = shapefile.Reader(domains_shp)
    dom_shapes = dsf.shapes()

    hypo_depth_dist_nc = PMF([(0.5, 10.0), (0.25, 5.0), (0.25, 15.0)])
    hypo_depth_dist_c = PMF([(0.5, 5.0), (0.25, 2.5), (0.25, 10.0)])
    hypo_depth_dist_ex = hypo_depth_dist_c
    hypo_depth_dict = {
        'Cratonic': hypo_depth_dist_c,
        'Non_cratonic': hypo_depth_dist_nc,
        'Extended': hypo_depth_dist_ex
    }
    nodal_plane_dist = PMF([(0.3, NodalPlane(0, 30, 90)),
                            (0.2, NodalPlane(90, 30, 90)),
                            (0.3, NodalPlane(180, 30, 90)),
                            (0.2, NodalPlane(270, 30, 90))])

    merged_pts = []

    # Get mmax values and weights
    mmaxs = {}
    mmaxs_w = {}
    for trt, filename in filedict.items():
        if trt == 'Cratonic':
            mmax_values, mmax_weights = lt.get_weights('Mmax', 'Proterozoic')
        else:
            mmax_values, mmax_weights = lt.get_weights('Mmax', trt)
        mmax_values = [float(i) for i in mmax_values]
        mmax_weights = [float(i) for i in mmax_weights]
        print(mmax_values)
        print(mmax_weights)
        mmaxs[trt] = mmax_values
        mmaxs_w[trt] = mmax_weights

    pt_ids = []
    for trt, filename in filedict.items():
        print(trt)
        print('Parsing %s' % filename)
        # Only keep points within domain
        pts = read_pt_source(filename)
        for zone_trt, dom_shape in zip(trt_types, dom_shapes):
            print(zone_trt)
            print(dom_shape)
            if zone_trt == trt:
                print('TRT %s, processing shape %s' % (zone_trt, dom_shape))
                dom_poly = Polygon(dom_shape.points)
                for pt in pts:
                    pt_loc = Point(pt.location.x, pt.location.y)
                    if pt_loc.within(dom_poly):
                        pt.tectonic_region_type = zone_trt
                        pt.nodal_plane_distribution = nodal_plane_dist
                        pt.hypocenter_distribution = hypo_depth_dict[zone_trt]
                        pt.rupture_aspect_ratio = 2
                        mfd = pt.mfd
                        new_mfd = gr2inc_mmax(mfd, mmaxs[trt], mmaxs_w[trt],
                                              weight)
                        pt.mfd = new_mfd
                        if pt.source_id in pt_ids:
                            print('Point source %s already exists!' % pt.source_id)
                            print('Skipping this source for trt %s' % zone_trt)
                        else:
                            merged_pts.append(pt)
                            pt_ids.append(pt.source_id)

    name = outfile[:-4]
    if nrml_version == '04':
        nodes = list(map(obj_to_node, sorted(merged_pts)))
        source_model = Node("sourceModel", {"name": name}, nodes=nodes)
        with open(outfile, 'wb') as f:
            nrml.write([source_model], f, '%s', xmlns=NAMESPACE)
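
# Hypothetical usage, reusing the filedict_bestb and lt objects built in
# Example #4 (the output filename is made up for illustration):
#
# combine_ss_models(filedict_bestb, domains_shp, lt,
#                   'smoothed_K4_bestb_merged.xml',
#                   nrml_version='04', weight=1.0)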