Example #1
def discretize_zones_with_h3_grid(h3_level: str, fname_poly: str,
                                  folder_out: str):

    h3_level = int(h3_level)
    create_folder(folder_out)

    tmp = "mapping_h{:d}.csv".format(h3_level)
    fname_out = os.path.join(folder_out, tmp)

    # Read polygons
    polygons_gdf = gpd.read_file(fname_poly)

    # Select point in polygon
    fout = open(fname_out, 'w')
    for idx, poly in polygons_gdf.iterrows():

        geojson_poly = json.loads(json.dumps(shapely.geometry.mapping(
            poly.geometry)))

        # Swap lons and lats since h3.polyfill expects (lat, lon) pairs
        coo = [[c[1], c[0]] for c in geojson_poly['coordinates'][0]]
        geojson_poly['coordinates'] = [coo]

        # Discretizing
        hexagons = list(h3.polyfill(geojson_poly, h3_level))
        for hxg in hexagons:
            if isinstance(poly.id, str):
                fout.write("{:s},{:s}\n".format(hxg, poly.id))
            else:
                fout.write("{:s},{:d}\n".format(hxg, poly.id))

    fout.close()
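
A note on the coordinate swap above: with the h3 v3 Python bindings (which this example appears to use), h3.polyfill expects the polygon vertices in (lat, lon) order unless geo_json_conformant=True is passed, which is why the lons and lats are reversed before discretizing. A minimal, self-contained sketch with an illustrative polygon, assuming h3 v3:

import h3

square = {"type": "Polygon",
          "coordinates": [[[45.0, 10.0], [45.0, 11.0], [46.0, 11.0],
                           [46.0, 10.0], [45.0, 10.0]]]}  # (lat, lon) pairs
hexagons = h3.polyfill(square, 4)              # H3 cells covering the square
lat, lon = h3.h3_to_geo(next(iter(hexagons)))  # centroid of one cell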
Example #2
def main(fname_config, label, edges_folder, out_file, *, resampling=None):
    """
    Creates the .xml input for the interface sources
    """

    create_folder(os.path.dirname(out_file))

    # check edges folder
    assert os.path.exists(edges_folder)

    # Read the config file
    config = toml.load(fname_config)

    # Create .xml
    es = EdgesSet.from_files(edges_folder)
    src = es.get_complex_fault(section_length=float(resampling))

    binw = config['bin_width']
    agr = config['sources'][label]['agr']
    bgr = config['sources'][label]['bgr']
    mmin = config['mmin']
    mmax = config['sources'][label]['mmax']
    mfd = TruncatedGRMFD(min_mag=mmin,
                         max_mag=mmax,
                         bin_width=binw,
                         a_val=agr,
                         b_val=bgr)
    src.mfd = mfd
    src.rupture_mesh_spacing = 10.0

    write_source_model(out_file, [src], 'Name')
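
The MFD assigned above is the standard truncated Gutenberg-Richter distribution from openquake.hazardlib. A small stand-alone sketch of how it is built and queried, with illustrative a/b values rather than the ones read from the config:

from openquake.hazardlib.mfd import TruncatedGRMFD

mfd = TruncatedGRMFD(min_mag=5.0, max_mag=7.5, bin_width=0.1,
                     a_val=4.0, b_val=1.0)
for mag, rate in mfd.get_annual_occurrence_rates():
    print("{:.2f} {:.4e}".format(mag, rate))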
Example #3
def create_subcatalogues(fname_polygons: str, fname_cat: str, folder_out: str):

    # Create output folder
    create_folder(folder_out)

    # Create geodataframe with the catalogue
    df = pd.read_csv(fname_cat)
    gdf = gpd.GeoDataFrame(
        df,
        crs='epsg:4326',
        geometry=[Point(xy) for xy in zip(df.longitude, df.latitude)])

    # Read polygons
    polygons_gdf = gpd.read_file(fname_polygons)

    # Select point in polygon
    columns = [
        'eventID', 'year', 'month', 'day', 'magnitude', 'longitude',
        'latitude', 'depth'
    ]

    # Iterate over sources
    for idx, poly in polygons_gdf.iterrows():
        df = pd.DataFrame({'Name': [poly.id], 'Polygon': [poly.geometry]})
        gdf_poly = gpd.GeoDataFrame(df, geometry='Polygon', crs='epsg:4326')
        within = gpd.sjoin(gdf, gdf_poly, op='within')
        # Create output file
        if isinstance(poly.id, int):
            fname = 'subcatalogue_zone_{:d}.csv'.format(poly.id)
        else:
            fname = 'subcatalogue_zone_{:s}.csv'.format(poly.id)
        out_fname = os.path.join(folder_out, fname)
        within.to_csv(out_fname, index=False, columns=columns)
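
The point-in-polygon selection above is a geopandas spatial join. A minimal, self-contained sketch of the same pattern; note that recent geopandas releases use the predicate= keyword instead of the deprecated op= used in these examples:

import geopandas as gpd
from shapely.geometry import Point, Polygon

pts = gpd.GeoDataFrame({'eventID': [1, 2]},
                       geometry=[Point(0.5, 0.5), Point(2.0, 2.0)],
                       crs='epsg:4326')
zone = gpd.GeoDataFrame({'Name': ['a']},
                        geometry=[Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])],
                        crs='epsg:4326')
within = gpd.sjoin(pts, zone, predicate='within')  # keeps only event 1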
Example #4
def main(h3_mapping: str, h3_level: int, folder_out: str):

    # Reading the input
    df = pd.read_csv(h3_mapping, names=['key', 'sid'])
    df.head()

    # Create the output folder - If needed
    create_folder(folder_out)

    # Preparing the dataframe
    lons = []
    lats = []
    for i, row in df.iterrows():
        la, lo = h3.h3_to_geo(row.key)
        lons.append(lo)
        lats.append(la)
    df['lon'] = lons
    df['lat'] = lats
    df['nocc'] = 1.

    # Writing output
    for sid in df.sid.unique():
        if isinstance(sid, str):
            tmps = '{:s}.csv'.format(sid[0:3])
        else:
            tmps = '{:02d}.csv'.format(sid)
        fname_out = os.path.join(folder_out, tmps)
        print(fname_out)
        tdf = df.loc[df.sid == sid]
        tdf.to_csv(fname_out, columns=['lon', 'lat', 'nocc'], index=False)
Example #5
def create_smoothing_per_zone(fname_points: str,
                              fname_polygons: str,
                              folder_out: str = '/tmp',
                              skip=[]):
    """
    Creates subsets of points, one for each of the polygons included in the
    `fname_polygons` shapefile. The attribute table must have an 'id' attribute.

    :param fname_points:
        Name of the file with the output of the smoothing
    :param fname_polygons:
        The shapefile with the polygons
    :param folder_out:
        The name of the folder where to write the output
    :param skip:
        A list with the IDs of the polygons to skip
    :returns:
        A number of .csv files in `folder_out`
    """

    create_folder(folder_out)

    # Create a geodataframe with the point sources
    df = pd.read_csv(fname_points)
    gdf = gpd.GeoDataFrame(df.drop(['lon', 'lat'], axis=1),
                           crs='epsg:4326',
                           geometry=[Point(xy) for xy in zip(df.lon, df.lat)])

    # Read polygons
    polygons_gdf = gpd.read_file(fname_polygons)

    # Iterate over the polygons defining the boundaries of area sources
    for idx, poly in polygons_gdf.iterrows():

        if poly.id in skip:
            continue

        # Create a geodataframe with the polygon in question
        df = pd.DataFrame({'Name': [poly.id], 'Polygon': [poly.geometry]})
        gdf_poly = gpd.GeoDataFrame(df, geometry='Polygon', crs='epsg:4326')

        # Find the point sources from the smoothing inside the polygon
        within = gpd.sjoin(gdf, gdf_poly, op='within')

        within['lon'] = within['geometry'].x
        within['lat'] = within['geometry'].y

        if isinstance(poly.id, str):
            fout = os.path.join(folder_out, '{:s}.csv'.format(poly.id))
        else:
            fout = os.path.join(folder_out, '{:d}.csv'.format(poly.id))

        if len(within):
            within.to_csv(fout, index=False, columns=['lon', 'lat', 'nocc'])
Example #6
def create_nrml_sources(fname_input_pattern: str, fname_config: str,
                        folder_out: str, as_multipoint: bool,
                        fname_subzone_shp: str = "",
                        fname_subzone_config: str = "",):

    create_folder(folder_out)

    # If true we take some of the information from subzones
    subzones = (len(fname_subzone_shp) > 0 and len(fname_subzone_config) > 0)
    model_subz = None
    if subzones:
        polygons_gdf = gpd.read_file(fname_subzone_shp)
        model_subz = toml.load(fname_subzone_config)

    # This is used to instantiate the MSR
    module = importlib.import_module('openquake.hazardlib.scalerel')

    # Parsing config
    model = toml.load(fname_config)

    rms = model['rupture_mesh_spacing']
    mmin = model['mmin']
    bwid = model['bin_width']
    tom = PoissonTOM(1.0)

    # Processing files
    for fname in glob(fname_input_pattern):

        src_id = os.path.basename(fname).split('.')[0]
        df = pd.read_csv(fname)

        # Create a geodataframe with the points in a given zone
        if subzones:

            # Create a geodataframe with points
            geom = [PointShapely(xy) for xy in zip(df.lon, df.lat)]
            gdf = gpd.GeoDataFrame(df, crs='epsg:4326', geometry=geom)

            # Select subzones within a zone
            tdf = polygons_gdf[polygons_gdf["parent"] == src_id]

            # Should contain the points within
            df = gpd.sjoin(gdf, tdf, op='within')

        if as_multipoint:
            write_as_multipoint_sources(df, model, src_id, module, subzones,
                                        model_subz, mmin, bwid, rms, tom,
                                        folder_out)
        else:
            write_as_set_point_sources(df, model, src_id, module, subzones,
                                       model_subz, mmin, bwid, rms, tom,
                                       folder_out)
Example #7
def create_gcmt_files(fname_polygons: str,
                      gcmt_filename: str,
                      folder_out: str,
                      depth_max: float = 600.0,
                      depth_min: float = 0.0):

    # Create output folder
    create_folder(folder_out)

    # Create geodataframe with the catalogue
    tmp = get_dataframe(gcmt_filename)

    # Filter depths
    df = tmp[(tmp.depth > depth_min) & (tmp.depth <= depth_max)]
    if len(df) == 0:
        return []

    # Create geodataframe
    gdf = gpd.GeoDataFrame(
        df,
        crs='epsg:4326',
        geometry=[Point(xy) for xy in zip(df.longitude, df.latitude)])

    # Read polygons
    polygons_gdf = gpd.read_file(fname_polygons)

    # Iterate over sources
    fnames_list = []
    for idx, poly in polygons_gdf.iterrows():

        df = pd.DataFrame({'Name': [poly.id], 'Polygon': [poly.geometry]})
        gdf_poly = gpd.GeoDataFrame(df, geometry='Polygon', crs='epsg:4326')
        within = gpd.sjoin(gdf, gdf_poly, op='within')

        if len(within) == 0:
            continue

        # Create output file
        if isinstance(poly.id, int):
            fname = 'subcatalogue_zone_{:d}.csv'.format(poly.id)
        else:
            fname = 'subcatalogue_zone_{:s}.csv'.format(poly.id)
        out_fname = os.path.join(folder_out, fname)
        within.to_csv(out_fname, index=False)

        fnames_list.append(out_fname)

    return fnames_list
Example #8
def main(config_fname: str, output_folder: str, *,
         trt: str=TRT.SUBDUCTION_INTRASLAB):

    investigation_t = 1.0

    # Parsing config
    model = toml.load(config_fname)
    path = os.path.dirname(config_fname)

    # Creating xml
    for key in model['sources']:
        ini_fname = os.path.join(path, model['sources'][key]['ini_fname'])
        config = configparser.ConfigParser()
        config.read_file(open(ini_fname))
        tmp = config.get('main', 'out_hdf5_fname')
        rupture_hdf5_fname = os.path.join(path, tmp)
        outf = os.path.join(output_folder, key)
        create_folder(outf)
        create(key, rupture_hdf5_fname, outf, investigation_t, trt)
Example #9
def catalogue_declustering(fname: str,
                           output_folder: str,
                           *,
                           subcatalogues: bool = False):
    """
    Decluster a catalogue
    """

    create_folder(output_folder)
    create_folder('./tmp')

    # Create a fake file with the classification. We use a fake classification
    # since earthquakes in this analysis are just from stable crust
    tr_fname = './tmp/fake.hdf5'
    cat = _load_catalogue(fname)
    label = np.ones_like(np.array(cat['magnitude']))
    f = h5py.File(tr_fname, 'w')
    _ = f.create_dataset("undef", data=label)
    f.close()

    labels = ['undef']

    # Declustering with the classical GK algorithm
    declustering_meth = 'GardnerKnopoffType1'
    declustering_params = {
        'time_distance_window': 'GardnerKnopoffWindow',
        'fs_time_prop': 0.9
    }
    out = decluster(fname,
                    declustering_meth,
                    declustering_params,
                    output_folder,
                    labels=labels,
                    tr_fname=tr_fname,
                    subcatalogues=subcatalogues,
                    olab='_gk',
                    save_af=True,
                    fix_defaults=True)

    declustering_meth = 'GardnerKnopoffType1'
    declustering_params = {
        'time_distance_window': 'UhrhammerWindow',
        'fs_time_prop': 0.9
    }
    out = decluster(fname,
                    declustering_meth,
                    declustering_params,
                    output_folder,
                    labels=labels,
                    tr_fname=tr_fname,
                    subcatalogues=subcatalogues,
                    olab='_uh',
                    save_af=True,
                    fix_defaults=True)

    declustering_meth = 'GardnerKnopoffType1'
    declustering_params = {
        'time_distance_window': 'GruenthalWindow',
        'fs_time_prop': 0.9
    }
    _ = decluster(fname,
                  declustering_meth,
                  declustering_params,
                  output_folder,
                  labels=labels,
                  tr_fname=tr_fname,
                  subcatalogues=subcatalogues,
                  olab='_gr',
                  save_af=True,
                  fix_defaults=True)
Example #10
def add_baseline_seismicity(folder_name: str,
                            folder_name_out: str,
                            fname_config: str,
                            fname_poly: str,
                            skip=[]):
    """

    :param folder_name:
        The name of the folder containing the files with GR parameters for the
        points in each zone considered
    :param folder_name_out:
        The folder where to write the results
    :param fname_config:
        A .toml file with the configuration parameters
    :param fname_poly:
        The name of the shapefile containing the geometry of the polygons used
    :param skip:
        A list with the sources that should be skipped [NOT ACTIVE!!!]
    :returns:
        An updated set of .csv files
    """

    # Create output folder
    create_folder(folder_name_out)

    # Parsing config
    model = toml.load(fname_config)
    h3_level = model['baseline']['h3_level']
    basel_agr = model['baseline']['a_value']
    basel_bgr = model['baseline']['b_value']

    # Read polygons
    polygons_gdf = gpd.read_file(fname_poly)

    # Loop over the polygons
    polygons_gdf.sort_values(by="id", ascending=True, inplace=True)
    polygons_gdf.reset_index(drop=True, inplace=True)

    for idx, poly in polygons_gdf.iterrows():

        geojson_poly = json.loads(json.dumps(shapely.geometry.mapping(
            poly.geometry)))

        # Swap lons and lats since h3.polyfill expects (lat, lon) pairs
        coo = [[c[1], c[0]] for c in geojson_poly['coordinates'][0]]
        geojson_poly['coordinates'] = [coo]

        # Discretizing the polygon i.e. find all the hexagons covering the
        # polygon describing the current zone
        hexagons = list(h3.polyfill(geojson_poly, h3_level))

        # Read the file with the points obtained by the smoothing
        print("Source ID", poly.id)
        fname = os.path.join(folder_name, '{:s}.csv'.format(poly.id))
        df = pd.read_csv(fname)

        srcs_idxs = [
            h3.geo_to_h3(la, lo, h3_level) for lo, la in zip(df.lon, df.lat)
        ]
        hxg_idxs = [hxg for hxg in hexagons]

        missing = list(set(hxg_idxs) - set(srcs_idxs))
        tmp = np.nonzero([df.agr <= basel_agr])[0]

        # If we don't miss cells and rates are all above the threshold there
        # is nothing else to do
        fname = os.path.join(folder_name_out, "{:s}.csv".format(poly.id))
        if len(missing) == 0 and len(tmp) == 0:
            df.to_csv(fname, index=False)
            continue

        # Get the indexes of the point sources with low rates
        idxs = np.nonzero(df.agr.to_numpy() <= basel_agr)[0]
        low = [srcs_idxs[i] for i in idxs]

        # Removing the sources with activity below the threshold
        df.drop(df.index[idxs], inplace=True)

        # Find the h3 indexes of the point sources either without seismicity
        # or with a rate below the baseline
        both = set(missing) | set(low)

        # Adding baseline seismicity to the dataframe for the current source
        if len(both) > 0:
            tmp_df = create_missing(both, h3_level, basel_agr, basel_bgr)
            df = pd.concat([df, tmp_df])

        # Creating output file
        assert len(hxg_idxs) == df.shape[0]
        df.to_csv(fname, index=False)
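
The set logic above can be summarised with a tiny, purely illustrative example: the cells covering the zone that have no smoothed point, plus the cells whose rate falls below the baseline, are the ones that receive the baseline a/b values (the cell labels below are made up):

hexagons = {'8a1', '8a2', '8a3', '8a4'}    # cells covering the zone
srcs_idxs = ['8a1', '8a2', '8a3']          # cells with a smoothed point
low = ['8a3']                              # cells with agr <= baseline a-value
both = (set(hexagons) - set(srcs_idxs)) | set(low)   # {'8a4', '8a3'}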
Example #11
def subcatalogues_analysis(fname_input_pattern,
                           fname_config,
                           outdir,
                           *,
                           skip=[],
                           yealim='',
                           **kwargs):
    """
    Analyze the catalogue
    """

    create_folder(outdir)

    # Parsing config
    model = toml.load(fname_config)

    # Processing files
    for fname in sorted(glob(fname_input_pattern)):

        # Get source ID
        src_id = _get_src_id(fname)
        if src_id in skip:
            continue

        # Create figure
        out = create_mtd(fname,
                         src_id,
                         None,
                         False,
                         False,
                         0.25,
                         10,
                         pmint=1900)

        if out is None:
            continue

        if len(yealim) > 0:
            tmp = yealim.split(',')
            tmp = numpy.array(tmp)
            tmp = tmp.astype(float)
            plt.xlim(tmp)

        if 'xlim' in kwargs:
            plt.xlim(kwargs['xlim'])

        if 'ylim' in kwargs:
            plt.ylim(kwargs['ylim'])

        print('src_id: {:s} '.format(src_id), end='')
        if ('sources' in model and src_id in model['sources']
                and 'completeness_table' in model['sources'][src_id]):
            print(' source specific completeness')
            ctab = numpy.array(model['sources'][src_id]['completeness_table'])
            ctab = ctab.astype(float)
        else:
            print(' default completeness')
            ctab = numpy.array(model['default']['completeness_table'])
            ctab = ctab.astype(float)

        print(ctab)
        _plot_ctab(ctab)

        ext = 'png'
        figure_fname = os.path.join(outdir,
                                    'fig_mtd_{:s}.{:s}'.format(src_id, ext))
        plt.savefig(figure_fname, format=ext)
        plt.close()
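
For reference, the completeness tables read from the .toml configuration are arrays of [year, magnitude] rows: the catalogue is assumed complete for magnitudes at or above the given value from that year onwards. Illustrative values only:

import numpy
ctab = numpy.array([[1990.0, 4.0],
                    [1960.0, 5.0],
                    [1900.0, 6.5]])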
Example #12
def analyze_hypocentral_depth(folder_subcat: str,
                              *,
                              depth_min: float = 0,
                              depth_max: float = 300.0,
                              depth_binw: float = 10,
                              folder_out_figs: str = '',
                              show: bool = False,
                              depth_bins: str = '',
                              conf='',
                              use: str = [],
                              skip: str = []):
    """
    Analyses the distribution of hypocentral depths within a depth interval.
    """

    if len(use) > 0:
        use = get_list(use)
    if len(skip) > 0:
        skip = get_list(skip)

    create_folder(folder_out_figs)
    path = os.path.join(folder_subcat, 'subcatalogue*.csv')
    print("Storing figures in: {:s}".format(folder_out_figs))

    if len(depth_bins) > 0:
        depth_bins = get_list(depth_bins)

    if len(conf) > 0:
        model = toml.load(conf)

    # Select point in polygon
    for fname in sorted(glob.glob(path)):

        match = re.search('.*subcatalogue_zone_(.*).csv', fname)
        src_id = match.group(1)

        if (len(use) and src_id not in use) or (src_id in skip):
            continue

        figure_format = 'png'
        fmt = 'hypodepth_distribution_zone_{:s}.{:s}'
        tmp = fmt.format(src_id, figure_format)
        fname_figure_out = os.path.join(folder_out_figs, tmp)

        # Building the figure/statistics
        hist, depb = hypocentral_depth_analysis(fname, depth_min, depth_max,
                                                depth_binw, fname_figure_out,
                                                show, depth_bins, src_id,
                                                figure_format)

        if hist is None:
            continue

        THRESHOLD = 0.03
        if len(conf) > 0:

            midd = depb[:-1] + np.diff(depb) / 2

            hist = hist / np.sum(hist)
            idx = hist > THRESHOLD
            hist = hist[idx]
            midd = midd[idx]

            wei = np.around(hist, 2)
            wei = wei / np.sum(wei)
            wei = np.around(wei, 2)

            swei = np.sum(wei)
            if abs(1.0 - swei) > 1e-2:
                # Fixing
                wei[-1] += 1.0 - swei
                swei = np.sum(wei)
                if abs(1.0 - swei) > 1e-2:
                    fmt = "Weights do not sum to 1: {:f}\n{:s}"
                    msg = fmt.format(swei, fname)
                    warnings.warn(msg)
                    exit()

            var = model['sources'][src_id]
            tlist = []
            for w, m in zip(wei, midd):
                if w > 1e-10:
                    tlist.append([w, m])
            var['hypocenter_distribution'] = tlist

    if len(conf) > 0:
        # Saving results into the config file
        with open(conf, 'w') as fou:
            fou.write(toml.dumps(model))
            print('Updated {:s}'.format(conf))
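
A small numeric illustration of the weight normalisation step above: the histogram is converted into two-decimal weights and the last weight absorbs the rounding residual so that the distribution sums exactly to one (values are made up):

import numpy as np

hist = np.array([1.0, 1.0, 1.0])
wei = np.around(hist / np.sum(hist), 2)   # [0.33, 0.33, 0.33] -> sums to 0.99
wei[-1] += 1.0 - np.sum(wei)              # -> [0.33, 0.33, 0.34]
assert abs(np.sum(wei) - 1.0) < 1e-2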
Example #13
def main(fname: str, *, example_flag: bool = False):
    """ Compares SES against a catalogue given a .toml configuration file """

    # Print an example of configuration file
    if example_flag:
        print_example()
        exit()

    # Load the .toml file containing the information required
    config_main = toml.load(fname)
    path = os.path.dirname(fname)

    print('Root path: {:s}'.format(path))

    # Read information in the config file
    fname_catalogues = []
    for tmp_name in config_main['main']['catalogues']:
        # If not absolute
        if not re.search('^/', tmp_name):
            tmp_name = os.path.join(path, tmp_name)
            assert os.path.exists(tmp_name)
            print('Catalogue: {:s}'.format(tmp_name))
        fname_catalogues.append(tmp_name)
    calc_id = config_main['main']['calc_id']
    ses_duration = config_main['main']['ses_duration']
    polygon_fname = os.path.join(path, config_main['main']['polygon'])
    output_dir = os.path.join(path, config_main['main']['output_dir'])
    descr = config_main['main']['description']
    binw = config_main['main'].get('bin_width', 0.2)
    min_magnitude = config_main['main'].get('min_magnitude', None)

    if ('tectonic_region' not in config_main['main']
            or config_main['main']['tectonic_region'] in ['', 'none', 'None']):
        tectonic_region = None
    else:
        tectonic_region = int(config_main['main']['tectonic_region'])

    # Checking
    msg = 'The polygon file does not exist:\n{:s}'.format(polygon_fname)
    assert os.path.exists(polygon_fname), msg
    if not os.path.exists(output_dir):
        create_folder(output_dir)

    # Reading ruptures from the datastore
    dstore = read(calc_id)
    dfr = dstore.read_df('ruptures')
    dfr = gpd.GeoDataFrame(dfr,
                           geometry=gpd.points_from_xy(dfr.hypo_0, dfr.hypo_1))
    if tectonic_region is not None:
        dfr = dfr.loc[dfr['trt_smr'] == tectonic_region]

    # Reading geojson polygon and create the shapely geometry
    with open(polygon_fname) as json_file:
        data = json.load(json_file)
    polygon = data['features'][0]['geometry']
    tmp = json.loads(geoj.dumps(polygon))
    geom = shape(tmp)

    # Get region limits
    coo = []
    for poly in geom.geoms:
        coo += list(zip(*poly.exterior.coords.xy))
    coo = np.array(coo)
    minlo = np.min(coo[:, 0])
    minla = np.min(coo[:, 1])
    maxlo = np.max(coo[:, 0])
    maxla = np.max(coo[:, 1])
    region = "{:f}/{:f}/{:f}/{:f}".format(minlo, maxlo, minla, maxla)

    # Read catalogue
    for i, fname in enumerate(fname_catalogues):
        if i == 0:
            tcat = _load_catalogue(fname)
        else:
            tcat.concatenate(_load_catalogue(fname))

    # Create a dataframe from the catalogue
    dfcat = to_df(tcat)
    dfcat = gpd.GeoDataFrame(dfcat,
                             geometry=gpd.points_from_xy(
                                 dfcat.longitude, dfcat.latitude))
    dfcat.head(n=1)

    # Select the events within the polygon and convert from df to catalogue
    idx = dfcat.within(geom)
    selcat_df = dfcat.loc[idx]
    selcat = from_df(selcat_df)

    if 'completeness_table' in config_main['main']:
        ctab = config_main['main']['completeness_table']
        ctab = np.array(ctab)
    else:
        fname_config = os.path.join(path, config_main['main']['fname_config'])
        msg = 'The config file does not exist:\n{:s}'.format(fname_config)
        assert os.path.exists(fname_config), msg
        config = toml.load(fname_config)
        completeness_label = config_main['main']['completeness_label']
        _, ctab = get_mmax_ctab(config, completeness_label)

    if len(selcat_df.magnitude) < 2:
        print('The catalogue contains less than 2 earthquakes')
        return

    selcat.data["dtime"] = selcat.get_decimal_time()
    cent_mag, t_per, n_obs = get_completeness_counts(selcat, ctab, binw)
    tmp = n_obs / t_per
    hiscml_cat = np.array([np.sum(tmp[i:]) for i in range(0, len(tmp))])

    # Take into account possible multiple occurrences in the SES
    df = dfr.loc[dfr.index.repeat(dfr.n_occ)]
    assert len(df) == np.sum(dfr.n_occ)

    # SES histogram
    idx = dfr.within(geom)
    bins = np.arange(min_magnitude, 9.0, binw)
    hisr, _ = np.histogram(df.loc[idx].mag, bins=bins)
    hisr = hisr / ses_duration
    hiscml = np.array([np.sum(hisr[i:]) for i in range(0, len(hisr))])

    # Plotting
    fig = plt.figure(figsize=(7, 5))
    # - cumulative
    plt.plot(bins[:-1], hiscml, '--x', label='SES')
    plt.plot(cent_mag - binw / 2, hiscml_cat, '-.x', label='Catalogue')
    # - incremental
    plt.bar(cent_mag,
            n_obs / t_per,
            width=binw * 0.7,
            fc='none',
            ec='red',
            alpha=0.5,
            align='center')
    plt.bar(bins[1:] - binw / 2,
            hisr,
            width=binw * 0.6,
            fc='none',
            ec='blue',
            alpha=0.5)
    plt.yscale('log')
    _ = plt.xlabel('Magnitude')
    _ = plt.ylabel('Annual frequency of exceedance')
    plt.grid()
    plt.legend()
    plt.title(descr)
    # - set xlim
    xlim = list(fig.gca().get_xlim())
    xlim[0] = min_magnitude if min_magnitude is not None else xlim[0]
    plt.xlim(xlim)
    plt.savefig(os.path.join(output_dir, 'ses.png'))

    # Plot map with the SES
    fig = pygmt.Figure()
    fig.basemap(region=region, projection="M15c", frame=True)
    fig.coast(land="#666666", water="skyblue")
    pygmt.makecpt(cmap="jet", series=[0, 300])
    fig.plot(x=dfr.loc[idx].hypo_0,
             y=dfr.loc[idx].hypo_1,
             style="c",
             color=dfr.loc[idx].hypo_2,
             cmap=True,
             size=0.01 * (1.5**dfr.loc[idx].mag),
             pen="black")
    fig.show()
    fig.savefig(os.path.join(output_dir, 'map_ses.png'))

    # Plot map with catalogue
    fig = pygmt.Figure()
    fig.basemap(region=region, projection="M15c", frame=True)
    fig.coast(land="#666666", water="skyblue")
    pygmt.makecpt(cmap="jet", series=[0, 300])
    fig.plot(x=selcat_df.longitude,
             y=selcat_df.latitude,
             style="c",
             color=selcat_df.depth,
             cmap=True,
             size=0.01 * (1.5**selcat_df.magnitude),
             pen="black")
    fig.show()
    fig.savefig(os.path.join(output_dir, 'map_eqks.png'))

    # Depth histogram
    deptw = 10.
    mmin = 5.0
    dfs = df.loc[idx]
    bins = np.arange(0.0, 200.0, deptw)
    fig = plt.figure()
    hisr, _ = np.histogram(dfs[dfs.mag > mmin].hypo_2, bins=bins)
    hiscat, _ = np.histogram(selcat_df[selcat_df.magnitude > mmin].depth,
                             bins=bins)
    fig = plt.figure(figsize=(5, 8))
    plt.barh(bins[:-1],
             hisr / sum(hisr),
             align='edge',
             height=deptw * 0.6,
             fc='lightgreen',
             ec='blue',
             label='ses')
    plt.barh(bins[:-1],
             hiscat / sum(hiscat),
             align='edge',
             height=deptw * 0.5,
             fc='white',
             ec='red',
             alpha=0.5,
             lw=1.5,
             label='catalogue')
    for dep, val in zip(bins[:-1], hiscat):
        if val > 0:
            plt.text(val / sum(hiscat), dep, s='{:.2f}'.format(val))
    plt.gca().invert_yaxis()
    _ = plt.ylabel('Depth [km]')
    _ = plt.xlabel('Count')
    plt.grid()
    plt.legend()
    plt.title(descr)
    plt.savefig(os.path.join(output_dir, 'depth_normalized.png'))
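
The cumulative curves above are obtained by summing the incremental annual rates from each magnitude bin upwards; the list comprehension is equivalent to a reversed cumulative sum, e.g.:

import numpy as np

hisr = np.array([2.0, 1.0, 0.5, 0.1])     # incremental annual rates
hiscml = np.array([np.sum(hisr[i:]) for i in range(len(hisr))])
assert np.allclose(hiscml, np.cumsum(hisr[::-1])[::-1])  # [3.6, 1.6, 0.6, 0.1]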
Example #14
def process_gcmt_datafames(fname_folder: str, folder_out: str):
    """
    :param fname_folder:
        A list containing the names of the files to be processed or a pattern
    :param folder_out:
        The name of the output folder
    """

    create_folder(folder_out)

    if isinstance(fname_folder, str):
        fnames = [f for f in glob(fname_folder)]
    else:
        fnames = fname_folder

    for fname in fnames:

        df = pd.read_csv(fname)
        if len(df.dip1) < 1:
            continue

        # See https://matplotlib.org/3.1.0/gallery/subplots_axes_and_figures/gridspec_nested.html
        f = plt.figure(figsize=(15, 15))
        gs0 = gridspec.GridSpec(2, 2, figure=f)
        src_id = _get_src_id(fname)

        ext = "png"
        fmt = "zone_{:s}.{:s}"
        figure_name = os.path.join(folder_out, fmt.format(src_id, ext))

        fmclassification = {}
        eventfm = {}
        dip_1 = {}
        dip_2 = {}
        strike_1 = {}
        strike_2 = {}
        for idx, row in df.iterrows():

            plungeb = row.loc['plunge_b']
            plungep = row['plunge_p']
            plunget = row['plunge_t']
            mclass = mecclass(plunget, plungeb, plungep)
            eventfm[idx] = mclass
            if mclass in fmclassification:
                fmclassification[mclass] += 1
                dip_1[mclass].append(row['dip1'])
                dip_2[mclass].append(row['dip2'])
                strike_1[mclass].append(row['strike1'])
                strike_2[mclass].append(row['strike2'])
            else:
                fmclassification[mclass] = 1
                dip_1[mclass] = [row['dip1']]
                dip_2[mclass] = [row['dip2']]
                strike_1[mclass] = [row['strike1']]
                strike_2[mclass] = [row['strike2']]

        title = "Source: {:s}".format(src_id)
        _ = plot_histogram(gs0[0, 0], fmclassification, title)
        plot_xx(gs0[0, 1], dip_1, dip_2, strike_1, strike_2)

        stk1 = get_simpler(strike_1)
        stk2 = get_simpler(strike_2)
        dip1 = get_simpler(dip_1)
        dip2 = get_simpler(dip_2)
        plot_yy(gs0[1, 0], dip1, dip2, stk1, stk2)

        plot_density_simple(gs0[1, 1], dip1, dip2, stk1, stk2)

        plt.savefig(figure_name, format=ext)
        plt.close()

    return fmclassification
Example #15
def subcatalogues_analysis(fname_input_pattern,
                           fname_config,
                           outdir,
                           skip=[],
                           **kwargs):
    """
    Analyze the catalogue
    """

    create_folder(outdir)

    # Parsing config
    model = toml.load(fname_config)

    # Processing files
    for fname in sorted(glob(fname_input_pattern)):

        # Get source ID
        src_id = _get_src_id(fname)
        if src_id in skip:
            continue

        # Create figure
        out = create_mtd(fname,
                         src_id,
                         None,
                         False,
                         False,
                         0.5,
                         10,
                         pmint=1900)

        if out is None:
            continue

        if 'xlim' in kwargs:
            plt.xlim(kwargs['xlim'])

        if 'ylim' in kwargs:
            plt.ylim(kwargs['ylim'])

        print('src_id:', src_id)
        if ('sources' in model and src_id in model['sources']
                and 'completeness_table' in model['sources'][src_id]):
            ctab = numpy.array(model['sources'][src_id]['completeness_table'])
        else:
            ctab = numpy.array(model['default']['completeness_table'])

        n = len(ctab)
        for i in range(0, n - 1):
            plt.plot([ctab[i, 0], ctab[i, 0]], [ctab[i, 1], ctab[i + 1, 1]],
                     '-r')
            plt.plot([ctab[i, 0], ctab[i + 1, 0]],
                     [ctab[i + 1, 1], ctab[i + 1, 1]], '-r')

        ylim = plt.gca().get_ylim()
        xlim = plt.gca().get_xlim()

        plt.plot([ctab[n - 1, 0], ctab[n - 1, 0]], [ylim[1], ctab[n - 1, 1]],
                 '-r')
        plt.plot([ctab[0, 0], xlim[1]], [ctab[0, 1], ctab[0, 1]], '-r')

        ext = 'png'
        figure_fname = os.path.join(outdir,
                                    'fig_mtd_{:s}.{:s}'.format(src_id, ext))
        plt.savefig(figure_fname, format=ext)
        plt.close()
Example #16
def calculate_ruptures(ini_fname,
                       only_plt=False,
                       ref_fdr=None,
                       agr=None,
                       bgr=None,
                       mmin=None,
                       mmax=None):
    """
    :param str ini_fname:
        The name of a .ini file
    :param only_plt:
        Boolean. When True, it only plots the ruptures
    :param ref_fdr:
        The path to the reference folder used to set the paths in the .ini
        file. If not provided directly, we use the one set in the .ini file.
    """

    # Read config file
    config = configparser.ConfigParser()
    config.read_file(open(ini_fname))

    # Logging settings
    logging.basicConfig(format='rupture:%(levelname)s:%(message)s')

    # Reference folder
    if ref_fdr is None:
        if 'reference_folder' not in config['main']:
            msg = 'The .ini file does not contain the reference_folder param'
            raise ValueError(msg)
        ref_fdr = config.get('main', 'reference_folder')

    # Set parameters
    profile_sd_topsl = config.getfloat('main', 'profile_sd_topsl')
    edge_sd_topsl = config.getfloat('main', 'edge_sd_topsl')

    # This sampling distance is used to create the inslab meshes and ruptures
    sampling = config.getfloat('main', 'sampling')
    float_strike = config.getfloat('main', 'float_strike')
    float_dip = config.getfloat('main', 'float_dip')
    slab_thickness = config.getfloat('main', 'slab_thickness')
    label = config.get('main', 'label')
    hspa = config.getfloat('main', 'hspa')
    vspa = config.getfloat('main', 'vspa')
    uniform_fraction = config.getfloat('main', 'uniform_fraction')

    # MFD params
    if agr is None:
        agr = config.getfloat('main', 'agr')
    if bgr is None:
        bgr = config.getfloat('main', 'bgr')
    if mmax is None:
        mmax = config.getfloat('main', 'mmax')
    if mmin is None:
        mmin = config.getfloat('main', 'mmin')

    # IDL
    if config.has_option('main', 'idl'):
        idl = config.get('main', 'idl')
    else:
        idl = False

    # Profile alignment at the top
    align = False
    if config.has_option('main', 'profile_alignment'):
        tmps = config.get('main', 'profile_alignment')
        if re.search('true', tmps.lower()):
            align = True

    # Set profile folder
    path = config.get('main', 'profile_folder')
    path = os.path.abspath(os.path.join(ref_fdr, path))

    # Catalogue
    cat_pickle_fname = config.get('main', 'catalogue_pickle_fname')
    cat_pickle_fname = os.path.abspath(os.path.join(ref_fdr, cat_pickle_fname))

    # Output
    hdf5_filename = config.get('main', 'out_hdf5_fname')
    hdf5_filename = os.path.abspath(os.path.join(ref_fdr, hdf5_filename))

    # Smoothing output
    out_hdf5_smoothing_fname = config.get('main', 'out_hdf5_smoothing_fname')
    tmps = os.path.join(ref_fdr, out_hdf5_smoothing_fname)
    out_hdf5_smoothing_fname = os.path.abspath(tmps)

    # Tectonic regionalisation
    treg_filename = config.get('main', 'treg_fname')
    if not re.search('[a-z]', treg_filename):
        treg_filename = None
    else:
        treg_filename = os.path.abspath(os.path.join(ref_fdr, treg_filename))

    # Dip angles used to create the virtual faults within the slab
    dips = list_of_floats_from_string(config.get('main', 'dips'))
    asprsstr = config.get('main', 'aspect_ratios')
    asprs = dict_of_floats_from_string(asprsstr)

    # Magnitude-scaling relationship
    msrstr = config.get('main', 'mag_scaling_relation')
    msrd = get_available_scalerel()
    if msrstr not in msrd.keys():
        msg = 'Unknown magnitude scaling relationship: {:s}'.format(msrstr)
        raise ValueError(msg)
    msr = msrd[msrstr]()

    logging.info('Reading profiles from: {:s}'.format(path))
    profiles, pro_fnames = _read_profiles(path)
    assert len(profiles) > 0

    # Create mesh from profiles
    logging.info('Creating top of slab mesh')
    print('Creating top of slab mesh')
    msh = create_from_profiles(profiles, profile_sd_topsl, edge_sd_topsl, idl)

    # Create inslab meshes. The output (i.e. ohs) is a dictionary with the
    # values of dip as keys. The values in the dictionary
    # are :class:`openquake.hazardlib.geo.line.Line` instances
    logging.info('Creating ruptures on virtual faults')
    print('Creating ruptures on virtual faults')
    ohs = create_inslab_meshes(msh, dips, slab_thickness, sampling)

    if only_plt:
        pass

    # TODO consider replacing with pyvista
    """
        azim = 10.
        elev = 20.
        dist = 20.

        f = mlab.figure(bgcolor=(1, 1, 1), size=(900, 600))
        vsc = -0.01
        #
        # profiles
        for ipro, (pro, fnme) in enumerate(zip(profiles, pro_fnames)):
            tmp = [[p.longitude, p.latitude, p.depth] for p in pro.points]
            tmp = np.array(tmp)
            tmp[tmp[:, 0] < 0, 0] = tmp[tmp[:, 0] < 0, 0] + 360
            mlab.plot3d(tmp[:, 0], tmp[:, 1], tmp[:, 2]*vsc, color=(1, 0, 0))
        #
        # top of the slab mesh
        plot_mesh_mayavi(msh, vsc, color=(0, 1, 0))
        #
        for key in ohs:
            for iii in range(len(ohs[key])):
                for line in ohs[key][iii]:
                    pnt = np.array([[p.longitude, p.latitude, p.depth]
                                    for p in line.points])
                    pnt[pnt[:, 0] < 0, 0] = pnt[pnt[:, 0] < 0, 0] + 360
                    mlab.plot3d(pnt[:, 0], pnt[:, 1], pnt[:, 2]*vsc,
                                color=(0, 0, 1))

        f.scene.camera.azimuth(azim)
        f.scene.camera.elevation(elev)
        mlab.view(distance=dist)
        mlab.show()
        mlab.show()

        exit(0)
    """

    if PLOTTING:
        vsc = 0.01
        fig = plt.figure(figsize=(10, 8))
        ax = fig.add_subplot(111, projection='3d')
        #
        # profiles
        for ipro, (pro, fnme) in enumerate(zip(profiles, pro_fnames)):
            tmp = [[p.longitude, p.latitude, p.depth] for p in pro.points]
            tmp = np.array(tmp)
            tmp[tmp[:, 0] < 0, 0] = tmp[tmp[:, 0] < 0, 0] + 360
            ax.plot(tmp[:, 0],
                    tmp[:, 1],
                    tmp[:, 2] * vsc,
                    'x--b',
                    markersize=2)
            tmps = '{:d}-{:s}'.format(ipro, os.path.basename(fnme))
            ax.text(tmp[0, 0], tmp[0, 1], tmp[0, 2] * vsc, tmps)

        # Top of the slab mesh
        # plot_mesh(ax, msh, vsc)

        for key in ohs:
            for iii in range(len(ohs[key])):
                for line in ohs[key][iii]:
                    pnt = np.array([[p.longitude, p.latitude, p.depth]
                                    for p in line.points])
                    pnt[pnt[:, 0] < 0, 0] = pnt[pnt[:, 0] < 0, 0] + 360
                    ax.plot(pnt[:, 0], pnt[:, 1], pnt[:, 2] * vsc, '-r')
        ax.invert_zaxis()
        ax.view_init(50, 55)
        plt.show()

    # The one created here describes the bottom of the slab
    lmsh = create_lower_surface_mesh(msh, slab_thickness)

    # Get min and max values of the mesh
    milo, mila, mide, malo, mala, made = get_min_max(msh, lmsh)

    # Discretizing the slab
    # omsh = Mesh(msh[:, :, 0], msh[:, :, 1], msh[:, :, 2])
    # olmsh = Mesh(lmsh[:, :, 0], lmsh[:, :, 1], lmsh[:, :, 2])

    # this `dlt` value [in degrees] is used to create a buffer around the mesh
    dlt = 5.0
    msh3d = Grid3d(milo - dlt, mila - dlt, mide, malo + dlt, mala + dlt, made,
                   hspa, vspa)
    # mlo, mla, mde = msh3d.select_nodes_within_two_meshesa(omsh, olmsh)
    mlo, mla, mde = msh3d.get_coordinates_vectors()

    # save data on hdf5 file
    if os.path.exists(hdf5_filename):
        os.remove(hdf5_filename)
    else:
        path = os.path.dirname(hdf5_filename)
        create_folder(path)
    logging.info('Creating {:s}'.format(hdf5_filename))
    fh5 = h5py.File(hdf5_filename, 'w')
    grp_slab = fh5.create_group('slab')
    dset = grp_slab.create_dataset('top', data=msh)
    dset.attrs['spacing'] = sampling
    grp_slab.create_dataset('bot', data=lmsh)
    fh5.close()

    # Get catalogue
    catalogue = get_catalogue(cat_pickle_fname, treg_filename, label)

    # smoothing
    values, smooth = smoothing(mlo, mla, mde, catalogue, hspa, vspa,
                               out_hdf5_smoothing_fname)

    # Spatial index
    r, proj = spatial_index(smooth)

    # magnitude-frequency distribution
    mfd = TruncatedGRMFD(min_mag=mmin,
                         max_mag=mmax,
                         bin_width=0.1,
                         a_val=agr,
                         b_val=bgr)

    # Create all the ruptures - the probability of occurrence is for one year
    # in this case
    _ = create_ruptures(mfd, dips, sampling, msr, asprs, float_strike,
                        float_dip, r, values, ohs, 1., hdf5_filename,
                        uniform_fraction, proj, idl, align, True)
Example #17
def main(fname: str, path_point_sources: str, out_path: str, dst: float):
    create_folder(out_path)
    remove_buffer_around_faults(fname, path_point_sources, out_path, dst)
Example #18
def weichert_analysis(fname_input_pattern,
                      fname_config,
                      folder_out=None,
                      folder_out_figs=None,
                      skip=[],
                      binw=None,
                      plt_show=False):
    """
    Computes GR parameters for a set of catalogues stored in a .csv file

    :param fname_input_pattern:
        It can be either a string (defining a pattern) or a list of
        .csv files. The file names must have the source ID at the end. The
        delimiter of the source ID on the left is `_`
    :param fname_config:
        The name of the .toml configuration file
    :param folder_out:
        The folder where to store the files with the counting of occurrences
    :param folder_out_figs:
        The folder where to store the figures
    :param skip:
        A list with the IDs of the sources to skip
    """

    if folder_out is not None:
        create_folder(folder_out)
    if folder_out_figs is not None:
        create_folder(folder_out_figs)

    # Parsing config
    if fname_config is not None:
        model = toml.load(fname_config)

    if binw is None:
        if fname_config is not None:
            binw = model['bin_width']
        else:
            binw = 0.1

    if isinstance(fname_input_pattern, str):
        fname_list = [f for f in glob(fname_input_pattern)]
    else:
        fname_list = fname_input_pattern

    # Processing files
    for fname in sorted(fname_list):

        print(fname)

        # Get source ID
        src_id = _get_src_id(fname)
        if src_id in skip:
            print("   skipping")
            continue

        if 'sources' in model:
            if (src_id in model['sources']
                    and 'mmax' in model['sources'][src_id]):
                mmax = model['sources'][src_id]['mmax']
            else:
                mmax = model['default']['mmax']
            if (src_id in model['sources']
                    and 'completeness_table' in model['sources'][src_id]):
                key_tmp = 'completeness_table'
                ctab = numpy.array(model['sources'][src_id][key_tmp])
                print('Using source specific completeness')
            else:
                ctab = numpy.array(model['default']['completeness_table'])
        else:
            mmax = model['default']['mmax']
            ctab = numpy.array(model['default']['completeness_table'])

        # Processing catalogue
        tcat = _load_catalogue(fname)

        if tcat is None or len(tcat.data['magnitude']) < 2:
            print('    Source {:s} has less than 2 eqks'.format(src_id))
            continue

        tcat.data["dtime"] = tcat.get_decimal_time()
        cent_mag, t_per, n_obs = get_completeness_counts(tcat, ctab, binw)

        if folder_out is not None:
            df = pd.DataFrame()
            df['mag'] = cent_mag
            df['deltaT'] = t_per
            df['nobs'] = n_obs
            fmt = 'occ_count_zone_{:s}'
            fout = os.path.join(folder_out, fmt.format(src_id))
            df.to_csv(fout, index=False)

        # Computing GR a and b
        tcat = _add_defaults(tcat)
        weichert_config = {
            'magnitude_interval': binw,
            'reference_magnitude': 0.0
        }
        weichert = Weichert()
        bval_wei, sigmab, aval_wei, sigmaa = weichert.calculate(
            tcat, weichert_config, ctab)

        # Computing confidence intervals
        gwci = get_weichert_confidence_intervals
        lcl, ucl, ex_rates, ex_rates_scaled = gwci(cent_mag, n_obs, t_per,
                                                   bval_wei)

        if 'sources' not in model:
            model['sources'] = {}
        if src_id not in model['sources']:
            model['sources'][src_id] = {}

        tmp = "{:.5e}".format(aval_wei)
        model['sources'][src_id]['agr_weichert'] = float(tmp)
        tmp = "{:.3f}".format(bval_wei)
        model['sources'][src_id]['bgr_weichert'] = float(tmp)

        _ = plt.figure()
        ax = plt.gca()
        plt.plot(cent_mag, n_obs / t_per, 'o', markerfacecolor='none')
        plt.plot(cent_mag - binw / 2,
                 ex_rates_scaled,
                 's',
                 markerfacecolor='none',
                 color='red')

        plt.plot(cent_mag - binw / 2, lcl, '--', color='darkgrey')
        plt.plot(cent_mag - binw / 2, ucl, '--', color='darkgrey')

        xmag = numpy.arange(cent_mag[0] - binw / 2, mmax - 0.01 * binw,
                            binw / 2)
        exra = (10.0**(aval_wei - bval_wei * xmag) -
                10.0**(aval_wei - bval_wei * mmax))
        plt.plot(xmag, exra, '--', lw=3, color='green')

        plt.yscale('log')
        plt.xlabel('Magnitude')
        plt.ylabel('Annual rate of exceedance')
        plt.text(0.75,
                 0.95,
                 'b_GR = {:.2f}'.format(bval_wei),
                 transform=ax.transAxes)
        plt.grid(which='major', color='grey')
        plt.grid(which='minor', linestyle='--', color='lightgrey')
        plt.title(src_id)

        if plt_show:
            plt.show()

        # Saving figures
        if folder_out_figs is not None:
            ext = 'png'
            fmt = 'fig_mfd_{:s}.{:s}'
            figure_fname = os.path.join(folder_out_figs,
                                        fmt.format(src_id, ext))

            plt.savefig(figure_fname, format=ext)
            plt.close()

    # Saving results into the config file
    if fname_config is not None:
        with open(fname_config, 'w') as f:
            f.write(toml.dumps(model))
            print('Updated {:s}'.format(fname_config))
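
The green curve plotted above is the annual exceedance rate implied by the truncated Gutenberg-Richter relation, N(m) = 10**(a - b*m) - 10**(a - b*mmax). A short worked sketch with illustrative parameters:

import numpy

agr, bgr, mmax = 4.0, 1.0, 7.5
xmag = numpy.arange(5.0, mmax, 0.1)
exra = 10.0**(agr - bgr * xmag) - 10.0**(agr - bgr * mmax)
# e.g. the annual rate of events with magnitude >= 5.0 is about 0.1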
Example #19
def compute_a_value(fname_input_pattern: str,
                    bval: float,
                    fname_config: str,
                    folder_out: str,
                    use: str = '',
                    folder_out_figs: str = None,
                    plt_show=False):
    """
    This function assigns an a-value, given a fixed b-value, to each source
    whose catalogue file is selected by the provided `fname_input_pattern`.
    """

    if len(use) > 0:
        use = get_list(use)

    # Processing input parameters
    bval = float(bval)
    if folder_out is not None:
        create_folder(folder_out)
    if folder_out_figs is not None:
        create_folder(folder_out_figs)

    if isinstance(fname_input_pattern, str):
        fname_list = glob(fname_input_pattern)
    else:
        fname_list = fname_input_pattern

    # Parsing config
    model = toml.load(fname_config)
    binw = model['bin_width']

    # Processing files
    for fname in sorted(fname_list):

        # Get source ID
        src_id = _get_src_id(fname)
        if len(use) > 0 and src_id not in use:
            continue
        print(fname)

        mmax, ctab = get_mmax_ctab(model, src_id)

        # Processing catalogue
        tcat = _load_catalogue(fname)

        if tcat is None or len(tcat.data['magnitude']) < 2:
            continue

        # Completeness analysis
        tcat = _add_defaults(tcat)
        tcat.data["dtime"] = tcat.get_decimal_time()
        try:
            cent_mag, t_per, n_obs = get_completeness_counts(tcat, ctab, binw)
            if cent_mag is None:
                print('   Completeness analysis failed')
                continue
        except ValueError:
            print('   Completeness analysis failed')
            continue

        df = pd.DataFrame()
        df['mag'] = cent_mag
        df['deltaT'] = t_per
        df['nobs'] = n_obs
        fout = os.path.join(folder_out, 'occ_count_zone_{:s}'.format(src_id))
        df.to_csv(fout, index=False)

        # Computing GR a
        if 'sources' not in model:
            model['sources'] = {}
        if src_id not in model['sources']:
            model['sources'][src_id] = {}

        exrs = get_exrs(df, bval)
        aval = get_agr(df.mag[0] - binw / 2, bval, exrs[0])

        tmp = "{:.5e}".format(aval)
        model['sources'][src_id]['agr_counting'] = float(tmp)

        tmp = "{:.5e}".format(bval)
        model['sources'][src_id]['bgr_counting'] = float(tmp)

        gwci = get_weichert_confidence_intervals
        lcl, ucl, ex_rates, ex_rates_scaled = gwci(cent_mag, n_obs, t_per,
                                                   bval)

        _ = plt.figure()
        ax = plt.gca()
        plt.plot(cent_mag, n_obs / t_per, 'o', markerfacecolor='none')
        plt.plot(cent_mag - binw / 2,
                 ex_rates_scaled,
                 's',
                 markerfacecolor='none',
                 color='red')

        plt.plot(cent_mag - binw / 2, lcl, '--', color='black')
        plt.plot(cent_mag - binw / 2, ucl, '--', color='black')

        xmag = numpy.arange(cent_mag[0] - binw / 2, mmax - 0.01 * binw,
                            binw / 2)
        exra = (10.0**(aval - bval * xmag) - 10.0**(aval - bval * mmax))
        plt.plot(xmag, exra, '--', lw=3, color='green')

        plt.yscale('log')
        plt.xlabel('Magnitude')
        plt.ylabel('Annual rate of exceedance')
        plt.text(0.75,
                 0.95,
                 'Fixed b_GR = {:.2f}'.format(bval),
                 transform=ax.transAxes)
        plt.grid(which='major', color='grey')
        plt.grid(which='minor', linestyle='--', color='lightgrey')
        plt.title(src_id)

        if plt_show:
            plt.show()

        # Saving figures
        if folder_out_figs is not None:
            ext = 'png'
            fmt = 'fig_mfd_{:s}.{:s}'
            figure_fname = os.path.join(folder_out_figs,
                                        fmt.format(src_id, ext))

            plt.savefig(figure_fname, format=ext)
            plt.close()

    # Saving results into the config file
    with open(fname_config, 'w') as fou:
        fou.write(toml.dumps(model))
        print('Updated {:s}'.format(fname_config))
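
The a-value computed above via get_agr presumably follows from the Gutenberg-Richter relation log10 N(m) = a - b*m, i.e. a = log10(rate) + b*m for the annual exceedance rate of the first complete bin; a toy calculation with made-up numbers:

import numpy

bval = 1.0
mag = 4.95    # lower edge of the first magnitude bin
rate = 0.2    # annual rate of events with magnitude >= mag
aval = numpy.log10(rate) + bval * mag   # ~ 4.25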
Example #20
def create_nrml_sources(fname_input_pattern: str, fname_config: str, 
                        folder_out: str, fname_subzone_shp: str="", 
                        fname_subzone_config: str=""):

    create_folder(folder_out)
    
    # If true we take some of the information from subzones
    subzones = (len(fname_subzone_shp) > 0 and len(fname_subzone_config) > 0)
    if subzones:
        polygons_gdf = gpd.read_file(fname_subzone_shp)
        model_subz = toml.load(fname_subzone_config) 

    # This is used to instantiate the MSR
    module = importlib.import_module('openquake.hazardlib.scalerel')

    # Parsing config
    model = toml.load(fname_config)

    rms = model['rupture_mesh_spacing']
    mmin = model['mmin']
    bwid = model['bin_width']
    tom = PoissonTOM(1.0)

    # Processing files
    for fname in glob(fname_input_pattern):

        src_id = os.path.basename(fname).split('.')[0]
        rc_id = _get_src_id(fname)
        
        df = pd.read_csv(fname)
        
        # Create a geodataframe with the points in a given zone
        if subzones:

            # Create a geodataframe with points
            geom = [PointShapely(xy) for xy in zip(df.lon, df.lat)]
            gdf = gpd.GeoDataFrame(df, crs='epsg:4326', geometry=geom)

            # Select subzones within a zone
            tdf = polygons_gdf[polygons_gdf["parent"] == src_id]

            # Should contain the points within
            df = gpd.sjoin(gdf, tdf, op='within')

        # This is the information on the source in the config file
        srcd = model['sources'][src_id]

        # Looping over the points
        srcs = []
        for idx, pnt in df.iterrows():

            if subzones:
                srcd_sz = model_subz['sources'][pnt.id]

            pfx = model.get("source_prefix", "")
            pfx += "_" if len(pfx) else pfx
            sid = '{:s}{:s}_{:d}'.format(pfx, src_id, idx)
            name = ""

            trt = srcd['tectonic_region_type']
            msr_str = model['msr'][trt]

            my_class = getattr(module, msr_str)
            msr = my_class()

            # Get mmax and set the MFD
            mmx = srcd['mmax']
            mfd = TruncatedGRMFD(mmin, mmx, bwid, pnt.agr, pnt.bgr)

            key = 'rupture_aspect_ratio'
            rar = get_param(srcd, model['default'], key)

            key = 'upper_seismogenic_depth'
            usd = get_param(srcd, model['default'], key)

            key = 'lower_seismogenic_depth'
            lsd = get_param(srcd, model['default'], key)

            key = 'nodal_plane_distribution'
            tmp = get_param(srcd, model['default'], key)
            npd = _get_nodal_plane_distribution(tmp)

            key = 'hypocenter_distribution'
            tmp = get_param(srcd, model['default'], key)
            hyd = _get_hypocenter_distribution(tmp)

            if subzones:
                tmp = get_param(srcd_sz, model['default'], key)
                npd = _get_nodal_plane_distribution(tmp)

            loc = Point(pnt.lon, pnt.lat)
            src = PointSource(sid, name, trt, mfd, rms, msr, rar, tom,
                              usd, lsd, loc, npd, hyd)
            srcs.append(src)

        # Write output file
        fname_out = os.path.join(folder_out, 'src_{:s}.xml'.format(src_id))
        write_source_model(fname_out, srcs, 'Zone {:s}'.format(src_id))
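
For context, the positional arguments passed to PointSource above are: source id, name, tectonic region type, MFD, rupture mesh spacing, magnitude scaling relationship, rupture aspect ratio, temporal occurrence model, upper and lower seismogenic depths, location, nodal plane distribution and hypocentral depth distribution. A minimal stand-alone sketch with illustrative values, assuming a recent openquake.hazardlib:

from openquake.hazardlib.source import PointSource
from openquake.hazardlib.mfd import TruncatedGRMFD
from openquake.hazardlib.scalerel import WC1994
from openquake.hazardlib.tom import PoissonTOM
from openquake.hazardlib.geo import Point
from openquake.hazardlib.geo.nodalplane import NodalPlane
from openquake.hazardlib.pmf import PMF
from openquake.hazardlib.sourcewriter import write_source_model

mfd = TruncatedGRMFD(5.0, 7.0, 0.1, 4.0, 1.0)
src = PointSource('1', 'example point', 'Active Shallow Crust', mfd, 5.0,
                  WC1994(), 1.0, PoissonTOM(1.0), 0.0, 20.0,
                  Point(10.0, 45.0),
                  PMF([(1.0, NodalPlane(0.0, 90.0, 0.0))]),
                  PMF([(1.0, 10.0)]))
write_source_model('/tmp/src_example.xml', [src], 'Example')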