Example No. 1
    def calculate(self, catalogue, config, completeness=None):
        '''Calculates recurrence using the Weichert (1980) method'''
        # Input checks
        cmag, ctime, ref_mag, _, config = input_checks(catalogue,
                                                       config,
                                                       completeness)
        if "dtime" not in catalogue.data or not len(catalogue.data["dtime"]):
            catalogue.data["dtime"] = catalogue.get_decimal_time()
        if not catalogue.end_year:
            catalogue.update_end_year()
        if completeness is None:
            start_year = float(np.min(catalogue.data["year"]))
            completeness = np.column_stack([ctime, cmag])
        # Apply Weichert preparation
        cent_mag, t_per, n_obs = get_completeness_counts(
            catalogue, completeness, config["magnitude_interval"])

        # A few more Weichert checks
        if not config.get('bvalue'):
            config['bvalue'] = 1.0
        if not config.get('itstab'):
            config['itstab'] = 1E-5
        if not config.get('maxiter'):
            config['maxiter'] = 1000
        bval, sigma_b, rate, sigma_rate, aval, sigma_a = \
            self.weichert_algorithm(t_per, cent_mag, n_obs, ref_mag,
                                    config['bvalue'], config['itstab'],
                                    config['maxiter'])

        if not config['reference_magnitude']:
            rate = np.log10(aval)
            sigma_rate = np.log10(aval + sigma_a) - np.log10(aval)

        return bval, sigma_b, rate, sigma_rate
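
A minimal configuration sketch for the method above, inferred from the keys it reads and from the `wei_conf` dictionaries in the later examples; the catalogue is assumed to be an hmtk catalogue prepared elsewhere, and the values are placeholders.

import numpy as np

# Completeness table rows are (year, magnitude), as in np.column_stack([ctime, cmag]) above.
completeness = np.array([[1990.0, 4.0],    # complete for M >= 4.0 since 1990 (assumed values)
                         [1960.0, 5.0]])   # complete for M >= 5.0 since 1960
config = {"magnitude_interval": 0.1,       # bin width passed to get_completeness_counts
          "reference_magnitude": 0.0}      # falsy, so the returned rate is the GR a-value (log10 branch)
# Optional keys default inside calculate(): bvalue=1.0, itstab=1e-5, maxiter=1000.
# bval, sigma_b, aval, sigma_a = Weichert().calculate(catalogue, config, completeness)  # hypothetical call
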
Example No. 2
def plot_observed_recurrence(
        catalogue, completeness, dmag, end_year=None, filename=None,
        figure_size=(8, 6), filetype='png', dpi=300, ax=None):
    """
    Plots the observed recurrence taking into account the completeness
    """
    # Get completeness adjusted recurrence table
    if isinstance(completeness, float):
        # Unique completeness
        completeness = np.array([[np.min(catalogue.data['year']),
                                  completeness]])
    if not end_year:
        end_year = catalogue.update_end_year()
    catalogue.data["dtime"] = catalogue.get_decimal_time()
    cent_mag, t_per, n_obs = get_completeness_counts(catalogue,
                                                     completeness,
                                                     dmag)
    obs_rates = n_obs / t_per
    cum_obs_rates = np.array([np.sum(obs_rates[i:])
                              for i in range(len(obs_rates))])

    if ax is None:
        fig, ax = plt.subplots(figsize=figure_size)
    else:
        fig = ax.get_figure()

    ax.semilogy(cent_mag, obs_rates, 'bo', label="Incremental")
    ax.semilogy(cent_mag, cum_obs_rates, 'rs', label="Cumulative")
    ax.set_xlim([cent_mag[0] - 0.1, cent_mag[-1] + 0.1])
    ax.set_xlabel('Magnitude')
    ax.set_ylabel('Annual Rate')
    ax.legend()
    _save_image(fig, filename, filetype, dpi)
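
The `completeness` argument above accepts two forms, following the isinstance check at the top of the function; the values below are placeholders.

import numpy as np

# A single float is one completeness magnitude, applied from the first year in the
# catalogue (the function builds a one-row table from it, see above).
completeness_single = 4.0
# A table gives time-varying completeness: rows of (year, minimum complete magnitude).
completeness_table = np.array([[1990.0, 4.0],
                               [1960.0, 5.0]])
# plot_observed_recurrence(catalogue, completeness_table, dmag=0.1)  # catalogue assumed loaded elsewhere
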
Example No. 5
def plot_observed_recurrence(catalogue,
                             completeness,
                             dmag,
                             end_year=None,
                             figure_size=DEFAULT_SIZE,
                             filename=None,
                             filetype='png',
                             dpi=300):
    """
    Plots the observed recurrence taking into account the completeness
    """
    # Get completeness adjusted recurrence table
    if isinstance(completeness, float):
        # Unique completeness
        completeness = np.array(
            [[np.min(catalogue.data['year']), completeness]])
    if not end_year:
        end_year = catalogue.update_end_year()
    catalogue.data["dtime"] = catalogue.get_decimal_time()
    cent_mag, t_per, n_obs = get_completeness_counts(catalogue, completeness,
                                                     dmag)
    obs_rates = n_obs / t_per
    cum_obs_rates = np.array(
        [np.sum(obs_rates[i:]) for i in range(len(obs_rates))])

    plt.figure(figsize=figure_size)
    plt.semilogy(cent_mag, obs_rates, 'bo', label="Incremental")
    plt.semilogy(cent_mag, cum_obs_rates, 'rs', label="Cumulative")
    plt.xlim([cent_mag[0] - 0.1, cent_mag[-1] + 0.1])
    plt.xlabel('Magnitude', fontsize=16)
    plt.ylabel('Annual Rate', fontsize=16)
    plt.legend(fontsize=14)
    plt.tick_params(labelsize=12)
    _save_image(filename, filetype, dpi)
    plt.show()
Example No. 7
def plot_recurrence_model(input_model, catalogue, completeness, dmag,
                          figure_size=(10, 8), filename=None, filetype='png',
                          dpi=300):
    """
    Plot a calculated recurrence model over an observed catalogue, adjusted for
    time-varying completeness
    """
    if figure_size is None:
        figure_size = (10, 8)
    if dmag is None:
        dmag = 0.1
    annual_rates, cumulative_rates = _get_recurrence_model(input_model)
    # Get observed annual recurrence
    if not catalogue.end_year:
        catalogue.update_end_year()
    cent_mag, t_per, n_obs = get_completeness_counts(catalogue,
                                                     completeness,
                                                     dmag)
    obs_rates = n_obs / t_per
    cum_obs_rates = np.array([np.sum(obs_rates[i:])
                              for i in range(len(obs_rates))])
    # Create plot
    plt.figure(figsize=figure_size)
    plt.semilogy(cent_mag, obs_rates, 'bo')
    plt.semilogy(annual_rates[:, 0], annual_rates[:, 1], 'b-')
    plt.semilogy(cent_mag, cum_obs_rates, 'rs')
    plt.semilogy(annual_rates[:, 0], cumulative_rates, 'r-')
    plt.grid(which='both')
    plt.xlabel('Magnitude', fontsize=16)
    plt.ylabel('Annual Rate', fontsize=16)
    plt.legend(['Observed Incremental Rate',
                'Model Incremental Rate',
                'Observed Cumulative Rate',
                'Model Cumulative Rate'], fontsize=14)
    plt.tick_params(labelsize=12)
    _save_image(filename, filetype, dpi)
Example No. 9
    def test_completeness_counts(self):
        """
        Assert that the correct counts are returned
        """
        expected_data = np.array([[3.25, 20.0, 1281.0], [3.75, 20.0, 468.0],
                                  [4.25, 35.0, 275.0], [4.75, 35.0, 116.0],
                                  [5.25, 50.0, 55.0], [5.75, 50.0, 17.0],
                                  [6.25, 80.0, 11.0], [6.75, 80.0, 2.0],
                                  [7.25, 100.0, 1.0]])
        cent_mag, t_per, n_obs = rec_utils.get_completeness_counts(
            self.catalogue, self.completeness, 0.5)
        np.testing.assert_array_almost_equal(cent_mag, expected_data[:, 0])
        np.testing.assert_array_almost_equal(t_per, expected_data[:, 1])
        np.testing.assert_array_almost_equal(n_obs, expected_data[:, 2])
        self.assertEqual(self.catalogue.get_number_events(),
                         int(np.sum(n_obs)))
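
As a quick cross-check (not part of the original test), the expected table above converts into the incremental and cumulative annual rates that the plotting examples compute from the same three arrays:

import numpy as np

# Columns of expected_data: bin-centre magnitude, observation period (yr), observed count.
expected = np.array([[3.25, 20.0, 1281.0], [3.75, 20.0, 468.0], [4.25, 35.0, 275.0],
                     [4.75, 35.0, 116.0], [5.25, 50.0, 55.0], [5.75, 50.0, 17.0],
                     [6.25, 80.0, 11.0], [6.75, 80.0, 2.0], [7.25, 100.0, 1.0]])
rates = expected[:, 2] / expected[:, 1]     # incremental annual rates, n_obs / t_per
cum_rates = np.cumsum(rates[::-1])[::-1]    # cumulative rates, equivalent to the list comprehensions above
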
Example No. 11
def main(fname: str, *, example_flag: bool = False):
    """ Compares SES against a catalogue given a .toml configuration file """

    # Print an example of configuration file
    if example_flag:
        print_example()
        exit()

    # Load the .toml file containing the information required
    config_main = toml.load(fname)
    path = os.path.dirname(fname)

    print('Root path: {:s}'.format(path))

    # Read information in the config file
    fname_catalogues = []
    for tmp_name in config_main['main']['catalogues']:
        # If not absolute
        if not re.search('^/', tmp_name):
            tmp_name = os.path.join(path, tmp_name)
            assert os.path.exists(tmp_name)
            print('Catalogue: {:s}'.format(tmp_name))
        fname_catalogues.append(tmp_name)
    calc_id = config_main['main']['calc_id']
    ses_duration = config_main['main']['ses_duration']
    polygon_fname = os.path.join(path, config_main['main']['polygon'])
    output_dir = os.path.join(path, config_main['main']['output_dir'])
    descr = config_main['main']['description']
    binw = config_main['main'].get('bin_width', 0.2)
    min_magnitude = config_main['main'].get('min_magnitude', None)

    if ('tectonic_region' not in config_main['main']
            or config_main['main']['tectonic_region'] in ['', 'none', 'None']):
        tectonic_region = None
    else:
        tectonic_region = int(config_main['main']['tectonic_region'])

    # Checking
    msg = 'The polygon file does not exist:\n{:s}'.format(polygon_fname)
    assert os.path.exists(polygon_fname), msg
    if not os.path.exists(output_dir):
        create_folder(output_dir)

    # Reading ruptures from the datastore
    dstore = read(calc_id)
    dfr = dstore.read_df('ruptures')
    dfr = gpd.GeoDataFrame(dfr,
                           geometry=gpd.points_from_xy(dfr.hypo_0, dfr.hypo_1))
    if tectonic_region is not None:
        dfr = dfr.loc[dfr['trt_smr'] == tectonic_region]

    # Reading geojson polygon and create the shapely geometry
    with open(polygon_fname) as json_file:
        data = json.load(json_file)
    polygon = data['features'][0]['geometry']
    # Avoid eval(): round-trip the GeoJSON geometry through json instead
    geom = shape(json.loads(geoj.dumps(polygon)))

    # Get region limits
    coo = []
    for poly in geom.geoms:
        coo += list(zip(*poly.exterior.coords.xy))
    coo = np.array(coo)
    minlo = np.min(coo[:, 0])
    minla = np.min(coo[:, 1])
    maxlo = np.max(coo[:, 0])
    maxla = np.max(coo[:, 1])
    region = "{:f}/{:f}/{:f}/{:f}".format(minlo, maxlo, minla, maxla)

    # Read catalogue
    for i, fname in enumerate(fname_catalogues):
        if i == 0:
            tcat = _load_catalogue(fname)
        else:
            tcat.concatenate(_load_catalogue(fname))

    # Create a dataframe from the catalogue
    dfcat = to_df(tcat)
    dfcat = gpd.GeoDataFrame(dfcat,
                             geometry=gpd.points_from_xy(
                                 dfcat.longitude, dfcat.latitude))
    dfcat.head(n=1)

    # Select the events within the polygon and convert from df to catalogue
    idx = dfcat.within(geom)
    selcat_df = dfcat.loc[idx]
    selcat = from_df(selcat_df)

    if 'completeness_table' in config_main['main']:
        ctab = config_main['main']['completeness_table']
        ctab = np.array(ctab)
    else:
        fname_config = os.path.join(path, config_main['main']['fname_config'])
        msg = 'The config file does not exist:\n{:s}'.format(fname_config)
        assert os.path.exists(fname_config), msg
        config = toml.load(fname_config)
        completeness_label = config_main['main']['completeness_label']
        _, ctab = get_mmax_ctab(config, completeness_label)

    if len(selcat_df.magnitude) < 2:
        print('The catalogue contains less than 2 earthquakes')
        return

    selcat.data["dtime"] = selcat.get_decimal_time()
    cent_mag, t_per, n_obs = get_completeness_counts(selcat, ctab, binw)
    tmp = n_obs / t_per
    hiscml_cat = np.array([np.sum(tmp[i:]) for i in range(0, len(tmp))])

    # Take into account possible multiple occurrences in the SES
    df = dfr.loc[dfr.index.repeat(dfr.n_occ)]
    assert len(df) == np.sum(dfr.n_occ)

    # SES histogram
    idx = dfr.within(geom)
    bins = np.arange(min_magnitude, 9.0, binw)
    hisr, _ = np.histogram(df.loc[idx].mag, bins=bins)
    hisr = hisr / ses_duration
    hiscml = np.array([np.sum(hisr[i:]) for i in range(0, len(hisr))])

    # Plotting
    fig = plt.figure(figsize=(7, 5))
    # - cumulative
    plt.plot(bins[:-1], hiscml, '--x', label='SES')
    plt.plot(cent_mag - binw / 2, hiscml_cat, '-.x', label='Catalogue')
    # - incremental
    plt.bar(cent_mag,
            n_obs / t_per,
            width=binw * 0.7,
            fc='none',
            ec='red',
            alpha=0.5,
            align='center')
    plt.bar(bins[1:] - binw / 2,
            hisr,
            width=binw * 0.6,
            fc='none',
            ec='blue',
            alpha=0.5)
    plt.yscale('log')
    _ = plt.xlabel('Magnitude')
    _ = plt.ylabel('Annual frequency of exceedance')
    plt.grid()
    plt.legend()
    plt.title(descr)
    # - set xlim
    xlim = list(fig.gca().get_xlim())
    xlim[0] = min_magnitude if min_magnitude is not None else xlim[0]
    plt.xlim(xlim)
    plt.savefig(os.path.join(output_dir, 'ses.png'))

    # Plot map with the SES
    fig = pygmt.Figure()
    fig.basemap(region=region, projection="M15c", frame=True)
    fig.coast(land="#666666", water="skyblue")
    pygmt.makecpt(cmap="jet", series=[0, 300])
    fig.plot(x=dfr.loc[idx].hypo_0,
             y=dfr.loc[idx].hypo_1,
             style="c",
             color=dfr.loc[idx].hypo_2,
             cmap=True,
             size=0.01 * (1.5**dfr.loc[idx].mag),
             pen="black")
    fig.show()
    fig.savefig(os.path.join(output_dir, 'map_ses.png'))

    # Plot map with catalogue
    fig = pygmt.Figure()
    fig.basemap(region=region, projection="M15c", frame=True)
    fig.coast(land="#666666", water="skyblue")
    pygmt.makecpt(cmap="jet", series=[0, 300])
    fig.plot(x=selcat_df.longitude,
             y=selcat_df.latitude,
             style="c",
             color=selcat_df.depth,
             cmap=True,
             size=0.01 * (1.5**selcat_df.magnitude),
             pen="black")
    fig.show()
    fig.savefig(os.path.join(output_dir, 'map_eqks.png'))

    # Depth histogram
    deptw = 10.
    mmin = 5.0
    dfs = df.loc[idx]
    bins = np.arange(0.0, 200.0, deptw)
    fig = plt.figure(figsize=(5, 8))
    hisr, _ = np.histogram(dfs[dfs.mag > mmin].hypo_2, bins=bins)
    hiscat, _ = np.histogram(selcat_df[selcat_df.magnitude > mmin].depth,
                             bins=bins)
    plt.barh(bins[:-1],
             hisr / sum(hisr),
             align='edge',
             height=deptw * 0.6,
             fc='lightgreen',
             ec='blue',
             label='ses')
    plt.barh(bins[:-1],
             hiscat / sum(hiscat),
             align='edge',
             height=deptw * 0.5,
             fc='white',
             ec='red',
             alpha=0.5,
             lw=1.5,
             label='catalogue')
    for dep, val in zip(bins[:-1], hiscat):
        if val > 0:
            plt.text(val / sum(hiscat), dep, s='{:.2f}'.format(val))
    plt.gca().invert_yaxis()
    _ = plt.ylabel('Depth [km]')
    _ = plt.xlabel('Count')
    plt.grid()
    plt.legend()
    plt.title(descr)
    plt.savefig(os.path.join(output_dir, 'depth_normalized.png'))
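
A sketch of the .toml configuration that main() above expects, reconstructed from the keys it reads; every value below is a placeholder and the optional keys are marked as such.

import toml

config_main = {
    "main": {
        "description": "SES vs catalogue comparison",  # placeholder
        "catalogues": ["catalogue.csv"],     # relative paths are resolved against the .toml folder
        "calc_id": 1,                        # datastore calculation id passed to read()
        "ses_duration": 10000,               # duration in years used to scale the SES histogram
        "polygon": "region.geojson",
        "output_dir": "out",
        "bin_width": 0.2,                    # optional, defaults to 0.2
        "min_magnitude": 4.5,                # optional
        # "tectonic_region": 0,              # optional, filters ruptures by trt_smr
        # Either a completeness table ...
        "completeness_table": [[1990.0, 4.0], [1960.0, 5.0]],
        # ... or "fname_config" plus "completeness_label" (see the else branch above).
    }
}
print(toml.dumps(config_main))
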
Example No. 12
def completeness_analysis(fname,
                          idxs,
                          years,
                          mags,
                          binw,
                          ref_mag,
                          bgrlim,
                          src_id,
                          folder_out_figs,
                          rewrite=False):

    tcat = _load_catalogue(fname)
    tcat = _add_defaults(tcat)
    tcat.data["dtime"] = tcat.get_decimal_time()
    print('\nSOURCE:', src_id)
    print('Catalogue contains {:d} events'.format(len(tcat.data['magnitude'])))

    # See http://shorturl.at/adsvA
    fname_disp = 'dispositions.npy'
    perms = np.load(fname_disp)
    mags = np.flipud(np.load('mags.npy'))
    years = np.load('years.npy')

    wei_conf = {'magnitude_interval': binw, 'reference_magnitude': 0.0}
    weichert = Weichert()
    rate = -1e10
    save = []
    mags = np.array(mags)

    for prm in perms:

        idx = prm.astype(int)
        tmp = np.array([(y, m) for y, m in zip(years, mags[idx])])
        ctab = clean_completeness(tmp)

        try:
            cent_mag, t_per, n_obs = get_completeness_counts(tcat, ctab, binw)
            bval, sigb, aval, siga = weichert.calculate(tcat, wei_conf, ctab)

            tmp_rate = 10**(-bval * ref_mag + aval)
            if tmp_rate > rate and bval <= bgrlim[1] and bval >= bgrlim[0]:
                rate = tmp_rate
                save = [aval, bval, rate, ctab]

                gwci = get_weichert_confidence_intervals
                lcl, ucl, ex_rates, ex_rates_scaled = gwci(
                    cent_mag, n_obs, t_per, bval)

                mmax = max(tcat.data['magnitude'])
                wei = [
                    cent_mag, n_obs, binw, t_per, ex_rates_scaled, lcl, ucl,
                    mmax, aval, bval
                ]

        except (RuntimeWarning, UserWarning):
            logging.debug('Skipping %s', ctab)

        except Exception:
            # Any other failure for this completeness permutation is skipped too
            logging.debug('Skipping %s', ctab)

    fmt = 'Maximum annual rate for {:.1f}: {:.4f}'
    print(fmt.format(ref_mag, save[2]))
    fmt = 'GR a and b                 : {:.4f} {:.4f}'
    print(fmt.format(save[0], save[1]))
    print('Completeness:\n', save[3])

    _weichert_plot(*wei, src_id=src_id)

    # Saving figure
    if folder_out_figs is not None:
        ext = 'png'
        fmt = 'fig_mfd_{:s}.{:s}'
        figure_fname = os.path.join(folder_out_figs, fmt.format(src_id, ext))
        plt.savefig(figure_fname, format=ext)
        plt.close()

    return save
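
The dispositions.npy, mags.npy and years.npy files loaded above are not shown here; the sketch below illustrates the assumed layout, in which each row of perms selects one trial completeness magnitude per year (the clean_completeness step is omitted).

import numpy as np

years = np.array([2000.0, 1980.0, 1960.0])    # completeness years (assumed values)
mags = np.array([4.0, 4.5, 5.0, 5.5])         # candidate completeness magnitudes (assumed values)
perms = np.array([[0, 1, 2],                  # -> [[2000, 4.0], [1980, 4.5], [1960, 5.0]]
                  [0, 2, 3]])                 # -> [[2000, 4.0], [1980, 5.0], [1960, 5.5]]
for prm in perms:
    idx = prm.astype(int)
    tmp = np.array([(y, m) for y, m in zip(years, mags[idx])])  # one trial completeness table
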
Example No. 13
def compute_a_value(fname_input_pattern: str,
                    bval: float,
                    fname_config: str,
                    folder_out: str,
                    use: str = '',
                    folder_out_figs: str = None,
                    plt_show=False):
    """
    This function assigns an a-value to each source whose file is selected by
    the provided `fname_input_pattern`.
    """

    if len(use) > 0:
        use = get_list(use)

    # Processing input parameters
    bval = float(bval)
    if folder_out is not None:
        create_folder(folder_out)
    if folder_out_figs is not None:
        create_folder(folder_out_figs)

    if isinstance(fname_input_pattern, str):
        fname_list = glob(fname_input_pattern)
    else:
        fname_list = fname_input_pattern

    # Parsing config
    model = toml.load(fname_config)
    binw = model['bin_width']

    # Processing files
    for fname in sorted(fname_list):

        # Get source ID
        src_id = _get_src_id(fname)
        if len(use) > 0 and src_id not in use:
            continue
        print(fname)

        mmax, ctab = get_mmax_ctab(model, src_id)

        # Processing catalogue
        tcat = _load_catalogue(fname)

        if tcat is None or len(tcat.data['magnitude']) < 2:
            continue

        # Completeness analysis
        tcat = _add_defaults(tcat)
        tcat.data["dtime"] = tcat.get_decimal_time()
        try:
            cent_mag, t_per, n_obs = get_completeness_counts(tcat, ctab, binw)
            if cent_mag is None:
                print('   Completeness analysis failed')
                continue
        except ValueError:
            print('   Completeness analysis failed')
            continue

        df = pd.DataFrame()
        df['mag'] = cent_mag
        df['deltaT'] = t_per
        df['nobs'] = n_obs
        fout = os.path.join(folder_out, 'occ_count_zone_{:s}'.format(src_id))
        df.to_csv(fout, index=False)

        # Computing GR a
        if 'sources' not in model:
            model['sources'] = {}
        if src_id not in model['sources']:
            model['sources'][src_id] = {}

        exrs = get_exrs(df, bval)
        aval = get_agr(df.mag[0] - binw / 2, bval, exrs[0])

        tmp = "{:.5e}".format(aval)
        model['sources'][src_id]['agr_counting'] = float(tmp)

        tmp = "{:.5e}".format(bval)
        model['sources'][src_id]['bgr_counting'] = float(tmp)

        gwci = get_weichert_confidence_intervals
        lcl, ucl, ex_rates, ex_rates_scaled = gwci(cent_mag, n_obs, t_per,
                                                   bval)

        _ = plt.figure()
        ax = plt.gca()
        plt.plot(cent_mag, n_obs / t_per, 'o', markerfacecolor='none')
        plt.plot(cent_mag - binw / 2,
                 ex_rates_scaled,
                 's',
                 markerfacecolor='none',
                 color='red')

        plt.plot(cent_mag - binw / 2, lcl, '--', color='black')
        plt.plot(cent_mag - binw / 2, ucl, '--', color='black')

        xmag = numpy.arange(cent_mag[0] - binw / 2, mmax - 0.01 * binw,
                            binw / 2)
        exra = (10.0**(aval - bval * xmag) - 10.0**(aval - bval * mmax))
        plt.plot(xmag, exra, '--', lw=3, color='green')

        plt.yscale('log')
        plt.xlabel('Magnitude')
        plt.ylabel('Annual rate of exceedance')
        plt.text(0.75,
                 0.95,
                 'Fixed b_GR = {:.2f}'.format(bval),
                 transform=ax.transAxes)
        plt.grid(which='major', color='grey')
        plt.grid(which='minor', linestyle='--', color='lightgrey')
        plt.title(src_id)

        if plt_show:
            plt.show()

        # Saving figures
        if folder_out_figs is not None:
            ext = 'png'
            fmt = 'fig_mfd_{:s}.{:s}'
            figure_fname = os.path.join(folder_out_figs,
                                        fmt.format(src_id, ext))

            plt.savefig(figure_fname, format=ext)
            plt.close()

    # Saving results into the config file
    with open(fname_config, 'w') as fou:
        fou.write(toml.dumps(model))
        print('Updated {:s}'.format(fname_config))
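
A worked illustration of the truncated Gutenberg-Richter exceedance curve plotted above (the same expression as exra); the a, b and mmax values here are made up.

import numpy as np

aval, bval, mmax = 4.0, 1.0, 7.5
xmag = np.arange(4.0, mmax, 0.1)
# Annual rate of exceedance for a GR relation truncated at mmax (formula from the plot above).
exra = 10.0 ** (aval - bval * xmag) - 10.0 ** (aval - bval * mmax)
# exra[0] is about 0.9997 events/yr above M 4.0 and tends to zero as xmag approaches mmax.
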
Example No. 14
def weichert_analysis(fname_input_pattern,
                      fname_config,
                      folder_out=None,
                      folder_out_figs=None,
                      skip=[],
                      binw=None,
                      plt_show=False):
    """
    Computes GR parameters for a set of catalogues stored in a .csv file

    :param fname_input_pattern:
        It can be either a string (defining a pattern) or a list of
        .csv files. Each file name must end with the source ID, preceded by
        a `_` delimiter
    :param fname_config:
        The name of the .toml configuration file
    :param folder_out:
        The folder where to store the files with the counting of occurrences
    :param folder_out_figs:
        The folder where to store the figures
    :param skip:
        A list with the IDs of the sources to skip
    """

    if folder_out is not None:
        create_folder(folder_out)
    if folder_out_figs is not None:
        create_folder(folder_out_figs)

    # Parsing config
    if fname_config is not None:
        model = toml.load(fname_config)

    # Keep an explicitly passed bin width; otherwise read it from the config, else use 0.1
    if binw is None:
        binw = model['bin_width'] if fname_config is not None else 0.1

    if isinstance(fname_input_pattern, str):
        fname_list = glob(fname_input_pattern)
    else:
        fname_list = fname_input_pattern

    # Processing files
    for fname in sorted(fname_list):

        print(fname)

        # Get source ID
        src_id = _get_src_id(fname)
        if src_id in skip:
            print("   skipping")
            continue

        if 'sources' in model:
            if (src_id in model['sources']
                    and 'mmax' in model['sources'][src_id]):
                mmax = model['sources'][src_id]['mmax']
            else:
                mmax = model['default']['mmax']
            if (src_id in model['sources']
                    and 'completeness_table' in model['sources'][src_id]):
                key_tmp = 'completeness_table'
                ctab = numpy.array(model['sources'][src_id][key_tmp])
                print('Using source specific completeness')
            else:
                ctab = numpy.array(model['default']['completeness_table'])
        else:
            mmax = model['default']['mmax']
            ctab = numpy.array(model['default']['completeness_table'])

        # Processing catalogue
        tcat = _load_catalogue(fname)

        if tcat is None or len(tcat.data['magnitude']) < 2:
            print('    Source {:s} has less than 2 eqks'.format(src_id))
            continue

        tcat.data["dtime"] = tcat.get_decimal_time()
        cent_mag, t_per, n_obs = get_completeness_counts(tcat, ctab, binw)

        if folder_out is not None:
            df = pd.DataFrame()
            df['mag'] = cent_mag
            df['deltaT'] = t_per
            df['nobs'] = n_obs
            fmt = 'occ_count_zone_{:s}'
            fout = os.path.join(folder_out, fmt.format(src_id))
            df.to_csv(fout, index=False)

        # Computing GR a and b
        tcat = _add_defaults(tcat)
        weichert_config = {
            'magnitude_interval': binw,
            'reference_magnitude': 0.0
        }
        weichert = Weichert()
        bval_wei, sigmab, aval_wei, sigmaa = weichert.calculate(
            tcat, weichert_config, ctab)

        # Computing confidence intervals
        gwci = get_weichert_confidence_intervals
        lcl, ucl, ex_rates, ex_rates_scaled = gwci(cent_mag, n_obs, t_per,
                                                   bval_wei)

        if 'sources' not in model:
            model['sources'] = {}
        if src_id not in model['sources']:
            model['sources'][src_id] = {}

        tmp = "{:.5e}".format(aval_wei)
        model['sources'][src_id]['agr_weichert'] = float(tmp)
        tmp = "{:.3f}".format(bval_wei)
        model['sources'][src_id]['bgr_weichert'] = float(tmp)

        _ = plt.figure()
        ax = plt.gca()
        plt.plot(cent_mag, n_obs / t_per, 'o', markerfacecolor='none')
        plt.plot(cent_mag - binw / 2,
                 ex_rates_scaled,
                 's',
                 markerfacecolor='none',
                 color='red')

        plt.plot(cent_mag - binw / 2, lcl, '--', color='darkgrey')
        plt.plot(cent_mag - binw / 2, ucl, '--', color='darkgrey')

        xmag = numpy.arange(cent_mag[0] - binw / 2, mmax - 0.01 * binw,
                            binw / 2)
        exra = (10.0**(aval_wei - bval_wei * xmag) -
                10.0**(aval_wei - bval_wei * mmax))
        plt.plot(xmag, exra, '--', lw=3, color='green')

        plt.yscale('log')
        plt.xlabel('Magnitude')
        plt.ylabel('Annual rate of exceedance')
        plt.text(0.75,
                 0.95,
                 'b_GR = {:.2f}'.format(bval_wei),
                 transform=ax.transAxes)
        plt.grid(which='major', color='grey')
        plt.grid(which='minor', linestyle='--', color='lightgrey')
        plt.title(src_id)

        if plt_show:
            plt.show()

        # Saving figures
        if folder_out_figs is not None:
            ext = 'png'
            fmt = 'fig_mfd_{:s}.{:s}'
            figure_fname = os.path.join(folder_out_figs,
                                        fmt.format(src_id, ext))

            plt.savefig(figure_fname, format=ext)
            plt.close()

    # Saving results into the config file
    if fname_config is not None:
        with open(fname_config, 'w') as f:
            f.write(toml.dumps(model))
            print('Updated {:s}'.format(fname_config))
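
Finally, a hypothetical invocation of weichert_analysis() with placeholder paths and source IDs, matching the parameters documented in its docstring.

# Hypothetical call; the paths and the skipped source id are placeholders, not from the original.
weichert_analysis("./catalogues/subcatalogue_*.csv",
                  "config/model.toml",
                  folder_out="out/counts",
                  folder_out_figs="out/figs",
                  skip=["src01"],
                  binw=0.1,
                  plt_show=False)
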