Example #1
def oq():
    args = set(sys.argv[1:])
    if 'engine' not in args and 'dbserver' not in args:
        # oq engine and oq dbserver define their own log levels
        level = logging.DEBUG if 'debug' in args else logging.INFO
        logging.basicConfig(level=level)
    sap.run(commands, prog='oq')
Example #2
def oq():
    args = set(sys.argv[1:])
    if 'engine' not in args and 'dbserver' not in args:
        # oq engine and oq dbserver define their own log levels
        level = logging.DEBUG if 'debug' in args else logging.INFO
        logging.basicConfig(level=level)

    warnings.simplefilter(  # make sure we do not make efficiency errors
        "error",
        category=sparse.base.SparseEfficiencyWarning)
    sap.run(commands, prog='oq')
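
All the examples in this collection follow the same openquake.baselib.sap convention: sap.run(func) builds a command-line interface from the signature of func, and the help text for each parameter is attached as a function attribute of the same name. A minimal sketch of the pattern, using a hypothetical greet command:

from openquake.baselib import sap


def greet(name, *, shout: bool = False):
    """Print a greeting for the given name"""
    msg = 'Hello {:s}!'.format(name)
    print(msg.upper() if shout else msg)


greet.name = 'Name of the person to greet'
greet.shout = 'When true, print the greeting in upper case'

if __name__ == '__main__':
    sap.run(greet)

As Examples #1 and #2 show, sap.run can also dispatch over a dictionary of such commands.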
Example #3
        gdf_poly = gpd.GeoDataFrame(df, geometry='Polygon', crs='epsg:4326')
        # note: geopandas >= 0.10 renames the `op` keyword to `predicate`
        within = gpd.sjoin(gdf, gdf_poly, op='within')
        if len(within.magnitude) < 1:
            continue
        mmax = np.max(within.magnitude.to_numpy())
        lab = 'mmax_{:s}'.format(cat_lab)
        model['sources'][str(poly.id)][lab] = float(mmax)

        print('{:10s}: {:5.2f}'.format(src_id, mmax))

    # Saving results into the config file
    with open(fname_conf, 'w') as f:
        f.write(toml.dumps(model))
        print('Updated {:s}'.format(fname_conf))


descr = 'The name of the shapefile with the polygons'
mmax_per_zone.fname_poly = descr
descr = 'The name of the .csv file with the catalogue'
mmax_per_zone.fname_cat = descr
descr = 'The name of the configuration file'
mmax_per_zone.fname_conf = descr
descr = 'The label used to identify the catalogue'
mmax_per_zone.cat_lab = descr
mmax_per_zone.use = 'A list with the IDs of sources that should be considered'
msg = 'A list with the IDs of sources that should not be considered'
mmax_per_zone.skip = msg

if __name__ == '__main__':
    sap.run(mmax_per_zone)
Example #4
                    tmfd = ged(src.mfd, nmfd.bin_width)
                    nmfd.stack(tmfd)

            occ = np.array(nmfd.get_annual_occurrence_rates())

            bgr = model["sources"][src_id]["bgr_weichert"]
            agr = model["sources"][src_id]["agr_weichert"]

            tmp = occ[:, 0] - binw
            mfd = 10.0**(agr - bgr * tmp[:-1]) - 10.0**(agr - bgr *
                                                        (tmp[:-1] + binw))

            _ = plt.figure(figsize=(8, 6))
            plt.plot(occ[:, 0], occ[:, 1], 'o')
            plt.plot(tmp[:-1] + binw / 2, mfd, 'x')
            print(mfd)
            plt.title(fname)
            plt.xlabel('Magnitude')
            plt.ylabel('Annual occurrence rate')
            plt.yscale('log')

            plt.show()


check_mfds.fname_input_pattern = "Pattern for input .xml files"
check_mfds.fname_config = "Name of the configuration file"
check_mfds.src_id = "The ID of the source to use"

if __name__ == '__main__':
    sap.run(check_mfds)
Example #5
        mean_ref, std_ref = pmap_ref[site_id_ref[lonlat]].array.T
        err = numpy.sqrt(std**2 + std_ref**2)
        for imt in imtls:
            sl = imtls(imt)
            ok = (numpy.abs(mean[sl] - mean_ref[sl]) < nsigma * err[sl]).all()
            if not ok:
                md = (numpy.abs(mean[sl] - mean_ref[sl])).max()
                plt.title('point=%s, imt=%s, maxdiff=%.2e' % (lonlat, imt, md))
                plt.loglog(imtls[imt],
                           mean_ref[sl] + std_ref[sl],
                           label=str(calc_ref),
                           color='black')
                plt.loglog(imtls[imt],
                           mean_ref[sl] - std_ref[sl],
                           color='black')
                plt.loglog(imtls[imt],
                           mean[sl] + std[sl],
                           label=str(calc),
                           color='red')
                plt.loglog(imtls[imt], mean[sl] - std[sl], color='red')
                plt.legend()
                plt.show()


compare_mean_curves.calc_ref = 'first calculation'
compare_mean_curves.calc = 'second calculation'
compare_mean_curves.nsigma = 'tolerance as number of sigma'

if __name__ == '__main__':
    sap.run(compare_mean_curves)
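
The acceptance test at the core of compare_mean_curves accepts two mean hazard curves when, at every intensity level, |mean - mean_ref| < nsigma * err with err = sqrt(std**2 + std_ref**2). A standalone sketch of the test on toy arrays (hypothetical values):

import numpy

mean = numpy.array([0.10, 0.05])
std = numpy.array([0.01, 0.01])
mean_ref = numpy.array([0.11, 0.05])
std_ref = numpy.array([0.01, 0.02])
nsigma = 3

err = numpy.sqrt(std**2 + std_ref**2)  # combined uncertainty
ok = (numpy.abs(mean - mean_ref) < nsigma * err).all()
print(ok)  # True: the curves agree within 3 sigma at every level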
Example #6
    f = h5py.File(fname, 'r')
    keys = sorted(f.keys())
    print('Total number of earthquakes: {:d}'.format(len(f[keys[0]])))

    for i1, k1 in enumerate(keys):
        for i2 in range(i1, len(keys)):
            k2 = keys[i2]
            if k1 == k2:
                continue
            chk = np.logical_and(f[k1], f[k2])
            if any(chk):
                print('{:s} and {:s} have common eqks'.format(k1, k2))
                print(np.sum(chk))  # number of events in common

    # Check total numbers
    tot = 0
    for key in f:
        smm = sum(f[key])
        print('{:10s}: {:5d}'.format(key, smm))
        tot += smm
    print(tot, '/', len(f[keys[0]]))

    # Close hdf5
    f.close()


check.fname = "Name of the .hdf5 file with the catalogue classification"

if __name__ == "__main__":
    sap.run(check)
Example #7
                    tr_fname=tr_fname,
                    subcatalogues=subcatalogues,
                    olab='_uh',
                    save_af=True,
                    fix_defaults=True)

    declustering_meth = 'GardnerKnopoffType1'
    declustering_params = {
        'time_distance_window': 'GruenthalWindow',
        'fs_time_prop': 0.9
    }
    _ = decluster(fname,
                  declustering_meth,
                  declustering_params,
                  output_folder,
                  labels=labels,
                  tr_fname=tr_fname,
                  subcatalogues=subcatalogues,
                  olab='_gr',
                  save_af=True,
                  fix_defaults=True)


catalogue_declustering.fname = 'Name of the .csv formatted catalogue'
catalogue_declustering.output_folder = 'Path to the output folder'
msg = 'Boolean, when true it creates subcatalogues'
catalogue_declustering.subcatalogues = msg

if __name__ == '__main__':
    sap.run(catalogue_declustering)
Example #8
    dct = {key: idx for idx, key in enumerate(cat.data['eventID'])}

    idx = []
    targ = []
    for line in open(eqlist, 'r'):
        eqid, target = line.split(',')
        idx.append(dct[eqid])
        targ.append(target)

    f = h5py.File(treg, "r")
    f2 = h5py.File(treg2, "w")
    for key in f.keys():
        tmp = f[key][:]
        for eid, tar in zip(idx, targ):
            if re.search(key, tar):
                tmp[eid] = True
            else:
                tmp[eid] = False
        f2[key] = tmp
    f.close()
    f2.close()

change.cat_pickle_filename = 'pickled catalogue'
change.treg = 'TR hdf5 filename'
msg = 'list of events to change. format <eventID>,<target class>'
change.eqlist = msg


if __name__ == "__main__":
    sap.run(change)
Example #9
    for src_id in model['sources']:
        if exclude is not None and src_id in exclude:
            continue
        else:
            print("src_id:", src_id, " ", method)
            if use == "*" or src_id in use:
                output['sources'][src_id]['bgr'] = \
                    output['sources'][src_id][labb]
                output['sources'][src_id]['agr'] = \
                    output['sources'][src_id][laba]

    # Saving results into the config file
    with open(fname_conf, 'w') as f:
        f.write(toml.dumps(output))
        print('Updated {:s}'.format(fname_conf))


descr = 'The name of the configuration file'
set_gr_params.fname_conf = descr
descr = 'Can be either a string with * or with source IDs separated by commas'
set_gr_params.use = descr
descr = 'The label with the method used to infer these parameters: '
descr += 'e.g. weichert, counting'
set_gr_params.method = descr
descr = 'A string with source IDs separated by commas'
set_gr_params.skip = descr

if __name__ == '__main__':
    sap.run(set_gr_params)
Example #10
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake.  If not, see <http://www.gnu.org/licenses/>.
from openquake.baselib import sap
from openquake.risklib.countries import code2country

# example: utils/addcol.py country=VEN Exposure_Res_Venezuela.csv


def addcol(namevalue, fnames):
    name, value = namevalue.split('=')
    if name == 'country':
        assert value in code2country, value
    for fname in fnames:
        header, *lines = open(fname).readlines()
        out = [header.rstrip() + ',' + name]
        for line in lines:
            out.append(line.rstrip() + ',' + value)
        with open(fname, 'w') as f:
            for line in out:
                f.write(line + '\n')
        print('Added %s to %s' % (namevalue, fname))


addcol.namevalue = 'string of the form column_name=column_value'
addcol.fnames = dict(help='CSV files to update', nargs='+')

if __name__ == '__main__':
    sap.run(addcol)
Example #11
    # Get catalogue
    catalogue = get_catalogue(cat_pickle_fname, treg_filename, label)

    # smoothing
    values, smooth = smoothing(mlo, mla, mde, catalogue, hspa, vspa,
                               out_hdf5_smoothing_fname)

    # Spatial index
    r, proj = spatial_index(smooth)

    # magnitude-frequency distribution
    mfd = TruncatedGRMFD(min_mag=mmin,
                         max_mag=mmax,
                         bin_width=0.1,
                         a_val=agr,
                         b_val=bgr)

    # Create all the ruptures - the probability of occurrence is for one year
    # in this case
    _ = create_ruptures(mfd, dips, sampling, msr, asprs, float_strike,
                        float_dip, r, values, ohs, 1., hdf5_filename,
                        uniform_fraction, proj, idl, align, True)


calculate_ruptures.ini_fname = '.ini filename'
calculate_ruptures.only_plt = 'Only plotting'
calculate_ruptures.ref_fdr = 'Reference folder for paths'

if __name__ == "__main__":
    sap.run(calculate_ruptures)
Example #12
    model = toml.load(fname_conf)
    output = copy.copy(model)

    default = model['default'][key]

    # Iterate over sources
    for src_id in model['sources']:

        if (len(use) and src_id not in use) or (src_id in skip):
            continue

        output['sources'][src_id][key] = default

    # Saving results into the config file
    with open(fname_conf, 'w') as f:
        f.write(toml.dumps(output))
        print('Updated {:s}'.format(fname_conf))


descr = 'The name of the configuration file'
set_property.fname_conf = descr
descr = 'Key defining the property'
set_property.key = descr
descr = 'A list of source IDs to be used'
set_property.use = descr
descr = 'A list of source IDs to be skipped'
set_property.skip = descr

if __name__ == '__main__':
    sap.run(set_property)
Example #13
    print('# earthquakes: {:d}'.format(len(catalogue.data['longitude'])))

    # Sub-catalogue
    csv_filename = out_cata + "_TR_unclassified.csv"
    csv_filename = os.path.join(path_out, csv_filename)

    # Write the purged catalogue
    csvcat = CsvCatalogueWriter(csv_filename)
    csvcat.write_file(catalogue)
    print("Catalogue successfully written to %s" % csv_filename)


def create_sub_cata(pck_fname, treg_fname, *, out_cata='cat', path_out='.'):
    """
    Create a subcatalogue
    """
    aaa, alen = get_treg(treg_fname)
    tot_lab = create_sub_catalogue(alen, aaa, pck_fname, treg_fname, out_cata,
                                   path_out)
    get_unclassified(tot_lab, pck_fname, out_cata, path_out)


create_sub_cata.pck_fname = 'Name of the pickle file with the catalogue'
msg = 'Name of the .hdf5 file with the catalogue classification'
create_sub_cata.treg_fname = msg
create_sub_cata.out_cata = 'Prefix for the output files [cat]'
create_sub_cata.path_out = 'Output path [./]'

if __name__ == "__main__":
    sap.run(create_sub_cata)
Example #14
            else:
                fmclassification[mclass] = 1
                dip_1[mclass] = [row['dip1']]
                dip_2[mclass] = [row['dip2']]
                strike_1[mclass] = [row['strike1']]
                strike_2[mclass] = [row['strike2']]

        title = "Source: {:s}".format(src_id)
        _ = plot_histogram(gs0[0, 0], fmclassification, title)
        plot_xx(gs0[0, 1], dip_1, dip_2, strike_1, strike_2)

        stk1 = get_simpler(strike_1)
        stk2 = get_simpler(strike_2)
        dip1 = get_simpler(dip_1)
        dip2 = get_simpler(dip_2)
        plot_yy(gs0[1, 0], dip1, dip2, stk1, stk2)

        plot_density_simple(gs0[1, 1], dip1, dip2, stk1, stk2)

        plt.savefig(figure_name, format=ext)
        plt.close()

    return fmclassification


process_gcmt_datafames.fname_folder = 'Name of the folder with input files'
process_gcmt_datafames.folder_out = 'Name of the output folder'

if __name__ == '__main__':
    sap.run(process_gcmt_datafames)

Example #15
def process(contacts_shp,
            outpath,
            datafolder,
            sidx_fname,
            boundaries_shp,
            imt_str,
            inland_shp,
            models_list=None,
            only_buffers=False):
    """
    This function processes all the models listed in the mosaic.DATA dictionary
    and creates homogenised curves.
    """
    process_maps(contacts_shp, outpath, datafolder, sidx_fname, boundaries_shp,
                 imt_str, inland_shp, models_list, only_buffers)


process.contacts_shp = 'Name of shapefile with contacts'
process.outpath = 'Output folder'
process.datafolder = 'Folder with the mosaic repository'
process.sidx_fname = 'Rtree spatial index file with ref. grid'
process.boundaries_shp = 'Name of shapefile with boundaries'
process.imt_str = 'String with the intensity measure type'
process.inland_shp = 'Name of shapefile with inland territories'
process.models_list = 'List of models to be processed'

if __name__ == "__main__":
    sap.run(process)
Example #16
    # Read polygons
    polygons_gdf = gpd.read_file(fname_polygons)

    # Select point in polygon
    columns = [
        'eventID', 'year', 'month', 'day', 'magnitude', 'longitude',
        'latitude', 'depth'
    ]

    # Iterate over sources
    for idx, poly in polygons_gdf.iterrows():
        df = pd.DataFrame({'Name': [poly.id], 'Polygon': [poly.geometry]})
        gdf_poly = gpd.GeoDataFrame(df, geometry='Polygon', crs='epsg:4326')
        # note: geopandas >= 0.10 renames the `op` keyword to `predicate`
        within = gpd.sjoin(gdf, gdf_poly, op='within')
        # Create output file
        if isinstance(poly.id, int):
            fname = 'subcatalogue_zone_{:d}.csv'.format(poly.id)
        else:
            fname = 'subcatalogue_zone_{:s}.csv'.format(poly.id)
        out_fname = os.path.join(folder_out, fname)
        within.to_csv(out_fname, index=False, columns=columns)


create_subcatalogues.fname_polygons = 'Name of a shapefile with polygons'
create_subcatalogues.fname_cat = 'Name of the .csv file with the catalog'
create_subcatalogues.folder_out = 'Name of the output folder'

if __name__ == '__main__':
    sap.run(create_subcatalogues)
Example #17
    if min_mmax is not None:
        min_mmax = float(min_mmax)
    mmax_delta = float(mmax_delta)

    # Iterate over sources
    for src_id in model['sources']:
        mmax = 0.
        for param in model['sources'][src_id]:
            if re.search('^mmax_', param):
                mmax = max(mmax, model['sources'][src_id][param])
        if min_mmax is not None:
            mmax = max(mmax, min_mmax)
        output['sources'][src_id]['mmax'] = mmax + mmax_delta

    # Saving results into the config file
    with open(fname_conf, 'w') as f:
        f.write(toml.dumps(output))
        print('Updated {:s}'.format(fname_conf))


descr = 'The name of the configuration file'
set_mmax_plus_delta.fname_conf = descr
descr = 'The increment to apply to mmax observed'
set_mmax_plus_delta.mmax_delta = descr
descr = 'The minimum mmax assigned'
set_mmax_plus_delta.min_mmax = descr

if __name__ == '__main__':
    sap.run(set_mmax_plus_delta)
Example #18
                    tlist.append([w, m])
            var['hypocenter_distribution'] = tlist

    if len(conf) > 0:
        # Saving results into the config file
        with open(conf, 'w') as fou:
            fou.write(toml.dumps(model))
            print('Updated {:s}'.format(conf))


analyze_hypocentral_depth.folder_subcat = 'The folder with the subcatalogues'
analyze_hypocentral_depth.depth_min = 'The minimum hypocentral depth [km]'
analyze_hypocentral_depth.depth_max = 'The maximum hypocentral depth [km]'
analyze_hypocentral_depth.depth_binw = 'The depth bin width [km]'
descr = "The name of the folder where to store figures"
analyze_hypocentral_depth.folder_out_figs = descr
descr = "[true/false] when true show figures on screen"
analyze_hypocentral_depth.show = descr
descr = "String with the bins limits. Overrides depth-min, depth-max, "
descr += "depth-binw"
analyze_hypocentral_depth.depth_bins = descr
descr = "A .toml file. When provided, updated with new info"
analyze_hypocentral_depth.conf = descr
descr = "Source IDs to use"
analyze_hypocentral_depth.use = descr
descr = "Source IDs to skip"
analyze_hypocentral_depth.skip = descr

if __name__ == '__main__':
    sap.run(analyze_hypocentral_depth)
Example #19
    from urllib import urlopen
except ImportError:
    from urllib.request import urlopen
from openquake.baselib import sap


def viewlog(calc_id, host='localhost', port=8000):
    """
    Extract the log of the given calculation ID from the WebUI
    """
    base_url = 'http://%s:%s/v1/calc/' % (host, port)
    start = 0
    psize = 10  # page size
    try:
        while True:
            url = base_url + '%d/log/%d:%d' % (calc_id, start, start + psize)
            rows = json.load(urlopen(url))
            for row in rows:
                print(' '.join(row))
            start += len(rows)
            time.sleep(1)
    except BaseException:
        pass


if __name__ == '__main__':
    viewlog.calc_id = 'calculation ID'
    viewlog.host = 'hostname of the engine server'
    viewlog.port = 'port of the engine server'
    sap.run(viewlog)
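
A hypothetical invocation: python viewlog.py 42 polls http://localhost:8000/v1/calc/42/log/... once per second and prints any new log rows until the user interrupts or the server stops responding.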
Example #20
        print('src_id: {:s} '.format(src_id), end='')
        if ('sources' in model and src_id in model['sources']
                and 'completeness_table' in model['sources'][src_id]):
            print(' source specific completeness')
            ctab = numpy.array(model['sources'][src_id]['completeness_table'])
            ctab = ctab.astype(float)  # numpy.float was removed in NumPy 1.24
        else:
            print(' default completeness')
            ctab = numpy.array(model['default']['completeness_table'])
            ctab = ctab.astype(float)

        print(ctab)
        _plot_ctab(ctab)

        ext = 'png'
        figure_fname = os.path.join(outdir,
                                    'fig_mtd_{:s}.{:s}'.format(src_id, ext))
        plt.savefig(figure_fname, format=ext)
        plt.close()


descr = 'Pattern for the .csv catalogue files'
subcatalogues_analysis.fname_input_pattern = descr
descr = 'Name of the .toml file with configuration parameters'
subcatalogues_analysis.fname_config = descr
subcatalogues_analysis.outdir = 'Name of the output folder'
subcatalogues_analysis.yealim = 'Year range used in the plot'

if __name__ == '__main__':
    sap.run(subcatalogues_analysis)
Example #21
    dstore_small = datastore.read(calc_small)
    sitecol_big = dstore_big['sitecol']
    sitecol_small = dstore_small['sitecol']
    site_id_big = {(lon, lat): sid for sid, lon, lat in zip(
        sitecol_big.sids, sitecol_big.lons, sitecol_big.lats)}
    site_id_small = {(lon, lat): sid for sid, lon, lat in zip(
        sitecol_small.sids, sitecol_small.lons, sitecol_small.lats)}
    common = set(site_id_big) & set(site_id_small)
    if not common:
        raise RuntimeError('There are no common sites between calculation '
                           '%d and %d' % (calc_big, calc_small))
    sids_small = [site_id_small[lonlat] for lonlat in common]
    pmap_big = PmapGetter(dstore_big).get_mean()  # USA
    pmap_small = PmapGetter(dstore_small, sids=sids_small).get_mean()  # Cal
    for lonlat in common:
        pmap_big[site_id_big[lonlat]] |= pmap_small.get(
            site_id_small[lonlat], 0)
    out = 'combine_%d_%d.hdf5' % (calc_big, calc_small)
    with hdf5.File(out, 'w') as h5:
        h5['hcurves/mean'] = pmap_big
        h5['oqparam'] = dstore_big['oqparam']
        h5['sitecol'] = dstore_big['sitecol']
    print('Generated %s' % out)


combine_mean_curves.calc_big = 'first calculation'
combine_mean_curves.calc_small = 'second calculation'

if __name__ == '__main__':
    sap.run(combine_mean_curves)
Example #22
import numpy as np
import pandas as pd
from openquake.baselib import sap


def fix_catalogue(fname):

    df = pd.read_csv(fname)
    print(df.head())

    condition = ((df.month < 1) | (df.month > 12))
    df.month = np.where(condition, 1, df.month)

    condition = ((df.day < 1) | (df.day > 31))
    df.day = np.where(condition, 1, df.day)

    df.to_csv(fname)


if __name__ == '__main__':
    sap.run(fix_catalogue)
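
The same numpy.where clamping idiom, shown standalone on toy data (hypothetical values). Note that df.to_csv(fname) as written above also saves the DataFrame index as an extra column; passing index=False avoids that.

import numpy as np
import pandas as pd

# A toy catalogue with out-of-range dates
df = pd.DataFrame({'month': [0, 6, 13], 'day': [15, 0, 32]})
df.month = np.where((df.month < 1) | (df.month > 12), 1, df.month)
df.day = np.where((df.day < 1) | (df.day > 31), 1, df.day)
print(df)  # invalid months and days are reset to 1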
Example #23
        fxml = os.path.join(output_folder, '{:s}.xml'.format(mag))

        # Set the source ID
        mags = re.sub('\\.', 'pt', mag)
        sid = 'src_{:s}_{:s}'.format(label, mags)
        name = 'Ruptures for mag bin {:s}'.format(mags)

        # Creates a non-parametric seismic source
        src = create_source(grp, float(mag), sid, name, trt)

        # Create source group
        sgrp = SourceGroup(trt, [src])

        # Create source model
        name = 'Source model for {:s} magnitude {:s}'.format(label, mags)
        mdl = SourceModel([sgrp], name, investigation_t)

        # Write source model
        write_source_model(fxml, mdl, mag)

    f.close()


create.label = 'TR label'
create.rupture_hdf5_fname = 'hdf5 file with the ruptures'
create.output_folder = 'Name of the output folder'
create.investigation_t = 'Investigation time'

if __name__ == '__main__':
    sap.run(create)
Example #24
            if subzones:
                tmp = get_param(srcd_sz, model['default'], key)
                npd = _get_nodal_plane_distribution(tmp)

            loc = Point(pnt.lon, pnt.lat)
            src = PointSource(sid, name, trt, mfd, rms, msr, rar, tom,
                              usd, lsd, loc, npd, hyd)
            srcs.append(src)

        # Write output file
        fname_out = os.path.join(folder_out, 'src_{:s}.xml'.format(src_id))
        write_source_model(fname_out, srcs, 'Zone {:s}'.format(src_id))


def get_param(dct, dct_default, key):
    # Return the source-specific value when present, the default otherwise
    if key in dct:
        return dct[key]
    return dct_default[key]


create_nrml_sources.fname_input_pattern = "Pattern for input .csv files"
create_nrml_sources.fname_config = "Name of the configuration file"
create_nrml_sources.folder_out = "Name of the output folder"
create_nrml_sources.fname_subzone_shp = "Name of the shapefile with subzones"
create_nrml_sources.fname_subzone_config = "Name of config file for subzones"

if __name__ == '__main__':
    sap.run(create_nrml_sources)
Example #25
        Send a SIGTERM to all worker processes
        """
        for sock in self.workers:
            os.kill(sock.proc.pid, signal.SIGTERM)
        for sock in self.workers:
            sock.proc.join()
        return 'WorkerPool %s stopped' % self.ctrl_url

    def kill(self):
        """
        Send a SIGKILL to all worker processes
        """
        for sock in self.workers:
            os.kill(sock.proc.pid, signal.SIGKILL)
        for sock in self.workers:
            sock.proc.join()
        return 'WorkerPool %s killed' % self.ctrl_url


def workerpool(worker_url='tcp://0.0.0.0:1909', *, num_workers: int = -1):
    # start a workerpool without a streamer
    WorkerPool(worker_url, num_workers).start()


workerpool.worker_url = dict(
    help='ZMQ address (tcp://w.x.y.z:port) of the worker')
workerpool.num_workers = dict(help='number of cores to use')

if __name__ == '__main__':
    sap.run(workerpool)
Example #26
    fout = open(fname_out, 'w')
    for idx, poly in polygons_gdf.iterrows():

        # the json round-trip turns the shapely mapping into plain lists/dicts
        geojson_poly = json.loads(json.dumps(shapely.geometry.mapping(
            poly.geometry)))

        # Revert the positions of lons and lats
        coo = [[c[1], c[0]] for c in geojson_poly['coordinates'][0]]
        geojson_poly['coordinates'] = [coo]

        # Discretizing
        hexagons = list(h3.polyfill(geojson_poly, h3_level))
        for hxg in hexagons:
            if isinstance(poly.id, str):
                fout.write("{:s},{:s}\n".format(hxg, poly.id))
            else:
                fout.write("{:s},{:d}\n".format(hxg, poly.id))

    fout.close()


descr = 'The level of the H3 grid'
discretize_zones_with_h3_grid.h3_level = descr
descr = 'The name of the shapefile with polygons'
discretize_zones_with_h3_grid.fname_poly = descr
descr = 'The name of the folder where to save output'
discretize_zones_with_h3_grid.folder_out = descr

if __name__ == '__main__':
    sap.run(discretize_zones_with_h3_grid)
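
The coordinate flip above is needed because, in h3-py v3, h3.polyfill by default expects [lat, lng] pairs while GeoJSON stores [lng, lat]. A minimal sketch, assuming h3-py v3 and a hypothetical one-degree square:

import h3

# One-degree square near (45N, 10E), coordinates already as [lat, lng]
square = {'type': 'Polygon',
          'coordinates': [[[45.0, 10.0], [45.0, 11.0],
                           [46.0, 11.0], [46.0, 10.0], [45.0, 10.0]]]}
hexagons = h3.polyfill(square, 4)
print(len(hexagons), 'level-4 hexagons')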
Example #27
    A very important parameter in this section is `priority`.

    The second (and the following) sections specify the parameters and the
    data required to create tectonically uniform families of earthquakes.

    The selection of crustal earthquakes requires a set of points (described
    in terms of longitude, latitude and depth) describing the bottom surface
    of the crust. Ideally, this set of points should be homogeneously
    distributed over a regular grid.

    The selection of subduction interface earthquakes is performed by
    selecting the ones located at less than a threshold distance [in km] from
    the surface describing the top of the subducted slab - interface part.
    The threshold distance is defined in the `.ini` configuration file.

    The selection of subduction inslab earthquakes is performed in a manner
    similar to the one used for subduction interface seismicity.

    """


msg = 'Path to the configuration file - typically a .ini file for tr'
classify.ini_fname = msg
msg = 'Flag controlling whether distances are computed'
classify.compute_distances = msg
msg = 'Root folder (paths in the .ini file are relative to this)'
classify.rf = msg

if __name__ == "__main__":
    sap.run(classify)
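
A minimal sketch of the distance-threshold selection described in the docstring for interface events, assuming hypocentres and the slab-top surface are available as (n, 3) Cartesian arrays in km (hypothetical helper, not the tool's actual implementation):

import numpy as np
from scipy.spatial import cKDTree


def select_interface(hypo_xyz, slab_xyz, threshold_km):
    # distance from each hypocentre to the nearest point of the
    # slab-top surface; events within the threshold are classified
    # as subduction interface
    dist, _ = cKDTree(slab_xyz).query(hypo_xyz)
    return dist < threshold_km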
Example #28
    # lons, lats, poes, hea, imls = csvt.read_hazard_curve_csv(fname_csv)
    # Compute the hazard maps
    if pex is not None and iml is None:
        dat = csvt.get_map_from_curves(imls, poes, pex)
    elif pex is None and iml is not None:
        raise ValueError('Not yet supported')
    else:
        raise ValueError('Set exactly one of iml and pex')
    # Save the hazard map
    path_out = os.path.join(path_out, fname_out)
    write_hazard_map(path_out, lons, lats, pex, dat, imt_str)


def map(path_in, prefix, fname_out, path_out, imt_str, pex=None, iml=None):
    """
    Creates a hazard map from a set of hazard curves
    """
    create_map(path_in, prefix, fname_out, path_out, imt_str, pex, iml)


map.path_in = 'Path to the folder with the input .json files'
map.prefix = 'Prefix for selecting files'
map.fname_out = 'Name of the output csv file'
map.path_out = 'Path to the output folder'
map.imt_str = 'String describing the IMT'
map.pex = 'Probability of exceedance'
map.iml = 'Intensity measure level used for building the maps'

if __name__ == "__main__":
    sap.run(map)
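
In practice a hazard map is obtained by interpolating each hazard curve at the probability of exceedance pex to get the corresponding intensity level. A minimal sketch with a toy curve (hypothetical values; the real work is done by csvt.get_map_from_curves):

import numpy as np


def iml_at(imls, poes, pex):
    # hazard curves decrease with iml, so flip both arrays before
    # interpolating at the target probability of exceedance
    return np.interp(pex, poes[::-1], imls[::-1])

imls = np.array([0.01, 0.1, 0.2, 0.5])
poes = np.array([0.9, 0.4, 0.1, 0.01])
print(iml_at(imls, poes, 0.1))  # -> 0.2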
Example #29
#!/usr/bin/env python
# coding: utf-8

from openquake.baselib import sap
from openquake.wkf.ses import check_ses_vs_catalogue


def main(fname, example_flag=False):
    """
    Compares the seismicity contained in a stochastic event set generated by
    the engine against a real earthquake catalogue.
    """
    check_ses_vs_catalogue(fname, example_flag)


descr = 'Name of the .toml configuration file'
main.fname = descr
descr = 'A flag. When true the code prints an example of .toml configuration'
main.example_flag = descr

if __name__ == '__main__':
    sap.run(main)
Example #30
    # Info
    number_of_samples = numpy.ceil(lengths[longest_key] /
                                   float(max_sampl_dist))
    tmps = 'Number of subsegments for each profile: {:d}'
    logging.info(tmps.format(int(number_of_samples)))
    tmp = lengths[shortest_key] / number_of_samples
    logging.info('Shortest sampling [%s]: %.4f' % (shortest_key, tmp))
    tmp = lengths[longest_key] / number_of_samples
    logging.info('Longest sampling  [%s]: %.4f' % (longest_key, tmp))

    # Resampled profiles
    rsps = get_interpolated_profiles(sps, lengths, number_of_samples)

    # Store new profiles
    write_profiles_csv(rsps, out_path)

    # Store computed edges
    write_edges_csv(rsps, out_path)


build_complex_surface.in_path = 'Path to the input folder'
build_complex_surface.max_sampl_dist = 'Maximum profile sampling distance'
build_complex_surface.out_path = 'Path to the output folder'
build_complex_surface.upper_depth = 'Upper depth'
build_complex_surface.lower_depth = 'Lower depth'
build_complex_surface.from_id = 'Index profile where to start the sampling'
build_complex_surface.to_id = 'Index profile where to stop the sampling'

if __name__ == "__main__":
    sap.run(build_complex_surface)