Example #1
    def test_nankai(self):
        # source model for the Nankai region provided by M. Pagani
        source_model = os.path.join(os.path.dirname(__file__), 'nankai.xml')
        # it has a single group containing 15 mutex sources
        [group] = nrml.to_python(source_model)
        for i, src in enumerate(group):
            src.id = i
            src.grp_id = 0
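        # aae is presumably an alias for numpy.testing.assert_almost_equal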
        aae([src.mutex_weight for src in group], [
            0.0125, 0.0125, 0.0125, 0.0125, 0.1625, 0.1625, 0.0125, 0.0125,
            0.025, 0.025, 0.05, 0.05, 0.325, 0.025, 0.1
        ])
        param = dict(ses_per_logic_tree_path=10, ses_seed=42, imtls={})
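        # '*' stands for any tectonic region type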
        cmaker = contexts.ContextMaker('*', [SiMidorikawa1999SInter()], param)
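        # sample_ruptures yields dict-like accumulators (AccumDict),
        # so they can be merged with a plain sum()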
        dic = sum(sample_ruptures(group, cmaker), {})
        self.assertEqual(len(dic['rup_array']), 8)
        self.assertEqual(len(dic['calc_times']), 15)  # mutex sources

        # test no filtering 1
        ruptures = list(stochastic_event_set(group))
        self.assertEqual(len(ruptures), 19)

        # test no filtering 2
        ruptures = sum(sample_ruptures(group, cmaker), {})['rup_array']
        self.assertEqual(len(ruptures), 8)
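
Since the 15 sources are mutually exclusive, their weights are expected to sum to one; a quick sanity check on the values asserted above:

import numpy

mutex_weights = [0.0125, 0.0125, 0.0125, 0.0125, 0.1625, 0.1625, 0.0125,
                 0.0125, 0.025, 0.025, 0.05, 0.05, 0.325, 0.025, 0.1]
numpy.testing.assert_almost_equal(sum(mutex_weights), 1.0)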
Example #2
    def full_enum(self):
        # compute the mean curve with full enumeration
        srcs = []
        weights = []
        grp_id = trt_smr = 0
        for weight, branches in self.bs0.enumerate_paths():
            path = tuple(br.branch_id for br in branches)
            bset_values = self.bs0.get_bset_values(path)
            # first path: [(<b01 b02>, (4.6, 1.1)), (<b11 b12>, 7.0)]
            sg = lt.apply_uncertainties(bset_values, self.sg)
            for src in sg:
                src.grp_id = grp_id
                src.trt_smr = trt_smr
            grp_id += 1
            trt_smr += 1
            srcs.extend(sg)
            weights.append(weight)
        for i, src in enumerate(srcs):
            src.id = i
        N = len(self.sitecol.complete)
        time_span = srcs[0].temporal_occurrence_model.time_span
        idist = calc.filters.IntegrationDistance.new('200')
        params = dict(imtls=self.imtls,
                      truncation_level=2,
                      collapse_level=2,
                      investigation_time=time_span,
                      maximum_distance=idist('default'))
        cmaker = contexts.ContextMaker(srcs[0].tectonic_region_type,
                                       self.gsims, params)
        res = classical(srcs, self.sitecol, cmaker)
        pmap = res['pmap']
        effrups = sum(nr for nr, ns, dt in res['calc_times'].values())
        curve = pmap.array(N)[0, :, 0]
        return curve, srcs, effrups, weights
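
full_enum returns a single curve plus the per-path weights; the mean over the enumerated paths is the weight-averaged curve. A minimal sketch of that reduction, assuming one curve per path:

import numpy

def mean_over_paths(curves, weights):
    # weighted average of per-path hazard curves; with a full
    # enumeration of the logic tree the weights sum to 1
    return numpy.average(curves, weights=weights, axis=0)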
Example #3
def read_input(hparams, **extra):
    """
    :param hparams: a dictionary of hazard parameters
    :returns: an Input namedtuple (groups, sitecol, gsim_lt, cmakerdict)

    The dictionary must contain the keys

    - "maximum_distance"
    - "imtls"
    - "source_model_file" or "rupture_model_file"
    - "sites" or "site_model_file"
    - "gsim" or "gsim_logic_tree_file"

    Moreover:

    - if "source_model_file" is given, then "investigation_time" is mandatory
    - if "rupture_model_file" is given, the "number_of_ground_motion_fields"
      and "ses_seed" are mandatory
    - if there is an area source, then "area_source_discretization" is needed
    - if  "site_model_file" is missing, then global site parameters are needed

    The optional keys include

    - "rupture_mesh_spacing" (default 5.)
    - "complex_fault_mesh_spacing" (default rupture_mesh_spacing)
    - "width_of_mfd_bin" (default 1.)
    - "minimum_magnitude"
    - "discard_trts" (default "")
    - "number_of_logic_tree_samples" (default 0)
    - "ses_per_logic_tree_path" (default 1)
    """
    if isinstance(hparams, str):
        hparams = read_hparams(hparams)
    if extra:
        hparams = hparams.copy()
        hparams.update(extra)
    assert 'imts' in hparams or 'imtls' in hparams
    assert isinstance(hparams['maximum_distance'], IntegrationDistance)
    smfname = hparams.get('source_model_file')
    if smfname:  # nonscenario
        itime = hparams['investigation_time']
    else:
        itime = 50.  # ignored in scenario
    rmfname = hparams.get('rupture_model_file')
    if rmfname:
        ngmfs = hparams["number_of_ground_motion_fields"]
        ses_seed = hparams["ses_seed"]
    converter = sourceconverter.SourceConverter(
        itime,
        hparams.get('rupture_mesh_spacing', 5.),
        hparams.get('complex_fault_mesh_spacing'),
        hparams.get('width_of_mfd_bin', 1.0),
        hparams.get('area_source_discretization'),
        hparams.get('minimum_magnitude', {'default': 0}),
        hparams.get('source_id'),
        discard_trts=hparams.get('discard_trts', ''))
    if smfname:
        [sm] = nrml.read_source_models([smfname], converter)
        groups = sm.src_groups
    elif rmfname:
        ebrs = _get_ebruptures(rmfname, converter, ses_seed)
        groups = _rupture_groups(ebrs)
    else:
        raise KeyError('Missing source_model_file or rupture_model_file')
    trts = set(grp.trt for grp in groups)
    if 'gsim' in hparams:
        gslt = gsim_lt.GsimLogicTree.from_(hparams['gsim'])
    else:
        gslt = gsim_lt.GsimLogicTree(hparams['gsim_logic_tree_file'], trts)

    # fix source attributes
    idx = 0
    num_rlzs = gslt.get_num_paths()
    for grp_id, sg in enumerate(groups):
        assert len(sg)  # sanity check
        for src in sg:
            src.id = idx
            src.grp_id = grp_id
            src.trt_smr = grp_id
            src.samples = num_rlzs
            idx += 1

    cmakerdict = {}  # trt => cmaker
    start = 0
    n = hparams.get('number_of_logic_tree_samples', 0)
    s = hparams.get('random_seed', 42)
    for trt, rlzs_by_gsim in gslt.get_rlzs_by_gsim_trt(n, s).items():
        cmakerdict[trt] = contexts.ContextMaker(trt, rlzs_by_gsim, hparams)
        cmakerdict[trt].start = start
        start += len(rlzs_by_gsim)
    if rmfname:
        # for instance, with 2 TRTs with 5x2 GSIMs and ngmfs=10, the
        # number of occurrences is 100 for each rupture, for a total
        # of 200 events; see scenario/case_13
        nrlzs = gslt.get_num_paths()
        for grp in groups:
            for ebr in grp:
                ebr.n_occ = ngmfs * nrlzs

    sitecol = _get_sitecol(hparams, gslt.req_site_params)
    return Input(groups, sitecol, gslt, cmakerdict)
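
A minimal way to drive read_input is with a plain dictionary. The sketch below uses hypothetical file names and assumes read_input is importable from openquake.hazardlib; the keys follow the docstring above, and "maximum_distance" must already be an IntegrationDistance, as the assert enforces:

from openquake.hazardlib import read_input
from openquake.hazardlib.calc.filters import IntegrationDistance

hparams = dict(
    maximum_distance=IntegrationDistance.new('200'),
    imtls={'PGA': [0.1, 0.2, 0.4]},
    source_model_file='source_model.xml',  # hypothetical path
    investigation_time=50.,  # mandatory with a source model
    site_model_file='site_model.csv',  # hypothetical path
    gsim='BooreAtkinson2008',
    area_source_discretization=10.)
inp = read_input(hparams)
print(inp.cmakerdict)  # one ContextMaker per tectonic region type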
Example #4
def read_cmaker_df(gsim, csvfnames):
    """
    :param gsim:
        a GSIM instance
    :param csvfnames:
        a list of pathnames to CSV files in the format used in
        hazardlib/tests/gsim/data, i.e. with fields rup_XXX, site_XXX,
        dist_XXX, result_type and periods
    :returns: a ContextMaker instance and a DataFrame built from the CSVs
    """
    # build a suitable ContextMaker
    dfs = [pandas.read_csv(fname) for fname in csvfnames]
    num_rows = sum(len(df) for df in dfs)
    if num_rows == 0:
        raise ValueError('The files %s are empty!' % ' '.join(csvfnames))
    logging.info('\n%s' % gsim)
    logging.info('num_checks = {:_d}'.format(num_rows))
    if not all_equals([sorted(df.columns) for df in dfs]):
        colset = set.intersection(*[set(df.columns) for df in dfs])
        cols = [col for col in dfs[0].columns if col in colset]
        extra = set()
        ncols = []
        for df in dfs:
            ncols.append(len(df.columns))
            extra.update(set(df.columns) - colset)
        print('\n%s\nThere are %d extra columns %s over a total of %s' %
              (csvfnames[0], len(extra), extra, ncols))
    else:
        cols = slice(None)
    df = pandas.concat(d[cols] for d in dfs)
    sizes = {r: len(d) for r, d in df.groupby('result_type')}
    if not all_equals(list(sizes.values())):
        raise ValueError('Inconsistent number of rows: %s' % sizes)
    imts = []
    cmap = {}
    for col in df.columns:
        try:
            im = str(imt.from_string(col.upper()))
        except KeyError:
            pass
        else:
            imts.append(im)
            cmap[col] = im
    if gsim.__class__.__name__.endswith('AvgSA'):  # special case
        imts.append('AvgSA')
    assert imts
    imtls = {im: [0] for im in sorted(imts)}
    trt = gsim.DEFINED_FOR_TECTONIC_REGION_TYPE
    cmaker = contexts.ContextMaker(trt.value if trt else "*", [gsim],
                                   {'imtls': imtls})
    for dist in cmaker.REQUIRES_DISTANCES:
        name = 'dist_' + dist
        df[name] = np.array(df[name].to_numpy(), cmaker.dtype[dist])
        logging.info('%s %s', name, df[name].unique())
    for dist in cmaker.REQUIRES_SITES_PARAMETERS:
        name = 'site_' + dist
        df[name] = np.array(df[name].to_numpy(), cmaker.dtype[dist])
        logging.info('%s %s', name, df[name].unique())
    for par in cmaker.REQUIRES_RUPTURE_PARAMETERS:
        name = 'rup_' + par
        if name not in df.columns:  # i.e. missing rake
            df[name] = np.zeros(len(df), cmaker.dtype[par])
        else:
            df[name] = np.array(df[name].to_numpy(), cmaker.dtype[par])
        logging.info('%s %s', name, df[name].unique())
    logging.info('result_type %s', df['result_type'].unique())
    return cmaker, df.rename(columns=cmap)
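
A hedged usage sketch: the CSV paths below are placeholders for files in the hazardlib/tests/gsim/data format described in the docstring.

from openquake.hazardlib.gsim.boore_atkinson_2008 import BooreAtkinson2008

csvs = ['BA08_MEAN.csv', 'BA08_STD_TOTAL.csv']  # hypothetical paths
cmaker, df = read_cmaker_df(BooreAtkinson2008(), csvs)
print(df['result_type'].unique())  # e.g. ['MEAN', 'TOTAL_STDEV']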