Example #1
def hazard_curves_per_rupture_subset(
        rupset_idx, ucerf_source, src_filter, imtls, cmaker,
        truncation_level=None, bbs=(), monitor=Monitor()):
    """
    Calculates the probabilities of exceedance from a set of rupture indices
    """
    imtls = DictArray(imtls)
    ctx_mon = monitor('making contexts', measuremem=False)
    pne_mon = monitor('computing poes', measuremem=False)
    disagg_mon = monitor('get closest points', measuremem=False)
    pmap = ProbabilityMap(len(imtls.array), len(cmaker.gsims))
    pmap.calc_times = []
    pmap.eff_ruptures = 0
    pmap.grp_id = ucerf_source.src_group_id
    nsites = len(src_filter.sitecol)
    with h5py.File(ucerf_source.source_file, "r") as hdf5:
        t0 = time.time()
        pmap |= ucerf_poe_map(hdf5, ucerf_source, rupset_idx,
                              src_filter.sitecol, imtls, cmaker,
                              truncation_level, bbs,
                              ctx_mon, pne_mon, disagg_mon)
        pmap.calc_times.append(
            (ucerf_source.source_id, nsites, time.time() - t0))
        pmap.eff_ruptures += pne_mon.counts
    return pmap
Example #2
def pmap_from_grp(
        sources, source_site_filter, imtls, gsims, truncation_level=None,
        bbs=(), monitor=Monitor()):
    """
    Compute the hazard curves for a set of sources belonging to the same
    tectonic region type for all the GSIMs associated to that TRT.
    The arguments are the same as in :func:`calc_hazard_curves`, except
    for ``gsims``, which is a list of GSIM instances.

    :returns: a ProbabilityMap instance
    """
    if isinstance(sources, SourceGroup):
        group = sources
        sources = group.src_list
    else:
        group = SourceGroup(sources, 'src_group', 'indep', 'indep')
        sources = group.src_list
    trt = sources[0].tectonic_region_type
    try:
        maxdist = source_site_filter.integration_distance[trt]
    except (KeyError, TypeError):
        # integration_distance is a scalar, not a dictionary by TRT
        maxdist = source_site_filter.integration_distance
    if hasattr(gsims, 'keys'):
        gsims = [gsims[trt]]
    # check all the sources belong to the same tectonic region
    trts = set(src.tectonic_region_type for src in sources)
    assert len(trts) == 1, 'Multiple TRTs: %s' % ', '.join(trts)

    with GroundShakingIntensityModel.forbid_instantiation():
        imtls = DictArray(imtls)
        cmaker = ContextMaker(gsims, maxdist)
        ctx_mon = monitor('making contexts', measuremem=False)
        pne_mon = monitor('computing poes', measuremem=False)
        disagg_mon = monitor('get closest points', measuremem=False)
        src_indep = group.src_interdep == 'indep'
        pmap = ProbabilityMap(len(imtls.array), len(gsims))
        pmap.calc_times = []  # triples (src_id, nsites, dt)
        pmap.grp_id = sources[0].src_group_id
        for src, s_sites in source_site_filter(sources):
            t0 = time.time()
            poemap = poe_map(
                src, s_sites, imtls, cmaker, truncation_level, bbs,
                group.rup_interdep == 'indep', ctx_mon, pne_mon, disagg_mon)
            if src_indep:  # usual composition of probabilities
                pmap |= poemap
            else:  # mutually exclusive probabilities
                weight = float(group.srcs_weights[src.source_id])
                for sid in poemap:
                    pmap[sid] += poemap[sid] * weight
            pmap.calc_times.append(
                (src.source_id, len(s_sites), time.time() - t0))
        # storing the number of contributing ruptures too
        pmap.eff_ruptures = {pmap.grp_id: pne_mon.counts}
        return pmap
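Note how the snippet above contains both composition rules side by side: independent sources are combined with ``|=`` (probabilistic union of the PoEs), while mutually exclusive sources are combined as a weighted sum. A minimal sketch of the arithmetic with plain numpy arrays (the values and weights are invented; the real code operates on ProbabilityMap objects keyed by site id):

import numpy

poe_a = numpy.array([0.4, 0.1])  # PoEs from source A (invented)
poe_b = numpy.array([0.5, 0.2])  # PoEs from source B (invented)

# independent sources, as in `pmap |= poemap`
indep = 1. - (1. - poe_a) * (1. - poe_b)  # -> [0.7, 0.28]

# mutually exclusive sources, as in `pmap[sid] += poemap[sid] * weight`
w_a, w_b = 0.6, 0.4
mutex = w_a * poe_a + w_b * poe_b  # -> [0.44, 0.14]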
Example #3
 def acc0(self):
     """
     Initial accumulator, a dictionary rlz_ordinal -> ProbabilityMap
     """
     self.L = len(self.oqparam.imtls.array)
     zd = {r: ProbabilityMap(self.L) for r in range(self.R)}
     return zd
Example #4
def get_pmap_from_csv(oqparam, fnames):
    """
    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param fnames:
        a space-separated list of .csv relative filenames
    :returns:
        the site mesh and the hazard curves read by the .csv files
    """
    read = functools.partial(hdf5.read_csv, dtypedict={None: float})
    imtls = {}
    dic = {}
    for wrapper in map(read, fnames):
        dic[wrapper.imt] = wrapper.array
        imtls[wrapper.imt] = levels_from(wrapper.dtype.names)
    oqparam.hazard_imtls = imtls
    oqparam.set_risk_imtls(get_risk_models(oqparam))
    array = wrapper.array
    mesh = geo.Mesh(array['lon'], array['lat'])
    num_levels = sum(len(imls) for imls in oqparam.imtls.values())
    data = numpy.zeros((len(mesh), num_levels))
    level = 0
    for im in oqparam.imtls:
        arr = dic[im]
        for poe in arr.dtype.names[3:]:
            data[:, level] = arr[poe]
            level += 1
        for field in ('lon', 'lat', 'depth'):  # sanity check
            numpy.testing.assert_equal(arr[field], array[field])
    return mesh, ProbabilityMap.from_array(data, range(len(mesh)))
Example #6
 def poe_map(self, src, s_sites, imtls, trunclevel, rup_indep=True):
     """
     :param src: a source object
     :param s_sites: a filtered SiteCollection of sites around the source
     :param imtls: intensity measure and levels
     :param trunclevel: truncation level
     :param rup_indep: True if the ruptures are independent
     :returns: a ProbabilityMap instance
     """
     pmap = ProbabilityMap.build(
         len(imtls.array), len(self.gsims), s_sites.sids,
         initvalue=rup_indep)
     eff_ruptures = 0
     for rup, sctx, dctx in self.gen_rup_contexts(src, s_sites):
         eff_ruptures += 1
         with self.poe_mon:
             pnes = self._make_pnes(rup, sctx, dctx, imtls, trunclevel)
             for sid, pne in zip(sctx.sids, pnes):
                 if rup_indep:
                     pmap[sid].array *= pne
                 else:
                     pmap[sid].array += (1.-pne) * rup.weight
     if rup_indep:
         pmap = ~pmap
     pmap.eff_ruptures = eff_ruptures
     return pmap
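The loop above accumulates probabilities of no exceedance (pne): for independent ruptures the map starts at 1 (``initvalue=rup_indep``) and is multiplied by each pne, and the final ``~pmap`` flips it to a probability of exceedance, i.e. 1 - prod(pne). A toy numpy version of that accumulation (invented pne values):

import numpy

pnes = [numpy.array([0.95, 0.99]),  # rupture 1, two levels (invented)
        numpy.array([0.90, 0.98])]  # rupture 2

acc = numpy.ones(2)  # initvalue=rup_indep (True behaves as 1.)
for pne in pnes:
    acc *= pne       # pmap[sid].array *= pne
poes = 1. - acc      # the final `~pmap`: [0.145, 0.0298]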
Example #7
 def make_pmap(self, ruptures, imtls, trunclevel, rup_indep):
     """
     :param ruptures: a list of "dressed" ruptures
     :param imtls: intensity measure and levels
     :param trunclevel: truncation level
     :param rup_indep: True if the ruptures are independent
     :returns: a ProbabilityMap instance
     """
     sids = set()
     for rup in ruptures:
         sids.update(rup.sctx.sites.sids)
     pmap = ProbabilityMap.build(len(imtls.array),
                                 len(self.gsims),
                                 sids,
                                 initvalue=rup_indep)
     for rup in ruptures:
         pnes = self._make_pnes(rup, imtls, trunclevel)
         for sid, pne in zip(rup.sctx.sites.sids, pnes):
             if rup_indep:
                 pmap[sid].array *= pne
             else:
                 pmap[sid].array += pne * rup.weight
     tildemap = ~pmap
     tildemap.eff_ruptures = len(ruptures)
     return tildemap
Example #8
    def execute(self):
        """
        Run in parallel `core_task(sources, sitecol, monitor)`, by
        parallelizing on the ruptures according to their weight and
        tectonic region type.
        """
        oq = self.oqparam
        if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
            return
        if self.oqparam.ground_motion_fields:
            calc.check_overflow(self)

        with self.monitor('reading ruptures', autoflush=True):
            ruptures_by_grp = (self.precalc.result if self.precalc else
                               get_ruptures_by_grp(self.datastore.parent))

        self.sm_id = {
            tuple(sm.path): sm.ordinal
            for sm in self.csm.info.source_models
        }
        L = len(oq.imtls.array)
        rlzs = self.rlzs_assoc.realizations
        res = parallel.Starmap(self.core_task.__func__,
                               self.gen_args(ruptures_by_grp)).submit_all()
        self.gmdata = {}
        acc = res.reduce(self.combine_pmaps_and_save_gmfs,
                         {rlz.ordinal: ProbabilityMap(L, 1)
                          for rlz in rlzs})
        save_gmdata(self, len(rlzs))
        return acc
Example #9
 def init(self, pmaps, grp_id):
     """
     Initialize the pmaps dictionary with zeros, if needed
     """
     if grp_id not in pmaps:
         L, G = self.imtls.size, len(self.cmakers[grp_id].gsims)
         pmaps[grp_id] = ProbabilityMap.build(L, G, self.sids)
Example #10
    def execute(self):
        """
        Run in parallel `core_task(sources, sitecol, monitor)`, by
        parallelizing on the sources according to their weight and
        tectonic region type.
        """
        monitor = self.monitor(self.core_task.__name__)
        monitor.oqparam = oq = self.oqparam
        self.src_filter = SourceFilter(self.sitecol, oq.maximum_distance)
        self.nsites = []
        acc = AccumDict({
            grp_id: ProbabilityMap(len(oq.imtls.array), len(gsims))
            for grp_id, gsims in self.gsims_by_grp.items()
        })
        acc.calc_times = {}
        acc.eff_ruptures = AccumDict()  # grp_id -> eff_ruptures
        acc.bb_dict = {}  # just for API compatibility
        param = dict(imtls=oq.imtls, truncation_level=oq.truncation_level)
        for sm in self.csm.source_models:  # one branch at a time
            grp_id = sm.ordinal
            gsims = self.gsims_by_grp[grp_id]
            [[ucerf_source]] = sm.src_groups
            ucerf_source.nsites = len(self.sitecol)
            self.csm.infos[ucerf_source.source_id] = source.SourceInfo(
                ucerf_source)
            logging.info('Getting the background point sources')
            bckgnd_sources = ucerf_source.get_background_sources(
                self.src_filter)

            # since there are two kinds of tasks (background and rupture_set)
            # we divide the concurrent_tasks parameter by 2;
            # notice the "or 1" below, to avoid issues when
            # self.oqparam.concurrent_tasks is 0 or 1
            ct2 = (self.oqparam.concurrent_tasks // 2) or 1

            # parallelize on the background sources, small tasks
            args = (bckgnd_sources, self.src_filter, gsims, param, monitor)
            bg_res = parallel.Starmap.apply(classical,
                                            args,
                                            name='background_sources_%d' %
                                            grp_id,
                                            concurrent_tasks=ct2)

            # parallelize by rupture subsets
            rup_sets = numpy.arange(ucerf_source.num_ruptures)
            taskname = 'ucerf_classical_%d' % grp_id
            acc = parallel.Starmap.apply(
                ucerf_classical,
                (rup_sets, ucerf_source, self.src_filter, gsims, monitor),
                concurrent_tasks=ct2,
                name=taskname).reduce(self.agg_dicts, acc)

            # compose probabilities from background sources
            for pmap in bg_res:
                acc[grp_id] |= pmap[grp_id]

        with self.monitor('store source_info', autoflush=True):
            self.store_source_info(self.csm.infos, acc)
        return acc  # {grp_id: pmap}
Example #11
def make_pmap(ctxs, gsims, imtls, trunclevel, investigation_time):
    RuptureContext.temporal_occurrence_model = PoissonTOM(investigation_time)
    # easy case of independent ruptures, useful for debugging
    imts = [from_string(im) for im in imtls]
    loglevels = DictArray(imtls)
    for imt, imls in imtls.items():
        if imt != 'MMI':
            loglevels[imt] = numpy.log(imls)
    pmap = ProbabilityMap(len(loglevels.array), len(gsims))
    for ctx in ctxs:
        mean_std = ctx.get_mean_std(imts, gsims)  # shape (2, N, M, G)
        poes = base.get_poes(mean_std, loglevels, trunclevel, gsims, None,
                             ctx.mag, None, ctx.rrup)  # (N, L, G)
        pnes = ctx.get_probability_no_exceedance(poes)
        for sid, pne in zip(ctx.sids, pnes):
            pmap.setdefault(sid, 1.).array *= pne
    return ~pmap
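The levels are log-transformed above because GSIM means and standard deviations are defined in log ground-motion space; 'MMI' stays linear since macroseismic intensity is not log-distributed. A minimal sketch of what a single PoE then looks like (invented numbers, ignoring truncation; the real computation is vectorized inside ``base.get_poes``):

from math import erf, sqrt

mean, std = 0.1, 0.6  # log-space mean/stddev from a GSIM (invented)
loglevel = 0.5        # numpy.log of one intensity level (invented)
# survival function of the normal distribution = probability of exceedance
poe = 0.5 * (1 - erf((loglevel - mean) / (std * sqrt(2))))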
Example #12
 def zerodict(self):
     """
     Initial accumulator, a dictionary rlz_ordinal -> ProbabilityMap
     """
     self.L = len(self.oqparam.imtls.array)
     zd = {r: ProbabilityMap(self.L) for r in range(self.R)}
     self.E = len(self.datastore['events'])
     return zd
Example #13
 def _run_calculator(self, gdem, imtls, sites):
     """
     Executes the classical calculator
     """
     param = {
         "imtls": DictArray(imtls),
         "truncation_level": 3.,
         "filter_distance": "rrup"
     }
     curves = classical(self.source,
                        SourceFilter(sites, valid.floatdict("200")), [gdem],
                        param)
     pmap = ProbabilityMap(len(param["imtls"].array), 1)
     for res in [curves]:
         for grp_id in res:
             pmap |= res[grp_id]
     return pmap.convert(param["imtls"], len(sites))
Example #14
    def acc0(self):
        """
        Initial accumulator, a dict grp_id -> ProbabilityMap(L, G)
        """
        zd = AccumDict()
        num_levels = len(self.oqparam.imtls.array)
        rparams = {
            'grp_id', 'occurrence_rate', 'weight', 'probs_occur', 'lon_',
            'lat_', 'rrup_'
        }
        gsims_by_trt = self.full_lt.get_gsims_by_trt()
        n = len(self.full_lt.sm_rlzs)
        trts = list(self.full_lt.gsim_lt.values)
        for sm in self.full_lt.sm_rlzs:
            for grp_id in self.full_lt.grp_ids(sm.ordinal):
                trt = trts[grp_id // n]
                gsims = gsims_by_trt[trt]
                cm = ContextMaker(trt, gsims)
                rparams.update(cm.REQUIRES_RUPTURE_PARAMETERS)
                for dparam in cm.REQUIRES_DISTANCES:
                    rparams.add(dparam + '_')
                zd[grp_id] = ProbabilityMap(num_levels, len(gsims))
        zd.eff_ruptures = AccumDict(accum=0)  # trt -> eff_ruptures
        if self.few_sites:
            self.rparams = sorted(rparams)
            for k in self.rparams:
                # variable length arrays
                if k == 'grp_id':
                    self.datastore.create_dset('rup/' + k, U16)
                elif k == 'probs_occur':  # vlen
                    self.datastore.create_dset('rup/' + k, hdf5.vfloat32)
                elif k.endswith('_'):  # array of shape (U, N)
                    self.datastore.create_dset('rup/' + k,
                                               F32,
                                               shape=(None, self.N),
                                               compression='gzip')
                else:
                    self.datastore.create_dset('rup/' + k, F32)
        else:
            self.rparams = {}
        self.by_task = {}  # task_no => src_ids
        self.totrups = 0  # total number of ruptures before collapsing
        self.maxradius = 0
        self.gidx = {
            tuple(grp_ids): i
            for i, grp_ids in enumerate(self.datastore['grp_ids'])
        }

        # estimate max memory per core
        max_num_gsims = max(len(gsims) for gsims in gsims_by_trt.values())
        max_num_grp_ids = max(len(grp_ids) for grp_ids in self.gidx)
        pmapbytes = self.N * num_levels * max_num_gsims * max_num_grp_ids * 8
        if pmapbytes > TWO32:
            logging.warning(TOOBIG % (self.N, num_levels, max_num_gsims,
                                      max_num_grp_ids, humansize(pmapbytes)))
        logging.info(MAXMEMORY % (self.N, num_levels, max_num_gsims,
                                  max_num_grp_ids, humansize(pmapbytes)))
        return zd
Example #15
def classical(group, src_filter, gsims, param, monitor=Monitor()):
    """
    Compute the hazard curves for a set of sources belonging to the same
    tectonic region type for all the GSIMs associated to that TRT.
    The arguments are the same as in :func:`calc_hazard_curves`, except
    for ``gsims``, which is a list of GSIM instances.

    :returns:
        a dictionary {grp_id: pmap} with attributes .grp_ids, .calc_times,
        .eff_ruptures
    """
    grp_ids = set()
    for src in group:
        if not src.num_ruptures:
            # src.num_ruptures is set when parsing the XML, but not when
            # the source is instantiated manually, so it is set here
            src.num_ruptures = src.count_ruptures()
        grp_ids.update(src.src_group_ids)
    maxdist = src_filter.integration_distance
    imtls = param['imtls']
    trunclevel = param.get('truncation_level')
    cmaker = ContextMaker(gsims, maxdist, param, monitor)
    pmap = AccumDict({
        grp_id: ProbabilityMap(len(imtls.array), len(gsims))
        for grp_id in grp_ids
    })
    # AccumDict of arrays with 3 elements weight, nsites, calc_time
    pmap.calc_times = AccumDict(accum=numpy.zeros(3, numpy.float32))
    pmap.eff_ruptures = AccumDict()  # grp_id -> num_ruptures
    src_mutex = param.get('src_interdep') == 'mutex'
    rup_mutex = param.get('rup_interdep') == 'mutex'
    for src, s_sites in src_filter(group):  # filter now
        t0 = time.time()
        try:
            poemap = cmaker.poe_map(src, s_sites, imtls, trunclevel,
                                    not rup_mutex)
        except Exception as err:
            etype, err, tb = sys.exc_info()
            msg = '%s (source id=%s)' % (str(err), src.source_id)
            raise etype(msg).with_traceback(tb)
        if src_mutex:  # mutex sources, there is a single group
            for sid in poemap:
                pcurve = pmap[src.src_group_id].setdefault(sid, 0)
                pcurve += poemap[sid] * src.mutex_weight
        elif poemap:
            for gid in src.src_group_ids:
                pmap[gid] |= poemap
        pmap.calc_times[src.id] += numpy.array(
            [src.weight, len(s_sites),
             time.time() - t0])
        # storing the number of contributing ruptures too
        pmap.eff_ruptures += {
            gid: getattr(poemap, 'eff_ruptures', 0)
            for gid in src.src_group_ids
        }
    if src_mutex and param.get('grp_probability'):
        pmap[src.src_group_id] *= param['grp_probability']
    return pmap
Example #16
    def test(self):
        pmap1 = ProbabilityMap.build(3, 1, sids=[0, 1, 2])
        pmap1[0].array[0] = .4

        pmap2 = ProbabilityMap.build(3, 1, sids=[0, 1, 2])
        pmap2[0].array[0] = .5

        # test probability composition
        pmap = pmap1 | pmap2
        numpy.testing.assert_equal(pmap[0].array, [[.7], [0], [0]])

        # test probability multiplication
        pmap = pmap1 * pmap2
        numpy.testing.assert_equal(pmap[0].array, [[.2], [0], [0]])

        # test pmap power
        pmap = pmap1**2
        numpy.testing.assert_almost_equal(pmap[0].array, [[.16], [0], [0]])
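The asserted values follow directly from the element-wise semantics of the three operators: ``|`` is the probabilistic union, ``*`` a plain product, ``**`` a power. Checking them by hand:

1 - (1 - .4) * (1 - .5)  # pmap1 | pmap2 -> 0.7
.4 * .5                  # pmap1 * pmap2 -> 0.2
.4 ** 2                  # pmap1 ** 2   -> 0.16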
Example #17
def classical(group, src_filter, gsims, param, monitor=Monitor()):
    """
    Compute the hazard curves for a set of sources belonging to the same
    tectonic region type for all the GSIMs associated to that TRT.
    The arguments are the same as in :func:`calc_hazard_curves`, except
    for ``gsims``, which is a list of GSIM instances.

    :returns:
        a dictionary {grp_id: pmap} with attributes .grp_ids, .calc_times,
        .eff_ruptures
    """
    if getattr(group, 'src_interdep', None) == 'mutex':
        mutex_weight = {
            src.source_id: weight
            for src, weight in zip(group.sources, group.srcs_weights)
        }
    else:
        mutex_weight = None
    grp_ids = set()
    for src in group:
        grp_ids.update(src.src_group_ids)
    maxdist = src_filter.integration_distance
    imtls = param['imtls']
    trunclevel = param.get('truncation_level')
    cmaker = ContextMaker(gsims, maxdist, param['filter_distance'], monitor)
    pmap = AccumDict({
        grp_id: ProbabilityMap(len(imtls.array), len(gsims))
        for grp_id in grp_ids
    })
    # AccumDict of arrays with 4 elements weight, nsites, calc_time, split
    pmap.calc_times = AccumDict(accum=numpy.zeros(4))
    pmap.eff_ruptures = AccumDict()  # grp_id -> num_ruptures
    for src, s_sites in src_filter(group):  # filter now
        t0 = time.time()
        indep = group.rup_interdep == 'indep' if mutex_weight else True
        poemap = cmaker.poe_map(src, s_sites, imtls, trunclevel, indep)
        if mutex_weight:  # mutex sources
            weight = mutex_weight[src.source_id]
            for sid in poemap:
                pcurve = pmap[group.id].setdefault(sid, 0)
                pcurve += poemap[sid] * weight
        elif poemap:
            for grp_id in src.src_group_ids:
                pmap[grp_id] |= poemap
        src_id = src.source_id.split(':', 1)[0]
        pmap.calc_times[src_id] += numpy.array(
            [src.weight, len(s_sites),
             time.time() - t0, 1])
        # storing the number of contributing ruptures too
        pmap.eff_ruptures += {
            grp_id: getattr(poemap, 'eff_ruptures', 0)
            for grp_id in src.src_group_ids
        }
    if mutex_weight and group.grp_probability is not None:
        pmap[group.id] *= group.grp_probability
    return pmap
Example #18
    def get_args(self, acc0):
        """
        :returns: the task arguments to pass to the Starmap, ordered by weight
        """
        oq = self.oqparam
        L = len(oq.imtls.array)
        sids = self.sitecol.complete.sids
        allargs = []
        src_groups = self.csm.src_groups
        tot_weight = 0
        et_ids = self.datastore['et_ids'][:]
        rlzs_by_gsim_list = self.full_lt.get_rlzs_by_gsim_list(et_ids)
        grp_id = 0
        for rlzs_by_gsim, sg in zip(rlzs_by_gsim_list, src_groups):
            acc0[grp_id] = ProbabilityMap.build(L, len(rlzs_by_gsim), sids)
            grp_id += 1
            for src in sg:
                src.ngsims = len(rlzs_by_gsim)
                tot_weight += src.weight
                if src.code == b'C' and src.num_ruptures > 20_000:
                    msg = ('{} is suspiciously large, containing {:_d} '
                           'ruptures with complex_fault_mesh_spacing={} km')
                    spc = oq.complex_fault_mesh_spacing
                    logging.info(msg.format(src, src.num_ruptures, spc))
        assert tot_weight
        C = oq.concurrent_tasks or 1
        max_weight = max(tot_weight / (2.5 * C), oq.min_weight)
        self.params['max_weight'] = max_weight
        logging.info('tot_weight={:_d}, max_weight={:_d}'.format(
            int(tot_weight), int(max_weight)))
        for rlzs_by_gsim, sg in zip(rlzs_by_gsim_list, src_groups):
            nb = 0
            if sg.atomic:
                # do not split atomic groups
                nb += 1
                allargs.append((sg, rlzs_by_gsim, self.params))
            else:  # regroup the sources in blocks
                blks = (groupby(sg, get_source_id).values()
                        if oq.disagg_by_src else block_splitter(
                            sg, max_weight, get_weight, sort=True))
                blocks = list(blks)
                nb += len(blocks)
                for block in blocks:
                    logging.debug('Sending %d source(s) with weight %d',
                                  len(block), sum(src.weight for src in block))
                    allargs.append((block, rlzs_by_gsim, self.params))

            w = sum(src.weight for src in sg)
            it = sorted(oq.maximum_distance.ddic[sg.trt].items())
            md = '%s->%d ... %s->%d' % (it[0] + it[-1])
            logging.info(
                'max_dist={}, gsims={}, weight={:_d}, blocks={}'.format(
                    md, len(rlzs_by_gsim), int(w), nb))
        allargs.sort(key=lambda args: sum(src.weight for src in args[0]),
                     reverse=True)
        return allargs
Example #19
 def _make_src_mutex(self):
     pmap = ProbabilityMap(self.imtls.size, len(self.gsims))
     cm = self.cmaker
     for src, sites in self.srcfilter.filter(self.group):
         t0 = time.time()
         pm = ProbabilityMap(cm.imtls.size, len(cm.gsims))
         ctxs = self._get_ctxs(cm._ruptures(src), sites, src.id)
         nctxs = len(ctxs)
         nsites = sum(len(ctx) for ctx in ctxs)
         cm.get_pmap(ctxs, pm)
         p = pm
         if cm.rup_indep:
             p = ~p
         p *= src.mutex_weight
         pmap += p
         dt = time.time() - t0
         self.calc_times[basename(src)] += numpy.array([nctxs, nsites, dt])
         timer.save(src, nctxs, nsites, dt, cm.task_no)
     return pmap
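For mutually exclusive sources the per-source maps are not composed with ``|=``: each map is inverted (when its ruptures are independent), scaled by the source's mutex weight, and summed, i.e. P = sum(w_i * P_i) with the weights summing to 1. With invented numbers:

p1, p2 = 0.30, 0.05  # per-source PoEs at one level (invented)
w1, w2 = 0.7, 0.3    # mutex weights, summing to 1
total = w1 * p1 + w2 * p2  # 0.225, the `p *= src.mutex_weight; pmap += p` above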
Example #20
 def get_pmap(self, src, s_sites, rup_indep=True):
     """
     :param src: a hazardlib source
     :param s_sites: the sites affected by it
     :returns: the probability map generated by the source
     """
     imts = self.imts
     fewsites = len(s_sites.complete) <= self.max_sites_disagg
     rupdata = RupData(self)
     nrups, nsites = 0, 0
     L, G = len(self.imtls.array), len(self.gsims)
     poemap = ProbabilityMap(L, G)
     dists = []
     for rup, sites, maxdist in self._gen_rup_sites(src, s_sites):
         if maxdist is not None:
             dists.append(maxdist)
         try:
             with self.ctx_mon:
                 r_sites, dctx = self.make_contexts(sites, rup, maxdist)
         except FarAwayRupture:
             continue
         with self.gmf_mon:
             mean_std = base.get_mean_std(  # shape (2, N, M, G)
                 r_sites, rup, dctx, imts, self.gsims)
         with self.poe_mon:
             pairs = zip(r_sites.sids, self._make_pnes(rup, mean_std))
         with self.pne_mon:
             if rup_indep:
                 for sid, pne in pairs:
                     poemap.setdefault(sid, rup_indep).array *= pne
             else:
                 for sid, pne in pairs:
                     poemap.setdefault(sid, rup_indep).array += (
                         1.-pne) * rup.weight
         nrups += 1
         nsites += len(r_sites)
         if fewsites:  # store rupdata
             rupdata.add(rup, src.id, r_sites, dctx)
     poemap.nrups = nrups
     poemap.nsites = nsites
     poemap.maxdist = numpy.mean(dists) if dists else None
     poemap.data = rupdata.data
     return poemap
Example #21
 def zerodict(self):
     """
     Initial accumulator, a dictionary rlz_ordinal -> ProbabilityMap
     """
     self.R = self.csm_info.get_num_rlzs()
     self.L = len(self.oqparam.imtls.array)
     zd = AccumDict({r: ProbabilityMap(self.L) for r in range(self.R)})
     zd.eff_ruptures = AccumDict()
     self.grp_trt = self.csm_info.grp_by("trt")
     return zd
Example #22
 def combine_curves(self, results):
     """
     :param results: dictionary (trt_model_id, gsim) -> curves
     :returns: a dictionary rlz -> aggregate curves
     """
     acc = {rlz: ProbabilityMap() for rlz in self.realizations}
     for key in results:
         for rlz in self.rlzs_assoc[key]:
             acc[rlz] |= results[key]
     return acc
Example #23
 def make(self, src, sites, pmap, rup_data):
     """
     :param src: a hazardlib source
     :param sites: the sites affected by it
     :returns: the probability map generated by the source
     """
     with self.cmaker.mon('iter_ruptures', measuremem=False):
         self.mag_rups = [(mag, list(rups))
                          for mag, rups in itertools.groupby(
                              src.iter_ruptures(shift_hypo=self.shift_hypo),
                              key=operator.attrgetter('mag'))]
     rupdata = RupData(self.cmaker)
     totrups, numrups, nsites = 0, 0, 0
     L, G = len(self.imtls.array), len(self.gsims)
     poemap = ProbabilityMap(L, G)
     for rups, sites in self._gen_rups_sites(src, sites):
         with self.ctx_mon:
             ctxs = self.cmaker.make_ctxs(rups, sites)
             if ctxs:
                 totrups += len(ctxs)
                 ctxs = self.collapse(ctxs)
                 numrups += len(ctxs)
         for rup, r_sites, dctx in ctxs:
             if self.fewsites:  # store rupdata
                 rupdata.add(rup, r_sites, dctx)
             sids, poes = self._sids_poes(rup, r_sites, dctx, src.id)
             with self.pne_mon:
                 pnes = rup.get_probability_no_exceedance(poes)
                 if self.rup_indep:
                     for sid, pne in zip(sids, pnes):
                         poemap.setdefault(sid, self.rup_indep).array *= pne
                 else:
                     for sid, pne in zip(sids, pnes):
                         poemap.setdefault(
                             sid,
                             self.rup_indep).array += (1. -
                                                       pne) * rup.weight
             nsites += len(sids)
     poemap.totrups = totrups
     poemap.numrups = numrups
     poemap.nsites = nsites
     self._update(pmap, poemap, src)
     if len(rupdata.data):
         for gid in src.src_group_ids:
             rup_data['grp_id'].extend([gid] * numrups)
             for k, v in rupdata.data.items():
                 rup_data[k].extend(v)
     return poemap
Example #24
def ucerf_classical(rupset_idx, ucerf_source, src_filter, gsims, monitor):
    """
    :param rupset_idx:
        indices of the rupture sets
    :param ucerf_source:
        an object taking the place of a source for UCERF
    :param src_filter:
        a source filter returning the sites affected by the source
    :param gsims:
        a list of GSIMs
    :param monitor:
        a monitor instance
    :returns:
        a ProbabilityMap
    """
    t0 = time.time()
    truncation_level = monitor.oqparam.truncation_level
    imtls = monitor.oqparam.imtls
    ucerf_source.src_filter = src_filter  # so that .iter_ruptures() works
    grp_id = ucerf_source.src_group_id
    mag = ucerf_source.mags[rupset_idx].max()
    ridx = set()
    for idx in rupset_idx:
        ridx.update(ucerf_source.get_ridx(idx))
    ucerf_source.rupset_idx = rupset_idx
    ucerf_source.num_ruptures = nruptures = len(rupset_idx)

    # prefilter the sites close to the rupture set
    s_sites = ucerf_source.get_rupture_sites(ridx, src_filter, mag)
    if s_sites is None:  # return an empty probability map
        pm = ProbabilityMap(len(imtls.array), len(gsims))
        acc = AccumDict({grp_id: pm})
        acc.calc_times = {
            ucerf_source.source_id:
            numpy.array([nruptures, 0, time.time() - t0, 1])
        }
        acc.eff_ruptures = {grp_id: 0}
        return acc

    # compute the ProbabilityMap
    cmaker = ContextMaker(gsims, src_filter.integration_distance)
    imtls = DictArray(imtls)
    ctx_mon = monitor('make_contexts', measuremem=False)
    poe_mon = monitor('get_poes', measuremem=False)
    pmap = cmaker.poe_map(ucerf_source, s_sites, imtls, truncation_level,
                          ctx_mon, poe_mon)
    nsites = len(s_sites)
    acc = AccumDict({grp_id: pmap})
    acc.calc_times = {
        ucerf_source.source_id:
        numpy.array([nruptures * nsites, nsites,
                     time.time() - t0, 1])
    }
    acc.eff_ruptures = {grp_id: ucerf_source.num_ruptures}
    return acc
Example #25
def ucerf_classical_hazard_by_rupture_set(
        rupset_idx, branchname, ucerf_source, src_group_id, src_filter,
        gsims, monitor):
    """
    :param rupset_idx:
        indices of the rupture sets
    :param branchname:
        name of the branch
    :param ucerf_source:
        an object taking the place of a source for UCERF
    :param src_group_id:
        source group index
    :param src_filter:
        a source filter returning the sites affected by the source
    :param gsims:
        a list of GSIMs
    :param monitor:
        a monitor instance
    :returns:
        an AccumDict rlz -> curves
    """
    truncation_level = monitor.oqparam.truncation_level
    imtls = monitor.oqparam.imtls
    max_dist = monitor.oqparam.maximum_distance[DEFAULT_TRT]

    # Apply the initial rupture to site filtering
    rupset_idx, s_sites = \
        ucerf_source.filter_sites_by_distance_from_rupture_set(
            rupset_idx, src_filter.sitecol, max_dist)

    if len(s_sites):
        cmaker = ContextMaker(gsims, max_dist)
        pmap = hazard_curves_per_rupture_subset(
            rupset_idx, ucerf_source, src_filter, imtls, cmaker,
            truncation_level, bbs=[], monitor=monitor)
    else:
        pmap = ProbabilityMap(len(imtls.array), len(gsims))
        pmap.calc_times = []
        pmap.eff_ruptures = {src_group_id: 0}
    pmap.grp_id = ucerf_source.src_group_id
    return pmap
Example #26
 def _make_src_mutex(self):
     pmap = ProbabilityMap(self.imtls.size, len(self.gsims))
     for src, indices in self.srcfilter.filter(self.group):
         t0 = time.time()
         sites = self.srcfilter.sitecol.filtered(indices)
         self.numctxs = 0
         self.numsites = 0
         rups = self._ruptures(src)
         pm = ProbabilityMap(self.cmaker.imtls.size, len(self.cmaker.gsims))
         self._update_pmap(self._gen_ctxs(rups, sites, src.id), pm)
         p = pm
         if self.cmaker.rup_indep:
             p = ~p
         p *= src.mutex_weight
         pmap += p
         dt = time.time() - t0
         self.calc_times[src.id] += numpy.array(
             [self.numctxs, self.numsites, dt])
         timer.save(src, self.numctxs, self.numsites, dt,
                    self.cmaker.task_no)
     return pmap
Example #27
def pmap_from_grp(group, src_filter, gsims, param, monitor=Monitor()):
    """
    Compute the hazard curves for a set of sources belonging to the same
    tectonic region type for all the GSIMs associated to that TRT.
    The arguments are the same as in :func:`calc_hazard_curves`, except
    for ``gsims``, which is a list of GSIM instances.

    :returns: a dictionary {grp_id: ProbabilityMap instance}
    """
    mutex_weight = {
        src.source_id: weight
        for src, weight in zip(group.sources, group.srcs_weights)
    }
    maxdist = src_filter.integration_distance
    srcs = sum([split_source(src) for src in group.sources], [])
    with GroundShakingIntensityModel.forbid_instantiation():
        imtls = param['imtls']
        trunclevel = param.get('truncation_level')
        cmaker = ContextMaker(gsims, maxdist)
        ctx_mon = monitor('make_contexts', measuremem=False)
        poe_mon = monitor('get_poes', measuremem=False)
        pmap = ProbabilityMap(len(imtls.array), len(gsims))
        calc_times = []  # quadruples (src_id, weight, nsites, dt)
        for src, s_sites in src_filter(srcs):
            t0 = time.time()
            poemap = cmaker.poe_map(src, s_sites, imtls, trunclevel, ctx_mon,
                                    poe_mon, group.rup_interdep == 'indep')
            weight = mutex_weight[src.source_id]
            for sid in poemap:
                pcurve = pmap.setdefault(sid, 0)
                pcurve += poemap[sid] * weight
            calc_times.append(
                (src.source_id, src.weight, len(s_sites), time.time() - t0))
        if group.grp_probability is not None:
            pmap *= group.grp_probability
        acc = AccumDict({group.id: pmap})
        # adding the number of contributing ruptures too
        acc.eff_ruptures = {group.id: ctx_mon.counts}
        acc.calc_times = calc_times
        return acc
Example #28
 def post_execute(self, result):
     """
     :param result:
         a dictionary (src_group_id, gsim) -> haz_curves or an empty
         dictionary if hazard_curves_from_gmfs is false
     """
     oq = self.oqparam
     if not oq.hazard_curves_from_gmfs and not oq.ground_motion_fields:
         return
     elif oq.hazard_curves_from_gmfs:
         rlzs = self.rlzs_assoc.realizations
         # save individual curves
         for i in sorted(result):
             key = 'hcurves/rlz-%03d' % i
             if result[i]:
                 self.datastore[key] = result[i]
             else:
                 self.datastore[key] = ProbabilityMap(oq.imtls.array.size)
                 logging.info('Zero curves for %s', key)
         # compute and save statistics; this is done in-process since
         # event based calculations involve a "small" number of sites
         # (<= 65,536), so there is no need to parallelize
         weights = [rlz.weight for rlz in rlzs]
         hstats = self.oqparam.hazard_stats()
         if len(hstats) and len(rlzs) > 1:
             for kind, stat in hstats:
                 pmap = compute_pmap_stats(result.values(), [stat], weights)
                 self.datastore['hcurves/' + kind] = pmap
     if self.datastore.parent:
         self.datastore.parent.open()
     if 'gmf_data' in self.datastore:
         self.save_gmf_bytes()
     if oq.compare_with_classical:  # compute classical curves
         export_dir = os.path.join(oq.export_dir, 'cl')
         if not os.path.exists(export_dir):
             os.makedirs(export_dir)
         oq.export_dir = export_dir
         # one could also set oq.number_of_logic_tree_samples = 0
         self.cl = ClassicalCalculator(oq, self.monitor('classical'))
         # TODO: perhaps it is possible to avoid reprocessing the source
         # model; however, usually this is quite fast and does not dominate
         # the computation
         self.cl.run(close=False)
         cl_mean_curves = get_mean_curves(self.cl.datastore)
         eb_mean_curves = get_mean_curves(self.datastore)
         for imt in eb_mean_curves.dtype.names:
             rdiff, index = util.max_rel_diff_index(cl_mean_curves[imt],
                                                    eb_mean_curves[imt])
             logging.warning(
                 'Relative difference with the classical '
                 'mean curves for IMT=%s: %d%% at site index %d', imt,
                 rdiff * 100, index)
Example #29
 def get_pmap(self, src, s_sites, rup_indep=True):
     """
     :param src: a hazardlib source
     :param s_sites: the sites affected by it
     :returns: the probability map generated by the source
     """
     imts = self.imts
     sitecol = s_sites.complete
     N, M = len(sitecol), len(imts)
     fewsites = N <= self.max_sites_disagg
     rupdata = RupData(self)
     nrups, nsites = 0, 0
     L, G = len(self.imtls.array), len(self.gsims)
     poemap = ProbabilityMap(L, G)
     for rup, sites in self._gen_rup_sites(src, s_sites):
         try:
             with self.ctx_mon:
                 r_sites, dctx = self.make_contexts(sites, rup)
         except FarAwayRupture:
             continue
         with self.gmf_mon:
             mean_std = numpy.zeros((2, len(r_sites), M, G))
             for g, gsim in enumerate(self.gsims):
                 dctx_ = dctx.roundup(gsim.minimum_distance)
                 mean_std[:, :, :,
                          g] = gsim.get_mean_std(r_sites, rup, dctx_, imts)
         with self.poe_mon:
             pairs = zip(r_sites.sids, self._make_pnes(rup, mean_std))
         with self.pne_mon:
             if rup_indep:
                 for sid, pne in pairs:
                     poemap.setdefault(sid, rup_indep).array *= pne
             else:
                 for sid, pne in pairs:
                     poemap.setdefault(
                         sid, rup_indep).array += (1. - pne) * rup.weight
         nrups += 1
         nsites += len(r_sites)
         if fewsites:  # store rupdata
             rupdata.add(rup, src.id, r_sites, dctx)
     poemap.nrups = nrups
     poemap.nsites = nsites
     poemap.data = rupdata.data
     return poemap
Example #30
 def acc0(self):
     """
     Initial accumulator, a dict grp_id -> ProbabilityMap(L, G)
     """
     csm_info = self.csm.info
     zd = AccumDict()
     num_levels = len(self.oqparam.imtls.array)
     for grp in self.csm.src_groups:
         num_gsims = len(csm_info.gsim_lt.get_gsims(grp.trt))
         zd[grp.id] = ProbabilityMap(num_levels, num_gsims)
     zd.eff_ruptures = AccumDict()  # grp_id -> eff_ruptures
     zd.nsites = AccumDict()  # src.id -> nsites
     return zd
Example #31
 def make(self):
     self.rupdata = []
     imtls = self.cmaker.imtls
     L, G = imtls.size, len(self.gsims)
     self.pmap = ProbabilityMap(L, G)
     # AccumDict of arrays with 3 elements nrups, nsites, calc_time
     self.calc_times = AccumDict(accum=numpy.zeros(3, numpy.float32))
     if self.src_mutex:
         pmap = self._make_src_mutex()
     else:
         pmap = self._make_src_indep()
     rupdata = self.dictarray(self.rupdata)
     return pmap, rupdata, self.calc_times
Example #32
 def make(self):
     self.rupdata = RupData(self.cmaker)
     imtls = self.cmaker.imtls
     L, G = len(imtls.array), len(self.gsims)
     self.pmap = AccumDict(accum=ProbabilityMap(L, G))  # grp_id -> pmap
     # AccumDict of arrays with 3 elements nrups, nsites, calc_time
     self.calc_times = AccumDict(accum=numpy.zeros(3, numpy.float32))
     self.totrups = 0
     if self.src_mutex:
         pmap = self._make_src_mutex()
     else:
         pmap = self._make_src_indep()
     rdata = {k: numpy.array(v) for k, v in self.rupdata.data.items()}
     return pmap, rdata, self.calc_times, dict(totrups=self.totrups)
Example #33
 def poe_map(self, src, sites, imtls, trunclevel, rup_indep=True):
     """
     :param src: a source object
     :param sites: a filtered SiteCollection
     :param imtls: intensity measure and levels
     :param trunclevel: truncation level
     :param rup_indep: True if the ruptures are independent
     :returns: a ProbabilityMap instance
     """
     pmap = ProbabilityMap.build(len(imtls.array),
                                 len(self.gsims),
                                 sites.sids,
                                 initvalue=rup_indep)
     eff_ruptures = 0
     with self.ir_mon:
         if hasattr(src, 'location'):
             dist = src.location.distance_to_mesh(sites).min()
             if (self.hypo_dist_collapsing_distance is not None
                     and dist > self.hypo_dist_collapsing_distance):
                 # disable floating
                 src.hypocenter_distribution.reduce()
             if (self.nodal_dist_collapsing_distance is not None
                     and dist > self.nodal_dist_collapsing_distance):
                 # disable spinning
                 src.nodal_plane_distribution.reduce()
         rups = list(src.iter_ruptures())
     # normally len(rups) == src.num_ruptures, but in UCERF .iter_ruptures
     # discards far away ruptures: len(rups) < src.num_ruptures can happen
     if len(rups) > src.num_ruptures:
         raise ValueError('Expected at max %d ruptures, got %d' %
                          (src.num_ruptures, len(rups)))
     weight = 1. / len(rups)
     for rup in rups:
         rup.weight = weight
         try:
             with self.ctx_mon:
                 sctx, dctx = self.make_contexts(sites, rup)
         except FarAwayRupture:
             continue
         eff_ruptures += 1
         with self.poe_mon:
             pnes = self._make_pnes(rup, sctx, dctx, imtls, trunclevel)
             for sid, pne in zip(sctx.sids, pnes):
                 if rup_indep:
                     pmap[sid].array *= pne
                 else:
                     pmap[sid].array += pne * rup.weight
     pmap = ~pmap
     pmap.eff_ruptures = eff_ruptures
     return pmap
Example #34
 def get_pmap(self, ctxs, probmap=None):
     """
     :param ctxs: a list of contexts
     :param probmap: if not None, update it
     :returns: a new ProbabilityMap if probmap is None
     """
     tom = self.tom
     rup_indep = self.rup_indep
     if probmap is None:  # create new pmap
         pmap = ProbabilityMap(self.imtls.size, len(self.gsims))
     else:  # update passed probmap
         pmap = probmap
     for block in block_splitter(ctxs, 20_000, len):
         for ctx, poes in self.gen_poes(block):
             # pnes and poes of shape (N, L, G)
             with self.pne_mon:
                 pnes = get_probability_no_exceedance(ctx, poes, tom)
                 for sid, pne in zip(ctx.sids, pnes):
                     probs = pmap.setdefault(sid, self.rup_indep).array
                     if rup_indep:
                         probs *= pne
                     else:  # rup_mutex
                         probs += (1. - pne) * ctx.weight
     if probmap is None:  # return the new pmap
         return ~pmap if rup_indep else pmap
Example #35
 def execute(self):
     """
     Run in parallel `core_task(sources, sitecol, monitor)`, by
     parallelizing on the sources according to their weight and
     tectonic region type.
     """
     oq = self.oqparam
     if oq.hazard_calculation_id:
         parent = self.datastore.parent
         if '_poes' in parent:
             self.post_classical()  # repeat post-processing
             return {}
         else:  # after preclassical, like in case_36
             self.csm = parent['_csm']
             self.full_lt = parent['full_lt']
             self.datastore['source_info'] = parent['source_info'][:]
             max_weight = self.csm.get_max_weight(oq)
     else:
         max_weight = self.max_weight
     self.create_dsets()  # create the rup/ datasets BEFORE swmr_on()
     srcidx = {
         rec[0]: i
         for i, rec in enumerate(self.csm.source_info.values())
     }
     self.haz = Hazard(self.datastore, self.full_lt, srcidx)
     # only groups generating more than 1 task preallocate memory
     num_gs = [len(cm.gsims) for cm in self.haz.cmakers]
     L = oq.imtls.size
     tiles = self.sitecol.split_max(oq.max_sites_per_tile)
     if len(tiles) > 1:
         sizes = [len(tile) for tile in tiles]
         logging.info('There are %d tiles of sizes %s', len(tiles), sizes)
         for size in sizes:
             assert size > oq.max_sites_disagg, (size, oq.max_sites_disagg)
     self.source_data = AccumDict(accum=[])
     self.n_outs = AccumDict(accum=0)
     acc = {}
     for t, tile in enumerate(tiles, 1):
         self.check_memory(len(tile), L, num_gs)
         sids = tile.sids if len(tiles) > 1 else None
         smap = self.submit(sids, self.haz.cmakers, max_weight)
         for cm in self.haz.cmakers:
             acc[cm.grp_id] = ProbabilityMap.build(L, len(cm.gsims))
         smap.reduce(self.agg_dicts, acc)
         logging.debug("busy time: %s", smap.busytime)
         logging.info('Finished tile %d of %d', t, len(tiles))
     self.store_info()
     self.haz.store_disagg(acc)
     return True
Example #36
def poe_map(src, s_sites, imtls, cmaker, trunclevel, bbs, rup_indep,
            ctx_mon, pne_mon, disagg_mon):
    """
    Compute the ProbabilityMap generated by the given source. Also,
    store some information in the monitors and optionally in the
    bounding boxes.
    """
    pmap = ProbabilityMap.build(
        len(imtls.array), len(cmaker.gsims), s_sites.sids, initvalue=rup_indep)
    try:
        for rup, weight in rupture_weight_pairs(src):
            with ctx_mon:  # compute distances
                try:
                    sctx, rctx, dctx = cmaker.make_contexts(s_sites, rup)
                except FarAwayRupture:
                    continue
            with pne_mon:  # compute probabilities and update the pmap
                pnes = get_probability_no_exceedance(
                    rup, sctx, rctx, dctx, imtls, cmaker.gsims, trunclevel)
                for sid, pne in zip(sctx.sites.sids, pnes):
                    if rup_indep:
                        pmap[sid].array *= pne
                    else:
                        pmap[sid].array += pne * weight
            # add optional disaggregation information (bounding boxes)
            if bbs:
                with disagg_mon:
                    sids = set(sctx.sites.sids)
                    jb_dists = dctx.rjb
                    closest_points = rup.surface.get_closest_points(
                        sctx.sites.mesh)
                    bs = [bb for bb in bbs if bb.site_id in sids]
                    # NB: the assert below is always true; we are
                    # protecting against possible refactoring errors
                    assert len(bs) == len(jb_dists) == len(closest_points)
                    for bb, dist, p in zip(bs, jb_dists, closest_points):
                        bb.update([dist], [p.longitude], [p.latitude])
    except Exception as err:
        etype, err, tb = sys.exc_info()
        msg = 'An error occurred with source id=%s. Error: %s'
        msg %= (src.source_id, str(err))
        raise_(etype, msg, tb)
    return ~pmap
Example #37
def make_hmap(pmap, imtls, poes):
    """
    Compute the hazard maps associated to the passed probability map.

    :param pmap: hazard curves in the form of a ProbabilityMap
    :param imtls: I intensity measure types and levels
    :param poes: P PoEs where to compute the maps
    :returns: a ProbabilityMap with size (N, I * P, 1)
    """
    I, P = len(imtls), len(poes)
    hmap = ProbabilityMap.build(I * P, 1, pmap)
    for i, imt in enumerate(imtls):
        curves = numpy.array([pmap[sid].array[imtls.slicedic[imt], 0]
                              for sid in pmap.sids])
        data = compute_hazard_maps(curves, imtls[imt], poes)  # array N x P
        for sid, value in zip(pmap.sids, data):
            array = hmap[sid].array
            for j, val in enumerate(value):
                array[i * P + j] = val
    return hmap
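``compute_hazard_maps`` inverts each hazard curve: given target PoEs, it returns the intensity levels at which the curve crosses them. A rough stand-in using ``numpy.interp`` on one invented curve (the real function is more careful, for instance about interpolation in log space and out-of-range PoEs):

import numpy

imls = numpy.array([0.01, 0.1, 0.2, 0.4])   # intensity levels (invented)
curve = numpy.array([0.9, 0.4, 0.1, 0.01])  # decreasing PoEs (invented)
target_poes = [0.1, 0.02]

# numpy.interp wants increasing x, hence the reversal
hmap = numpy.interp(target_poes, curve[::-1], imls[::-1])  # one IML per PoE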
Example #38
def calc_hazard_curves(
        sources, sites, imtls, gsim_by_trt, truncation_level=None,
        source_site_filter=filters.source_site_noop_filter):
    """
    Compute hazard curves on a list of sites, given a set of seismic sources
    and a set of ground shaking intensity models (one per tectonic region type
    considered in the seismic sources).


    Probability of ground motion exceedance is computed using the following
    formula ::

        P(X≥x|T) = 1 - ∏ ∏ Prup_ij(X<x|T)

    where ``P(X≥x|T)`` is the probability that the ground motion parameter
    ``X`` is exceeding level ``x`` one or more times in a time span ``T``, and
    ``Prup_ij(X<x|T)`` is the probability that the j-th rupture of the i-th
    source is not producing any ground motion exceedance in time span ``T``.
    The first product ``∏`` is done over sources, while the second one is done
    over ruptures in a source.

    The above formula computes the probability of having at least one ground
    motion exceedance in a time span as 1 minus the probability that none of
    the ruptures in none of the sources is causing a ground motion exceedance
    in the same time span. The basic assumption is that seismic sources are
    independent, and ruptures in a seismic source are also independent.

    :param sources:
        A sequence of seismic sources objects (instances of subclasses
        of :class:`~openquake.hazardlib.source.base.BaseSeismicSource`).
    :param sites:
        Instance of :class:`~openquake.hazardlib.site.SiteCollection` object,
        representing sites of interest.
    :param imtls:
        Dictionary mapping intensity measure type strings
        to lists of intensity measure levels.
    :param gsim_by_trt:
        Dictionary mapping tectonic region types (members
        of :class:`openquake.hazardlib.const.TRT`) to
        :class:`~openquake.hazardlib.gsim.base.GMPE` or
        :class:`~openquake.hazardlib.gsim.base.IPE` objects.
    :param truncation_level:
        Float, number of standard deviations for truncation of the intensity
        distribution.
    :param source_site_filter:
        Optional source-site filter function. See
        :mod:`openquake.hazardlib.calc.filters`.

    :returns:
        An array of size N, where N is the number of sites, which elements
        are records with fields given by the intensity measure types; the
        size of each field is given by the number of levels in ``imtls``.
    """
    imtls = DictArray(imtls)
    sources_by_trt = groupby(
        sources, operator.attrgetter('tectonic_region_type'))
    pmap = ProbabilityMap(len(imtls.array), 1)
    for trt in sources_by_trt:
        pmap |= pmap_from_grp(
            sources_by_trt[trt], sites, imtls, [gsim_by_trt[trt]],
            truncation_level, source_site_filter)
    return pmap.convert(imtls, len(sites))
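A worked instance of the docstring formula, with invented numbers: two sources with two ruptures each, where each value is Prup_ij(X<x|T), the probability that a rupture produces no exceedance:

pnes = [[0.99, 0.95],  # source 1 (invented values)
        [0.98, 0.90]]  # source 2

prod = 1.0
for source in pnes:
    for pne in source:
        prod *= pne
poe = 1 - prod  # P(X>=x|T) = 1 - 0.99*0.95*0.98*0.90 ~= 0.1705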
Example #39
def calc_hazard_curves_ext(
        groups, source_site_filter, imtls, gsim_by_trt, truncation_level=None,
        apply=Sequential.apply):
    """
    Compute hazard curves on a list of sites, given a set of seismic source
    groups and a dictionary of ground shaking intensity models (one per
    tectonic region type).

    Probability of ground motion exceedance is computed in different ways
    depending if the sources are independent or mutually exclusive.

    :param groups:
        A sequence of groups of seismic source objects (instances of
        :class:`~openquake.hazardlib.source.base.BaseSeismicSource`).
    :param source_site_filter:
        A source filter over the site collection or the site collection itself
    :param imtls:
        Dictionary mapping intensity measure type strings
        to lists of intensity measure levels.
    :param gsim_by_trt:
        Dictionary mapping tectonic region types (members
        of :class:`openquake.hazardlib.const.TRT`) to
        :class:`~openquake.hazardlib.gsim.base.GMPE` or
        :class:`~openquake.hazardlib.gsim.base.IPE` objects.
    :param truncation_level:
        Float, number of standard deviations for truncation of the intensity
        distribution.
    :returns:
        An array of size N, where N is the number of sites, which elements
        are records with fields given by the intensity measure types; the
        size of each field is given by the number of levels in ``imtls``.
    """
    # This ensures backward compatibility, i.e. processing a plain list
    # of sources
    if not isinstance(groups[0], SourceGroup):
        groups = [SourceGroup(groups, 'src_group', 'indep', 'indep')]

    imtls = DictArray(imtls)
    sitecol = source_site_filter.sitecol
    pmap = ProbabilityMap(len(imtls.array), 1)
    # Processing groups
    for group in groups:
        indep = group.src_interdep == 'indep'
        # Prepare a dictionary
        sources_by_trt = collections.defaultdict(list)
        weights_by_trt = collections.defaultdict(dict)
        # Fill the dictionary with sources for the different tectonic regions
        # belonging to this group
        if indep:
            for src in group.src_list:
                sources_by_trt[src.tectonic_region_type].append(src)
                weights_by_trt[src.tectonic_region_type][src.source_id] = 1
        else:
            for src in group.src_list:
                sources_by_trt[src.tectonic_region_type].append(src)
                w = group.srcs_weights[src.source_id]
                weights_by_trt[src.tectonic_region_type][src.source_id] = w
        # Aggregate results. Note that for now we assume that source groups
        # are independent.
        for trt in sources_by_trt:
            gsim = gsim_by_trt[trt]
            # Create a temporary group
            tmp_group = SourceGroup(sources_by_trt[trt],
                                    'temp',
                                    group.src_interdep,
                                    group.rup_interdep,
                                    weights_by_trt[trt].values(),
                                    False)
            if indep:
                pmap |= pmap_from_grp(
                    tmp_group, source_site_filter, imtls, [gsim],
                    truncation_level)
            else:
                # since in this case the probability for each source has
                # already been accounted for, we use a weight equal to unity
                pmap += pmap_from_grp(
                    tmp_group, sitecol, imtls, [gsim], truncation_level)
    return pmap.convert(imtls, len(sitecol.complete))
Example #40
def ucerf_poe_map(hdf5, ucerf_source, rupset_idx, s_sites, imtls, cmaker,
                  trunclevel, bbs, ctx_mon, pne_mon, disagg_mon):
    """
    Compute a ProbabilityMap generated by the given set of indices.

    :param hdf5:
        UCERF file as instance of open h5py.File object
    :param ucerf_source:
        UCERFControl object
    :param list rupset_idx:
        List of rupture indices
    """
    pmap = ProbabilityMap.build(len(imtls.array), len(cmaker.gsims),
                                s_sites.sids, initvalue=1.)
    try:
        for ridx in rupset_idx:
            # Get the ucerf rupture
            if not hdf5[ucerf_source.idx_set["rate_idx"]][ridx]:
                # Ruptures seem to have a zero probability from time to time
                # If this happens, skip it
                continue

            rup, ridx_string = get_ucerf_rupture(
                hdf5, ridx,
                ucerf_source.idx_set,
                ucerf_source.tom, s_sites,
                ucerf_source.integration_distance,
                ucerf_source.mesh_spacing,
                ucerf_source.tectonic_region_type)
            if not rup:
                # rupture outside of integration distance
                continue
            with ctx_mon:  # compute distances
                try:
                    sctx, rctx, dctx = cmaker.make_contexts(s_sites, rup)
                except FarAwayRupture:
                    continue
            with pne_mon:  # compute probabilities and update the pmap
                pnes = get_probability_no_exceedance(
                    rup, sctx, rctx, dctx, imtls, cmaker.gsims, trunclevel)
                for sid, pne in zip(sctx.sites.sids, pnes):
                    pmap[sid].array *= pne

            # add optional disaggregation information (bounding boxes)
            if bbs:
                with disagg_mon:
                    sids = set(sctx.sites.sids)
                    jb_dists = dctx.rjb
                    closest_points = rup.surface.get_closest_points(
                        sctx.sites.mesh)
                    bs = [bb for bb in bbs if bb.site_id in sids]
                    # NB: the assert below is always true; we are
                    # protecting against possible refactoring errors
                    assert len(bs) == len(jb_dists) == len(closest_points)
                    for bb, dist, p in zip(bs, jb_dists, closest_points):
                        bb.update([dist], [p.longitude], [p.latitude])
    except Exception as err:
        etype, err, tb = sys.exc_info()
        msg = 'An error occurred with rupture=%s. Error: %s'
        msg %= (ridx, str(err))
        raise_(etype, msg, tb)
    return ~pmap
Example #41
def calc_hazard_curves(
        groups, ss_filter, imtls, gsim_by_trt, truncation_level=None,
        apply=sequential_apply, filter_distance='rjb', reqv=None):
    """
    Compute hazard curves on a list of sites, given a set of seismic source
    groups and a dictionary of ground shaking intensity models (one per
    tectonic region type).

    Probability of ground motion exceedance is computed in different ways
    depending if the sources are independent or mutually exclusive.

    :param groups:
        A sequence of groups of seismic source objects (instances of
        :class:`~openquake.hazardlib.source.base.BaseSeismicSource`).
    :param ss_filter:
        A source filter over the site collection or the site collection itself
    :param imtls:
        Dictionary mapping intensity measure type strings
        to lists of intensity measure levels.
    :param gsim_by_trt:
        Dictionary mapping tectonic region types (members
        of :class:`openquake.hazardlib.const.TRT`) to
        :class:`~openquake.hazardlib.gsim.base.GMPE` or
        :class:`~openquake.hazardlib.gsim.base.IPE` objects.
    :param truncation_level:
        Float, number of standard deviations for truncation of the intensity
        distribution.
    :param apply:
        apply function to use (default sequential_apply)
    :param filter_distance:
        The distance used to filter the ruptures (default rjb)
    :param reqv:
        If not None, an instance of RjbEquivalent
    :returns:
        An array of size N, where N is the number of sites, which elements
        are records with fields given by the intensity measure types; the
        size of each field is given by the number of levels in ``imtls``.
    """
    # This ensures backward compatibility, i.e. processing a plain list
    # of sources
    if not isinstance(groups[0], SourceGroup):  # sent a list of sources
        odic = groupby(groups, operator.attrgetter('tectonic_region_type'))
        groups = [SourceGroup(trt, odic[trt], 'src_group', 'indep', 'indep')
                  for trt in odic]
    # ensure the sources have the right src_group_id
    for i, grp in enumerate(groups):
        for src in grp:
            if src.src_group_id is None:
                src.src_group_id = i
    imtls = DictArray(imtls)
    param = dict(imtls=imtls, truncation_level=truncation_level,
                 filter_distance=filter_distance, reqv=reqv,
                 cluster=grp.cluster)  # grp is the last group of the loop above
    pmap = ProbabilityMap(len(imtls.array), 1)
    # Processing groups with homogeneous tectonic region
    gsim = gsim_by_trt[groups[0][0].tectonic_region_type]
    mon = Monitor()
    for group in groups:
        if group.atomic:  # do not split
            it = [classical(group, ss_filter, [gsim], param, mon)]
        else:  # split the group and apply `classical` in parallel
            it = apply(
                classical, (group.sources, ss_filter, [gsim], param, mon),
                weight=operator.attrgetter('weight'))
        for dic in it:
            for grp_id, pval in dic['pmap'].items():
                pmap |= pval
    sitecol = getattr(ss_filter, 'sitecol', ss_filter)
    return pmap.convert(imtls, len(sitecol.complete))