Example #1
def compute_losses(ssm, src_filter, param, riskmodel, monitor):
    """
    Compute the losses for a single source model. The events and the
    effective ruptures are attached to the returned list of losses as
    the attributes `.events_by_grp` and `.eff_ruptures`.

    :param ssm: CompositeSourceModel containing a single source model
    :param src_filter: a SourceFilter instance
    :param param: a dictionary of extra parameters
    :param riskmodel: a RiskModel instance
    :param monitor: a Monitor instance
    :returns: a List containing the losses by taxonomy and some attributes
    """
    [grp] = ssm.src_groups
    res = List()
    rlzs_assoc = ssm.info.get_rlzs_assoc()
    rlzs_by_gsim = rlzs_assoc.get_rlzs_by_gsim(DEFAULT_TRT)
    hazard = compute_hazard(grp, src_filter, rlzs_by_gsim, param, monitor)
    [(grp_id, ebruptures)] = hazard['ruptures'].items()

    samples = ssm.info.get_samples_by_grp()
    num_rlzs = len(rlzs_assoc.realizations)
    getter = getters.GmfGetter(rlzs_by_gsim, ebruptures, src_filter.sitecol,
                               param['oqparam'], param['min_iml'],
                               samples[grp_id])
    ri = riskinput.RiskInput(getter, param['assetcol'].assets_by_site())
    res.append(ucerf_risk(ri, riskmodel, param, monitor))
    res.sm_id = ssm.sm_id
    res.num_events = len(ri.hazard_getter.eids)
    start = res.sm_id * num_rlzs
    res.rlz_slice = slice(start, start + num_rlzs)
    res.events_by_grp = hazard.events_by_grp
    res.eff_ruptures = hazard.eff_ruptures
    return res
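The subtle part above is the realization bookkeeping: each source model owns a contiguous block of realization indices, selected through res.rlz_slice. A minimal standalone sketch of that arithmetic (plain Python, the numbers are hypothetical):

# Each source model sm_id owns num_rlzs consecutive realization indices,
# as in `start = res.sm_id * num_rlzs` above.
num_rlzs = 4
all_rlzs = list(range(3 * num_rlzs))  # 3 source models, 12 realizations
for sm_id in range(3):
    start = sm_id * num_rlzs
    rlz_slice = slice(start, start + num_rlzs)
    print(sm_id, all_rlzs[rlz_slice])
# 0 [0, 1, 2, 3]
# 1 [4, 5, 6, 7]
# 2 [8, 9, 10, 11]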
Example #2
def ebrisk(rupgetters, srcfilter, param, monitor):
    """
    :param rupgetters: RuptureGetters with 1 rupture each
    :param srcfilter: a SourceFilter
    :param param: dictionary of parameters coming from oqparam
    :param monitor: a Monitor instance
    :returns: a dictionary with keys elt, alt, ...
    """
    mon_haz = monitor('getting hazard', measuremem=False)
    computers = []
    with monitor('getting ruptures'):
        for rupgetter in rupgetters:
            gg = getters.GmfGetter(rupgetter, srcfilter, param['oqparam'])
            gg.init()
            computers.extend(gg.computers)
    if not computers:  # all filtered out
        return {}
    rupgetters.clear()
    computers.sort(key=lambda c: c.rupture.ridx)
    hazard = dict(gmfs=[], events=[], gmf_info=[])
    for c in computers:
        with mon_haz:
            data = c.compute_all(gg.min_iml, gg.rlzs_by_gsim)
            hazard['gmfs'].append(data)
            hazard['events'].append(c.rupture.get_events(gg.rlzs_by_gsim))
        hazard['gmf_info'].append((c.rupture.ridx, mon_haz.task_no,
                                   len(c.sids), data.nbytes, mon_haz.dt))
    computers.clear()
    acc = _calc_risk(hazard, param, monitor)
    return acc
Example #3
def ebrisk(rupgetter, param, monitor):
    """
    :param rupgetter: RuptureGetter with multiple ruptures
    :param param: dictionary of parameters coming from oqparam
    :param monitor: a Monitor instance
    :yields: dictionaries of arrays
    """
    mon_rup = monitor('getting ruptures', measuremem=False)
    mon_haz = monitor('getting hazard', measuremem=True)
    alldata = general.AccumDict(accum=[])
    gmf_info = []
    srcfilter = monitor.read('srcfilter')
    param['N'] = len(srcfilter.sitecol.complete)
    gg = getters.GmfGetter(rupgetter, srcfilter, param['oqparam'],
                           param['amplifier'])
    with mon_haz:
        for c in gg.gen_computers(mon_rup):
            data, time_by_rup = c.compute_all(gg.min_iml, gg.rlzs_by_gsim)
            if len(data):
                for key, val in data.items():
                    alldata[key].extend(val)
                nbytes = len(data['sid']) * len(data) * 4
                gmf_info.append((c.ebrupture.id, mon_haz.task_no, len(c.sids),
                                 nbytes, mon_haz.dt))
    if not alldata:
        return  # generator: a bare return just stops the iteration
    for key in sorted(alldata):
        if key in 'eid sid rlz':
            alldata[key] = U32(alldata[key])
        else:
            alldata[key] = F32(alldata[key])
    yield from event_based_risk(pandas.DataFrame(alldata), param, monitor)
    if gmf_info:
        yield {'gmf_info': numpy.array(gmf_info, gmf_info_dt)}
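Two details worth noting in this variant: key in 'eid sid rlz' is a substring test on the string 'eid sid rlz', not list membership (it works because the integer columns are named exactly eid, sid and rlz, though it would also match an accidental substring such as 'id'); and the coerced columns feed straight into a pandas DataFrame. A self-contained sketch of the coercion, with hypothetical values:

import numpy
import pandas

U32 = numpy.uint32
F32 = numpy.float32

alldata = {'eid': [0, 1], 'sid': [5, 5], 'gmv_0': [0.1, 0.2]}
for key in sorted(alldata):
    # substring test: 'eid' and 'sid' match, 'gmv_0' does not
    alldata[key] = numpy.array(alldata[key],
                               U32 if key in 'eid sid rlz' else F32)
print(pandas.DataFrame(alldata).dtypes)  # eid/sid uint32, gmv_0 float32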
Example #4
def ebrisk(rupgetter, param, monitor):
    """
    :param rupgetter: RuptureGetter with multiple ruptures
    :param param: dictionary of parameters coming from oqparam
    :param monitor: a Monitor instance
    :returns: a dictionary with keys elt, alt, ...
    """
    mon_rup = monitor('getting ruptures', measuremem=False)
    mon_haz = monitor('getting hazard', measuremem=True)
    gmfs = []
    gmf_info = []
    srcfilter = monitor.read('srcfilter')
    gg = getters.GmfGetter(rupgetter, srcfilter, param['oqparam'],
                           param['amplifier'])
    nbytes = 0
    with mon_haz:
        for c in gg.gen_computers(mon_rup):
            data, time_by_rup = c.compute_all(gg.min_iml, gg.rlzs_by_gsim)
            if len(data):
                gmfs.append(data)
                nbytes += data.nbytes
                gmf_info.append((c.ebrupture.id, mon_haz.task_no, len(c.sids),
                                 data.nbytes, mon_haz.dt))
    if not gmfs:
        return {}
    res = calc_risk(numpy.concatenate(gmfs), param, monitor)
    if gmf_info:
        res['gmf_info'] = numpy.array(gmf_info, gmf_info_dt)
    return res
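The arrays appended to gmfs are structured numpy arrays, so a single numpy.concatenate merges them before the risk step. A reduced sketch (the field names here are assumed; the real GMF arrays carry more fields):

import numpy

gmf_dt = numpy.dtype([('sid', numpy.uint32), ('eid', numpy.uint32),
                      ('gmv', numpy.float32)])
a = numpy.array([(0, 0, 0.1)], gmf_dt)
b = numpy.array([(1, 0, 0.2), (1, 1, 0.3)], gmf_dt)
print(numpy.concatenate([a, b]))  # one flat array, as passed to calc_risk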
Example #5
def ebrisk(rupgetter, srcfilter, param, monitor):
    """
    :param rupgetter: RuptureGetter with multiple ruptures
    :param srcfilter: a SourceFilter
    :param param: dictionary of parameters coming from oqparam
    :param monitor: a Monitor instance
    :returns: a dictionary with keys elt, alt, ...
    """
    mon_haz = monitor('getting hazard', measuremem=False)
    mon_rup = monitor('getting ruptures', measuremem=False)
    hazard = dict(gmfs=[], events=[], gmf_info=[])
    for rg in rupgetter.split():
        with mon_rup:
            gg = getters.GmfGetter(rg, srcfilter, param['oqparam'])
            gg.init()
        if not gg.computers:  # filtered out rupture
            continue
        [c] = gg.computers
        with mon_haz:
            data = c.compute_all(gg.min_iml, gg.rlzs_by_gsim)
            hazard['gmfs'].append(data)
            hazard['events'].append(c.rupture.get_events(gg.rlzs_by_gsim))
        hazard['gmf_info'].append(
            (c.rupture.id, mon_haz.task_no, len(c.sids),
             data.nbytes, mon_haz.dt))
    if not hazard['gmfs']:
        return {}
    with monitor('getting assets'):
        assetcol = datastore.read(param['hdf5path'])['assetcol']
    return calc_risk(hazard, assetcol, param, monitor)
Example #6
def compute_hazard(sources, src_filter, rlzs_by_gsim, param, monitor):
    """
    :param sources: a list with a single UCERF source
    :param src_filter: a SourceFilter instance
    :param rlzs_by_gsim: a dictionary gsim -> rlzs
    :param param: extra parameters
    :param monitor: a Monitor instance
    :returns: an AccumDict grp_id -> EBRuptures
    """
    [src] = sources
    res = AccumDict()
    res.calc_times = []
    serial = 1
    sampl_mon = monitor('sampling ruptures', measuremem=True)
    filt_mon = monitor('filtering ruptures', measuremem=False)
    res.trt = DEFAULT_TRT
    ebruptures = []
    background_sids = src.get_background_sids(src_filter)
    sitecol = src_filter.sitecol
    cmaker = ContextMaker(rlzs_by_gsim, src_filter.integration_distance)
    for sample in range(param['samples']):
        for ses_idx, ses_seed in param['ses_seeds']:
            seed = sample * TWO16 + ses_seed
            with sampl_mon:
                rups, n_occs = generate_event_set(src, background_sids,
                                                  src_filter, seed)
            with filt_mon:
                for rup, n_occ in zip(rups, n_occs):
                    rup.serial = serial
                    try:
                        rup.sctx, rup.dctx = cmaker.make_contexts(sitecol, rup)
                        indices = rup.sctx.sids
                    except FarAwayRupture:
                        continue
                    events = []
                    for _ in range(n_occ):
                        events.append((0, src.src_group_id, ses_idx, sample))
                    if events:
                        evs = numpy.array(events, stochastic.event_dt)
                        ebruptures.append(EBRupture(rup, src.id, indices, evs))
                        serial += 1
    res.num_events = len(stochastic.set_eids(ebruptures))
    res['ruptures'] = {src.src_group_id: ebruptures}
    if param['save_ruptures']:
        res.ruptures_by_grp = {src.src_group_id: ebruptures}
    else:
        res.events_by_grp = {
            src.src_group_id: event_based.get_events(ebruptures)
        }
    res.eff_ruptures = {src.src_group_id: src.num_ruptures}
    if param.get('gmf'):
        getter = getters.GmfGetter(rlzs_by_gsim, ebruptures, sitecol,
                                   param['oqparam'], param['min_iml'],
                                   param['samples'])
        res.update(getter.compute_gmfs_curves(monitor))
    return res
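The seed formula seed = sample * TWO16 + ses_seed (with TWO16 = 2 ** 16) assigns every (sample, SES) pair a distinct, reproducible seed, provided the SES seeds stay below 2 ** 16. A quick standalone check:

TWO16 = 2 ** 16  # 65536

def make_seed(sample, ses_seed):
    # distinct per (sample, ses_seed) pair as long as ses_seed < TWO16
    return sample * TWO16 + ses_seed

seeds = {make_seed(s, e) for s in range(10) for e in range(100)}
assert len(seeds) == 1000  # no collisions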
Example #7
def compute_hazard(sources, src_filter, rlzs_by_gsim, param, monitor):
    """
    :param sources: a list with a single UCERF source
    :param src_filter: a SourceFilter instance
    :param rlzs_by_gsim: a dictionary gsim -> rlzs
    :param param: extra parameters
    :param monitor: a Monitor instance
    :returns: an AccumDict grp_id -> EBRuptures
    """
    [src] = sources
    res = AccumDict()
    res.calc_times = []
    serial = 1
    sampl_mon = monitor('sampling ruptures', measuremem=True)
    filt_mon = monitor('filtering ruptures', measuremem=False)
    res.trt = DEFAULT_TRT
    background_sids = src.get_background_sids(src_filter)
    sitecol = src_filter.sitecol
    cmaker = ContextMaker(rlzs_by_gsim, src_filter.integration_distance)
    num_ses = param['ses_per_logic_tree_path']
    samples = getattr(src, 'samples', 1)
    n_occ = AccumDict(accum=numpy.zeros((samples, num_ses), numpy.uint16))
    with sampl_mon:
        for sam_idx in range(samples):
            for ses_idx, ses_seed in param['ses_seeds']:
                seed = sam_idx * TWO16 + ses_seed
                rups, occs = generate_event_set(src, background_sids,
                                                src_filter, seed)
                for rup, occ in zip(rups, occs):
                    n_occ[rup][sam_idx, ses_idx] = occ
                    rup.serial = serial
                    serial += 1
    with filt_mon:
        rlzs = numpy.concatenate(list(rlzs_by_gsim.values()))
        ebruptures = stochastic.build_eb_ruptures(src, rlzs, num_ses, cmaker,
                                                  sitecol, n_occ.items())
    res.num_events = sum(ebr.multiplicity for ebr in ebruptures)
    res['ruptures'] = {src.src_group_id: ebruptures}
    if param['save_ruptures']:
        res.ruptures_by_grp = {src.src_group_id: ebruptures}
    else:
        res.events_by_grp = {
            src.src_group_id: event_based.get_events(ebruptures)
        }
    res.eff_ruptures = {src.src_group_id: src.num_ruptures}
    if param.get('gmf'):
        getter = getters.GmfGetter(rlzs_by_gsim, ebruptures, sitecol,
                                   param['oqparam'], param['min_iml'], samples)
        res.update(getter.compute_gmfs_curves(monitor))
    return res
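Compared to Example #6, the sampling loop here only counts occurrences: n_occ maps each rupture to a (samples, num_ses) matrix, and the filtering happens once afterwards. A minimal sketch of the accumulator, using a plain defaultdict in place of AccumDict and strings in place of rupture objects:

import numpy
from collections import defaultdict

samples, num_ses = 2, 3
n_occ = defaultdict(lambda: numpy.zeros((samples, num_ses), numpy.uint16))
n_occ['rup-A'][0, 2] = 4  # rupture A occurs 4 times in sample 0, SES 2
n_occ['rup-A'][1, 0] = 1
print(n_occ['rup-A'])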
Example #8
def compute_losses(ssm, src_filter, param, riskmodel,
                   imts, trunc_level, correl_model, min_iml, monitor):
    """
    Compute the losses for a single source model. The events and the
    effective ruptures are attached to the returned list of losses as
    the attributes `.events_by_grp` and `.eff_ruptures`.

    :param ssm: CompositeSourceModel containing a single source model
    :param src_filter: a SourceFilter instance
    :param param: a dictionary of parameters
    :param riskmodel: a RiskModel instance
    :param imts: a list of Intensity Measure Types
    :param trunc_level: truncation level
    :param correl_model: correlation model
    :param min_iml: vector of minimum intensities, one per IMT
    :param monitor: a Monitor instance
    :returns: a List containing the losses by taxonomy and some attributes
    """
    [grp] = ssm.src_groups
    res = List()
    gsims = ssm.gsim_lt.values[DEFAULT_TRT]
    ruptures_by_grp = compute_ruptures(
        grp, src_filter, gsims, param, monitor)
    [(grp_id, ebruptures)] = ruptures_by_grp.items()
    rlzs_assoc = ssm.info.get_rlzs_assoc()
    samples = ssm.info.get_samples_by_grp()
    num_rlzs = len(rlzs_assoc.realizations)
    rlzs_by_gsim = rlzs_assoc.get_rlzs_by_gsim(DEFAULT_TRT)
    getter = getters.GmfGetter(
        rlzs_by_gsim, ebruptures, src_filter.sitecol, imts, min_iml,
        src_filter.integration_distance, trunc_level, correl_model,
        samples[grp_id])
    ri = riskinput.RiskInput(getter, param['assetcol'].assets_by_site())
    res.append(event_based_risk(ri, riskmodel, param, monitor))
    res.sm_id = ssm.sm_id
    res.num_events = len(ri.hazard_getter.eids)
    start = res.sm_id * num_rlzs
    res.rlz_slice = slice(start, start + num_rlzs)
    res.events_by_grp = ruptures_by_grp.events_by_grp
    res.eff_ruptures = ruptures_by_grp.eff_ruptures
    return res
Example #9
def ebrisk(rupgetter, srcfilter, param, monitor):
    """
    :param rupgetter: RuptureGetter with multiple ruptures
    :param srcfilter: a SourceFilter
    :param param: dictionary of parameters coming from oqparam
    :param monitor: a Monitor instance
    :returns: a dictionary with keys elt, alt, ...
    """
    mon_rup = monitor('getting ruptures', measuremem=False)
    mon_haz = monitor('getting hazard', measuremem=False)
    gmfs = []
    gmf_info = []
    gg = getters.GmfGetter(rupgetter, srcfilter, param['oqparam'],
                           param['amplifier'])
    nbytes = 0
    for c in gg.gen_computers(mon_rup):
        with mon_haz:
            data, time_by_rup = c.compute_all(gg.min_iml, gg.rlzs_by_gsim)
        if len(data):
            gmfs.append(data)
            nbytes += data.nbytes
        gmf_info.append((c.ebrupture.id, mon_haz.task_no, len(c.sids),
                         data.nbytes, mon_haz.dt))
        if nbytes > param['ebrisk_maxsize']:
            msg = 'produced subtask'
            try:
                logs.dbcmd('log', monitor.calc_id, datetime.utcnow(), 'DEBUG',
                           'ebrisk#%d' % monitor.task_no, msg)
            except Exception:  # for `oq run`
                print(msg)
            yield calc_risk, numpy.concatenate(gmfs), param
            nbytes = 0
            gmfs = []
    res = {}
    if gmfs:
        res.update(calc_risk(numpy.concatenate(gmfs), param, monitor))
    if gmf_info:
        res['gmf_info'] = numpy.array(gmf_info, gmf_info_dt)
    yield res
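This variant is a generator: once the accumulated GMF size exceeds ebrisk_maxsize, it yields a (callable, arguments...) tuple, which the task runner can turn into a separate subtask instead of a final result. A standalone sketch of that producer contract (all names hypothetical):

def process(batch):
    return {'n': sum(len(c) for c in batch)}

def producer(chunks, maxsize):
    # yields either (func, args) subtasks or plain result dicts
    batch, size = [], 0
    for chunk in chunks:
        batch.append(chunk)
        size += len(chunk)
        if size > maxsize:
            yield process, batch  # delegate the heavy work
            batch, size = [], 0
    if batch:
        yield process(batch)  # compute the leftovers inline

for out in producer(['aa', 'bbbb', 'cc'], maxsize=3):
    if isinstance(out, tuple):  # a (func, args) subtask
        func, batch = out
        out = func(batch)
    print(out)  # {'n': 6} then {'n': 2}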
Example #10
def ebrisk(rupgetter, srcfilter, param, monitor):
    """
    :param rupgetter:
        a RuptureGetter instance
    :param srcfilter:
        a SourceFilter instance
    :param param:
        a dictionary of parameters
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    :returns:
        a dictionary with keys elt, agg_losses, times, ...
    """
    riskmodel = param['riskmodel']
    E = rupgetter.num_events
    L = len(riskmodel.lti)
    N = len(srcfilter.sitecol.complete)
    e1 = rupgetter.first_event
    with monitor('getting assets', measuremem=False):
        with datastore.read(srcfilter.filename) as dstore:
            assetcol = dstore['assetcol']
        assets_by_site = assetcol.assets_by_site()
    A = len(assetcol)
    getter = getters.GmfGetter(rupgetter, srcfilter, param['oqparam'])
    with monitor('getting hazard'):
        getter.init()  # instantiate the computers
        hazard = getter.get_hazard_by_sid()  # sid -> (sid, eid, gmv)
    mon_risk = monitor('computing risk', measuremem=False)
    mon_agg = monitor('aggregating losses', measuremem=False)
    events = rupgetter.get_eid_rlz()
    # numpy.testing.assert_equal(events['eid'], sorted(events['eid']))
    eid2idx = dict(zip(events['eid'], range(e1, e1 + E)))
    tagnames = param['aggregate_by']
    shape = assetcol.tagcol.agg_shape((E, L), tagnames)
    elt_dt = [('eid', U64), ('rlzi', U16), ('loss', (F32, shape[1:]))]
    if param['asset_loss_table']:
        alt = numpy.zeros((A, E, L), F32)
    acc = numpy.zeros(shape, F32)  # shape (E, L, T...)
    if param['avg_losses']:
        losses_by_A = numpy.zeros((A, L), F32)
    else:
        losses_by_A = 0
    # NB: IMT-dependent weights are not supported in ebrisk
    times = numpy.zeros(N)  # risk time per site_id
    num_events_per_sid = 0
    epspath = param['epspath']
    gmf_nbytes = 0
    for sid, haz in hazard.items():
        gmf_nbytes += haz.nbytes
        t0 = time.time()
        assets_on_sid = assets_by_site[sid]
        if len(assets_on_sid) == 0:
            continue
        num_events_per_sid += len(haz)
        if param['avg_losses']:
            weights = getter.weights[[
                getter.eid2rlz[eid] for eid in haz['eid']
            ]]
        assets_by_taxo = get_assets_by_taxo(assets_on_sid, epspath)
        eidx = numpy.array([eid2idx[eid] for eid in haz['eid']]) - e1
        haz['eid'] = eidx + e1
        with mon_risk:
            out = riskmodel.get_output(assets_by_taxo, haz)
        with mon_agg:
            for a, asset in enumerate(assets_on_sid):
                aid = asset['ordinal']
                tagi = asset[tagnames] if tagnames else ()
                tagidxs = tuple(idx - 1 for idx in tagi)
                for lti, lt in enumerate(riskmodel.loss_types):
                    lratios = out[lt][a]
                    if lt == 'occupants':
                        losses = lratios * asset['occupants_None']
                    else:
                        losses = lratios * asset['value-' + lt]
                    if param['asset_loss_table']:
                        alt[aid, eidx, lti] = losses
                    acc[(eidx, lti) + tagidxs] += losses
                    if param['avg_losses']:
                        losses_by_A[aid, lti] += losses @ weights
            times[sid] = time.time() - t0
    if hazard:
        num_events_per_sid /= len(hazard)
    with monitor('building event loss table'):
        elt = numpy.fromiter(
            ((event['eid'], event['rlz'], losses)
             for event, losses in zip(events, acc) if losses.sum()), elt_dt)
        agg = general.AccumDict(accum=numpy.zeros(shape[1:], F32))  # rlz->agg
        for rec in elt:
            agg[rec['rlzi']] += rec['loss'] * param['ses_ratio']
    res = {
        'elt': elt,
        'agg_losses': agg,
        'times': times,
        'events_per_sid': num_events_per_sid,
        'gmf_nbytes': gmf_nbytes
    }
    if param['avg_losses']:
        res['losses_by_A'] = losses_by_A * param['ses_ratio']
    if param['asset_loss_table']:
        eidx = numpy.array([eid2idx[eid] for eid in events['eid']])
        res['alt_eidx'] = alt, eidx
    return res
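The event loss table is built with numpy.fromiter over a generator that drops zero-loss events. A reduced sketch with a scalar loss field (the real elt_dt stores an (L, T...) subarray per event):

import numpy

U64, U16, F32 = numpy.uint64, numpy.uint16, numpy.float32
elt_dt = [('eid', U64), ('rlzi', U16), ('loss', F32)]
events = numpy.array([(0, 0), (1, 0), (2, 1)],
                     [('eid', U64), ('rlz', U16)])
acc = numpy.array([1.5, 0.0, 2.0], F32)  # one loss per event

elt = numpy.fromiter(
    ((ev['eid'], ev['rlz'], loss)
     for ev, loss in zip(events, acc) if loss), elt_dt)
print(elt)  # zero-loss event 1 is dropped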
Example #11
def ebrisk(rupgetter, srcfilter, param, monitor):
    """
    :param rupgetter:
        a RuptureGetter instance
    :param srcfilter:
        a SourceFilter instance
    :param param:
        a dictionary of parameters
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    :returns:
        a dictionary with keys elt, agg_losses, times, ...
    """
    riskmodel = param['riskmodel']
    L = len(riskmodel.lti)
    N = len(srcfilter.sitecol.complete)
    mon = monitor('getting assets', measuremem=False)
    with datastore.read(srcfilter.filename) as dstore:
        assgetter = getters.AssetGetter(dstore)
    getter = getters.GmfGetter(rupgetter, srcfilter, param['oqparam'])
    with monitor('getting hazard'):
        getter.init()  # instantiate the computers
        hazard = getter.get_hazard()  # sid -> (rlzi, sid, eid, gmv)
    mon_risk = monitor('computing losses', measuremem=False)
    mon_agg = monitor('aggregating losses', measuremem=False)
    imts = getter.imts
    events = rupgetter.get_eid_rlz()
    eid2idx = {eid: idx for idx, eid in enumerate(events['eid'])}
    tagnames = param['aggregate_by']
    shape = assgetter.tagcol.agg_shape((len(events), L), tagnames)
    elt_dt = [('eid', U64), ('rlzi', U16), ('loss', (F32, shape[1:]))]
    acc = numpy.zeros(shape, F32)  # shape (E, L, T...)
    if param['avg_losses']:
        losses_by_A = numpy.zeros((assgetter.num_assets, L), F32)
    else:
        losses_by_A = 0
    times = numpy.zeros(N)  # risk time per site_id
    for sid, haz in hazard.items():
        t0 = time.time()
        weights = getter.weights[haz['rlzi']]
        assets_on_sid, tagidxs = assgetter.get(sid, tagnames)
        eidx = [eid2idx[eid] for eid in haz['eid']]
        mon.duration += time.time() - t0
        mon.counts += 1
        with mon_risk:
            assets_ratios = riskmodel.get_assets_ratios(
                assets_on_sid, haz['gmv'], imts)
        with mon_agg:
            for assets, ratios in assets_ratios:
                taxo = assets[0].taxonomy
                ws_by_lti = [
                    weights[vf.imt]
                    for vf in riskmodel[taxo].risk_functions.values()
                ]
                for lti, loss_ratios in enumerate(ratios):
                    ws = ws_by_lti[lti]
                    lt = riskmodel.loss_types[lti]
                    for asset in assets:
                        aid = asset.ordinal
                        losses = loss_ratios * asset.value(lt)
                        acc[(eidx, lti) + tagidxs[aid]] += losses
                        if param['avg_losses']:
                            losses_by_A[aid, lti] += losses @ ws
            times[sid] = time.time() - t0
    with monitor('building event loss table'):
        elt = numpy.fromiter(
            ((event['eid'], event['rlz'], losses)
             for event, losses in zip(events, acc) if losses.sum()), elt_dt)
        agg = general.AccumDict(accum=numpy.zeros(shape[1:], F32))  # rlz->agg
        for rec in elt:
            agg[rec['rlzi']] += rec['loss'] * param['ses_ratio']
    res = {'elt': elt, 'agg_losses': agg, 'times': times}
    if param['avg_losses']:
        res['losses_by_A'] = losses_by_A * param['ses_ratio']
    return res
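The losses @ ws product is the weighted-average update: one realization weight per event, contracted against the per-event losses. The arithmetic in isolation:

import numpy

losses = numpy.array([10.0, 0.0, 4.0], numpy.float32)  # per-event losses
ws = numpy.array([0.5, 0.3, 0.2], numpy.float32)       # per-event weights
print(losses @ ws)  # 10*0.5 + 0*0.3 + 4*0.2 = 5.8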
Example #12
    def start_tasks(self, sm_id, sitecol, assetcol, riskmodel, imtls,
                    trunc_level, correl_model, min_iml):
        """
        :param sm_id: source model ordinal
        :param sitecol: a SiteCollection instance
        :param assetcol: an AssetCollection instance
        :param riskmodel: a RiskModel instance
        :param imtls: Intensity Measure Types and Levels
        :param trunc_level: truncation level
        :param correl_model: correlation model
        :param min_iml: vector of minimum intensities, one per IMT
        :returns: an IterResult instance
        """
        sm_info = self.csm_info.get_info(sm_id)
        grp_ids = sorted(sm_info.get_sm_by_grp())
        rlzs_assoc = sm_info.get_rlzs_assoc()
        # prepare the risk inputs
        allargs = []
        ruptures_per_block = self.oqparam.ruptures_per_block
        try:
            csm_info = self.csm.info
        except AttributeError:  # there is no .csm if --hc was given
            csm_info = self.datastore['csm_info']
        samples_by_grp = csm_info.get_samples_by_grp()
        num_events = 0
        num_ruptures = {}
        taskname = '%s#%d' % (event_based_risk.__name__, sm_id + 1)
        monitor = self.monitor(taskname)
        for grp_id in grp_ids:
            ruptures = self.ruptures_by_grp.get(grp_id, [])
            rlzs_by_gsim = rlzs_assoc.get_rlzs_by_gsim(grp_id)
            samples = samples_by_grp[grp_id]
            num_ruptures[grp_id] = len(ruptures)
            from_parent = hasattr(ruptures, 'split')
            if from_parent:  # read the ruptures from the parent datastore
                logging.info('Reading ruptures group #%d', grp_id)
                with self.monitor('reading ruptures', measuremem=True):
                    blocks = ruptures.split(ruptures_per_block)
            else:  # the ruptures are already in memory
                blocks = block_splitter(ruptures, ruptures_per_block)
            for rupts in blocks:
                n_events = (rupts.n_events if from_parent
                            else sum(ebr.multiplicity for ebr in rupts))
                eps = self.get_eps(self.start, self.start + n_events)
                num_events += n_events
                self.start += n_events
                getter = getters.GmfGetter(
                    rlzs_by_gsim, rupts, sitecol, imtls, min_iml,
                    self.oqparam.maximum_distance, trunc_level, correl_model,
                    self.oqparam.filter_distance, samples)
                ri = riskinput.RiskInput(getter, self.assets_by_site, eps)
                allargs.append((ri, riskmodel, assetcol, monitor))

        if self.datastore.parent:  # avoid hdf5 fork issues
            self.datastore.parent.close()
        ires = parallel.Starmap(
            event_based_risk, allargs, name=taskname).submit_all()
        ires.num_ruptures = num_ruptures
        ires.num_events = num_events
        ires.num_rlzs = len(rlzs_assoc.realizations)
        ires.sm_id = sm_id
        return ires
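start_tasks batches the ruptures into blocks of at most ruptures_per_block before building one RiskInput per block. A sketch of that batching with a simplified stand-in for block_splitter (the real openquake.baselib.general.block_splitter also supports weighted items):

def block_splitter(items, block_size):
    # illustrative stand-in, not the real implementation
    block = []
    for item in items:
        block.append(item)
        if len(block) == block_size:
            yield block
            block = []
    if block:
        yield block

print(list(block_splitter(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]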

    def eval_calc(self):
        """
        Evaluate each calculator for the different IMs
        """

        # Define the GmfGetter

        #for args_tag in range(len(self.args)-1):
        # Looping over all source models (note: the last item in self.args is a monitor, so it is skipped)

        cur_getter = getters.GmfGetter(
            self.args[0][0],
            calc.filters.SourceFilter(self.dstore['sitecol'],
                                      self.dstore['oqparam'].maximum_distance),
            self.calculator.param['oqparam'],
            self.calculator.param['amplifier'],
            self.calculator.param['sec_perils'])

        print('eval_calc: cur_getter = ')
        print(cur_getter)

        # Evaluate each computer
        print('FetchOpenQuake: Evaluating ground motion models.')
        for computer in cur_getter.gen_computers(self.mon):
            # Looping over rupture(s) in the current realization
            sids = computer.sids
            print('eval_calc: site ID sids = ')
            print(sids)
            eids_by_rlz = computer.ebrupture.get_eids_by_rlz(
                cur_getter.rlzs_by_gsim)
            mag = computer.ebrupture.rupture.mag
            data = general.AccumDict(accum=[])
            cur_T = self.event_info['IntensityMeasure'].get('Periods', None)
            for cur_gs, rlzs in cur_getter.rlzs_by_gsim.items():
                # Looping over GMPE(s)
                print('eval_calc: cur_gs = ')
                print(cur_gs)
                num_events = sum(len(eids_by_rlz[rlz]) for rlz in rlzs)
                if num_events == 0:  # it may happen
                    continue
                    # NB: the trick for performance is to keep the call to
                    # .compute outside of the loop over the realizations;
                    # it is better to have few calls producing big arrays
                tmpMean = []
                tmpstdtot = []
                tmpstdinter = []
                tmpstdintra = []
                for imti, imt in enumerate(computer.imts):
                    # Looping over IM(s)
                    #print('eval_calc: imt = ')
                    #print(imt)
                    if str(imt) in ['PGA', 'PGV', 'PGD']:
                        cur_T = [0.0]
                        imTag = 'ln' + str(imt)
                    else:
                        imTag = 'lnSA'
                    if isinstance(cur_gs, gsim.multi.MultiGMPE):
                        gs = cur_gs[str(imt)]  # MultiGMPE
                    else:
                        gs = cur_gs  # regular GMPE
                    try:
                        dctx = computer.dctx.roundup(cur_gs.minimum_distance)
                        if computer.distribution is None:
                            if computer.correlation_model:
                                raise ValueError('truncation_level=0 requires '
                                                 'no correlation model')
                            mean, _stddevs = cur_gs.get_mean_and_stddevs(
                                computer.sctx,
                                computer.rctx,
                                dctx,
                                imt,
                                stddev_types=[])
                        num_sids = len(computer.sids)
                        if cur_gs.DEFINED_FOR_STANDARD_DEVIATION_TYPES == {
                                const.StdDev.TOTAL
                        }:
                            # If the GSIM provides only total standard deviation, we need
                            # to compute mean and total standard deviation at the sites
                            # of interest.
                            # In this case, we also assume no correlation model is used.
                            if computer.correlation_model:
                                raise CorrelationButNoInterIntraStdDevs(
                                    computer.correlation_model, cur_gs)

                            mean, [stddev_total] = cur_gs.get_mean_and_stddevs(
                                computer.sctx, computer.rctx, dctx, imt,
                                [const.StdDev.TOTAL])
                            stddev_total = stddev_total.reshape(
                                stddev_total.shape + (1, ))
                            mean = mean.reshape(mean.shape + (1, ))
                            if imti == 0:
                                tmpMean = mean
                                tmpstdtot = stddev_total
                            else:
                                # stack IMTs as columns (axis=1), matching
                                # the inter/intra branch below
                                tmpMean = np.concatenate((tmpMean, mean),
                                                         axis=1)
                                tmpstdtot = np.concatenate(
                                    (tmpstdtot, stddev_total), axis=1)
                        else:
                            mean, [stddev_inter,
                                   stddev_intra] = cur_gs.get_mean_and_stddevs(
                                       computer.sctx, computer.rctx, dctx, imt,
                                       [
                                           const.StdDev.INTER_EVENT,
                                           const.StdDev.INTRA_EVENT
                                       ])
                            stddev_intra = stddev_intra.reshape(
                                stddev_intra.shape + (1, ))
                            stddev_inter = stddev_inter.reshape(
                                stddev_inter.shape + (1, ))
                            mean = mean.reshape(mean.shape + (1, ))

                            if imti == 0:
                                tmpMean = mean
                                tmpstdinter = stddev_inter
                                tmpstdintra = stddev_intra
                                tmpstdtot = np.sqrt(
                                    stddev_inter * stddev_inter +
                                    stddev_intra * stddev_intra)
                            else:
                                tmpMean = np.concatenate((tmpMean, mean),
                                                         axis=1)
                                tmpstdinter = np.concatenate(
                                    (tmpstdinter, stddev_inter), axis=1)
                                tmpstdintra = np.concatenate(
                                    (tmpstdintra, stddev_intra), axis=1)
                                tmpstdtot = np.concatenate(
                                    (tmpstdtot,
                                     np.sqrt(stddev_inter * stddev_inter +
                                             stddev_intra * stddev_intra)),
                                    axis=1)

                    except Exception as exc:
                        raise RuntimeError(
                            '(%s, %s, source_id=%r) %s: %s' %
                            (gs, imt, computer.source_id.decode('utf8'),
                             exc.__class__.__name__, exc)).with_traceback(
                                 exc.__traceback__)

                # initialize
                gm_collector = []
                # transpose
                """
                if len(tmpMean):
                    tmpMean = tmpMean.transpose()
                if len(tmpstdinter):
                    tmpstdinter = tmpstdinter.transpose()
                if len(tmpstdintra):
                    tmpstdintra = tmpstdintra.transpose()
                if len(tmpstdtot):
                    tmpstdtot = tmpstdtot.transpose()
                """
                # collect data
                for k in range(tmpMean.shape[0]):
                    imResult = {}
                    if len(tmpMean):
                        imResult.update(
                            {'Mean': [float(x) for x in tmpMean[k].tolist()]})
                    if len(tmpstdtot):
                        imResult.update({
                            'TotalStdDev':
                            [float(x) for x in tmpstdtot[k].tolist()]
                        })
                    if len(tmpstdinter):
                        imResult.update({
                            'InterEvStdDev':
                            [float(x) for x in tmpstdinter[k].tolist()]
                        })
                    if len(tmpstdintra):
                        imResult.update({
                            'IntraEvStdDev':
                            [float(x) for x in tmpstdintra[k].tolist()]
                        })
                    gm_collector.append({imTag: imResult})
                print(gm_collector)

        # close datastore instance
        self.calculator.datastore.close()

        # stop dbserver
        cdbs.main('stop')

        # Final results
        res = {
            'Magnitude': mag,
            'Periods': cur_T,
            'GroundMotions': gm_collector
        }

        # return
        return res
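In the branch with separate inter- and intra-event components, the total standard deviation is recombined as sqrt(inter**2 + intra**2), as done twice in the code above. A one-line check of that identity:

import numpy as np

stddev_inter = np.array([0.3, 0.4])
stddev_intra = np.array([0.4, 0.3])
print(np.sqrt(stddev_inter ** 2 + stddev_intra ** 2))  # [0.5 0.5]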