Example #1
    def medians(self, case):
        [gmfa] = self.execute(case.__file__, 'job.ini').values()
        median = {imt: [] for imt in self.calc.oqparam.imtls}
        for imti, imt in enumerate(self.calc.oqparam.imtls):
            gmfa_by_imt = get_array(gmfa, imti=imti)
            for sid in self.calc.sitecol.sids:
                gmvs = get_array(gmfa_by_imt, sid=sid)['gmv']
                median[imt].append(numpy.median(gmvs))
        return median
Example #2
    def frequencies(self, case, fst_value, snd_value):
        [gmfa] = self.execute(case.__file__, 'job.ini').values()
        [imt] = self.calc.oqparam.imtls
        gmvs0 = get_array(gmfa, sid=0, imti=0)['gmv']
        gmvs1 = get_array(gmfa, sid=1, imti=0)['gmv']
        realizations = float(self.calc.oqparam.number_of_ground_motion_fields)
        gmvs_within_range_fst = count_close(fst_value, gmvs0, gmvs1)
        gmvs_within_range_snd = count_close(snd_value, gmvs0, gmvs1)
        return (gmvs_within_range_fst / realizations,
                gmvs_within_range_snd / realizations)
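
count_close is defined elsewhere in the test module; a plausible sketch, assuming it counts the events whose gmvs fall within a fixed tolerance of the target value at both sites (the tolerance delta is an assumption):

def count_close(gmv, gmvs_site_1, gmvs_site_2, delta=0.1):
    # Count the events producing a gmv within +/- delta of the target
    # value at both sites simultaneously.
    return sum(1 for g1, g2 in zip(gmvs_site_1, gmvs_site_2)
               if abs(g1 - gmv) <= delta and abs(g2 - gmv) <= delta)
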
Example #3
    def __call__(self, rlz):
        gsim = self.gsims[rlz.ordinal]
        gmfdict = collections.defaultdict(dict)
        for computer in self.computers:
            rup = computer.rupture
            if self.samples > 1:
                eids = get_array(rup.events, sample=rlz.sampleid)['eid']
            else:
                eids = rup.events['eid']
            array = computer.compute(gsim, len(eids))  # shape (I, N, E) = (imts, sites, events)
            for imti, imt in enumerate(self.imts):
                min_gmv = self.min_iml[imti]
                for eid, gmf in zip(eids, array[imti].T):
                    for sid, gmv in zip(computer.sites.sids, gmf):
                        if gmv > min_gmv:
                            dic = gmfdict[sid]
                            if imt in dic:
                                dic[imt].append((gmv, eid))
                            else:
                                dic[imt] = [(gmv, eid)]
        dicts = []  # a list of dictionaries imt -> array(gmv, eid)
        for sid in self.sids:
            dic = gmfdict[sid]
            for imt in dic:
                dic[imt] = arr = numpy.array(dic[imt], self.dt)
                self.gmfbytes += arr.nbytes
            dicts.append(dic)
        return dicts
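
The record dtype self.dt is not shown; a minimal sketch of the assumed layout, with field types chosen to match the (gmv, eid) pairs accumulated above:

import numpy

# assumed layout of self.dt: one float32 gmv plus the event id
gmv_eid_dt = numpy.dtype([('gmv', numpy.float32), ('eid', numpy.uint32)])
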
Example #4
    def calcgmfs(self, seed, events, rlzs_by_gsim, min_iml=None):
        """
        Yield the ground motion field for each seismic event.

        :param seed:
            seed for the numpy random number generator
        :param events:
            composite array of seismic events (eid, ses, occ, samples)
        :param rlzs_by_gsim:
            a dictionary {gsim instance: realizations}
        :yields:
            tuples (eid, imti, rlz, gmf_sids)
        """
        sids = self.sites.sids
        imt_range = range(len(self.imts))
        for i, gsim in enumerate(self.gsims):
            for j, rlz in enumerate(rlzs_by_gsim[gsim]):
                if self.samples > 1:
                    eids = get_array(events, sample=rlz.sampleid)['eid']
                else:
                    eids = events['eid']
                arr = self.compute(seed + j, gsim, len(eids)).transpose(
                    0, 2, 1)  # array of shape (I, E, S)
                for imti in imt_range:
                    for eid, gmf in zip(eids, arr[imti]):
                        if min_iml is not None:  # is an array
                            ok = gmf >= min_iml[imti]
                            gmf_sids = (gmf[ok], sids[ok])
                        else:
                            gmf_sids = (gmf, sids)
                        if len(gmf):
                            yield eid, imti, rlz, gmf_sids
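
A hypothetical driver loop for this generator, where computer, seed, events and rlzs_by_gsim are assumed to be already configured, and store_gmf is a placeholder sink:

for eid, imti, rlz, (gmvs, sids) in computer.calcgmfs(
        seed, events, rlzs_by_gsim, min_iml):
    store_gmf(eid, imti, rlz.ordinal, gmvs, sids)  # placeholder
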
Example #5
def export_all_loss_ratios(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    loss_types = dstore.get_attr('composite_risk_model', 'loss_types')
    name, ext = export.keyfunc(ekey)
    ass_losses = dstore[name]
    assetcol = dstore['assetcol']
    oq = dstore['oqparam']
    dtlist = [('event_tag', (numpy.string_, 100)), ('year', U32),
              ('aid', U32)] + oq.loss_dt_list()
    elt_dt = numpy.dtype(dtlist)
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    sm_id, eid = get_sm_id_eid(ekey[0])
    if sm_id is None:
        return []
    sm_id, eid = int(sm_id), int(eid)
    sm_ids = [sm_id]
    zero = [0, 0] if oq.insured_losses else 0
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    for sm_id in sm_ids:
        rlzs = rlzs_assoc.rlzs_by_smodel[sm_id]
        try:
            event = dstore['events/sm-%04d' % sm_id][eid]
        except KeyError:
            continue
        [event_tag] = build_etags([event])
        for rlz in rlzs:
            exportname = 'losses-sm=%04d-eid=%d' % (sm_id, eid)
            dest = dstore.build_fname(exportname, rlz, 'csv')
            losses_by_aid = AccumDict()
            rlzname = 'rlz-%03d' % rlz.ordinal
            for loss_type in ass_losses[rlzname]:
                data = get_array(ass_losses['%s/%s' % (rlzname, loss_type)],
                                 eid=eid)
                losses_by_aid += group_by_aid(data, loss_type)
            elt = numpy.zeros(len(losses_by_aid), elt_dt)
            elt['event_tag'] = event_tag
            elt['year'] = event['year']
            elt['aid'] = sorted(losses_by_aid)
            for i, aid in numpy.ndenumerate(elt['aid']):
                for loss_type in loss_types:
                    value = assetcol[int(aid)].value(loss_type, oq.time_event)
                    loss = value * losses_by_aid[aid].get(loss_type, zero)
                    if oq.insured_losses:
                        elt[loss_type][i] = loss[0]
                        elt[loss_type + '_ins'][i] = loss[1]
                    else:
                        elt[loss_type][i] = loss

            elt.sort(order='event_tag')
            writer.save(elt, dest)
    return writer.getsaved()
Example #6
def get_gmfs(dstore, precalc=None):
    """
    :param dstore: a datastore
    :param precalc: a scenario calculator with attribute .gmfa
    :returns: a pair (etags, gmfs)
    """
    oq = dstore['oqparam']
    if 'gmfs' in oq.inputs:  # from file
        logging.info('Reading gmfs from file')
        sitecol, etags, gmfs_by_imt = readinput.get_gmfs(oq)

        # reduce the gmfs matrices to the filtered sites
        for imt in oq.imtls:
            gmfs_by_imt[imt] = gmfs_by_imt[imt][sitecol.indices]

        logging.info('Preparing the risk input')
        return etags, [gmfs_by_imt]

    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    rlzs = rlzs_assoc.realizations
    sitecol = dstore['sitecol']
    # NB: if the hazard site collection has N sites, the hazard
    # filtered site collection for the nonzero GMFs has N' <= N sites
    # whereas the risk site collection associated to the assets
    # has N'' <= N' sites
    if dstore.parent:
        haz_sitecol = dstore.parent['sitecol']  # N' values
    else:
        haz_sitecol = sitecol
    risk_indices = set(sitecol.indices)  # N'' values
    N = len(haz_sitecol.complete)
    imt_dt = numpy.dtype([(str(imt), F32) for imt in oq.imtls])
    E = oq.number_of_ground_motion_fields
    etags = numpy.array(sorted(b'scenario-%010d~ses=1' % i for i in range(E)))
    gmfs = numpy.zeros((len(rlzs_assoc), N, E), imt_dt)
    if precalc:
        for i, gsim in enumerate(precalc.gsims):
            for imti, imt in enumerate(oq.imtls):
                gmfs[imt][i, sitecol.sids] = precalc.gmfa[gsim][imti]
        return etags, gmfs

    # else read from the datastore
    for i, rlz in enumerate(rlzs):
        data = group_array(dstore['gmf_data/sm-0000/%04d' % i], 'sid')
        for sid, array in data.items():
            if sid in risk_indices:
                for imti, imt in enumerate(oq.imtls):
                    a = get_array(array, imti=imti)
                    gmfs[imt][i, sid, a['eid']] = a['gmv']
    return etags, gmfs
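
group_array, used above to split the gmf_data records per site, is assumed to behave as follows (a sketch consistent with openquake.baselib.general.group_array):

import collections
import numpy

def group_array(array, kfield):
    # Split a structured array into a dictionary key -> subarray,
    # grouping the records on the given field.
    groups = collections.defaultdict(list)
    for rec in array:
        groups[rec[kfield]].append(rec)
    return {k: numpy.array(v, array.dtype) for k, v in groups.items()}
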
Example #7
def gmvs_to_poe_map(gmvs_by_sid, imtls, invest_time, duration):
    """
    Convert a dictionary sid -> array of gmv records into a ProbabilityMap
    """
    pmap = ProbabilityMap()
    for sid in gmvs_by_sid:
        data = []
        for imti, imt in enumerate(imtls):
            gmvs = get_array(gmvs_by_sid[sid], imti=imti)['gmv']
            data.append(
                _gmvs_to_haz_curve(gmvs, imtls[imt], invest_time, duration))
        # the array underlying the ProbabilityCurve has size (num_levels, 1)
        array = numpy.concatenate(data).reshape(-1, 1)
        pmap[sid] = ProbabilityCurve(array)
    return pmap
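
_gmvs_to_haz_curve is not shown here; a sketch of the rate-to-probability conversion it presumably performs (the Poisson assumption is standard, but the exact implementation is an assumption):

import numpy

def _gmvs_to_haz_curve(gmvs, imls, invest_time, duration):
    # For each intensity level, estimate the rate of exceedance over the
    # effective duration, then convert it into a probability of exceedance
    # in the investigation time: poe = 1 - exp(-rate * invest_time).
    gmvs = numpy.asarray(gmvs)
    rates = numpy.array([(gmvs >= iml).sum() for iml in imls]) / duration
    return 1.0 - numpy.exp(-rates * invest_time)
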
Example #8
    def test_spatial_correlation(self):
        expected = {sc1: [0.99, 0.41],
                    sc2: [0.99, 0.64],
                    sc3: [0.99, 0.22]}

        for case in expected:
            self.run_calc(case.__file__, 'job.ini')
            oq = self.calc.oqparam
            self.assertEqual(list(oq.imtls), ['PGA'])
            dstore = read(self.calc.datastore.calc_id)
            gmvs = dstore['gmf_data/0000'].value
            dstore.close()
            gmvs_site_1 = get_array(gmvs, sid=0, imti=0)['gmv']
            gmvs_site_2 = get_array(gmvs, sid=1, imti=0)['gmv']
            joint_prob_0_5 = joint_prob_of_occurrence(
                gmvs_site_1, gmvs_site_2, 0.5, oq.investigation_time,
                oq.ses_per_logic_tree_path)
            joint_prob_1_0 = joint_prob_of_occurrence(
                gmvs_site_1, gmvs_site_2, 1.0, oq.investigation_time,
                oq.ses_per_logic_tree_path)

            p05, p10 = expected[case]
            numpy.testing.assert_almost_equal(joint_prob_0_5, p05, decimal=1)
            numpy.testing.assert_almost_equal(joint_prob_1_0, p10, decimal=1)
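
joint_prob_of_occurrence is defined elsewhere in the test module; a sketch consistent with the call sites above, where the tolerance delta_gmv and the exact Poisson normalization are assumptions:

import numpy

def joint_prob_of_occurrence(gmvs_site_1, gmvs_site_2, gmv, time_span,
                             num_ses, delta_gmv=0.1):
    # Count the events where both sites record a gmv within
    # [gmv - delta_gmv/2, gmv + delta_gmv/2], estimate the occurrence
    # rate over the effective time span, and convert it into a
    # Poissonian probability of occurrence.
    half = delta_gmv / 2.0
    count = sum(1 for g1, g2 in zip(gmvs_site_1, gmvs_site_2)
                if abs(g1 - gmv) <= half and abs(g2 - gmv) <= half)
    rate = count / (time_span * num_ses)
    return 1.0 - numpy.exp(-rate * time_span)
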
Example #9
def get_gmfs(dstore):
    """
    :param dstore: a datastore
    :returns: a pair (etags, dictionary (grp_id, gsim) -> gmfa)
    """
    oq = dstore['oqparam']
    if 'gmfs' in oq.inputs:  # from file
        logging.info('Reading gmfs from file')
        sitecol, etags, gmfs_by_imt = readinput.get_gmfs(oq)

        # reduce the gmfs matrices to the filtered sites
        for imt in oq.imtls:
            gmfs_by_imt[imt] = gmfs_by_imt[imt][sitecol.indices]

        logging.info('Preparing the risk input')
        return etags, {(0, 'FromFile'): gmfs_by_imt}

    # else from datastore
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    rlzs = rlzs_assoc.realizations
    sitecol = dstore['sitecol']
    # NB: if the hazard site collection has N sites, the hazard
    # filtered site collection for the nonzero GMFs has N' <= N sites
    # whereas the risk site collection associated to the assets
    # has N'' <= N' sites
    if dstore.parent:
        haz_sitecol = dstore.parent['sitecol']  # N' values
    else:
        haz_sitecol = sitecol
    risk_indices = set(sitecol.indices)  # N'' values
    N = len(haz_sitecol.complete)
    imt_dt = numpy.dtype([(str(imt), F32) for imt in oq.imtls])
    E = oq.number_of_ground_motion_fields
    # build a matrix N x E for each GSIM realization
    gmfs = {(grp_id, gsim): numpy.zeros((N, E), imt_dt)
            for grp_id, gsim in rlzs_assoc}
    for i, rlz in enumerate(rlzs):
        data = group_array(dstore['gmf_data/%04d' % i], 'sid')
        for sid, array in data.items():
            if sid in risk_indices:
                for imti, imt in enumerate(oq.imtls):
                    a = get_array(array, imti=imti)
                    gs = str(rlz.gsim_rlz)
                    gmfs[0, gs][imt][sid, a['eid']] = a['gmv']
    etags = numpy.array(
        sorted([b'scenario-%010d~ses=1' % i
                for i in range(oq.number_of_ground_motion_fields)]))
    return etags, gmfs
Example #10
    def __iter__(self):
        completemesh = self.sitecol.complete.mesh
        gmfset = collections.defaultdict(list)
        for imti, imt_str in enumerate(self.imts):
            imt, sa_period, sa_damping = from_string(imt_str)
            for rupture in self.ruptures:
                mesh = completemesh[rupture.indices]
                gmf = get_array(rupture.gmfa, imti=imti)['gmv']
                assert len(mesh) == len(gmf), (len(mesh), len(gmf))
                nodes = (GroundMotionFieldNode(gmv, loc)
                         for gmv, loc in zip(gmf, mesh))
                gmfset[rupture.ses_idx].append(
                    GroundMotionField(
                        imt, sa_period, sa_damping, rupture.etag, nodes))
        for ses_idx in sorted(gmfset):
            yield GmfSet(gmfset[ses_idx], self.investigation_time, ses_idx)
Example #11
def get_gmfs(dstore):
    """
    :param dstore: a datastore
    :returns: a pair (etags, dictionary (trt_id, gsim) -> gmfa)
    """
    oq = dstore["oqparam"]
    if "gmfs" in oq.inputs:  # from file
        logging.info("Reading gmfs from file")
        sitecol, etags, gmfs_by_imt = readinput.get_gmfs(oq)

        # reduce the gmfs matrices to the filtered sites
        for imt in oq.imtls:
            gmfs_by_imt[imt] = gmfs_by_imt[imt][sitecol.indices]

        logging.info("Preparing the risk input")
        return etags, {(0, "FromFile"): gmfs_by_imt}

    # else from datastore
    rlzs_assoc = dstore["csm_info"].get_rlzs_assoc()
    rlzs = rlzs_assoc.realizations
    sitecol = dstore["sitecol"]
    # NB: if the hazard site collection has N sites, the hazard
    # filtered site collection for the nonzero GMFs has N' <= N sites
    # whereas the risk site collection associated to the assets
    # has N'' <= N' sites
    if dstore.parent:
        haz_sitecol = dstore.parent["sitecol"]  # N' values
    else:
        haz_sitecol = sitecol
    risk_indices = set(sitecol.indices)  # N'' values
    N = len(haz_sitecol.complete)
    imt_dt = numpy.dtype([(str(imt), F32) for imt in oq.imtls])
    E = oq.number_of_ground_motion_fields
    # build a matrix N x E for each GSIM realization
    gmfs = {(trt_id, gsim): numpy.zeros((N, E), imt_dt) for trt_id, gsim in rlzs_assoc}
    for i, rlz in enumerate(rlzs):
        data = general.group_array(dstore["gmf_data/%04d" % i], "sid")
        for sid, array in data.items():
            if sid in risk_indices:
                for imti, imt in enumerate(oq.imtls):
                    a = general.get_array(array, imti=imti)
                    gs = str(rlz.gsim_rlz)
                    gmfs[0, gs][imt][sid, a["eid"]] = a["gmv"]
    return dstore["etags"].value, gmfs
Example #12
def _get_gmfs(dstore, serial, eid):
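    """
    Yield pairs (gmfa, imt), one per intensity measure type, where gmfa
    is an array of length N with one float32 field per realization.
    """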
    oq = dstore['oqparam']
    min_iml = event_based.fix_minimum_intensity(oq.minimum_intensity, oq.imtls)
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    sitecol = dstore['sitecol'].complete
    N = len(sitecol.complete)
    rup = dstore['sescollection/' + serial]
    correl_model = readinput.get_correl_model(oq)
    gsims = rlzs_assoc.gsims_by_trt_id[rup.trt_id]
    rlzs = [rlz for gsim in map(str, gsims)
            for rlz in rlzs_assoc[rup.trt_id, gsim]]
    gmf_dt = numpy.dtype([('%03d' % rlz.ordinal, F32) for rlz in rlzs])
    gmfadict = create(event_based.GmfColl,
                      [rup], sitecol, oq.imtls, rlzs_assoc,
                      oq.truncation_level, correl_model, min_iml).by_rlzi()
    for imti, imt in enumerate(oq.imtls):
        gmfa = numpy.zeros(N, gmf_dt)
        for rlzname in gmf_dt.names:
            rlzi = int(rlzname)
            gmvs = get_array(gmfadict[rlzi], eid=eid, imti=imti)['gmv']
            gmfa[rlzname][rup.indices] = gmvs
        yield gmfa, imt
Example #13
def view_task_hazard(token, dstore):
    """
    Display info about a given task. Here are a few examples of usage::

     $ oq show task_hazard:0  # the fastest task
     $ oq show task_hazard:-1  # the slowest task
    """
    tasks = set(dstore['task_info'])
    if 'source_data' not in dstore:
        return 'Missing source_data'
    if 'classical_split_filter' in tasks:
        data = dstore['task_info/classical_split_filter'][()]
    else:
        data = dstore['task_info/compute_gmfs'][()]
    data.sort(order='duration')
    rec = data[int(token.split(':')[1])]
    taskno = rec['taskno']
    arr = get_array(dstore['source_data'][()], taskno=taskno)
    st = [stats('nsites', arr['nsites']), stats('weight', arr['weight'])]
    sources = dstore['task_sources'][taskno - 1].split()
    srcs = set(decode(s).split(':', 1)[0] for s in sources)
    res = 'taskno=%d, weight=%d, duration=%d s, sources="%s"\n\n' % (
        taskno, rec['weight'], rec['duration'], ' '.join(sorted(srcs)))
    return res + rst_table(st, header='variable mean stddev min max n'.split())
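
The stats helper is not shown; a sketch returning one row per variable, matching the 'variable mean stddev min max n' header passed to rst_table (whether the standard deviation is the sample or the population one is an assumption):

import numpy

def stats(name, values):
    # One table row per variable: (variable, mean, stddev, min, max, n).
    values = numpy.asarray(values)
    return (name, values.mean(), values.std(ddof=1),
            values.min(), values.max(), len(values))
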
Example #14
def view_task_hazard(token, dstore):
    """
    Display info about a given task. Here are a few examples of usage::

     $ oq show task:classical:0  # the fastest task
     $ oq show task:classical:-1  # the slowest task
    """
    _, name, index = token.split(':')
    if 'by_task' not in dstore:
        return 'Missing by_task'
    data = get_array(dstore['task_info'][()], taskname=encode(name))
    if len(data) == 0:
        raise RuntimeError('No task_info for %s' % name)
    data.sort(order='duration')
    rec = data[int(index)]
    taskno = rec['task_no']
    eff_ruptures = dstore['by_task/eff_ruptures'][taskno]
    eff_sites = dstore['by_task/eff_sites'][taskno]
    srcids = dstore['by_task/srcids'][taskno]
    srcs = dstore['source_info']['source_id'][srcids]
    res = ('taskno=%d, eff_ruptures=%d, eff_sites=%d, duration=%d s\n'
           'sources="%s"' %
           (taskno, eff_ruptures, eff_sites, rec['duration'], ' '.join(srcs)))
    return res
Example #15
def view_task_hazard(token, dstore):
    """
    Display info about a given task. Here are a few examples of usage::

     $ oq show task_hazard:0  # the fastest task
     $ oq show task_hazard:-1  # the slowest task
    """
    tasks = set(dstore['task_info'])
    if 'source_data' not in dstore:
        return 'Missing source_data'
    if 'classical' in tasks:
        data = dstore['task_info/classical'].value
    else:
        data = dstore['task_info/compute_gmfs'].value
    data.sort(order='duration')
    rec = data[int(token.split(':')[1])]
    taskno = rec['taskno']
    arr = get_array(dstore['source_data'].value, taskno=taskno)
    st = [stats('nsites', arr['nsites']), stats('weight', arr['weight'])]
    sources = dstore['task_sources'][taskno - 1].split()
    srcs = set(decode(s).split(':', 1)[0] for s in sources)
    res = 'taskno=%d, weight=%d, duration=%d s, sources="%s"\n\n' % (
        taskno, rec['weight'], rec['duration'], ' '.join(sorted(srcs)))
    return res + rst_table(st, header='variable mean stddev min max n'.split())