Example #1
def extract_tot_curves(dstore, what):
    """
    Aggregate loss curves from the ebrisk calculator:

    /extract/tot_curves?
    kind=stats&absolute=1&loss_type=occupants

    Returns an array of shape (P, S) or (P, R)
    """
    info = get_info(dstore)
    qdic = parse(what, info)
    k = qdic['k']  # rlz or stat index
    [l] = qdic['loss_type']  # loss type index
    if qdic['rlzs']:
        kinds = ['rlz-%d' % r for r in k]
        name = 'agg_curves-rlzs'
    else:
        kinds = list(info['stats'])
        name = 'agg_curves-stats'
    shape_descr = hdf5.get_shape_descr(dstore.get_attr(name, 'json'))
    units = dstore.get_attr(name, 'units')
    rps = shape_descr['return_period']
    K = shape_descr.get('K', 0)
    arr = dstore[name][K, k, l].T  # shape (P, R) or (P, S)
    if qdic['absolute'] == [1]:
        pass
    elif qdic['absolute'] == [0]:  # relative
        arr /= dstore['agg_values'][K, l]
    else:
        raise ValueError('"absolute" must be 0 or 1 in %s' % what)
    attrs = dict(shape_descr=['return_period', 'kind'])
    attrs['return_period'] = rps
    attrs['kind'] = kinds
    attrs['units'] = list(units)  # used by the QGIS plugin
    return ArrayWrapper(arr, dict(json=hdf5.dumps(attrs)))
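
A minimal usage sketch for the query shown in the docstring; datastore.read, the calculation id 42 and the .array/.json attributes of ArrayWrapper are assumptions of this sketch, not guarantees of the code above.

import json
from openquake.commonlib import datastore  # assumed datastore API

dstore = datastore.read(42)  # hypothetical calculation id
aw = extract_tot_curves(dstore, 'kind=stats&absolute=1&loss_type=occupants')
print(aw.array.shape)               # (P, S): return periods x statistics
print(json.loads(aw.json)['kind'])  # e.g. ['mean', 'quantile-0.15', ...]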
Example #2
def crm_attrs(dstore, what):
    """
    :returns:
        the attributes of the risk model, i.e. limit_states, loss_types,
        min_iml and covs, needed by the risk exporters.
    """
    attrs = dstore.get_attrs('crm')
    return ArrayWrapper((), dict(json=hdf5.dumps(attrs)))
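
A short sketch reading the risk model attributes back from the JSON payload; the keys follow the docstring, while the datastore setup and the .json attribute are assumed as in the sketch under Example #1.

import json
from openquake.commonlib import datastore  # assumed datastore API

dstore = datastore.read(42)  # hypothetical calculation id
crm = json.loads(crm_attrs(dstore, '').json)
print(crm.get('limit_states'), crm.get('loss_types'))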
Example #3
def extract_agg_curves(dstore, what):
    """
    Aggregate loss curves from the ebrisk calculator:

    /extract/agg_curves?
    kind=stats&absolute=1&loss_type=occupants&occupancy=RES

    Returns an array of shape (P, S, 1...) or (P, R, 1...)
    """
    info = get_info(dstore)
    qdic = parse(what, info)
    tagdict = qdic.copy()
    for a in ('k', 'rlzs', 'kind', 'loss_type', 'absolute'):
        del tagdict[a]
    k = qdic['k']  # rlz or stat index
    [l] = qdic['loss_type']  # loss type index
    tagnames = sorted(tagdict)
    if set(tagnames) != set(info['tagnames']):
        raise ValueError('Expected tagnames=%s, got %s' %
                         (info['tagnames'], tagnames))
    tagvalues = [tagdict[t][0] for t in tagnames]
    idx = -1
    if tagnames:
        for i, tags in enumerate(dstore['agg_keys'][:][tagnames]):
            if list(tags) == tagvalues:
                idx = i
                break
    if qdic['rlzs']:
        kinds = ['rlz-%d' % r for r in k]
        name = 'agg_curves-rlzs'
    else:
        kinds = list(info['stats'])
        name = 'agg_curves-stats'
    shape_descr = hdf5.get_shape_descr(dstore.get_attr(name, 'json'))
    units = dstore.get_attr(name, 'units')
    rps = shape_descr['return_period']
    tup = (idx, k, l)
    arr = dstore[name][tup].T  # shape (P, R) or (P, S)
    if qdic['absolute'] == [1]:
        pass
    elif qdic['absolute'] == [0]:
        evalue = dstore['agg_values'][idx, l]  # shape K, L
        arr /= evalue
    else:
        raise ValueError('"absolute" must be 0 or 1 in %s' % what)
    attrs = dict(shape_descr=['return_period', 'kind'] + tagnames)
    attrs['return_period'] = list(rps)
    attrs['kind'] = kinds
    attrs['units'] = list(units)  # used by the QGIS plugin
    for tagname, tagvalue in zip(tagnames, tagvalues):
        attrs[tagname] = [tagvalue]
    if tagnames:
        arr = arr.reshape(arr.shape + (1,) * len(tagnames))
    return ArrayWrapper(arr, dict(json=hdf5.dumps(attrs)))
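
A sketch of a tag-filtered query mirroring the docstring URL; the occupancy value RES must exist in the exposure, and the setup lines are assumptions.

import json
from openquake.commonlib import datastore  # assumed datastore API

dstore = datastore.read(42)  # hypothetical calculation id
aw = extract_agg_curves(
    dstore, 'kind=stats&absolute=1&loss_type=occupants&occupancy=RES')
attrs = json.loads(aw.json)
print(attrs['shape_descr'])  # ['return_period', 'kind', 'occupancy']
print(aw.array.shape)        # (P, S, 1): one trailing axis per tag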
Example #4
def extract_gridded_sources(dstore, what):
    """
    Extract information about the gridded sources (requires ps_grid_spacing)
    Use it as /extract/gridded_sources?task_no=0.
    Returns a json string id -> lonlats
    """
    qdict = parse(what)
    task_no = int(qdict.get('task_no', ['0'])[0])
    dic = {}
    for i, lonlats in enumerate(dstore['ps_grid/%02d' % task_no][()]):
        dic[i] = numpy.round(F64(lonlats), 3)
    return ArrayWrapper((), {'json': hdf5.dumps(dic)})
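
A sketch decoding the id -> lonlats mapping; JSON object keys come back as strings, and the datastore setup is assumed as in the previous sketches.

import json
from openquake.commonlib import datastore  # assumed datastore API

dstore = datastore.read(42)  # hypothetical calculation id
aw = extract_gridded_sources(dstore, 'task_no=0')
for src_id, lonlats in json.loads(aw.json).items():
    print(src_id, lonlats[:2])  # first entries of the rounded lon/lat array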
Example #5
def extract_disagg_by_src(dstore, what):
    """
    Extract the disagg_by_src information. Example:
    http://127.0.0.1:8800/v1/calc/30/extract/disagg_by_src?site_id=0&imt_id=0&rlz_id=0&lvl_id=-1
    """
    qdict = parse(what)
    dic = hdf5.get_shape_descr(dstore['disagg_by_src'].attrs['json'])
    src_id = dic['src_id']
    f = norm(qdict, 'site_id rlz_id lvl_id imt_id'.split())
    poe = dstore['disagg_by_src'][
        f['site_id'], f['rlz_id'], f['imt_id'], f['lvl_id']]
    arr = numpy.zeros(len(src_id), [('src_id', '<S16'), ('poe', '<f8')])
    arr['src_id'] = src_id
    arr['poe'] = poe
    arr.sort(order='poe')
    return ArrayWrapper(arr[::-1], dict(json=hdf5.dumps(f)))
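
A sketch listing the top contributing sources for the query in the docstring; the returned record array is sorted by decreasing poe. The setup lines are assumptions.

from openquake.commonlib import datastore  # assumed datastore API

dstore = datastore.read(42)  # hypothetical calculation id
aw = extract_disagg_by_src(dstore, 'site_id=0&imt_id=0&rlz_id=0&lvl_id=-1')
for rec in aw.array[:5]:  # records with the highest poe
    print(rec['src_id'], rec['poe'])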
Example #6
def extract_exposure_metadata(dstore, what):
    """
    Extract the loss categories and the tags of the exposure.
    Use it as /extract/exposure_metadata
    """
    dic = {}
    dic1, dic2 = dstore['assetcol/tagcol'].__toh5__()
    dic.update(dic1)
    dic.update(dic2)
    if 'asset_risk' in dstore:
        dic['multi_risk'] = sorted(
            set(dstore['asset_risk'].dtype.names) -
            set(dstore['assetcol/array'].dtype.names))
    dic['names'] = [name for name in dstore['assetcol/array'].dtype.names
                    if name.startswith(('value-', 'number', 'occupants'))
                    and name != 'value-occupants']
    return ArrayWrapper((), dict(json=hdf5.dumps(dic)))
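
A sketch reading back the exposure metadata; 'names' is set explicitly above, while 'tagnames' is assumed to come from the tag collection and is accessed defensively.

import json
from openquake.commonlib import datastore  # assumed datastore API

dstore = datastore.read(42)  # hypothetical calculation id
meta = json.loads(extract_exposure_metadata(dstore, '').json)
print(meta['names'])         # value-, number and occupants fields
print(meta.get('tagnames'))  # assumed key from assetcol/tagcol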
Example #7
def extract_assets(dstore, what):
    """
    Extract an array of assets, optionally filtered by tag.
    Use it as /extract/assets?taxonomy=RC&taxonomy=MSBC&occupancy=RES
    """
    qdict = parse(what)
    dic = {}
    dic1, dic2 = dstore['assetcol/tagcol'].__toh5__()
    dic.update(dic1)
    dic.update(dic2)
    arr = dstore['assetcol/array'][()]
    for tag, vals in qdict.items():
        cond = numpy.zeros(len(arr), bool)
        for val in vals:
            tagidx, = numpy.where(dic[tag] == val)
            cond |= arr[tag] == tagidx
        arr = arr[cond]
    return ArrayWrapper(arr, dict(json=hdf5.dumps(dic)))
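
A sketch of a filtered extraction as in the docstring URL; the tag values are illustrative and must exist in the exposure, and the setup lines are assumptions.

from openquake.commonlib import datastore  # assumed datastore API

dstore = datastore.read(42)  # hypothetical calculation id
aw = extract_assets(dstore, 'taxonomy=RC&taxonomy=MSBC&occupancy=RES')
assets = aw.array  # structured array of the matching assets
print(len(assets), assets.dtype.names[:5])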
Example #8
def extract_asset_risk(dstore, what):
    """
    Extract an array of assets + risk fields, optionally filtered by tag.
    Use it as /extract/asset_risk?taxonomy=RC&taxonomy=MSBC&occupancy=RES
    """
    qdict = parse(what)
    dic = {}
    dic1, dic2 = dstore['assetcol/tagcol'].__toh5__()
    dic.update(dic1)
    dic.update(dic2)
    arr = dstore['asset_risk'][()]
    names = list(arr.dtype.names)
    for i, name in enumerate(names):
        if name == 'id':
            names[i] = 'asset_id'  # for backward compatibility
    arr.dtype.names = names
    for tag, vals in qdict.items():
        cond = numpy.zeros(len(arr), bool)
        for val in vals:
            tagidx, = numpy.where(dic[tag] == val)
            cond |= arr[tag] == tagidx
        arr = arr[cond]
    return ArrayWrapper(arr, dict(json=hdf5.dumps(dic)))
Example #9
    def test(self):
        # numpy arrays are serialized as plain JSON lists
        dic = dict(imts=numpy.array([0.1, 0.2, 0.3]))
        self.assertEqual(dumps(dic), '{\n"imts": [0.1, 0.2, 0.3]}')

        # backslashes are escaped in the JSON output; the expected string
        # doubles them again because it is a Python string literal
        dic = dict(base_path=r"C:\Users\test")
        self.assertEqual(dumps(dic), '{\n"base_path": "C:\\\\Users\\\\test"}')
Example #10
def extract_oqparam(dstore, dummy):
    """
    Extract job parameters as a JSON npz. Use it as /extract/oqparam
    """
    js = hdf5.dumps(vars(dstore['oqparam']))
    return ArrayWrapper((), {'json': js})
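
A sketch round-tripping the job parameters; calculation_mode is just one oqparam field chosen for illustration, and the setup lines are assumptions as in the earlier sketches.

import json
from openquake.commonlib import datastore  # assumed datastore API

dstore = datastore.read(42)  # hypothetical calculation id
oqparam = json.loads(extract_oqparam(dstore, '').json)
print(oqparam.get('calculation_mode'))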