def crm_attrs(dstore, what):
    """
    :returns: the attributes of the risk model, i.e. limit_states,
        loss_types, min_iml and covs, needed by the risk exporters.
    """
    return ArrayWrapper((), dstore.get_attrs('risk_model'))
def extract_disagg_layer(dstore, what):
    """
    Extract a disaggregation output containing all sites for the first
    realization or the mean.
    Example:
    http://127.0.0.1:8800/v1/calc/30/extract/
    disagg_layer?kind=Mag_Dist&imt=PGA&poe_id=0
    """
    qdict = parse(what)
    [label] = qdict['kind']
    [imt] = qdict['imt']
    poe_id = int(qdict['poe_id'][0])
    grp = disagg_outputs(dstore, imt, 0, poe_id)[0]
    dset = grp[label]
    edges = {k: grp.attrs[k] for k in grp.attrs if k.endswith('_edges')}
    dt = [('site_id', U32), ('lon', F32), ('lat', F32),
          ('poes', (dset.dtype, dset.shape))]
    sitecol = dstore['sitecol']
    out = numpy.zeros(len(sitecol), dt)
    out[0] = (0, sitecol.lons[0], sitecol.lats[0], dset[()])
    for sid, lon, lat, rec in zip(
            sitecol.sids, sitecol.lons, sitecol.lats, out):
        if sid > 0:
            grp = disagg_outputs(dstore, imt, sid, poe_id)[0]
            rec['site_id'] = sid
            rec['lon'] = lon
            rec['lat'] = lat
            rec['poes'] = grp[label][()]
    return ArrayWrapper(out, edges)
def extract_mfd(dstore, what):
    """
    Display num_ruptures by magnitude for event based calculations.
    Example: http://127.0.0.1:8800/v1/calc/30/extract/event_based_mfd?kind=mean
    """
    oq = dstore['oqparam']
    qdic = parse(what)
    kind_mean = 'mean' in qdic.get('kind', [])
    kind_by_group = 'by_group' in qdic.get('kind', [])
    weights = dstore['csm_info/sm_data']['weight']
    sm_idx = dstore['csm_info/sg_data']['sm_id']
    grp_weight = weights[sm_idx]
    duration = oq.investigation_time * oq.ses_per_logic_tree_path
    dic = {'duration': duration}
    dd = collections.defaultdict(float)
    rups = dstore['ruptures']['grp_id', 'mag', 'n_occ']
    mags = sorted(numpy.unique(rups['mag']))
    magidx = {mag: idx for idx, mag in enumerate(mags)}
    num_groups = rups['grp_id'].max() + 1
    frequencies = numpy.zeros((len(mags), num_groups), float)
    for grp_id, mag, n_occ in rups:
        if kind_mean:
            dd[mag] += n_occ * grp_weight[grp_id] / duration
        if kind_by_group:
            frequencies[magidx[mag], grp_id] += n_occ / duration
    dic['magnitudes'] = numpy.array(mags)
    if kind_mean:
        dic['mean_frequency'] = numpy.array([dd[mag] for mag in mags])
    if kind_by_group:
        for grp_id, freqs in enumerate(frequencies.T):
            dic['grp-%02d_frequency' % grp_id] = freqs
    return ArrayWrapper((), dic)
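# Illustrative sketch (hypothetical helper, not part of the module) of the
# mean-MFD accumulation in extract_mfd above: each occurrence count is
# weighted by its group weight and divided by the effective duration to
# obtain mean annual frequencies per magnitude. Toy inputs throughout.
def _sketch_mean_mfd():
    import collections
    import numpy
    rups = [(0, 5.0, 12), (1, 5.0, 8), (0, 6.0, 3)]  # (grp_id, mag, n_occ)
    grp_weight = [.6, .4]  # one weight per source group
    duration = 10_000  # investigation_time * ses_per_logic_tree_path
    dd = collections.defaultdict(float)
    for grp_id, mag, n_occ in rups:
        dd[mag] += n_occ * grp_weight[grp_id] / duration
    mags = sorted(dd)
    return numpy.array(mags), numpy.array([dd[m] for m in mags])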
def extract_aggregate_by(dstore, what):
    """
    /extract/aggregate_by/taxonomy,occupancy/curves/structural
    yields pairs (<stat>, <array of shape (T, O, S, P)>)

    /extract/aggregate_by/taxonomy,occupancy/avg_losses/structural
    yields pairs (<stat>, <array of shape (T, O, S)>)
    """
    try:
        tagnames, name, loss_type = what.split('/')
    except ValueError:  # missing '/' at the end
        tagnames, name = what.split('/')
        loss_type = ''
    assert name in ('avg_losses', 'curves'), name
    tagnames = tagnames.split(',')
    assetcol = dstore['assetcol']
    oq = dstore['oqparam']
    dset, stats = _get(dstore, name)
    for s, stat in enumerate(stats):
        if loss_type:
            array = dset[:, s, oq.lti[loss_type]]
        else:
            array = dset[:, s]
        aw = ArrayWrapper(assetcol.aggregate_by(tagnames, array), {})
        for tagname in tagnames:
            setattr(aw, tagname, getattr(assetcol.tagcol, tagname))
        if not loss_type:
            aw.extra = ('loss_type',) + oq.loss_dt().names
        if name == 'curves':
            aw.return_period = dset.attrs['return_periods']
            aw.tagnames = encode(tagnames + ['return_period'])
        else:
            aw.tagnames = encode(tagnames)
        yield decode(stat), aw
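# Conceptual sketch (toy data, hypothetical helper; the real work is done
# by assetcol.aggregate_by) of aggregation by tag: per-asset values are
# summed into a multi-dimensional array indexed by the tag indices of
# each asset, one axis per tagname.
def _sketch_aggregate_by():
    import numpy
    # 4 assets, 2 tagnames (taxonomy and occupancy, 2 values each)
    tagidxs = numpy.array([[0, 0], [0, 1], [1, 0], [0, 0]])
    losses = numpy.array([10., 20., 30., 40.])
    acc = numpy.zeros((2, 2))
    for idxs, val in zip(tagidxs, losses):
        acc[tuple(idxs)] += val
    return acc  # acc[0, 0] == 50.0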
def extract_rupture_info(dstore, what):
    """
    Extract some information about the ruptures, including the boundary.
    Example:
    http://127.0.0.1:8800/v1/calc/30/extract/rupture_info?min_mag=6
    """
    qdict = parse(what)
    if 'min_mag' in qdict:
        [min_mag] = qdict['min_mag']
    else:
        min_mag = 0
    oq = dstore['oqparam']
    dtlist = [('rup_id', U32), ('multiplicity', U16), ('mag', F32),
              ('centroid_lon', F32), ('centroid_lat', F32),
              ('centroid_depth', F32), ('trt', '<S50'), ('strike', F32),
              ('dip', F32), ('rake', F32)]
    rows = []
    boundaries = []
    sf = filters.SourceFilter(dstore['sitecol'], oq.maximum_distance)
    for rgetter in getters.gen_rupture_getters(dstore):
        rups = rgetter.get_ruptures(sf, min_mag)
        rup_data = RuptureData(rgetter.trt, rgetter.rlzs_by_gsim)
        for r, rup in zip(rup_data.to_array(rups), rups):
            coords = ['%.5f %.5f' % xyz[:2] for xyz in zip(*r['boundaries'])]
            boundaries.append('POLYGON((%s))' % ', '.join(coords))
            rows.append(
                (r['rup_id'], r['multiplicity'], r['mag'], r['lon'],
                 r['lat'], r['depth'], rgetter.trt, r['strike'], r['dip'],
                 r['rake']))
    arr = numpy.array(rows, dtlist)
    geoms = gzip.compress('\n'.join(boundaries).encode('utf-8'))
    return ArrayWrapper(
        arr, dict(investigation_time=oq.investigation_time,
                  boundaries=geoms))
def crm_attrs(dstore, what):
    """
    :returns: the attributes of the risk model, i.e. limit_states,
        loss_types, min_iml and covs, needed by the risk exporters.
    """
    name = dstore['oqparam'].risk_model
    return ArrayWrapper(0, dstore.get_attrs(name))
def crm_attrs(dstore, what):
    """
    :returns: the attributes of the risk model, i.e. limit_states,
        loss_types, min_iml and covs, needed by the risk exporters.
    """
    attrs = dstore.get_attrs('crm')
    return ArrayWrapper((), dict(json=hdf5.dumps(attrs)))
def extract_disagg(dstore, what):
    """
    Extract a disaggregation output
    Example:
    http://127.0.0.1:8800/v1/calc/30/extract/
    disagg?kind=Mag_Dist&imt=PGA&poe_id=0&site_id=1&rlz=0
    """
    qdict = parse(what)
    label = qdict['kind'][0]
    imt = qdict['imt'][0]
    poe_idx = int(qdict['poe_id'][0])
    sid = int(qdict['site_id'][0])
    rlz = int(qdict['rlz'][0]) if 'rlz' in qdict else None
    allnames = []
    allvalues = []
    for dset in disagg_outputs(dstore, imt, sid, poe_idx, rlz):
        matrix = dset[label][()]

        # adapted from the nrml_converters
        disag_tup = tuple(label.split('_'))
        if disag_tup == ('Mag', 'Lon', 'Lat'):
            matrix = numpy.swapaxes(matrix, 0, 1)
            matrix = numpy.swapaxes(matrix, 1, 2)
            disag_tup = ('Lon', 'Lat', 'Mag')
        axis = [dset.attrs[v.lower() + '_bin_edges'] for v in disag_tup]

        # compute axis mid points
        axis = [(ax[:-1] + ax[1:]) / 2. if ax.dtype == float else ax
                for ax in axis]
        values = None
        if len(axis) == 1:
            values = numpy.array([axis[0], matrix.flatten()]).T
        else:
            grids = numpy.meshgrid(*axis, indexing='ij')
            values = [g.flatten() for g in grids]
            values.append(matrix.flatten())
            values = numpy.array(values).T
        allnames.append(os.path.basename(dset.name))
        allvalues.append(values)
    if not allnames:
        raise KeyError('No data for ' + what)
    elif len(allnames) == 1:
        return ArrayWrapper(values, qdict)
    else:
        qdict['names'] = allnames
        return ArrayWrapper(numpy.array(allvalues), qdict)
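# Self-contained sketch (toy inputs, hypothetical helper) of the axis
# logic in extract_disagg above: bin edges become bin centers, and
# meshgrid + flatten turns an N-dimensional matrix into rows of
# (coord_1, ..., coord_N, value).
def _sketch_disagg_values():
    import numpy
    mag_edges = numpy.array([5., 6., 7.])
    dist_edges = numpy.array([0., 50., 100.])
    matrix = numpy.array([[.1, .2], [.3, .4]])  # shape (mags, dists)
    axis = [(ax[:-1] + ax[1:]) / 2. for ax in (mag_edges, dist_edges)]
    grids = numpy.meshgrid(*axis, indexing='ij')
    values = numpy.array([g.flatten() for g in grids] +
                         [matrix.flatten()]).T
    return values  # values[0] == [5.5, 25., .1], i.e. (mag, dist, poe)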
def get_output(self, assets, data_by_lt, epsgetter):
    """
    returns an ArrayWrapper of shape (L, ...)
    """
    out = [self(lt, assets, data, epsgetter)
           for lt, data in zip(self.loss_types, data_by_lt)]
    return ArrayWrapper(numpy.array(out), {})
def extract_agg_curves(dstore, what):
    """
    Aggregate loss curves from the ebrisk calculator:

    /extract/agg_curves?
    kind=stats&absolute=1&loss_type=occupants&occupancy=RES

    Returns an array of shape (P, S, 1...) or (P, R, 1...)
    """
    info = get_info(dstore)
    qdic = parse(what, info)
    tagdict = qdic.copy()
    for a in ('k', 'rlzs', 'kind', 'loss_type', 'absolute'):
        del tagdict[a]
    k = qdic['k']  # rlz or stat index
    [l] = qdic['loss_type']  # loss type index
    tagnames = sorted(tagdict)
    if set(tagnames) != set(info['tagnames']):
        raise ValueError('Expected tagnames=%s, got %s' %
                         (info['tagnames'], tagnames))
    tagvalues = [tagdict[t][0] for t in tagnames]
    tagidx = []
    if tagnames:
        tagcol = dstore['assetcol/tagcol']
        for tagname, tagvalue in zip(tagnames, tagvalues):
            values = list(getattr(tagcol, tagname)[1:])
            tagidx.append(values.index(tagvalue))
    tup = tuple([slice(None), k, l] + tagidx)
    if qdic['rlzs']:
        kinds = ['rlz-%d' % r for r in k]
        arr = dstore['agg_curves-rlzs'][tup]  # shape P, R
        units = dstore.get_attr('agg_curves-rlzs', 'units')
        rps = dstore.get_attr('agg_curves-rlzs', 'return_periods')
    else:
        kinds = list(info['stats'])
        arr = dstore['agg_curves-stats'][tup]  # shape P, S
        units = dstore.get_attr('agg_curves-stats', 'units')
        rps = dstore.get_attr('agg_curves-stats', 'return_periods')
    if qdic['absolute'] == [1]:
        pass
    elif qdic['absolute'] == [0]:
        aggname = '_'.join(['agg'] + tagnames)
        tl = tuple(tagidx) + (l,)
        evalue = dstore['exposed_values/' + aggname][tl]  # shape T...
        arr /= evalue
    else:
        raise ValueError('"absolute" must be 0 or 1 in %s' % what)
    attrs = dict(shape_descr=['return_period', 'kind'] + tagnames)
    attrs['return_period'] = list(rps)
    attrs['kind'] = kinds
    attrs['units'] = units  # used by the QGIS plugin
    for tagname, tagvalue in zip(tagnames, tagvalues):
        attrs[tagname] = [tagvalue]
    if tagnames:
        arr = arr.reshape(arr.shape + (1,) * len(tagnames))
    return ArrayWrapper(arr, attrs)
def extract_rupture(dstore, rup_id):
    """
    Extract information about the rupture with the given id.
    Example:
    http://127.0.0.1:8800/v1/calc/30/extract/rupture/1066
    """
    ridx = list(dstore['ruptures']['id']).index(int(rup_id))
    [getter] = getters.gen_rupture_getters(dstore, slice(ridx, ridx + 1))
    [ebr] = getter.get_ruptures()
    return ArrayWrapper((), ebr.rupture.todict())
def get_output(self, assets, data_by_lt, epsgetter):
    """
    :param assets: a list of assets with the same taxonomy
    :param data_by_lt: hazards for each loss type
    :param epsgetter: an epsilon getter function
    :returns: an ArrayWrapper of shape (L, ...)
    """
    out = [self(lt, assets, data, epsgetter)
           for lt, data in zip(self.loss_types, data_by_lt)]
    return ArrayWrapper(numpy.array(out), dict(assets=assets))
def extract_agg_curves(dstore, what):
    """
    Aggregate loss curves from the ebrisk calculator:

    /extract/agg_curves?
    kind=stats&absolute=1&loss_type=occupants&occupancy=RES

    Returns an array of shape (P, S, 1...) or (P, R, 1...)
    """
    info = get_info(dstore)
    qdic = parse(what, info)
    tagdict = qdic.copy()
    for a in ('k', 'rlzs', 'kind', 'loss_type', 'absolute'):
        del tagdict[a]
    k = qdic['k']  # rlz or stat index
    [l] = qdic['loss_type']  # loss type index
    tagnames = sorted(tagdict)
    if set(tagnames) != set(info['tagnames']):
        raise ValueError('Expected tagnames=%s, got %s' %
                         (info['tagnames'], tagnames))
    tagvalues = [tagdict[t][0] for t in tagnames]
    idx = -1
    if tagnames:
        for i, tags in enumerate(dstore['agg_keys'][:][tagnames]):
            if list(tags) == tagvalues:
                idx = i
                break
    if qdic['rlzs']:
        kinds = ['rlz-%d' % r for r in k]
        name = 'agg_curves-rlzs'
    else:
        kinds = list(info['stats'])
        name = 'agg_curves-stats'
    units = dstore.get_attr(name, 'units')
    shape_descr = hdf5.get_shape_descr(dstore.get_attr(name, 'json'))
    rps = shape_descr['return_period']
    tup = (idx, k, l)
    arr = dstore[name][tup].T  # shape P, R
    if qdic['absolute'] == [1]:
        pass
    elif qdic['absolute'] == [0]:
        evalue = dstore['agg_values'][idx, l]  # shape K, L
        arr /= evalue
    else:
        raise ValueError('"absolute" must be 0 or 1 in %s' % what)
    attrs = dict(shape_descr=['return_period', 'kind'] + tagnames)
    attrs['return_period'] = list(rps)
    attrs['kind'] = kinds
    attrs['units'] = list(units)  # used by the QGIS plugin
    for tagname, tagvalue in zip(tagnames, tagvalues):
        attrs[tagname] = [tagvalue]
    if tagnames:
        arr = arr.reshape(arr.shape + (1,) * len(tagnames))
    return ArrayWrapper(arr, dict(json=hdf5.dumps(attrs)))
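# Toy sketch (hypothetical helper) of the absolute/relative switch in
# extract_agg_curves: with absolute=0 the curves are divided by the
# exposed value of the selected aggregation, turning absolute losses
# into loss ratios. Numbers are made up.
def _sketch_relative_curves():
    import numpy
    curves = numpy.array([1000., 5000., 20000.])  # one loss per period
    exposed_value = 100000.
    return curves / exposed_value  # [.01, .05, .2]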
def extract_sources(dstore, what):
    """
    Extract information about a source model.
    Use it as /extract/sources?sm_id=0
    """
    qdict = parse(what)
    sm_id = int(qdict['sm_id'][0])
    arr = dstore['source_info']['sm_id', 'source_id', 'eff_ruptures', 'wkt']
    if sm_id not in numpy.unique(arr['sm_id']):
        raise ValueError('There is no source model #%d' % sm_id)
    return ArrayWrapper(get_array(arr, sm_id=sm_id), {'sm_id': sm_id})
def get(self, what):
    """
    :param what: what to extract
    :returns: an ArrayWrapper instance
    """
    url = '%s/v1/calc/%d/extract/%s' % (self.server, self.calc_id, what)
    resp = self.sess.get(url)
    if resp.status_code != 200:
        raise WebAPIError(resp.text)
    npz = numpy.load(io.BytesIO(resp.content))
    attrs = {k: npz[k] for k in npz if k != 'array'}
    return ArrayWrapper(npz['array'], attrs)
def extract_effect(dstore, what):
    """
    Extracts the effect of ruptures. Use it as /extract/effect
    """
    grp = dstore['effect_by_mag_dst_trt']
    dist_bins = dict(grp.attrs)
    ndists = len(dist_bins[next(iter(dist_bins))])
    arr = numpy.zeros((len(grp), ndists, len(dist_bins)))
    for i, mag in enumerate(grp):
        arr[i] = dstore['effect_by_mag_dst_trt/' + mag][()]
    return ArrayWrapper(arr, dict(dist_bins=dist_bins, ndists=ndists,
                                  mags=[float(mag) for mag in grp]))
def extract_gridded_sources(dstore, what):
    """
    Extract information about the gridded sources (requires
    ps_grid_spacing). Use it as /extract/gridded_sources?task_no=0.
    Returns a json string id -> lonlats.
    """
    qdict = parse(what)
    task_no = int(qdict.get('task_no', ['0'])[0])
    dic = {}
    for i, lonlats in enumerate(dstore['ps_grid/%02d' % task_no][()]):
        dic[i] = numpy.round(F64(lonlats), 3)
    return ArrayWrapper((), {'json': hdf5.dumps(dic)})
def extract_disagg_by_src(dstore, what):
    """
    Extract the disagg_by_src information.
    Example:
    http://127.0.0.1:8800/v1/calc/30/extract/disagg_by_src?site_id=0&imt_id=0&rlz_id=0&lvl_id=-1
    """
    qdict = parse(what)
    src_id = dstore['disagg_by_src'].attrs['src_id']
    f = norm(qdict, 'site_id rlz_id lvl_id imt_id'.split())
    poe = dstore['disagg_by_src'][
        f['site_id'], f['rlz_id'], f['imt_id'], f['lvl_id']]
    arr = numpy.zeros(len(src_id), [('src_id', '<S16'), ('poe', '<f8')])
    arr['src_id'] = src_id
    arr['poe'] = poe
    arr.sort(order='poe')
    return ArrayWrapper(arr[::-1], dict(json=json.dumps(f)))
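# Minimal sketch (toy data, hypothetical helper) of the ranking used in
# extract_disagg_by_src: structured arrays sort ascending on the given
# field, so the result is reversed to rank sources from highest to
# lowest poe.
def _sketch_rank_by_poe():
    import numpy
    arr = numpy.zeros(3, [('src_id', '<S16'), ('poe', '<f8')])
    arr['src_id'] = [b'A', b'B', b'C']
    arr['poe'] = [.01, .10, .05]
    arr.sort(order='poe')
    return arr[::-1]  # B (.10), C (.05), A (.01)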
def make_iml4(R, imtls, iml_disagg, poes_disagg=(None,), curves=()):
    """
    :returns: an ArrayWrapper over a 4D array of shape (N, R, M, P)
    """
    N = len(curves) or 1
    M = len(imtls)
    P = len(poes_disagg)
    arr = numpy.zeros((N, R, M, P))
    imts = [from_string(imt) for imt in imtls]
    for m, imt in enumerate(imtls):
        imls = imtls[imt]
        for p, poe in enumerate(poes_disagg):
            for r in range(R):
                arr[:, r, m, p] = _imls(curves, poe, imt, imls, r)
    return ArrayWrapper(arr, dict(poes_disagg=poes_disagg, imts=imts))
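# Conceptual sketch (toy curve, hypothetical helper) of what filling one
# (site, rlz, imt, poe) slot in make_iml4 amounts to: interpolating the
# intensity level matching the target poe on a hazard curve, as _imls
# does with real curves.
def _sketch_iml_from_curve():
    import numpy
    imls = numpy.array([.1, .2, .4, .8])   # intensity levels
    poes = numpy.array([.9, .5, .1, .01])  # corresponding poes
    # hazard curves are decreasing, so interpolate on reversed arrays
    return numpy.interp(.1, poes[::-1], imls[::-1])  # -> 0.4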
def extract_sources(dstore, what):
    """
    Extract information about a source model.
    Use it as /extract/sources?sm_id=0&limit=10
    or /extract/sources?sm_id=0&source_id=1&source_id=2
    or /extract/sources?sm_id=0&code=A&code=B
    """
    qdict = parse(what)
    sm_id = int(qdict.get('sm_id', ['0'])[0])
    limit = int(qdict.get('limit', ['100'])[0])
    source_ids = qdict.get('source_id', None)
    if source_ids is not None:
        source_ids = [str(source_id) for source_id in source_ids]
    codes = qdict.get('code', None)
    if codes is not None:
        codes = [code.encode('utf8') for code in codes]
    info = dstore['source_info'][()]
    info = info[info['sm_id'] == sm_id]
    arrays = []
    if source_ids is not None:
        logging.info('Extracting sources with ids: %s', source_ids)
        info = info[numpy.isin(info['source_id'], source_ids)]
        if len(info) == 0:
            raise NotFound('There is no source with id %s' % source_ids)
    if codes is not None:
        logging.info('Extracting sources with codes: %s', codes)
        info = info[numpy.isin(info['code'], codes)]
        if len(info) == 0:
            raise NotFound('There is no source with code in %s' % codes)
    for code, rows in general.group_array(info, 'code').items():
        if limit < len(rows):
            logging.info('Code %s: extracting %d sources out of %s',
                         code, limit, len(rows))
        arrays.append(rows[:limit])
    info = numpy.concatenate(arrays)
    if len(info) == 0:
        raise ValueError('There is no source model #%d' % sm_id)
    wkt_gz = gzip.compress(';'.join(info['wkt']).encode('utf8'))
    src_gz = gzip.compress(';'.join(info['source_id']).encode('utf8'))
    oknames = [n for n in info.dtype.names if n not in ('source_id', 'wkt')]
    arr = numpy.zeros(len(info), [(n, info.dtype[n]) for n in oknames])
    for n in oknames:
        arr[n] = info[n]
    return ArrayWrapper(arr, {'sm_id': sm_id, 'wkt_gz': wkt_gz,
                              'src_gz': src_gz})
def extract_sources(dstore, what):
    """
    Extract information about a source model.
    Use it as /extract/sources?limit=10
    or /extract/sources?source_id=1&source_id=2
    or /extract/sources?code=A&code=B
    """
    qdict = parse(what)
    limit = int(qdict.get('limit', ['100'])[0])
    source_ids = qdict.get('source_id', None)
    if source_ids is not None:
        source_ids = [str(source_id) for source_id in source_ids]
    codes = qdict.get('code', None)
    if codes is not None:
        codes = [code.encode('utf8') for code in codes]
    fields = 'source_id code multiplicity num_sites eff_ruptures'
    info = dstore['source_info'][()][fields.split()]
    wkt = dstore['source_wkt'][()]
    arrays = []
    if source_ids is not None:
        logging.info('Extracting sources with ids: %s', source_ids)
        info = info[numpy.isin(info['source_id'], source_ids)]
        if len(info) == 0:
            raise NotFound('There is no source with id %s' % source_ids)
    if codes is not None:
        logging.info('Extracting sources with codes: %s', codes)
        info = info[numpy.isin(info['code'], codes)]
        if len(info) == 0:
            raise NotFound('There is no source with code in %s' % codes)
    for code, rows in general.group_array(info, 'code').items():
        if limit < len(rows):
            logging.info('Code %s: extracting %d sources out of %s',
                         code, limit, len(rows))
        arrays.append(rows[:limit])
    if not arrays:
        raise ValueError('There are no sources')
    info = numpy.concatenate(arrays)
    wkt_gz = gzip.compress(';'.join(wkt).encode('utf8'))
    src_gz = gzip.compress(';'.join(info['source_id']).encode('utf8'))
    oknames = [name for name in info.dtype.names  # avoid pickle issues
               if name not in ('source_id', 'grp_ids')]
    arr = numpy.zeros(len(info), [(n, info.dtype[n]) for n in oknames])
    for n in oknames:
        arr[n] = info[n]
    return ArrayWrapper(arr, {'wkt_gz': wkt_gz, 'src_gz': src_gz})
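# Round-trip sketch (hypothetical helper) of the wkt_gz/src_gz encoding
# in extract_sources: geometries and ids are joined with ';' and gzipped
# so the extract API ships a single small blob per field.
def _sketch_wkt_gz():
    import gzip
    wkts = ['POINT(10 45)', 'POINT(11 46)']
    wkt_gz = gzip.compress(';'.join(wkts).encode('utf8'))
    assert gzip.decompress(wkt_gz).decode('utf8').split(';') == wkts
    return wkt_gz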
def extract_exposure_metadata(dstore, what):
    """
    Extract the loss categories and the tags of the exposure.
    Use it as /extract/exposure_metadata
    """
    dic = {}
    dic1, dic2 = dstore['assetcol/tagcol'].__toh5__()
    dic.update(dic1)
    dic.update(dic2)
    if 'asset_risk' in dstore:
        dic['multi_risk'] = sorted(
            set(dstore['asset_risk'].dtype.names) -
            set(dstore['assetcol/array'].dtype.names))
    dic['names'] = [name for name in dstore['assetcol/array'].dtype.names
                    if name.startswith(('value-', 'number', 'occupants_'))
                    and not name.endswith('_None')]
    return ArrayWrapper((), dict(json=dumps(dic)))
def get(self, what):
    """
    :param what: what to extract
    :returns: an ArrayWrapper instance
    """
    url = '%s/v1/calc/%d/extract/%s' % (self.server, self.calc_id, what)
    logging.info('GET %s', url)
    resp = self.sess.get(url)
    if resp.status_code != 200:
        raise WebAPIError(resp.text)
    npz = numpy.load(io.BytesIO(resp.content), allow_pickle=True)
    attrs = {k: npz[k] for k in npz if k != 'array'}
    try:
        arr = npz['array']
    except KeyError:
        arr = ()
    return ArrayWrapper(arr, attrs)
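# Self-contained sketch (hypothetical helper) of the .npz convention the
# client above relies on: the server saves the payload under the key
# 'array' plus one entry per attribute, and the client rebuilds the
# (array, attrs) pair from the archive.
def _sketch_npz_roundtrip():
    import io
    import numpy
    buf = io.BytesIO()
    numpy.savez(buf, array=numpy.arange(3), units=numpy.array(['EUR']))
    buf.seek(0)
    npz = numpy.load(buf, allow_pickle=True)
    attrs = {k: npz[k] for k in npz if k != 'array'}  # {'units': ...}
    return npz['array'], attrs  # (array([0, 1, 2]), {'units': ...})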
def extract_disagg_layer(dstore, what):
    """
    Extract a disaggregation layer containing all sites and outputs.
    Example:
    http://127.0.0.1:8800/v1/calc/30/extract/disagg_layer?
    """
    qdict = parse(what)
    oq = dstore['oqparam']
    oq.maximum_distance = filters.MagDepDistance(oq.maximum_distance)
    if 'kind' in qdict:
        kinds = qdict['kind']
    else:
        kinds = oq.disagg_outputs
    sitecol = dstore['sitecol']
    poes_disagg = oq.poes_disagg or (None,)
    edges, shapedic = disagg.get_edges_shapedic(
        oq, sitecol, dstore['source_mags'])
    dt = _disagg_output_dt(shapedic, kinds, oq.imtls, poes_disagg)
    out = numpy.zeros(len(sitecol), dt)
    realizations = numpy.array(dstore['full_lt'].get_realizations())
    hmap4 = dstore['hmap4'][:]
    best_rlzs = dstore['best_rlzs'][:]
    arr = {kind: dstore['disagg/' + kind][:] for kind in kinds}
    for sid, lon, lat, rec in zip(
            sitecol.sids, sitecol.lons, sitecol.lats, out):
        rlzs = realizations[best_rlzs[sid]]
        rec['site_id'] = sid
        rec['lon'] = lon
        rec['lat'] = lat
        rec['lon_bins'] = edges[2][sid]
        rec['lat_bins'] = edges[3][sid]
        for m, imt in enumerate(oq.imtls):
            ws = numpy.array([rlz.weight[imt] for rlz in rlzs])
            ws /= ws.sum()  # normalize to 1
            for p, poe in enumerate(poes_disagg):
                for kind in kinds:
                    key = '%s-%s-%s' % (kind, imt, poe)
                    rec[key] = arr[kind][sid, m, p] @ ws
                rec['iml-%s-%s' % (imt, poe)] = hmap4[sid, m, p]
    return ArrayWrapper(
        out, dict(mag=edges[0], dist=edges[1], eps=edges[-2],
                  trt=numpy.array(encode(edges[-1]))))
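# Sketch (toy numbers, hypothetical helper) of the weighted mean computed
# above with '@': the per-realization values along the last axis are
# contracted against the normalized realization weights.
def _sketch_weighted_mean():
    import numpy
    values = numpy.array([[.10, .20],   # bin 0, two realizations
                          [.30, .40]])  # bin 1
    ws = numpy.array([3., 1.])
    ws /= ws.sum()  # [.75, .25]
    return values @ ws  # [.125, .325]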
def read_composite_array(fname, sep=','):
    r"""
    Convert a CSV file with header into an ArrayWrapper object.

    >>> from openquake.baselib.general import gettemp
    >>> fname = gettemp('PGA:3,PGV:2,avg:1\n'
    ...                 '.1 .2 .3,.4 .5,.6\n')
    >>> print(read_composite_array(fname).array)  # array of shape (1,)
    [([0.1, 0.2, 0.3], [0.4, 0.5], [0.6])]
    """
    with open(fname) as f:
        header = next(f)
        if header.startswith('#'):  # the first line is a comment, skip it
            attrs = dict(parse_comment(header[1:]))
            header = next(f)
        else:
            attrs = {}
        transheader = htranslator.read(header.split(sep))
        fields, dtype = parse_header(transheader)
        ts_pairs = []  # [(type, shape), ...]
        for name in fields:
            dt = dtype.fields[name][0]
            ts_pairs.append(
                (dt.subdtype[0].type if dt.subdtype else dt.type, dt.shape))
        col_ids = list(range(1, len(ts_pairs) + 1))
        num_columns = len(col_ids)
        records = []
        col, col_id = '', 0
        for i, line in enumerate(f, 2):
            row = line.split(sep)
            if len(row) != num_columns:
                raise InvalidFile(
                    'expected %d columns, found %d in file %s, line %d' %
                    (num_columns, len(row), fname, i))
            try:
                record = []
                for (ntype, shape), col, col_id in zip(
                        ts_pairs, row, col_ids):
                    record.append(_cast(col, ntype, shape, i, fname))
                records.append(tuple(record))
            except Exception as e:
                raise InvalidFile(
                    'Could not cast %r in file %s, line %d, column %d '
                    'using %s: %s' % (col, fname, i, col_id,
                                      (ntype.__name__,) + shape, e))
        return ArrayWrapper(numpy.array(records, dtype), attrs)
def extract_assets(dstore, what):
    """
    Extract an array of assets, optionally filtered by tag.
    Use it as /extract/assets?taxonomy=RC&taxonomy=MSBC&occupancy=RES
    """
    qdict = parse(what)
    dic = {}
    dic1, dic2 = dstore['assetcol/tagcol'].__toh5__()
    dic.update(dic1)
    dic.update(dic2)
    arr = dstore['assetcol/array'][()]
    for tag, vals in qdict.items():
        cond = numpy.zeros(len(arr), bool)
        for val in vals:
            tagidx, = numpy.where(dic[tag] == val)
            cond |= arr[tag] == tagidx
        arr = arr[cond]
    return ArrayWrapper(arr, dic)
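# Toy sketch (hypothetical helper) of the tag filtering in extract_assets:
# values of the same tag are OR-ed together (cond |= ...), while different
# tags are AND-ed, since the array is filtered once per tag in sequence.
def _sketch_tag_filter():
    import numpy
    taxonomy = numpy.array([0, 1, 0, 2])  # tag indices per asset
    occupancy = numpy.array([0, 0, 1, 1])
    cond = (taxonomy == 0) | (taxonomy == 1)  # taxonomy=RC OR taxonomy=MSBC
    cond &= occupancy == 1                    # AND occupancy=RES
    return cond  # [False, False, True, False]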
def extract_disagg_layer(dstore, what):
    """
    Extract a disaggregation layer containing all sites and outputs.
    Example:
    http://127.0.0.1:8800/v1/calc/30/extract/disagg_layer?
    """
    qdict = parse(what)
    oq = dstore['oqparam']
    if 'kind' in qdict:
        kinds = qdict['kind']
    else:
        kinds = list(oq.disagg_outputs or disagg.pmf_map)
    sitecol = dstore['sitecol']
    poes_disagg = oq.poes_disagg or (None,)
    edges, shapedic = disagg.get_edges_shapedic(
        oq, sitecol, dstore['source_mags'])
    dt = _disagg_output_dt(shapedic, kinds, oq.imtls, poes_disagg)
    out = numpy.zeros(len(sitecol), dt)
    try:
        best_rlzs = dstore['best_rlzs']
    except KeyError:
        best_rlzs = numpy.zeros((len(sitecol), shapedic['Z']), U16)
    for sid, lon, lat, rec in zip(
            sitecol.sids, sitecol.lons, sitecol.lats, out):
        rec['site_id'] = sid
        rec['lon'] = lon
        rec['lat'] = lat
        rec['rlz_id'] = rlzs = best_rlzs[sid]
        rec['lon_bins'] = edges[2][sid]
        rec['lat_bins'] = edges[3][sid]
        for kind in kinds:
            for imt in oq.imtls:
                for p, poe in enumerate(poes_disagg):
                    for rlz in rlzs:
                        key = '%s-%s-%s' % (kind, imt, poe)
                        label = 'disagg/rlz-%d-%s-sid-%d-poe-%s/%s' % (
                            rlz, imt, sid, p, kind)
                        rec[key] = dstore[label][()]
    return ArrayWrapper(
        out, dict(mag=edges[0], dist=edges[1], eps=edges[-2],
                  trt=encode(edges[-1])))
def extract_agg_curves(dstore, what):
    """
    Aggregate loss curves from the ebrisk calculator:

    /extract/agg_curves?
    kind=stats&absolute=1&loss_type=occupants&tagname=occupancy&tagvalue=RES

    Returns an array of shape (P, S, T...) or (P, R, T...)
    """
    info = get_info(dstore)
    qdic = parse(what, info)
    k = qdic['k']  # rlz or stat index
    [l] = qdic['loss_type']  # loss type index
    if qdic['rlzs']:
        kinds = ['rlz-%d' % r for r in k]
        arr = dstore['agg_curves-rlzs'][:, k, l]  # shape P, T...
        rps = dstore.get_attr('agg_curves-rlzs', 'return_periods')
    else:
        kinds = list(info['stats'])
        arr = dstore['agg_curves-stats'][:, k, l]  # shape P, T...
        rps = dstore.get_attr('agg_curves-stats', 'return_periods')
    tagnames = qdic.get('tagname', [])
    if set(tagnames) != set(info['tagnames']):
        raise ValueError('Expected tagnames=%s, got %s' %
                         (info['tagnames'], tagnames))
    tagvalues = qdic.get('tagvalue', [])
    if qdic['absolute'] == [1]:
        pass
    elif qdic['absolute'] == [0]:
        aggname = '_'.join(['agg'] + tagnames)
        evalue = dstore['exposed_values/' + aggname][l]  # shape T...
        arr /= evalue
    else:
        raise ValueError('"absolute" must be 0 or 1 in %s' % what)
    attrs = dict(shape_descr=['return_period', 'kind'] + tagnames)
    attrs['return_period'] = [numpy.nan] + list(rps)
    attrs['kind'] = ['?'] + kinds
    for tagname, tagvalue in zip(tagnames, tagvalues):
        attrs[tagname] = [tagvalue]
    return ArrayWrapper(arr, attrs)
def extract_rupture_info(dstore, what):
    """
    Extract some information about the ruptures, including the boundary.
    Example:
    http://127.0.0.1:8800/v1/calc/30/extract/rupture_info?min_mag=6
    """
    qdict = parse(what)
    if 'min_mag' in qdict:
        [min_mag] = qdict['min_mag']
    else:
        min_mag = 0
    oq = dstore['oqparam']
    dtlist = [('rup_id', U32), ('occurrence_rate', F32),
              ('multiplicity', U16), ('mag', F32), ('centroid_lon', F32),
              ('centroid_lat', F32), ('centroid_depth', F32),
              ('trt', '<S50'), ('strike', F32), ('dip', F32), ('rake', F32)]
    rows = []
    boundaries = []
    for rgetter in getters.gen_rgetters(dstore):
        proxies = rgetter.get_proxies(min_mag)
        rup_data = RuptureData(rgetter.trt, rgetter.samples,
                               rgetter.rlzs_by_gsim)
        for r in rup_data.to_array(proxies):
            coords = ['%.5f %.5f' % xyz[:2] for xyz in zip(*r['boundaries'])]
            coordset = sorted(set(coords))
            if len(coordset) < 4:  # degenerate to line
                boundaries.append('LINESTRING(%s)' % ', '.join(coordset))
            else:  # good polygon
                boundaries.append('POLYGON((%s))' % ', '.join(coords))
            rows.append(
                (r['rup_id'], r['occurrence_rate'], r['multiplicity'],
                 r['mag'], r['lon'], r['lat'], r['depth'], rgetter.trt,
                 r['strike'], r['dip'], r['rake']))
    arr = numpy.array(rows, dtlist)
    geoms = gzip.compress('\n'.join(boundaries).encode('utf-8'))
    return ArrayWrapper(
        arr, dict(investigation_time=oq.investigation_time,
                  boundaries=geoms))
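# Toy sketch of the WKT building in extract_rupture_info: a boundary with
# fewer than 4 distinct vertices cannot close a valid polygon ring, so it
# degenerates to a LINESTRING.
def _sketch_boundary_wkt():
    coords = ['10.00000 45.00000', '10.00000 45.00000', '11.00000 45.00000']
    coordset = sorted(set(coords))
    if len(coordset) < 4:  # degenerate to line
        return 'LINESTRING(%s)' % ', '.join(coordset)
    return 'POLYGON((%s))' % ', '.join(coords)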
def extract_asset_risk(dstore, what):
    """
    Extract an array of assets + risk fields, optionally filtered by tag.
    Use it as /extract/asset_risk?taxonomy=RC&taxonomy=MSBC&occupancy=RES
    """
    qdict = parse(what)
    dic = {}
    dic1, dic2 = dstore['assetcol/tagcol'].__toh5__()
    dic.update(dic1)
    dic.update(dic2)
    arr = dstore['asset_risk'][()]
    names = list(arr.dtype.names)
    for i, name in enumerate(names):
        if name == 'id':
            names[i] = 'asset_id'  # for backward compatibility
    arr.dtype.names = names
    for tag, vals in qdict.items():
        cond = numpy.zeros(len(arr), bool)
        for val in vals:
            tagidx, = numpy.where(dic[tag] == val)
            cond |= arr[tag] == tagidx
        arr = arr[cond]
    return ArrayWrapper(arr, dic)