def make_curve_builders(self, oqparam):
    """
    Populate the inner lists .loss_types, .curve_builders and the
    dictionary .lti (loss_type -> index).

    :param oqparam:
        object with the job parameters (loss_curve_resolution,
        calculation_mode, loss_ratios, conditional_loss_poes,
        insured_losses)
    """
    # C equally spaced ratios in (0, 1]; the leading 0 is dropped
    default_loss_ratios = numpy.linspace(
        0, 1, oqparam.loss_curve_resolution + 1)[1:]
    loss_types = self._get_loss_types()
    for l, loss_type in enumerate(loss_types):
        if oqparam.calculation_mode == 'classical_risk':
            # take the loss ratios from the risk models, in sorted key order
            all_ratios = [self[key].loss_ratios[loss_type]
                          for key in sorted(self)]
            # NB: materialize as a list, not map(): on Python 3 a map
            # object would be exhausted by set() below and the zip()
            # in the diagnostic branch would then see nothing
            curve_resolutions = [len(ratios) for ratios in all_ratios]
            if len(set(curve_resolutions)) > 1:
                # pair each resolution with the risk function of the SAME
                # sorted key used to build all_ratios (self.values() has
                # no guaranteed matching order)
                lines = ['%s %d' % (self[key].risk_functions[loss_type], cr)
                         for key, cr in zip(sorted(self), curve_resolutions)]
                # logging.warning, not the deprecated .warn alias
                logging.warning('Inconsistent num_loss_ratios:\n%s',
                                '\n'.join(lines))
            cb = scientific.CurveBuilder(
                loss_type, all_ratios[0], True,
                oqparam.conditional_loss_poes, oqparam.insured_losses)
        elif loss_type in oqparam.loss_ratios:  # loss_ratios provided
            cb = scientific.CurveBuilder(
                loss_type, oqparam.loss_ratios[loss_type], True,
                oqparam.conditional_loss_poes, oqparam.insured_losses)
        else:  # no loss_ratios provided, fall back to the default ones
            cb = scientific.CurveBuilder(
                loss_type, default_loss_ratios, False,
                oqparam.conditional_loss_poes, oqparam.insured_losses)
        self.curve_builders.append(cb)
        self.loss_types.append(loss_type)
        self.lti[loss_type] = l
def make_curve_builders(self, oqparam):
    """
    Populate the inner lists .loss_types, .curve_builders and the
    dictionary .lti (loss_type -> index).
    """
    # fallback: C equally spaced ratios in (0, 1], the leading 0 dropped
    fallback_ratios = numpy.linspace(
        0, 1, oqparam.loss_curve_resolution + 1)[1:]
    for idx, ltype in enumerate(self.get_loss_types()):
        provided = ltype in oqparam.loss_ratios
        ratios = oqparam.loss_ratios[ltype] if provided else fallback_ratios
        builder = scientific.CurveBuilder(
            ltype, ratios, user_provided=provided)
        self.curve_builders.append(builder)
        self.loss_types.append(ltype)
        self.lti[ltype] = idx
def test_lrem_lr_cov_special_cases(self):
    # LREM computation for points of a vulnerability curve where either
    # LR > 0 and CoV = 0 (the PoE is 1 for values <= that LR and 0 for
    # values above it) or LR = 0 and CoV = 0 (the PoE is 0).
    vuln_func = scientific.VulnerabilityFunction(
        self.ID, self.IMT,
        [0.1, 0.2, 0.3, 0.45, 0.6],  # IMLs
        [0.0, 0.1, 0.2, 0.4, 1.2],   # loss ratios
        [0.0, 0.0, 0.3, 0.2, 0.1],   # CoVs
        'LN')
    loss_ratios, lrem = vuln_func.loss_ratio_exceedance_matrix(5)
    expected_lrem = numpy.array([
        [0.000, 1.000, 1.000, 1.000, 1.000],
        [0.000, 1.000, 1.000, 1.000, 1.000],
        [0.000, 1.000, 1.000, 1.000, 1.000],
        [0.000, 1.000, 1.000, 1.000, 1.000],
        [0.000, 1.000, 0.999, 1.000, 1.000],
        [0.000, 1.000, 0.987, 1.000, 1.000],
        [0.000, 0.000, 0.944, 1.000, 1.000],
        [0.000, 0.000, 0.857, 1.000, 1.000],
        [0.000, 0.000, 0.730, 1.000, 1.000],
        [0.000, 0.000, 0.584, 1.000, 1.000],
        [0.000, 0.000, 0.442, 1.000, 1.000],
        [0.000, 0.000, 0.221, 0.993, 1.000],
        [0.000, 0.000, 0.098, 0.956, 1.000],
        [0.000, 0.000, 0.040, 0.848, 1.000],
        [0.000, 0.000, 0.016, 0.667, 1.000],
        [0.000, 0.000, 0.006, 0.461, 1.000],
        [0.000, 0.000, 0.000, 0.036, 1.000],
        [0.000, 0.000, 0.000, 0.001, 1.000],
        [0.000, 0.000, 0.000, 0.000, 0.999],
        [0.000, 0.000, 0.000, 0.000, 0.917],
        [0.000, 0.000, 0.000, 0.000, 0.480],
    ])
    aaae(lrem, expected_lrem, decimal=3)
    expected_counts = numpy.matrix([
        [4, 4, 4, 4, 4],
        [4, 4, 4, 4, 4],
        [4, 4, 4, 4, 4],
        [4, 4, 4, 4, 4],
        [4, 4, 4, 4, 3],
        [4, 4, 4, 4, 3],
        [3, 3, 3, 3, 2],
        [3, 3, 3, 3, 2],
        [3, 3, 3, 2, 2],
        [3, 3, 2, 2, 2],
        [3, 3, 2, 2, 2],
        [3, 2, 2, 2, 1],
        [2, 2, 2, 2, 1],
        [2, 2, 2, 2, 1],
        [2, 2, 2, 1, 1],
        [2, 2, 1, 1, 1],
        [1, 1, 1, 1, 1],
        [1, 1, 1, 1, 1],
        [1, 1, 1, 1, 0],
        [1, 1, 1, 1, 0],
        [1, 1, 0, 0, 0]])
    # with curve_resolution=5 the ratios are [0.2, 0.4, 0.6, 0.8, 1.];
    # for each row of the lrem we count how many ratios are greater or
    # equal than each ratio
    builder = scientific.CurveBuilder(
        'structural', numpy.linspace(0.2, 1, 5), user_provided=True)
    aaae(builder.build_counts(expected_lrem), expected_counts)
def make_curve_builders(self, oqparam):
    """
    Populate the inner lists .loss_types, .curve_builders and the
    dictionary .lti (loss_type -> index).
    """
    # C equally spaced ratios in (0, 1]; the leading 0 is dropped
    default_loss_ratios = numpy.linspace(
        0, 1, oqparam.loss_curve_resolution + 1)[1:]
    loss_types = self._get_loss_types()
    for l, loss_type in enumerate(loss_types):
        if oqparam.calculation_mode in ('classical', 'classical_risk'):
            # collect the per-risk-model loss ratios and their resolutions
            curve_resolutions = set()
            lines = []
            for key in sorted(self):
                rm = self[key]
                if loss_type in rm.loss_ratios:
                    ratios = rm.loss_ratios[loss_type]
                    curve_resolutions.add(len(ratios))
                    lines.append(
                        '%s %d' % (rm.risk_functions[loss_type], len(ratios)))
            if len(curve_resolutions) > 1:
                # purely informational: the models disagree on resolution
                logging.info('Different num_loss_ratios:\n%s',
                             '\n'.join(lines))
            # NOTE(review): `ratios` leaks out of the loop above, so the
            # builder gets the ratios of the *last* sorted key defining
            # this loss_type; if no key defines it, this raises NameError
            # (and max() on the empty set raises ValueError) — presumably
            # the callers guarantee at least one match; confirm
            cb = scientific.CurveBuilder(
                loss_type, ratios, True,
                oqparam.conditional_loss_poes, oqparam.insured_losses,
                curve_resolution=max(curve_resolutions))
        elif loss_type in oqparam.loss_ratios:  # loss_ratios provided
            cb = scientific.CurveBuilder(
                loss_type, oqparam.loss_ratios[loss_type], True,
                oqparam.conditional_loss_poes, oqparam.insured_losses)
        else:  # no loss_ratios provided
            cb = scientific.CurveBuilder(
                loss_type, default_loss_ratios, False,
                oqparam.conditional_loss_poes, oqparam.insured_losses)
        self.curve_builders.append(cb)
        self.loss_types.append(loss_type)
        self.lti[loss_type] = l
def make_curve_builders(self, oqparam):
    """
    Populate the inner lists .loss_types, .curve_builders and the
    dictionary .lti (loss_type -> index).
    """
    for idx, ltype in enumerate(self.get_loss_types()):
        # prefer the user-provided ratios; otherwise use C log-spaced
        # ratios in [1E-10, 1]
        ratios = (oqparam.loss_ratios[ltype] if oqparam.loss_ratios
                  else numpy.logspace(-10, 0, oqparam.loss_curve_resolution))
        self.curve_builders.append(scientific.CurveBuilder(ltype, ratios))
        self.loss_types.append(ltype)
        self.lti[ltype] = idx
def make_curve_builder(self, oqparam):
    """
    Build one LossTypeCurveBuilder per loss type and wrap them all in a
    single :class:`scientific.CurveBuilder`, which is returned; also
    fills the .lti dictionary (loss_type -> index).
    """
    builders = []
    # C equally spaced ratios in (0, 1]; the leading 0 is dropped
    default_ratios = numpy.linspace(
        0, 1, oqparam.loss_curve_resolution + 1)[1:]
    if oqparam.calculation_mode in ('event_based_risk',):
        ses_ratio = oqparam.ses_ratio
    else:
        ses_ratio = 1
    for pos, ltype in enumerate(self._get_loss_types()):
        if oqparam.calculation_mode in ('classical', 'classical_risk'):
            resolutions = set()
            descr = []
            for key in sorted(self):
                rmodel = self[key]
                if ltype in rmodel.loss_ratios:
                    ratios = rmodel.loss_ratios[ltype]
                    resolutions.add(len(ratios))
                    descr.append('%s %d' % (
                        rmodel.risk_functions[ltype], len(ratios)))
            if len(resolutions) > 1:  # example in test_case_5
                logging.info('Different num_loss_ratios:\n%s',
                             '\n'.join(descr))
            # `ratios` is the one from the last sorted key seen above
            builder = scientific.LossTypeCurveBuilder(
                ltype, max(resolutions), ratios, ses_ratio, True,
                oqparam.conditional_loss_poes, oqparam.insured_losses)
        elif ltype in oqparam.loss_ratios:  # loss_ratios provided
            builder = scientific.LossTypeCurveBuilder(
                ltype, oqparam.loss_curve_resolution,
                oqparam.loss_ratios[ltype], ses_ratio, True,
                oqparam.conditional_loss_poes, oqparam.insured_losses)
        else:  # no loss_ratios provided
            builder = scientific.LossTypeCurveBuilder(
                ltype, oqparam.loss_curve_resolution, default_ratios,
                ses_ratio, False, oqparam.conditional_loss_poes,
                oqparam.insured_losses)
        builder.index = pos
        builders.append(builder)
        self.lti[ltype] = pos
    return scientific.CurveBuilder(builders, oqparam.insured_losses,
                                   oqparam.conditional_loss_poes)
def post_execute(self, result):
    """
    Extract from the result dictionary
    rlz.ordinal -> (loss_type, tag) -> [(asset.id, loss), ...]
    several interesting outputs.

    :param result:
        dictionary realization ordinal -> {(loss_type, tag): data};
        the exact schema of `data` is produced by the execute phase
        (keys 'data', 'loss', 'ins_loss', 'nonzero', 'total' are read
        below, plus the special tag 'counts_matrix')
    """
    oq = self.oqparam
    # take the cached self.rlzs_assoc and write it on the datastore
    self.rlzs_assoc = self.rlzs_assoc
    rlzs = self.rlzs_assoc.realizations
    loss_types = self.riskmodel.get_loss_types()
    # composite dtype for a loss curve with resolution C
    C = oq.loss_curve_resolution
    self.loss_curve_dt = numpy.dtype(
        [('losses', (float, C)), ('poes', (float, C)), ('avg', float)])
    if oq.conditional_loss_poes:
        # one float field per conditional loss PoE
        lm_names = _loss_map_names(oq.conditional_loss_poes)
        self.loss_map_dt = numpy.dtype([(f, float) for f in lm_names])
    self.assets = assets = riskinput.sorted_assets(self.assets_by_site)
    # keep only the assets explicitly requested in the configuration
    self.specific_assets = specific_assets = [
        a for a in assets if a.id in self.oqparam.specific_assets]
    specific_asset_refs = set(self.oqparam.specific_assets)
    N = len(assets)
    # one dictionary per realization
    event_loss_asset = [{} for rlz in rlzs]
    event_loss = [{} for rlz in rlzs]
    # pre-allocated output arrays, reused across realizations
    loss_curves = self.zeros(N, self.loss_curve_dt)
    ins_curves = self.zeros(N, self.loss_curve_dt)
    if oq.conditional_loss_poes:
        loss_maps = self.zeros(N, self.loss_map_dt)
    agg_loss_curve = self.zeros(1, self.loss_curve_dt)
    for i in sorted(result):  # i is the realization ordinal
        rlz = rlzs[i]
        data_by_lt_tag = result[i]
        # (loss_type, asset_id) -> [(tag, loss, ins_loss), ...]
        elass = {(loss_type, asset.id): [] for asset in assets
                 for loss_type in loss_types}
        elagg = []  # aggregate event loss
        nonzero = total = 0
        for loss_type, tag in data_by_lt_tag:
            d = data_by_lt_tag[loss_type, tag]
            if tag == 'counts_matrix':
                # special tag: d maps assets to exceedance counts
                # NOTE(review): this rebinds `assets`, clobbering the
                # outer sorted asset list for any later iteration that
                # reads it — confirm this is intended
                assets, counts = d.keys(), d.values()
                indices = numpy.array([asset.idx for asset in assets])
                asset_values = workflows.get_values(loss_type, assets)
                poes = scientific.build_poes(
                    counts, oq.ses_per_logic_tree_path)
                cb = scientific.CurveBuilder(
                    loss_type, numpy.linspace(0, 1, C))
                lcurves = cb.build_loss_curves(
                    poes, asset_values, indices, N)
                self.store('lcurves/' + loss_type, rlz, lcurves)
                continue
            # regular tag: accumulate per-asset event losses
            for aid, loss, ins_loss in d['data']:
                elass[loss_type, aid].append((tag, loss, ins_loss))
            # aggregates
            elagg.append((loss_type, tag, d['loss'], d['ins_loss']))
            nonzero += d['nonzero']
            total += d['total']
        logging.info('rlz=%d: %d/%d nonzero losses', i, nonzero, total)
        if elass:
            # regroup the per-(loss_type, asset) rows by loss_type only
            data_by_lt = collections.defaultdict(list)
            for (loss_type, asset_id), rows in elass.items():
                for tag, loss, ins_loss in rows:
                    data_by_lt[loss_type].append(
                        (tag, asset_id, loss, ins_loss))
            for loss_type, data in data_by_lt.items():
                # keep only rows for the requested specific assets
                event_loss_asset[i][loss_type] = sorted(
                    # data contains rows (tag, asset, loss, ins_loss);
                    # NB: l and i here are loss/ins_loss, not indices
                    (t, a, l, i) for t, a, l, i in data
                    if a in specific_asset_refs)
                # build the loss curves per asset
                lc = self.build_loss_curves(elass, loss_type, 1)
                loss_curves[loss_type] = lc
                if oq.insured_losses:
                    # build the insured loss curves per asset
                    ic = self.build_loss_curves(elass, loss_type, 2)
                    ins_curves[loss_type] = ic
                if oq.conditional_loss_poes:
                    # build the loss maps per asset, array of shape (N, P)
                    losses_poes = numpy.array(  # shape (N, 2, C)
                        [lc['losses'], lc['poes']]).transpose(1, 0, 2)
                    lmaps = scientific.loss_map_matrix(
                        oq.conditional_loss_poes, losses_poes)  # (P, N)
                    for lm, lmap in zip(lm_names, lmaps):
                        loss_maps[loss_type][lm] = lmap
            self.store('loss_curves', rlz, loss_curves)
            if oq.insured_losses:
                self.store('ins_curves', rlz, ins_curves)
            if oq.conditional_loss_poes:
                self.store('loss_maps', rlz, loss_maps)
        if elagg:
            # NOTE(review): this `groupby` is called with .items(), so it
            # is presumably a project helper returning a dict, not
            # itertools.groupby — verify the import
            for loss_type, rows in groupby(
                    elagg, operator.itemgetter(0)).items():
                event_loss[i][loss_type] = [row[1:] for row in rows]
                # aggregate loss curve for all tags
                losses, poes, avg, _ = self.build_agg_loss_curve_and_map(
                    [loss for _lt, _tag, loss, _ins_loss in rows])
                # NB: there is no aggregate insured loss curve
                agg_loss_curve[loss_type][0] = (losses, poes, avg)
            # NB: the aggregated loss_map is not stored
            self.store('agg_loss_curve', rlz, agg_loss_curve)
    if specific_assets:
        self.event_loss_asset = event_loss_asset
        self.event_loss = event_loss
    # store statistics (i.e. mean and quantiles) for curves and maps
    if len(self.rlzs_assoc.realizations) > 1:
        self.compute_store_stats('loss_curves')
        self.compute_store_stats('agg_loss_curve')