def compute_store_stats(self, rlzs, kind):
    """
    Compute the statistical outputs (mean/quantile loss curves, insured
    curves and loss maps) and save them into the datastore.

    :param rlzs: the realizations of the current calculation
    :param kind: '_specific' for the specific assets, '' for all of them
    """
    oq = self.oqparam
    stats_builder = scientific.StatsBuilder(
        oq.quantile_loss_curves, oq.conditional_loss_poes, [],
        scientific.normalize_curves_eb)
    # one Output per loss_type, built from the collected data
    if kind == '_specific':
        outputs = [stats_builder.build(data, prefix='specific-')
                   for data in self._collect_specific_data()]
    else:
        outputs = (stats_builder.build(data)
                   for data in self._collect_all_data())
    for output in outputs:
        curves, ins_curves, maps = scientific.get_stat_curves(output)
        # output.paths are templates like
        #   %s-stats/structural/mean
        #   %s-stats/structural/quantile-0.1
        # filled with the dataset prefix below
        for idx, path in enumerate(output.paths):
            self.datastore[path % 'loss_curves'] = curves[idx]
            if oq.insured_losses:
                self.datastore[path % 'ins_curves'] = ins_curves[idx]
            if oq.conditional_loss_poes:
                self.datastore[path % 'loss_maps'] = maps[idx]
    simple = scientific.SimpleStats(rlzs, oq.quantile_loss_curves)
    nbytes = simple.compute('avg_losses-rlzs', self.datastore)
    self.datastore['avg_losses-stats'].attrs['nbytes'] = nbytes
    self.datastore.hdf5.flush()
def compute_store_stats(self, loss_curve_key):
    """
    Compute the statistical outputs for the given key and store them.

    :param loss_curve_key:
        datastore key of the loss curves ('agg_...' keys hold a single
        aggregate curve, the others one curve per asset)
    """
    oq = self.oqparam
    # aggregate curves have a single "asset"; otherwise one row per asset
    num_assets = 1 if loss_curve_key.startswith('agg_') else len(self.assets)
    num_stats = 1 + len(oq.quantile_loss_curves)  # mean + quantiles
    curve_stats = self.zeros((num_stats, num_assets), self.loss_curve_dt)
    ins_stats = self.zeros((num_stats, num_assets), self.loss_curve_dt)
    if oq.conditional_loss_poes:
        map_stats = self.zeros((num_stats, num_assets), self.loss_map_dt)
    for stat in self.build_stats(loss_curve_key):
        # one stat per loss_type; the [:] yields a view, so the field
        # assignment below writes into the underlying array
        curves, ins_curves, maps = scientific.get_stat_curves(stat)
        curve_stats[:][stat.loss_type] = curves
        if oq.insured_losses:
            ins_stats[:][stat.loss_type] = ins_curves
        if oq.conditional_loss_poes:
            map_stats[:][stat.loss_type] = maps
    # store one dataset per statistic (mean, quantile-...)
    for idx, stats in enumerate(_mean_quantiles(oq.quantile_loss_curves)):
        self.store(loss_curve_key, stats, curve_stats[idx])
        if oq.insured_losses:
            self.store(loss_curve_key + '_ins', stats, ins_stats[idx])
        if oq.conditional_loss_poes:
            self.store(loss_curve_key + '_maps', stats, map_stats[idx])
def compute_store_stats(self, rlzs, kind):
    """
    Compute and store the statistical outputs: loss curves, insured
    curves (if enabled) and loss maps (if conditional PoEs are given).

    :param rlzs: the realizations of the calculation
    :param kind: '_specific' to restrict to the specific assets
    """
    oq = self.oqparam
    builder = scientific.StatsBuilder(oq.quantile_loss_curves,
                                      oq.conditional_loss_poes, [],
                                      scientific.normalize_curves_eb)
    specific = kind == '_specific'
    if specific:
        datasets = self._collect_specific_data()
        stat_list = [builder.build(d, prefix='specific-') for d in datasets]
    else:
        stat_list = map(builder.build, self._collect_all_data())
    for stat in stat_list:  # one per loss_type
        curves, ins_curves, maps = scientific.get_stat_curves(stat)
        # each path is a template such as '%s-stats/structural/mean' or
        # '%s-stats/structural/quantile-0.1'
        for j, template in enumerate(stat.paths):
            self.datastore[template % 'loss_curves'] = curves[j]
            if oq.insured_losses:
                self.datastore[template % 'ins_curves'] = ins_curves[j]
            if oq.conditional_loss_poes:
                self.datastore[template % 'loss_maps'] = maps[j]
    avg_stats = scientific.SimpleStats(rlzs, oq.quantile_loss_curves)
    nbytes = avg_stats.compute('avg_losses-rlzs', self.datastore)
    self.datastore['avg_losses-stats'].attrs['nbytes'] = nbytes
    self.datastore.hdf5.flush()
def test_get_stat_curves(self):
    """Check the statistical curves/maps against the expected CSV files."""
    curves, ins_curves, maps = scientific.get_stat_curves(self.stats)
    # write each output to a temporary CSV and compare with the
    # corresponding expected file, in the same order as before
    for data, fname in ((curves, 'expected_loss_curves.csv'),
                        (ins_curves, 'expected_ins_curves.csv'),
                        (maps, 'expected_loss_maps.csv')):
        actual = os.path.join(self.tempdir, fname)
        writers.write_csv(actual, data, fmt='%05.2f')
        tests.check_equal(__file__, fname, actual)
def test_get_stat_curves(self):
    """Check the statistical loss curves and maps against expected CSVs."""
    tempdir = tempfile.mkdtemp()
    curves, ins_curves, maps = scientific.get_stat_curves(self.stats)
    for data, fname in ((curves, 'expected_loss_curves.csv'),
                        (maps, 'expected_loss_maps.csv')):
        actual = os.path.join(tempdir, fname)
        writers.write_csv(actual, data, fmt='%05.2f')
        tests.check_equal(__file__, fname, actual)
    # removed only on success, so a failing run leaves the files around
    # for inspection
    shutil.rmtree(tempdir)
def compute_store_stats(self, rlzs, kind):
    """
    Compute the statistical outputs (loss curves, insured curves, loss
    maps) and store them in the datastore.

    :param rlzs: the realizations of the current calculation
    :param kind: '_specific' for the specific assets, '' for all of them
    """
    oq = self.oqparam
    N = (len(self.oqparam.specific_assets) if kind == '_specific'
         else len(self.assetcol))
    Q = 1 + len(oq.quantile_loss_curves)  # mean + quantiles
    C = oq.loss_curve_resolution  # TODO: could be loss_type-dependent
    loss_curve_dt = numpy.dtype([('losses', (float, C)),
                                 ('poes', (float, C)),
                                 ('avg', float)])
    if oq.conditional_loss_poes:
        lm_names = _loss_map_names(oq.conditional_loss_poes)
        loss_map_dt = numpy.dtype([(f, float) for f in lm_names])
    loss_curve_stats = numpy.zeros((Q, N), loss_curve_dt)
    ins_curve_stats = numpy.zeros((Q, N), loss_curve_dt)
    if oq.conditional_loss_poes:
        loss_map_stats = numpy.zeros((Q, N), loss_map_dt)
    builder = scientific.StatsBuilder(oq.quantile_loss_curves,
                                      oq.conditional_loss_poes, [],
                                      scientific.normalize_curves_eb)
    # dispatch to build_stats or build_specific_stats depending on kind
    build_stats = getattr(self, 'build%s_stats' % kind)
    all_stats = build_stats(builder)
    for stat in all_stats:
        # there is one stat for each loss_type
        curves, ins_curves, maps = scientific.get_stat_curves(stat)
        loss_curve_stats[:] = curves
        if oq.insured_losses:
            ins_curve_stats[:] = ins_curves
        if oq.conditional_loss_poes:
            loss_map_stats[:] = maps
        for i, path in enumerate(stat.paths):
            # paths are templates like '%s-stats/structural/mean'
            self._store(path % 'loss_curves', loss_curve_stats[i])
            if oq.insured_losses:
                # previously stored unconditionally, writing all-zero
                # placeholder arrays when insured losses were disabled;
                # now guarded, consistently with the other outputs
                self._store(path % 'ins_curves', ins_curve_stats[i])
            if oq.conditional_loss_poes:
                self._store(path % 'loss_maps', loss_map_stats[i])
    stats = scientific.SimpleStats(rlzs, oq.quantile_loss_curves)
    stats.compute_and_store('avg_losses', self.datastore)
def build_agg_curve_stats(self, builder):
    """
    Build and save `agg_curve-stats` in the HDF5 file.

    :param builder:
        :class:`openquake.risklib.scientific.StatsBuilder` instance
    """
    rlzs = self.datastore['rlzs_assoc'].realizations
    agg_curve = self.datastore['agg_curve-rlzs']
    num_stats = len(builder.quantiles) + 1  # mean + quantiles
    for loss_type in self.riskmodel.loss_types:
        aggcurve = agg_curve[loss_type].value
        outputs = []
        for rlz in rlzs:
            ordinal = rlz.ordinal
            # column 0 holds the plain losses, column 1 the insured ones
            avg = aggcurve['avg'][ordinal, 0]
            avg_ins = aggcurve['avg'][ordinal, 1]
            curve = (aggcurve['losses'][ordinal, 0],
                     aggcurve['poes'][ordinal, 0])
            insured = ([(aggcurve['losses'][ordinal, 1],
                         aggcurve['poes'][ordinal, 1])]
                       if self.oqparam.insured_losses else None)
            outputs.append(scientific.Output(
                [None], loss_type, ordinal, rlz.weight,
                loss_curves=[curve], insured_curves=insured,
                average_losses=[avg], average_insured_losses=[avg_ins]))
        stat = builder.build(outputs)
        curves, ins_curves, _maps = scientific.get_stat_curves(stat)
        # array of shape (num_stats, 2): column 0 plain, column 1 insured
        agg_curve_stats = numpy.zeros((num_stats, 2), self.loss_curve_dt)
        for name in self.loss_curve_dt.names:
            agg_curve_stats[name][:, 0] = curves[name][:, 0]
            if self.oqparam.insured_losses:
                agg_curve_stats[name][:, 1] = ins_curves[name][:, 0]
        key = 'agg_curve-stats/' + loss_type
        self.datastore[key] = agg_curve_stats
        self.datastore[key].attrs['nbytes'] = agg_curve_stats.nbytes