def test_case_15(self):  # full enumeration
    self.assert_curves_ok('''\
hazard_curve-mean.csv
hazard_curve-smltp_SM1-gsimltp_BA2008_C2003.csv
hazard_curve-smltp_SM1-gsimltp_BA2008_T2002.csv
hazard_curve-smltp_SM1-gsimltp_CB2008_C2003.csv
hazard_curve-smltp_SM1-gsimltp_CB2008_T2002.csv
[email protected]
[email protected]
[email protected]
[email protected]
hazard_map-mean.csv
hazard_map-smltp_SM1-gsimltp_BA2008_C2003.csv
hazard_map-smltp_SM1-gsimltp_BA2008_T2002.csv
hazard_map-smltp_SM1-gsimltp_CB2008_C2003.csv
hazard_map-smltp_SM1-gsimltp_CB2008_T2002.csv
[email protected]
[email protected]
[email protected]
[email protected]'''.split(), case_15.__file__)

    # test UHS CSV export
    [fname] = [f for f in export(('uhs', 'csv'), self.calc.datastore)
               if 'mean' in f]
    self.assertEqualFiles('expected/hazard_uhs-mean.csv', fname)

    # test UHS XML export
    fnames = [f for f in export(('uhs', 'xml'), self.calc.datastore)
              if 'mean' in f]
    self.assertEqualFiles('expected/hazard_uhs-mean-0.01.xml', fnames[0])
    self.assertEqualFiles('expected/hazard_uhs-mean-0.1.xml', fnames[1])
    self.assertEqualFiles('expected/hazard_uhs-mean-0.2.xml', fnames[2])

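# NB: a note on the exporter API used throughout these tests. export() takes
# an export key, i.e. a pair (datastore key, format), plus a DataStore
# instance; it writes the corresponding files under the datastore's
# export_dir and returns the list of written paths. A hedged calling sketch
# (the key and file name are just the ones appearing above):
#
#     fnames = export(('uhs', 'csv'), self.calc.datastore)
#     # fnames is a list of paths, e.g. ['.../hazard_uhs-mean.csv', ...]
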
def test_case_1(self):  # test for the fatalities
    self.run_calc(case_1.__file__, 'job_ebr.ini')
    ds = DataStore(self.calc.datastore.calc_id,
                   export_dir=self.calc.datastore.export_dir)
    fnames = export(('assetcol', 'csv'), ds) + export(
        ('event_loss_table-rlzs', 'csv'), ds)
    for fname in fnames:
        self.assertEqualFiles('expected/' + os.path.basename(fname), fname)

def test_case_1(self):
    out = self.run_calc(case_1.__file__, 'job_risk.ini', exports='csv')
    [fname] = out['agglosses-rlzs', 'csv']
    self.assertEqualFiles('expected/agg.csv', fname)

    # check the exported GMFs
    [gmf1, gmf2] = export(('gmfs:0,1', 'csv'), self.calc.datastore)
    self.assertEqualFiles('expected/gmf1.csv', gmf1)
    self.assertEqualFiles('expected/gmf2.csv', gmf2)
    [fname] = export(('gmf_data', 'csv'), self.calc.datastore)
    self.assertEqualFiles('expected/gmf-FromFile-PGA.csv', fname)

def test_case_15(self):  # full enumeration
    self.assert_curves_ok('''\
hazard_curve-mean.csv
hazard_curve-smltp_SM1-gsimltp_BA2008_C2003.csv
hazard_curve-smltp_SM1-gsimltp_BA2008_T2002.csv
hazard_curve-smltp_SM1-gsimltp_CB2008_C2003.csv
hazard_curve-smltp_SM1-gsimltp_CB2008_T2002.csv
[email protected]
[email protected]
[email protected]
[email protected]
hazard_uhs-mean.csv
hazard_uhs-smltp_SM1-gsimltp_BA2008_C2003.csv
hazard_uhs-smltp_SM1-gsimltp_BA2008_T2002.csv
hazard_uhs-smltp_SM1-gsimltp_CB2008_C2003.csv
hazard_uhs-smltp_SM1-gsimltp_CB2008_T2002.csv
[email protected]
[email protected]
[email protected]
[email protected]'''.split(), case_15.__file__)

    # test UHS XML export
    fnames = [f for f in export(('uhs', 'xml'), self.calc.datastore)
              if 'mean' in f]
    self.assertEqualFiles('expected/hazard_uhs-mean-0.01.xml', fnames[0])
    self.assertEqualFiles('expected/hazard_uhs-mean-0.1.xml', fnames[1])
    self.assertEqualFiles('expected/hazard_uhs-mean-0.2.xml', fnames[2])

def export_from_datastore(output_key, calc_id, datadir, target):
    """
    :param output_key: a pair (ds_key, fmt)
    :param calc_id: calculation ID
    :param datadir: directory containing the datastore
    :param target: directory, temporary when called from the engine server
    """
    makedirs(target)
    ds_key, fmt = output_key
    dstore = datastore.read(calc_id, datadir=datadir)
    dstore.export_dir = target
    try:
        exported = export(output_key, dstore)
    except KeyError:
        raise DataStoreExportError(
            'Could not export %s in %s' % output_key)
    if not exported:
        raise DataStoreExportError(
            'Nothing to export for %s' % ds_key)
    elif len(exported) > 1:
        # NB: I am hiding the archive by starting its name with a '.',
        # to avoid confusing the users, since the unzipped files are
        # already in the target directory; the archive is used internally
        # by the WebUI, so it must be there; it would be nice not to
        # generate it when not using the Web UI, but I will leave that
        # feature for after the removal of the old calculators
        archname = '.' + ds_key + '-' + fmt + '.zip'
        zipfiles(exported, os.path.join(target, archname))
        return os.path.join(target, archname)
    else:  # single file
        return exported[0]

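# A hedged usage sketch for export_from_datastore; the calc_id, datadir and
# target values below are hypothetical (in production the WebUI passes a
# temporary directory as target). The return value is a single file path,
# or the path of the hidden .zip archive when more than one file is written:
#
#     path = export_from_datastore(
#         ('hcurves', 'csv'),                  # (ds_key, fmt)
#         calc_id=42,                          # hypothetical calculation ID
#         datadir=os.path.expanduser('~/oqdata'),
#         target=tempfile.mkdtemp())
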
def test_case_1(self):
    self.assert_stats_ok(case_1, 'job_haz.ini,job_risk.ini')

    # make sure the XML and JSON exporters run
    ekeys = [
        ('loss_curves-stats', 'xml'),
        ('loss_curves-stats', 'geojson'),
        ('rcurves-rlzs', 'xml'),
        ('rcurves-rlzs', 'geojson'),
        ('loss_maps-stats', 'xml'),
        ('loss_maps-stats', 'geojson'),
        ('loss_maps-rlzs', 'xml'),
        ('loss_maps-rlzs', 'geojson'),
        ('agg_curve-stats', 'xml'),
    ]
    for ekey in ekeys:
        export(ekey, self.calc.datastore)

def post_execute(self, result):
    """
    :param result: a dictionary {
         ('asset', asset): <mean stddev>,
         ('taxonomy', asset.taxonomy): <damage array>}
    :returns: a dictionary {
         'dmg_per_asset': /path/to/dmg_per_asset.xml,
         'dmg_per_taxonomy': /path/to/dmg_per_taxonomy.xml,
         'dmg_total': /path/to/dmg_total.xml}
    """
    dmg_states = [DmgState(s, i)
                  for i, s in enumerate(self.riskmodel.damage_states)]
    dd_taxo = []
    dd_asset = []
    shape = self.oqparam.number_of_ground_motion_fields, len(dmg_states)
    totals = numpy.zeros(shape)  # R x D matrix
    for (key_type, key), values in result.iteritems():
        if key_type == 'taxonomy':
            # values are fractions, an R x D matrix
            totals += values
            means, stds = scientific.mean_std(values)
            for dmg_state, mean, std in zip(dmg_states, means, stds):
                dd_taxo.append(
                    DmgDistPerTaxonomy(key, dmg_state, mean, std))
        elif key_type == 'asset':
            # values are mean and stddev, a D x 2 matrix
            for dmg_state, mean_std in zip(dmg_states, values):
                dd_asset.append(
                    DmgDistPerAsset(
                        ExposureData(key.id, Site(key.location)),
                        dmg_state, mean_std[0], mean_std[1]))
    dd_total = []
    # totals is R x D, so iterate over its columns, one per damage state
    for dmg_state, total in zip(dmg_states, totals.T):
        mean, std = scientific.mean_std(total)
        dd_total.append(DmgDistTotal(dmg_state, mean, std))

    # export
    f1 = export('dmg_per_asset_xml', self.oqparam.export_dir,
                self.riskmodel.damage_states, dd_asset)
    f2 = export('dmg_per_taxonomy_xml', self.oqparam.export_dir,
                self.riskmodel.damage_states, dd_taxo)
    f3 = export('dmg_total_xml', self.oqparam.export_dir,
                self.riskmodel.damage_states, dd_total)
    return f1 + f2 + f3

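# A self-contained shape sketch of the statistics in post_execute above,
# with made-up sizes, assuming scientific.mean_std reduces along axis 0
# the way numpy.mean/numpy.std do (the ddof=1 choice is an assumption too):

import numpy

R, D = 10, 4  # hypothetical: R ground motion fields, D damage states
fractions = numpy.random.random((R, D))  # one row per GMF realization
means = fractions.mean(axis=0)           # one mean per damage state
stds = fractions.std(axis=0, ddof=1)     # one sample stddev per damage state
assert means.shape == stds.shape == (D,)
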
def test_case_4_hazard(self):
    # Turkey with SHARE logic tree; TODO: add site model
    out = self.run_calc(case_4.__file__, 'job_h.ini',
                        ground_motion_fields='false', exports='csv')
    [fname] = out['hcurves', 'csv']
    self.assertEqualFiles('expected/hazard_curve-mean.csv', fname)
    [fname] = out['hmaps', 'csv']
    self.assertEqualFiles('expected/hazard_map-mean.csv', fname)
    fnames = export(('hmaps', 'xml'), self.calc.datastore)
    self.assertEqual(len(fnames), 4)  # 2 IMT x 2 poes

def test_case_1(self):
    check_platform('xenial')
    self.assert_stats_ok(case_1, 'job.ini')

    # make sure the XML and JSON exporters run
    ekeys = [
        ('loss_curves-stats', 'xml'),
        ('loss_curves-stats', 'geojson'),
        ('rcurves-rlzs', 'xml'),
        ('rcurves-rlzs', 'geojson'),
        ('loss_maps-stats', 'xml'),
        ('loss_maps-stats', 'geojson'),
        ('loss_maps-rlzs', 'xml'),
        ('loss_maps-rlzs', 'geojson'),
        ('agg_curve-stats', 'xml'),
    ]
    for ekey in ekeys:
        export(ekey, self.calc.datastore)

def test_event_based_risk(self):
    if h5py.__version__ < '2.3.0':
        raise unittest.SkipTest('UCERF requires vlen arrays')
    self.run_calc(ucerf.__file__, 'job_ebr.ini',
                  number_of_logic_tree_samples='2')
    fnames = export(('agg_loss_table', 'csv'), self.calc.datastore)
    for fname in fnames:
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)
    fname = writetmp(view('portfolio_loss', self.calc.datastore))
    self.assertEqualFiles('expected/portfolio_loss.txt', fname)

def test_case_4_hazard(self):
    # Turkey with SHARE logic tree; TODO: add site model
    out = self.run_calc(case_4.__file__, 'job.ini',
                        calculation_mode='event_based',
                        ground_motion_fields='false', exports='csv')
    [fname] = out['hcurves', 'csv']
    self.assertEqualFiles('expected/hazard_curve-mean.csv', fname)
    [fname] = out['hmaps', 'csv']
    self.assertEqualFiles('expected/hazard_map-mean.csv', fname)
    fnames = export(('hmaps', 'xml'), self.calc.datastore)
    self.assertEqual(len(fnames), 4)  # 2 IMT x 2 poes

def test_case_4_hazard(self):
    # Turkey with SHARE logic tree; TODO: add site model
    out = self.run_calc(case_4.__file__, 'job.ini',
                        calculation_mode='event_based',
                        ground_motion_fields='false', exports='csv')
    [fname] = out['hcurves', 'csv']
    self.assertEqualFiles('expected/hazard_curve-mean.csv', fname)
    [fname] = out['hmaps', 'csv']
    self.assertEqualFiles('expected/hazard_map-mean.csv', fname)
    fnames = export(('hmaps', 'xml'), self.calc.datastore)
    self.assertEqual(len(fnames), 4)  # 2 IMT x 2 poes

    # export a single rupture
    [f1, f2] = export(('gmfs:0', 'csv'), self.calc.datastore)
    self.assertEqualFiles(
        'expected/gmf-trt=05'
        '~ses=0001~src=AS_TRAS334~rup=612021-01-PGA.csv', f1)
    self.assertEqualFiles(
        'expected/gmf-trt=05'
        '~ses=0001~src=AS_TRAS334~rup=612021-01-SA(0.5).csv', f2)

def test_case_4_hazard(self):
    # Turkey with SHARE logic tree; TODO: add site model
    out = self.run_calc(case_4.__file__, 'job_h.ini',
                        ground_motion_fields='false', exports='csv')
    [fname] = out['hcurves', 'csv']
    self.assertEqualFiles('expected/hazard_curve-mean.csv', fname)
    [fname] = out['hmaps', 'csv']
    self.assertEqualFiles('expected/hazard_map-mean.csv', fname)
    fnames = export(('hmaps', 'xml'), self.calc.datastore)
    self.assertEqual(len(fnames), 4)  # 2 IMT x 2 poes

    # export a single rupture
    [f1, f2] = export(('gmfs:0', 'csv'), self.calc.datastore)
    self.assertEqualFiles(
        'expected/gmf-col=05'
        '~ses=0001~src=AS_TRAS334~rup=612021-01-PGA.csv', f1)
    self.assertEqualFiles(
        'expected/gmf-col=05'
        '~ses=0001~src=AS_TRAS334~rup=612021-01-SA(0.5).csv', f2)

def post_execute(self, result):
    """
    :param result: a dictionary imt -> gmfs
    :returns: a dictionary {'gmf_xml': <gmf.xml filename>}
    """
    logging.info('Exporting the result')
    gmfs_by_imt = {  # build N x R matrices
        imt: numpy.array(
            [result[tag][imt] for tag in self.tags]).T
        for imt in map(str, self.imts)}
    out = export(
        'gmf_xml', self.oqparam.export_dir,
        self.sitecol, self.tags, gmfs_by_imt)
    return out

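# Shape sketch for the transposition above, with hypothetical sizes:
# result[tag][imt] holds the ground motion values of one rupture tag over
# the N sites, so stacking the R tags gives an R x N array and .T yields
# the N x R (site x rupture) layout passed to the gmf_xml writer:

import numpy

N, R = 5, 3                                # hypothetical sizes
rows = [numpy.zeros(N) for _ in range(R)]  # one row per rupture tag
matrix = numpy.array(rows).T
assert matrix.shape == (N, R)
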
def post_execute(self, result):
    """
    Export the aggregate loss curves in CSV format.
    """
    aggcurves = general.AccumDict()  # key_type -> AggLossCurves
    for (key_type, loss_type), values in result.iteritems():
        mean, std = scientific.mean_std(values)
        curve = AggLossCurve(loss_type, self.unit[loss_type], mean, std)
        aggcurves += {key_type: [curve]}
    out = {}
    for key_type in aggcurves:
        fname = export('%s_loss_csv' % key_type, self.oqparam.export_dir,
                       aggcurves[key_type])
        out[key_type] = fname
    return out

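# The line `aggcurves += {key_type: [curve]}` above relies on AccumDict
# semantics: += merges by key, adding the values of keys already present
# (list concatenation here) and inserting new ones. A minimal sketch,
# assuming general.AccumDict behaves as an additive-merge dict:
#
#     acc = general.AccumDict()
#     acc += {'asset': [curve1]}
#     acc += {'asset': [curve2], 'taxonomy': [curve3]}
#     # now acc == {'asset': [curve1, curve2], 'taxonomy': [curve3]}
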
def test_case_6a(self):
    # case with two gsims
    out = self.run_calc(case_6a.__file__, 'job_haz.ini,job_risk.ini',
                        exports='csv')
    f1, f2 = out['agglosses-rlzs', 'csv']
    self.assertEqualFiles('expected/agg-gsimltp_b1_structural.csv', f1)
    self.assertEqualFiles('expected/agg-gsimltp_b2_structural.csv', f2)

    # testing the totlosses view
    dstore = self.calc.datastore
    fname = writetmp(view('totlosses', dstore))
    self.assertEqualFiles('expected/totlosses.txt', fname)

    # testing the specific GMF exporter
    [gmf] = export(('gmfs:0', 'csv'), self.calc.datastore)
    self.assertEqualFiles('expected/gmf-0-PGA.csv', gmf)

def test_case_17(self):  # oversampling
    expected = [
        'hazard_curve-smltp_b2-gsimltp_b1-ltr_1.csv',
        'hazard_curve-smltp_b2-gsimltp_b1-ltr_2.csv',
        'hazard_curve-smltp_b2-gsimltp_b1-ltr_3.csv',
        'hazard_curve-smltp_b2-gsimltp_b1-ltr_4.csv',
    ]
    out = self.run_calc(case_17.__file__, 'job.ini', exports='csv')
    fnames = out['hcurves', 'csv']
    for exp, got in zip(expected, fnames):
        self.assertEqualFiles('expected/%s' % exp, got, sorted)

    # check that a single rupture file is exported even if there are
    # several collections
    [fname] = export(('sescollection', 'xml'), self.calc.datastore)
    self.assertEqualFiles('expected/ses.xml', fname)

def export(self, exports=None):
    """
    Export all the outputs in the datastore in the given export formats.

    :returns: dictionary output_key -> sorted list of exported paths
    """
    exported = {}
    individual_curves = self.oqparam.individual_curves
    fmts = exports.split(',') if exports else self.oqparam.exports
    for fmt in fmts:
        if not fmt:
            continue
        for key in self.datastore:  # top level keys
            if 'rlzs' in key and not individual_curves:
                continue  # skip individual curves
            ekey = (key, fmt)
            if ekey not in export.export:  # non-exportable output
                continue
            exported[ekey] = export.export(ekey, self.datastore)
            logging.info('exported %s: %s', key, exported[ekey])
    return exported

def export(self):
    """
    Export all the outputs in the datastore in the given export formats.

    :returns: dictionary output_key -> sorted list of exported paths
    """
    exported = {}
    individual_curves = self.oqparam.individual_curves
    for fmt in self.oqparam.exports:
        if not fmt:
            continue
        for key in self.datastore:
            if 'rlzs' in key and not individual_curves:
                continue  # skip individual curves
            ekey = (key, fmt)
            try:
                exported[ekey] = sorted(
                    export.export(ekey, self.datastore))
                logging.info('exported %s: %s', key, exported[ekey])
            except KeyError:
                logging.info('%s is not exportable in %s', key, fmt)
    return exported

def export(self, exports=None):
    """
    Export all the outputs in the datastore in the given export formats.

    :returns: dictionary output_key -> sorted list of exported paths
    """
    exported = {}
    individual_curves = self.oqparam.individual_curves
    fmts = exports.split(',') if exports else self.oqparam.exports
    for fmt in fmts:
        if not fmt:
            continue
        for key in self.datastore:
            if 'rlzs' in key and not individual_curves:
                continue  # skip individual curves
            ekey = (key, fmt)
            try:
                exported[ekey] = sorted(export.export(
                    ekey, self.datastore))
                logging.info('exported %s: %s', key, exported[ekey])
            except KeyError:
                logging.info('%s is not exportable in %s', key, fmt)
    return exported

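# The three variants of the export method above differ mainly in how they
# handle non-exportable outputs: the first checks membership in the
# export.export dispatcher up front, while the other two just attempt the
# export and catch the KeyError raised for an unknown (key, fmt) pair.
# A hedged usage sketch (calc stands for a hypothetical calculator instance):
#
#     exported = calc.export(exports='csv,xml')
#     for (key, fmt), paths in sorted(exported.items()):
#         print('%s.%s -> %s' % (key, fmt, paths))
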
def post_execute(self, result):
    result = {str(imt): gmvs for imt, gmvs in result.items()}
    out = export('gmf_xml', self.oqparam.export_dir,
                 self.sitecol, self.rupture_tags, result)
    return out