def test_case_master(self):
    """Check statistics and several exporters/views for the case_master
    calculation run with individual_curves='false'."""
    self.assert_stats_ok(case_master, 'job.ini', individual_curves='false')
    # loss maps per realization, compared file by file
    fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
    for fname in fnames:
        self.assertEqualFiles('expected/' + strip_calc_id(fname), fname)
    # the portfolio_loss view is dumped to a temporary file and compared
    fname = writetmp(view('portfolio_loss', self.calc.datastore))
    self.assertEqualFiles('expected/portfolio_loss.txt', fname, delta=1E-5)
    # check rup_data is stored correctly
    fname = writetmp(view('ruptures_events', self.calc.datastore))
    self.assertEqualFiles('expected/ruptures_events.txt', fname)
    # export a specific eid
    fnames = export(('all_loss_ratios:0', 'csv'), self.calc.datastore)
    for fname in fnames:
        self.assertEqualFiles('expected/' + strip_calc_id(fname), fname)
    # NOTE(review): `fname` below is the loop variable leaked from the
    # loop above, i.e. the last exported file; presumably the export
    # produces a single file, which is then also compared against
    # losses-eid=0.csv — confirm the export really returns one file
    self.assertEqualFiles('expected/losses-eid=0.csv', fname)
    # export a specific pair (sm_id, eid)
    fnames = export(('all_loss_ratios:1:0', 'csv'), self.calc.datastore)
    for fname in fnames:
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)
def test_occupants(self):
    """Run the occupants calculation and compare the CSV exports."""
    self.run_calc(occupants.__file__, 'job.ini')
    # aggregate loss curves per realization
    for path in export(('agg_curves-rlzs', 'csv'), self.calc.datastore):
        self.assertEqualFiles('expected/' + strip_calc_id(path), path,
                              delta=1E-5)
    # loss maps per realization must be exported too
    exported = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
    assert exported, 'loss_maps-rlzs not exported?'
    for path in exported:
        self.assertEqualFiles('expected/' + strip_calc_id(path), path,
                              delta=1E-5)
def test_case_4b(self):
    """Chained hazard + risk jobs: check the damage and loss exports."""
    self.run_calc(case_4b.__file__, 'job_haz.ini,job_risk.ini')
    dstore = self.calc.datastore
    # each of these exports must produce exactly one file
    for key in ('dmg_by_event', 'losses_by_event'):
        [path] = export((key, 'csv'), dstore)
        self.assertEqualFiles('expected/' + strip_calc_id(path), path)
    exported = export(('losses_by_asset', 'csv'), dstore)
    self.assertEqual(len(exported), 2)  # one per realization
    for path in exported:
        self.assertEqualFiles('expected/' + strip_calc_id(path), path)
def test_case_master_ebr(self):
    """Run case_master in ebrisk mode and compare csv outputs and the
    portfolio_loss view."""
    out = self.run_calc(case_master.__file__, 'job.ini',
                        calculation_mode='ebrisk',
                        investigation_time='1',
                        insured_losses='false', exports='csv')
    # both output groups are checked file by file
    for key in ('losses_by_taxon', 'agg_loss_table'):
        for path in out[key, 'csv']:
            self.assertEqualFiles('expected/%s' % strip_calc_id(path), path)
    tmpfile = writetmp(view('portfolio_loss', self.calc.datastore))
    self.assertEqualFiles(
        'expected/portfolio_loss_ebr.txt', tmpfile, delta=1E-5)
def test_case_7(self):
    # a case with three loss types
    self.assert_ok(case_7, 'job_h.ini,job_r.ini', exports='csv')
    # the npz export must run and produce a file with the expected name
    [npz_path] = export(('dmg_by_asset', 'npz'), self.calc.datastore)
    self.assertEqual(strip_calc_id(npz_path), 'dmg_by_asset.npz')
def test_case_1(self): out = self.assert_curves_ok( ['poe-0.02-rlz-0-PGA-10.1-40.1_Mag.csv', 'poe-0.02-rlz-0-PGA-10.1-40.1_Mag_Dist.csv', 'poe-0.02-rlz-0-PGA-10.1-40.1_Lon_Lat.csv', 'poe-0.02-rlz-0-SA(0.025)-10.1-40.1_Mag.csv', 'poe-0.02-rlz-0-SA(0.025)-10.1-40.1_Mag_Dist.csv', 'poe-0.02-rlz-0-SA(0.025)-10.1-40.1_Lon_Lat.csv', 'poe-0.1-rlz-0-PGA-10.1-40.1_Mag.csv', 'poe-0.1-rlz-0-PGA-10.1-40.1_Mag_Dist.csv', 'poe-0.1-rlz-0-PGA-10.1-40.1_Lon_Lat.csv', 'poe-0.1-rlz-0-SA(0.025)-10.1-40.1_Mag.csv', 'poe-0.1-rlz-0-SA(0.025)-10.1-40.1_Mag_Dist.csv', 'poe-0.1-rlz-0-SA(0.025)-10.1-40.1_Lon_Lat.csv'], case_1.__file__, fmt='csv') # check disagg_by_src, poe=0.02, 0.1, imt=PGA, SA(0.025) self.assertEqual(len(out['disagg_by_src', 'csv']), 4) for fname in out['disagg_by_src', 'csv']: self.assertEqualFiles('expected_output/%s' % strip_calc_id(fname), fname) # disaggregation by source group rlzs_assoc = self.calc.datastore['csm_info'].get_rlzs_assoc() pgetter = getters.PmapGetter(self.calc.datastore, rlzs_assoc) pgetter.init() pmaps = [] for grp in sorted(pgetter.dstore['poes']): pmaps.append(pgetter.get_mean(grp)) # make sure that the combination of the contributions is okay pmap = pgetter.get_mean() # total mean map cmap = combine(pmaps) # combination of the mean maps per source group for sid in pmap: numpy.testing.assert_almost_equal(pmap[sid].array, cmap[sid].array)
def test_case_3(self):
    # a test with statistics and without conditional_loss_poes
    out = self.run_calc(case_3.__file__, 'job.ini', exports='xml',
                        individual_curves='false', concurrent_tasks='4')
    # exactly one agg_curve-stats xml file is expected
    [path] = out['agg_curve-stats', 'xml']
    self.assertEqualFiles('expected/%s' % strip_calc_id(path), path)
def test_occupants(self):
    """Run the occupants calculation with xml exports and individual
    curves, then compare every exported file."""
    out = self.run_calc(occupants.__file__, 'job.ini', exports='xml',
                        individual_curves='true')
    exported = out['loss_maps-rlzs', 'xml'] + out['agg_curve-rlzs', 'xml']
    self.assertEqual(len(exported), 3)  # 2 loss_maps + 1 agg_curve
    for path in exported:
        self.assertEqualFiles('expected/' + strip_calc_id(path), path)
def test_case_1(self):
    """End-to-end check of the exporters, views and extractors for
    case_1."""
    self.run_calc(case_1.__file__, 'job.ini')
    dstore = self.calc.datastore
    for ekey in [('agg_curves-stats', 'csv')]:
        for path in export(ekey, dstore):
            self.assertEqualFiles(
                'expected/%s' % strip_calc_id(path), path)
    # make sure the agg_curves-stats has the right attrs
    self.check_attr('return_periods', [30, 60, 120, 240, 480, 960])
    self.check_attr('units', [b'EUR', b'EUR'])
    self.check_attr('nbytes', 96)

    # test the loss curves exporter
    [rlz0_path] = export(('loss_curves/rlz-0', 'csv'), dstore)
    [rlz1_path] = export(('loss_curves/rlz-1', 'csv'), dstore)
    self.assertEqualFiles('expected/loss_curves-rlz-000.csv', rlz0_path)
    self.assertEqualFiles('expected/loss_curves-rlz-001.csv', rlz1_path)
    [mean_path] = export(('loss_curves/mean', 'csv'), dstore)
    self.assertEqualFiles('expected/loss_curves-mean.csv', mean_path)

    # test the loss maps exporter
    maps = export(('loss_maps-stats', 'csv'), dstore)
    assert maps
    for path in maps:
        self.assertEqualFiles('expected/' + strip_calc_id(path), path,
                              delta=1E-5)

    # test portfolio loss
    tmp = gettemp(view('portfolio_loss', dstore))
    self.assertEqualFiles('expected/portfolio_loss.txt', tmp)

    # test the rup_loss_table exporter
    tables = export(('rup_loss_table', 'xml'), dstore)
    self.assertEqual(len(tables), 2)
    for path in tables:
        self.assertEqualFiles('expected/' + strip_calc_id(path), path)

    # test the src_loss_table extractor
    arr = extract(dstore, 'src_loss_table/structural')
    tmp = gettemp(rst_table(arr))
    self.assertEqualFiles('expected/src_loss_table.txt', tmp)
def check(self, case):
    """Run the hazard job of *case*, then the risk job on top of it,
    and compare the exported damages with the expected files."""
    self.run_calc(case.__file__, 'job_haz.ini')
    hc_id = str(self.calc.datastore.calc_id)
    self.run_calc(case.__file__, 'job_risk.ini',
                  hazard_calculation_id=hc_id)
    exported = export(('damages-rlzs', 'csv'), self.calc.datastore)
    if len(exported) == 1:
        # single realization: one fixed expected file
        self.assertEqualFiles('expected/damages.csv', exported[0])
    else:
        for path in exported:
            self.assertEqualFiles(
                'expected/%s' % strip_calc_id(path), path)
def test_case_4(self):
    # Turkey with SHARE logic tree
    out = self.run_calc(case_4.__file__, 'job.ini', exports='csv',
                        individual_curves='true')
    [avg_path] = out['avg_losses-stats', 'csv']
    self.assertEqualFiles('expected/avg_losses-mean.csv', avg_path)
    agg_paths = out['agg_loss_table', 'csv']
    assert agg_paths, 'No agg_losses exported??'
    for path in agg_paths:
        self.assertEqualFiles('expected/' + strip_calc_id(path), path)
def test_case_4(self):
    # a simple test with 1 asset and two source models;
    # also a test with preimported exposure
    self.run_calc(case_4.__file__, 'job_haz.ini')
    hazard = self.calc.datastore  # event_based
    self.run_calc(case_4.__file__, 'job_risk.ini',
                  hazard_calculation_id=str(hazard.calc_id))
    risk = self.calc.datastore  # event_based_risk
    [path] = export(('agg_loss_table', 'csv'), risk)
    self.assertEqualFiles('expected/' + strip_calc_id(path), path,
                          delta=1E-5)
def test_case_2(self):
    """Run case_2 first with the exposure in CSV format, then with the
    exposure in XML format, checking the bcr-stats exports."""
    # test with the exposure in CSV format
    self.run_calc(case_2.__file__, 'job.ini',
                  exposure_file='exposure_model-header.xml')
    # test with the exposure in XML format
    out = self.run_calc(case_2.__file__, 'job.ini', exports='csv')
    exported = out['bcr-stats', 'csv']
    assert exported
    for path in exported:
        self.assertEqualFiles('expected/' + strip_calc_id(path), path)
def test_event_based_risk(self):
    """UCERF event based risk test with two logic tree samples.

    Skipped when h5py is older than 2.6, since UCERF requires vlen
    arrays.
    """
    # BUG FIX: the original code compared version strings lexicographically
    # (h5py.__version__ < '2.6.0'), which wrongly treats e.g. '2.10.0'
    # as older than '2.6.0'; compare numeric components instead
    h5py_version = tuple(
        int(part) for part in h5py.__version__.split('.')[:2]
        if part.isdigit())
    if h5py_version < (2, 6):
        raise unittest.SkipTest('UCERF requires vlen arrays (h5py >= 2.6)')
    self.run_calc(ucerf.__file__, 'job_ebr.ini',
                  number_of_logic_tree_samples='2')
    fnames = export(('agg_loss_table', 'csv'), self.calc.datastore)
    for fname in fnames:
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)
    # check the portfolio_loss view
    fname = writetmp(view('portfolio_loss', self.calc.datastore))
    self.assertEqualFiles('expected/portfolio_loss.txt', fname)
def check_multi_tag(self, dstore):
    """Check aggregations over the (taxonomy, occupancy) tag pair."""
    # the quantile-0.5 multi-tag aggregation must yield a single row
    arr = extract(dstore, 'aggregate/avg_losses?'
                  'tag=taxonomy&tag=occupancy&kind=quantile-0.5')
    self.assertEqual(len(arr.to_table()), 1)

    # aggregate by all loss types: export the mean and compare
    exported = export(
        ('aggregate_by/avg_losses?tag=taxonomy&tag=occupancy&kind=mean',
         'csv'), dstore)
    for path in exported:
        self.assertEqualFiles('expected/%s' % strip_calc_id(path), path)
def test_case_4(self):
    # Turkey with SHARE logic tree
    self.run_calc(case_4.__file__, 'job.ini')
    [avg_path] = export(('avg_losses-stats', 'csv'), self.calc.datastore)
    self.assertEqualFiles('expected/avg_losses-mean.csv', avg_path)
    agg_paths = export(('agg_loss_table', 'csv'), self.calc.datastore)
    assert agg_paths, 'No agg_losses exported??'
    for path in agg_paths:
        self.assertEqualFiles('expected/' + strip_calc_id(path), path)
    # check that individual_curves = false is honored: only the
    # statistical curves must be stored, not the per-realization ones
    self.assertNotIn('curves-rlzs', self.calc.datastore)
    self.assertIn('curves-stats', self.calc.datastore)
def test_case_master(self):
    """Check the loss_maps-stats CSV export and the npz exports."""
    self.run_calc(case_master.__file__, 'job.ini')
    exported = export(('loss_maps-stats', 'csv'), self.calc.datastore)
    assert exported  # sanity check
    # FIXME: on macOS the generation of loss maps stats is terribly wrong,
    # the number of losses do not match, this must be investigated
    if NOT_DARWIN:
        for path in exported:
            self.assertEqualFiles(
                'expected/' + strip_calc_id(path), path)
    # export the npz files, without checking their content
    for kind in ('rlzs', 'stats'):
        [path] = export(('loss_maps-' + kind, 'npz'), self.calc.datastore)
        print('Generated ' + path)
def test_case_3(self):
    # a test with statistics and without conditional_loss_poes
    self.run_calc(case_3.__file__, 'job.ini', exports='csv',
                  concurrent_tasks='4')
    # the rupture records must occupy exactly the expected space
    nbytes = self.calc.datastore.get_attr('ruptures', 'nbytes')
    self.assertEqual(nbytes, 3180)
    # rerun on top of the previous calculation to test postprocessing
    self.calc.datastore.close()
    hc_id = self.calc.datastore.calc_id
    self.run_calc(case_3.__file__, 'job.ini', exports='csv',
                  hazard_calculation_id=str(hc_id))
    [path] = export(('agg_curves-stats', 'csv'), self.calc.datastore)
    self.assertEqualFiles('expected/%s' % strip_calc_id(path), path)
def assert_stats_ok(self, pkg, job_ini):
    """Run *job_ini* from package *pkg* with csv exports and compare
    every aggregate/average CSV output with the expected files.

    NB: it is important to use concurrent_tasks > 1 to test the
    complications of concurrency (for instance the noncommutativity of
    numpy.float32 addition when computing the average losses).
    """
    out = self.run_calc(pkg.__file__, job_ini, exports='csv',
                        concurrent_tasks='4')
    # keep only the non-realization CSV outputs of interest
    csv_files = [
        fname for fnames in out.values() for fname in fnames
        if 'rlz' not in fname and fname.endswith('.csv') and any(
            x in fname for x in (
                'loss_curve', 'loss_map', 'agg_loss', 'avg_loss'))]
    assert csv_files, 'Could not find any CSV file??'
    for fname in csv_files:
        self.assertEqualFiles(
            'expected/%s' % strip_calc_id(fname), fname)
def test_case_1(self):
    """Check the statistics and the xml/geojson exports of case_1."""
    self.assert_stats_ok(case_1, 'job.ini', individual_curves='false')
    # the numbers in the xml and geojson files are extremely sensitive to
    # the libraries; while waiting for the opt project we skip this test
    check_platform('xenial')
    for ekey in [('rcurves-stats', 'xml'),
                 ('rcurves-stats', 'geojson'),
                 ('loss_maps-stats', 'xml'),
                 ('loss_maps-stats', 'geojson'),
                 ('agg_curve-stats', 'xml')]:
        for path in export(ekey, self.calc.datastore):
            self.assertEqualFiles(
                'expected/%s' % strip_calc_id(path), path)
def test_case_2(self):
    # a case with disagg_outputs = Mag and 4 realizations
    if sys.platform == 'darwin':
        raise unittest.SkipTest('MacOSX')
    self.assert_curves_ok([
        'rlz-0-PGA--3.0--3.0.xml', 'rlz-0-PGA-0.0-0.0.xml',
        'rlz-1-PGA--3.0--3.0.xml', 'rlz-1-PGA-0.0-0.0.xml',
        'rlz-2-PGA-0.0-0.0.xml', 'rlz-3-PGA-0.0-0.0.xml'],
        case_2.__file__)
    # the CSV exporter must not break
    exported = export(('disagg', 'csv'), self.calc.datastore)
    self.assertEqual(len(exported), 6)  # number of CSV files
    # check the statistical outputs
    stats = export(('disagg-stats', 'csv'), self.calc.datastore)
    self.assertEqual(len(stats), 2)  # 2 sid x 1 key x 1 poe x 1 imt
    for path in stats:
        self.assertEqualFiles(
            'expected_output/%s' % strip_calc_id(path), path)
def test_case_5(self):
    # this exercises gridded nonparametric sources
    self.run_calc(case_5.__file__, 'job.ini')
    for path in export(('disagg', 'csv'), self.calc.datastore):
        self.assertEqualFiles('expected/%s' % strip_calc_id(path), path)
def test_case_13a(self):
    # test event_based_damage, no aggregate_by
    self.run_calc(case_13.__file__, 'job_a.ini')
    [path] = export(('aggcurves', 'csv'), self.calc.datastore)
    self.assertEqualFiles('expected/' + strip_calc_id(path), path,
                          delta=5E-5)
def test_case_master(self):
    """Full check of the case_master outputs: statistical averages,
    event loss table, loss curves/maps, views, tag aggregations, and
    a second run with the ebrisk calculator (aggregate_by='id')."""
    if sys.platform == 'darwin':
        raise unittest.SkipTest('MacOSX')
    self.run_calc(case_master.__file__, 'job.ini', exports='csv')
    fnames = export(('avg_losses-stats', 'csv'), self.calc.datastore)
    assert fnames, 'avg_losses-stats not exported?'
    for fname in fnames:
        self.assertEqualFiles('expected/' + strip_calc_id(fname), fname,
                              delta=1E-5)

    # check event loss table
    [fname] = export(('losses_by_event', 'csv'), self.calc.datastore)
    self.assertEqualFiles('expected/' + strip_calc_id(fname), fname,
                          delta=1E-5)

    # extract loss_curves/rlz-1 (with the first asset having zero losses)
    [fname] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
    self.assertEqualFiles('expected/' + strip_calc_id(fname), fname,
                          delta=1E-5)

    # loss maps per realization
    fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
    assert fnames, 'loss_maps-rlzs not exported?'
    for fname in fnames:
        self.assertEqualFiles('expected/' + strip_calc_id(fname), fname,
                              delta=1E-5)

    # the portfolio_losses view, dumped to a temporary file
    fname = gettemp(view('portfolio_losses', self.calc.datastore))
    self.assertEqualFiles(
        'expected/portfolio_losses.txt', fname, delta=1E-5)
    os.remove(fname)

    # check ruptures are stored correctly
    fname = gettemp(view('ruptures_events', self.calc.datastore))
    self.assertEqualFiles('expected/ruptures_events.txt', fname)
    os.remove(fname)

    # check losses_by_tag
    fnames = export(
        ('aggregate_by/avg_losses?tag=occupancy&kind=mean', 'csv'),
        self.calc.datastore)
    self.assertEqualFiles('expected/losses_by_occupancy.csv', fnames[0])

    self.check_multi_tag(self.calc.datastore)

    # ------------------------- ebrisk calculator ---------------------- #
    self.run_calc(case_master.__file__, 'job.ini',
                  calculation_mode='ebrisk', exports='',
                  aggregate_by='id')
    # agg_losses-rlzs has shape (L=5, R=9)
    # agg_losses-stats has shape (L=5, S=4)
    fname = export(('agg_losses-stats', 'csv'), self.calc.datastore)[0]
    self.assertEqualFiles('expected/agglosses.csv', fname, delta=1E-5)
    fname = export(('agg_curves-stats', 'csv'), self.calc.datastore)[0]
    self.assertEqualFiles('expected/aggcurves.csv', fname, delta=1E-5)
    fname = export(('agg_maps-stats', 'csv'), self.calc.datastore)[0]
    self.assertEqualFiles('expected/aggmaps.csv', fname, delta=1E-5)
    fname = export(('avg_losses', 'csv'), self.calc.datastore)[0]
    self.assertEqualFiles('expected/avg_losses-mean.csv', fname,
                          delta=1E-5)
    fname = export(('losses_by_event', 'csv'), self.calc.datastore)[0]
    self.assertEqualFiles('expected/elt.csv', fname)
def test_case_master(self):
    """Full check of the case_master outputs: statistical averages,
    event loss table, loss curves/maps, views, tag aggregations, and
    a second run with the ebrisk calculator (aggregate_by='taxonomy')."""
    if sys.platform == 'darwin':
        raise unittest.SkipTest('MacOSX')
    self.run_calc(case_master.__file__, 'job.ini', exports='csv')
    fnames = export(('avg_losses-stats', 'csv'), self.calc.datastore)
    assert fnames, 'avg_losses-stats not exported?'
    for fname in fnames:
        self.assertEqualFiles('expected/' + strip_calc_id(fname), fname,
                              delta=1E-5)

    # check event loss table
    [fname] = export(('losses_by_event', 'csv'), self.calc.datastore)
    self.assertEqualFiles('expected/' + strip_calc_id(fname), fname,
                          delta=1E-5)

    # extract loss_curves/rlz-1 (with the first asset having zero losses)
    [fname] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
    self.assertEqualFiles('expected/' + strip_calc_id(fname), fname,
                          delta=1E-5)

    # loss maps per realization
    fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
    assert fnames, 'loss_maps-rlzs not exported?'
    for fname in fnames:
        self.assertEqualFiles('expected/' + strip_calc_id(fname), fname,
                              delta=1E-5)

    # the portfolio_losses view, dumped to a temporary file
    fname = gettemp(view('portfolio_losses', self.calc.datastore))
    self.assertEqualFiles(
        'expected/portfolio_losses.txt', fname, delta=1E-5)
    os.remove(fname)

    # check ruptures are stored correctly
    fname = gettemp(view('ruptures_events', self.calc.datastore))
    self.assertEqualFiles('expected/ruptures_events.txt', fname)
    os.remove(fname)

    # check losses_by_tag
    fnames = export(
        ('aggregate_by/avg_losses?tag=occupancy&kind=mean', 'csv'),
        self.calc.datastore)
    self.assertEqualFiles('expected/losses_by_occupancy.csv', fnames[0])

    self.check_multi_tag(self.calc.datastore)

    # ------------------------- ebrisk calculator ---------------------- #
    self.run_calc(case_master.__file__, 'job.ini',
                  calculation_mode='ebrisk', exports='',
                  aggregate_by='taxonomy')
    # agg_losses-rlzs has shape (L=5, R=9)
    # agg_losses-stats has shape (L=5, S=4)
    fname = export(('agg_losses-stats', 'csv'), self.calc.datastore)[0]
    self.assertEqualFiles('expected/agglosses.csv', fname)
    fname = export(('agg_curves-stats', 'csv'), self.calc.datastore)[0]
    self.assertEqualFiles('expected/aggcurves.csv', fname)
    fname = export(('agg_maps-stats', 'csv'), self.calc.datastore)[0]
    self.assertEqualFiles('expected/aggmaps.csv', fname)
    fname = export(('avg_losses', 'csv'), self.calc.datastore)[0]
    self.assertEqualFiles('expected/avg_losses-mean.csv', fname,
                          delta=1E-5)
    fname = export(('losses_by_event', 'csv'), self.calc.datastore)[0]
    self.assertEqualFiles('expected/elt.csv', fname)
def test_case_6(self):
    # a test crossing the international date line
    self.run_calc(case_6.__file__, 'job.ini')
    for path in export(('disagg', 'csv'), self.calc.datastore):
        self.assertEqualFiles('expected/%s' % strip_calc_id(path), path)