Example #1
    def test_event_based(self):
        self.run_calc(ucerf.__file__, 'job.ini')
        gmv_uc = view('global_gmfs', self.calc.datastore)
        # check the distribution of the events
        self.assertEventsByRlz([2, 2, 2, 2, 6, 6, 2, 2, 2, 2, 6, 6, 2, 2, 3,
                                3, 6, 6, 1, 1, 1, 1, 6, 6, 2, 2, 3, 3, 2, 2,
                                2, 2, 3, 3, 2, 2, 3, 3, 3, 3, 2, 2, 3, 3,
                                3, 3, 3, 3])

        [fname] = export(('ruptures', 'csv'), self.calc.datastore)
        # check that we get the expected number of ruptures
        with open(fname) as f:
            self.assertEqual(len(f.readlines()), 72)
        self.assertEqualFiles('expected/ruptures.csv', fname, lastline=20,
                              delta=1E-5)

        # run a regular event_based calculation on top of the UCERF ruptures
        # and check the generated hazard maps
        self.run_calc(ucerf.__file__, 'job.ini',
                      calculation_mode='event_based',
                      hazard_calculation_id=str(self.calc.datastore.calc_id))

        # check that ucerf_hazard and event_based produce the same GMFs
        gmv_eb = view('global_gmfs', self.calc.datastore)
        self.assertEqual(gmv_uc, gmv_eb)

        # check the mean hazard map
        [fname] = [f for f in export(('hmaps', 'csv'), self.calc.datastore)
                   if 'mean' in f]
        self.assertEqualFiles('expected/hazard_map-mean.csv', fname,
                              delta=1E-5)
Example #2
 def test_case_15(self):
     # an example for Japan that also tests the XML rupture exporter
     self.run_calc(case_15.__file__, 'job.ini')
     [fname] = export(('ruptures', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/ruptures.csv', fname)
     [fname] = export(('ruptures', 'xml'), self.calc.datastore)
     self.assertEqualFiles('expected/ruptures.xml', fname)
Example #3
    def test_case_2(self):
        self.run_calc(case_2.__file__, 'job_risk.ini', exports='csv')
        [fname] = export(('loss_curves/rlz-0', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves.csv', fname)

        [fname] = export(('loss_maps-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_maps.csv', fname)
Example #4
    def test_case_1(self):
        self.assert_curves_ok(
            ['hazard_curve-PGA.csv', 'hazard_curve-SA(0.1).csv'],
            case_1.__file__)

        if parallel.oq_distribute() != 'no':
            info = view('job_info', self.calc.datastore)
            self.assertIn('task', info)
            self.assertIn('sent', info)
            self.assertIn('received', info)

        # there is a single source
        self.assertEqual(len(self.calc.datastore['source_info']), 1)

        # check npz export
        export(('hcurves', 'npz'), self.calc.datastore)

        # check extraction
        sitecol = extract(self.calc.datastore, 'sitecol')
        self.assertEqual(len(sitecol.array), 1)

        # check minimum_magnitude discards the source
        with self.assertRaises(RuntimeError) as ctx:
            self.run_calc(case_1.__file__, 'job.ini', minimum_magnitude='4.5')
        self.assertEqual(str(ctx.exception), 'All sources were filtered away!')
Example #5
    def test_case_1(self):
        # case with volcanic multiperil ASH, LAVA, LAHAR, PYRO
        self.run_calc(case_1.__file__, 'job.ini')

        [fname] = export(('asset_risk', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/asset_risk.csv', fname)
        [fname] = export(('agg_risk', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/agg_risk.csv', fname)

        # check extract
        md = extract(self.calc.datastore, 'exposure_metadata')
        ae(md.array, ['number', 'occupants_night', 'value-structural'])
        ae(md.multi_risk, ['collapse-structural-ASH_DRY',
                           'collapse-structural-ASH_WET',
                           'loss-structural-ASH_DRY',
                           'loss-structural-ASH_WET',
                           'loss-structural-LAHAR',
                           'loss-structural-LAVA',
                           'loss-structural-PYRO',
                           'no_damage-structural-ASH_DRY',
                           'no_damage-structural-ASH_WET',
                           'number-LAHAR',
                           'number-LAVA',
                           'number-PYRO',
                           'occupants_night-LAHAR',
                           'occupants_night-LAVA',
                           'occupants_night-PYRO'])
Example #6
    def test_case_master(self):
        self.assert_stats_ok(case_master, 'job.ini', individual_curves='false')

        fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname), fname)

        fname = writetmp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss.txt', fname, delta=1E-5)

        # check rup_data is stored correctly
        fname = writetmp(view('ruptures_events', self.calc.datastore))
        self.assertEqualFiles('expected/ruptures_events.txt', fname)

        # export a specific eid
        fnames = export(('all_loss_ratios:0', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname), fname)
        self.assertEqualFiles('expected/losses-eid=0.csv', fname)

        # export a specific pair (sm_id, eid)
        fnames = export(('all_loss_ratios:1:0', 'csv'),
                        self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)
Example #7
 def test_case_3(self):
     self.run_calc(case_3.__file__, 'job.ini', exports='csv')
     [fname] = export(('loss_curves/rlz-0/sid-0', 'csv'),
                      self.calc.datastore)
     self.assertEqualFiles('expected/loss_curves-000.csv', fname)
     [fname] = export(('loss_curves/rlz-0/ref-a8', 'csv'),
                      self.calc.datastore)
     self.assertEqualFiles('expected/loss_curves-ref-a8-000.csv', fname)
Example #8
 def test_case_8a(self):
     self.run_calc(
         case_8a.__file__, 'job_haz.ini,job_risk.ini')
     f1, f2 = export(('damages-rlzs', 'csv'), self.calc.datastore)
     self.assertEqualFiles(
         'expected/damages-rlzs-AkkarBommer2010().csv', f2)
     self.assertEqualFiles(
         'expected/damages-rlzs-SadighEtAl1997().csv', f1)
     [f] = export(('damages-stats', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/damages-stats.csv', f)
Example #9
    def test_case_20(self):  # test for Vancouver using the NRCan15SiteTerm
        self.run_calc(case_20.__file__, 'job.ini')
        [gmf, _, _] = export(('gmf_data', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/gmf-data.csv', gmf)

        # run the GMF calculation again, this time from the stored ruptures
        hid = str(self.calc.datastore.calc_id)
        self.run_calc(case_20.__file__, 'job.ini', hazard_calculation_id=hid)
        [gmf, _, _] = export(('gmf_data', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/gmf-data-from-ruptures.csv', gmf)
Example #10
    def test_case_2_correlation(self):
        self.run_calc(case_2.__file__, 'job_loss.ini', asset_correlation=1.0)
        [fname] = export(('agg_loss_table', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/agg_losses.csv', fname)

        # test losses_by_tag with a single realization
        [fname] = export(
            ('aggregate_by/avg_losses?tag=taxonomy&kind=rlz-0', 'csv'),
            self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_tag.csv', fname)
Example #11
    def test_case_16(self):   # sampling
        self.assert_curves_ok(
            ['hazard_curve-mean.csv',
             'quantile_curve-0.1.csv',
             'quantile_curve-0.9.csv'],
            case_16.__file__)

        # test that the single realization export fails because
        # individual_curves was false
        with self.assertRaises(KeyError) as ctx:
            export(('hcurves/rlz-3', 'csv'), self.calc.datastore)
        self.assertIn("No 'hcurves-rlzs' found", str(ctx.exception))
Example #12
 def assert_curves_ok(self, expected, test_dir, delta=None, **kw):
     kind = kw.pop('kind', '')
     self.run_calc(test_dir, 'job.ini', **kw)
     ds = self.calc.datastore
     got = (export(('hcurves/' + kind, 'csv'), ds) +
            export(('hmaps/' + kind, 'csv'), ds) +
            export(('uhs/' + kind, 'csv'), ds))
     self.assertEqual(len(expected), len(got), str(got))
     for fname, actual in zip(expected, got):
         self.assertEqualFiles('expected/%s' % fname, actual,
                               delta=delta)
     return got
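A minimal usage sketch for this helper, modelled on the calls seen in the other examples; the case_xyz module, the kind value and the expected file names below are placeholders, not taken from the engine:

 def test_case_xyz(self):
     # hypothetical test: compare mean hazard curves, hazard maps and uniform
     # hazard spectra against the files in the expected/ directory
     got = self.assert_curves_ok(
         ['hazard_curve-mean.csv', 'hazard_map-mean.csv', 'hazard_uhs-mean.csv'],
         case_xyz.__file__, delta=1E-6, kind='mean')
     # the helper returns the exported file names, so further checks can follow
     self.assertEqual(len(got), 3)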
Example #13
    def test_occupants(self):
        self.run_calc(occupants.__file__, 'job.ini')
        fnames = export(('agg_curves-rlzs', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname, delta=1E-5)

        fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
        assert fnames, 'loss_maps-rlzs not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname, delta=1E-5)
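The strip_calc_id helper used in these examples comes from the engine's test utilities; the sketch below only illustrates its assumed behaviour (an assumption, not the actual implementation): it drops the trailing calculation id from an exported file name so the result can be compared with a fixed name under expected/:

import os
import re

def strip_calc_id(fname):
    # assumed behaviour: 'agg_curves-rlzs_42.csv' -> 'agg_curves-rlzs.csv'
    return re.sub(r'_\d+(\.\w+)$', r'\1', os.path.basename(fname))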
Example #14
    def test_case_4b(self):
        self.run_calc(case_4b.__file__, 'job_haz.ini,job_risk.ini')

        [fname] = export(('dmg_by_event', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname), fname)

        [fname] = export(('losses_by_event', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname), fname)

        fnames = export(('losses_by_asset', 'csv'), self.calc.datastore)
        self.assertEqual(len(fnames), 2)  # one per realization
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname), fname)
Example #15
    def test_case_4(self):
        # Turkey with SHARE logic tree
        self.run_calc(case_4.__file__, 'job.ini')
        [fname] = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/avg_losses-mean.csv', fname)

        fnames = export(('agg_loss_table', 'csv'), self.calc.datastore)
        assert fnames, 'No agg_losses exported??'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname), fname)

        # check that individual_curves = false is honored
        self.assertFalse('curves-rlzs' in self.calc.datastore)
        self.assertTrue('curves-stats' in self.calc.datastore)
Example #16
    def test_case_1(self):
        self.run_calc(case_1.__file__, 'job_risk.ini', exports='csv')

        # check loss ratios
        lrs = self.calc.datastore['risk_model/VF/structural-vulnerability']
        got = scientificformat(lrs.mean_loss_ratios, '%.2f')
        self.assertEqual(got, '0.05 0.10 0.20 0.40 0.80')

        # check loss curves
        [fname] = export(('loss_curves/mean', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves.csv', fname)

        # check loss maps
        [fname] = export(('loss_maps-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_maps.csv', fname)
Example #17
    def test_case_master(self):
        self.run_calc(case_master.__file__, 'job.ini')
        fnames = export(('loss_maps-stats', 'csv'), self.calc.datastore)
        assert fnames  # sanity check
        # FIXME: on macOS the generation of loss maps stats is terribly wrong,
        # the numbers of losses do not match; this must be investigated
        if NOT_DARWIN:
            for fname in fnames:
                self.assertEqualFiles(
                    'expected/' + strip_calc_id(fname), fname)

        # export the npz, without checking the content
        for kind in ('rlzs', 'stats'):
            [fname] = export(('loss_maps-' + kind, 'npz'), self.calc.datastore)
            print('Generated ' + fname)
Example #18
 def test_event_based(self):
     if h5py.__version__ < '2.6.0':
         raise unittest.SkipTest  # UCERF requires vlen arrays
     self.run_calc(ucerf.__file__, 'job.ini')
     [fname] = export(('ruptures', 'csv'), self.calc.datastore)
     # just check that we get the expected number of ruptures
     with open(fname) as f:
         self.assertEqual(f.read().count('\n'), 918)
Example #19
    def test_case_5(self):
        # test with different curve resolution for different taxonomies
        self.run_calc(case_5.__file__, 'job_h.ini,job_r.ini')

        # check the cutoff in classical.fix_ones
        poes = self.calc.datastore['poes/grp-00'].array
        num_ones = (poes == 1.).sum()
        self.assertEqual(num_ones, 0)

        # check mean loss curves
        [fname] = export(('loss_curves/mean', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves-mean.csv', fname)

        # check avg losses
        [fname] = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves_avg.txt', fname)
Example #20
 def test_case_3(self):
     # this is a test with statistics and without conditional_loss_poes
     self.run_calc(case_3.__file__, 'job.ini',
                   exports='xml', individual_curves='false',
                   concurrent_tasks='4')
     [fname] = export(('agg_curve-stats', 'xml'), self.calc.datastore)
     self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)
Example #21
 def test_case_8(self):
     # case with a shakemap
     self.run_calc(case_8.__file__, 'prejob.ini')
     self.run_calc(case_8.__file__, 'job.ini',
                   hazard_calculation_id=str(self.calc.datastore.calc_id))
     [fname] = export(('dmg_by_event', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/dmg_by_event.csv', fname)
Example #22
    def test_case_7(self):
        # this is a case with three loss types
        self.assert_ok(case_7, 'job_h.ini,job_r.ini', exports='csv')

        # just run the npz export
        [npz] = export(('dmg_by_asset', 'npz'), self.calc.datastore)
        self.assertEqual(strip_calc_id(npz), 'dmg_by_asset.npz')
Example #23
    def test_case_13(self):
        self.assert_curves_ok(
            ['hazard_curve-mean_PGA.csv', 'hazard_curve-mean_SA(0.2).csv',
             'hazard_map-mean.csv'], case_13.__file__)

        # test recomputing the hazard maps
        self.run_calc(
            case_13.__file__, 'job.ini', exports='csv',
            hazard_calculation_id=str(self.calc.datastore.calc_id),
            gsim_logic_tree_file='', source_model_logic_tree_file='')
        [fname] = export(('hmaps', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/hazard_map-mean.csv', fname,
                              delta=1E-5)

        # test extract/hcurves/rlz-0, used by the npz exports
        haz = vars(extract(self.calc.datastore, 'hcurves'))
        self.assertEqual(sorted(haz), ['all', 'investigation_time'])
        self.assertEqual(
            haz['all'].dtype.names, ('lon', 'lat', 'depth', 'mean'))
        array = haz['all']['mean']
        self.assertEqual(array.dtype.names, ('PGA', 'SA(0.2)'))
        self.assertEqual(array['PGA'].dtype.names,
                         ('0.005', '0.007', '0.0098', '0.0137', '0.0192',
                          '0.0269', '0.0376', '0.0527', '0.0738', '0.103',
                          '0.145', '0.203', '0.284'))
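Given the nested dtype checked above, a single hazard curve can be addressed by statistic, IMT and IML; a short illustrative snippet, using only names that appear in the assertions above:

        # e.g. the mean PGA curve of the first site as a plain list of PoEs
        first = haz['all'][0]
        pga_curve = [first['mean']['PGA'][iml]
                     for iml in array['PGA'].dtype.names]
        self.assertEqual(len(pga_curve), 13)  # 13 intensity levels listed above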
Example #24
 def test_case_21(self):  # Simple fault dip and MFD enumeration
     self.assert_curves_ok([
         'hazard_curve-smltp_b1_mfd1_high_dip_dip30-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd1_high_dip_dip45-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd1_high_dip_dip60-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd1_low_dip_dip30-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd1_low_dip_dip45-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd1_low_dip_dip60-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd1_mid_dip_dip30-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd1_mid_dip_dip45-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd1_mid_dip_dip60-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd2_high_dip_dip30-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd2_high_dip_dip45-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd2_high_dip_dip60-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd2_low_dip_dip30-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd2_low_dip_dip45-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd2_low_dip_dip60-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd2_mid_dip_dip30-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd2_mid_dip_dip45-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd2_mid_dip_dip60-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd3_high_dip_dip30-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd3_high_dip_dip45-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd3_high_dip_dip60-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd3_low_dip_dip30-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd3_low_dip_dip45-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd3_low_dip_dip60-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd3_mid_dip_dip30-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd3_mid_dip_dip45-gsimltp_Sad1997.csv',
         'hazard_curve-smltp_b1_mfd3_mid_dip_dip60-gsimltp_Sad1997.csv'],
         case_21.__file__, delta=1E-7)
     [fname] = export(('sourcegroups', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/sourcegroups.csv', fname)
Example #25
    def test_case_miriam(self):
        # this is a case with a grid and asset-hazard association
        self.run_calc(case_miriam.__file__, 'job.ini')

        # check minimum_magnitude >= 5.2
        minmag = self.calc.datastore['ruptures']['mag'].min()
        self.assertGreaterEqual(minmag, 5.2)

        # check asset_loss_table
        tot = self.calc.datastore['asset_loss_table'].value.sum()
        self.assertEqual(tot, 15787827.0)
        [fname] = export(('agg_loss_table', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/agg_losses-rlz000-structural.csv',
                              fname, delta=1E-5)
        fname = gettemp(view('portfolio_losses', self.calc.datastore))
        self.assertEqualFiles(
            'expected/portfolio_losses.txt', fname, delta=1E-5)

        # this is a case with exposure and region_grid_spacing=1
        self.run_calc(case_miriam.__file__, 'job2.ini')
        hcurves = dict(extract(self.calc.datastore, 'hcurves'))['all']
        sitecol = self.calc.datastore['sitecol']  # filtered sitecol
        self.assertEqual(len(hcurves), len(sitecol))
        assetcol = self.calc.datastore['assetcol']
        self.assertEqual(len(sitecol), 15)
        self.assertGreater(sitecol.vs30.sum(), 0)
        self.assertEqual(len(assetcol), 548)
Example #26
    def test_case_15(self):  # full enumeration
        self.assert_curves_ok('''\
hazard_curve-mean.csv
hazard_curve-smltp_SM1-gsimltp_BA2008_C2003.csv
hazard_curve-smltp_SM1-gsimltp_BA2008_T2002.csv
hazard_curve-smltp_SM1-gsimltp_CB2008_C2003.csv
hazard_curve-smltp_SM1-gsimltp_CB2008_T2002.csv
[email protected]
[email protected]
[email protected]
[email protected]
hazard_uhs-mean.csv
hazard_uhs-smltp_SM1-gsimltp_BA2008_C2003.csv
hazard_uhs-smltp_SM1-gsimltp_BA2008_T2002.csv
hazard_uhs-smltp_SM1-gsimltp_CB2008_C2003.csv
hazard_uhs-smltp_SM1-gsimltp_CB2008_T2002.csv
[email protected]
[email protected]
[email protected]
[email protected]'''.split(),
                              case_15.__file__, delta=1E-6)

        # now some tests on the exact numbers
        check_platform('xenial', 'trusty')

        # test UHS XML export
        fnames = [f for f in export(('uhs', 'xml'), self.calc.datastore)
                  if 'mean' in f]
        self.assertEqualFiles('expected/hazard_uhs-mean-0.01.xml', fnames[0])
        self.assertEqualFiles('expected/hazard_uhs-mean-0.1.xml', fnames[1])
        self.assertEqualFiles('expected/hazard_uhs-mean-0.2.xml', fnames[2])

        # test hmaps geojson export
        fnames = [f for f in export(('hmaps', 'geojson'), self.calc.datastore)
                  if 'mean' in f]
        self.assertEqualFiles(
            'expected/hazard_map-mean-0.01-PGA.geojson', fnames[0])
        self.assertEqualFiles(
            'expected/hazard_map-mean-0.01-SA(0.1).geojson', fnames[1])
        self.assertEqualFiles(
            'expected/hazard_map-mean-0.1-PGA.geojson', fnames[2])
        self.assertEqualFiles(
            'expected/hazard_map-mean-0.1-SA(0.1).geojson', fnames[3])
        self.assertEqualFiles(
            'expected/hazard_map-mean-0.2-PGA.geojson', fnames[4])
        self.assertEqualFiles(
            'expected/hazard_map-mean-0.2-SA(0.1).geojson', fnames[5])
Example #27
    def test_case_1(self):
        out = self.run_calc(case_1.__file__, 'job_risk.ini', exports='csv')
        [fname] = out['agglosses-rlzs', 'csv']
        self.assertEqualFiles('expected/agg.csv', fname)

        # check the exported GMFs
        [fname] = export(('gmf_data', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/gmf-FromFile-PGA.csv', fname)
Example #28
    def test_case_4(self):
        # this is a case where the number of lon/lat bins differs for site 0
        # and site 1; it also exercises sampling
        self.run_calc(case_4.__file__, 'job.ini')

        # check stats
        fnames = export(('disagg-stats', 'csv'), self.calc.datastore)
        self.assertEqual(len(fnames), 64)  # 2 sid x 8 keys x 2 poe x 2 imt
Example #29
    def test_case_5(self):
        out = self.run_calc(case_5.__file__, 'job.ini', exports='csv')
        [fname, _, _] = out['gmf_data', 'csv']
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname,
                              delta=1E-6)

        [fname] = export(('ruptures', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/ruptures.csv', fname, delta=1E-6)
Example #30
    def test_case_4(self):
        out = self.run_calc(case_4.__file__, 'job_haz.ini,job_risk.ini',
                            exports='csv,xml')
        fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_maps-b1,b1.csv', fnames[0])
        self.assertEqualFiles('expected/loss_maps-b1,b2.csv', fnames[1])

        fnames = out['loss_curves-rlzs', 'csv']
        self.assertEqualFiles('expected/loss_curves-000.csv', fnames[0])
        self.assertEqualFiles('expected/loss_curves-001.csv', fnames[1])

        [fname] = export(('loss_maps-stats', 'xml'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_maps-mean-structural.xml', fname)

        [fname] = out['loss_curves-stats', 'xml']
        self.assertEqualFiles('expected/loss_curves-mean-structural.xml',
                              fname)
Example #31
    def test_case_1_eb(self):
        # this is a case with insured losses and tags
        self.run_calc(case_1.__file__, 'job_eb.ini', concurrent_tasks='4')

        [fname] = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname,
                              delta=1E-5)

        aw = extract(self.calc.datastore, 'agg_losses/structural')
        self.assertEqual(aw.stats, ['mean'])
        numpy.testing.assert_allclose(aw.array, [687.92365])

        fnames = export(('agg_curves-stats', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/eb_%s' % strip_calc_id(fname),
                                  fname, delta=1E-5)

        [fname] = export(('agg_loss_table', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname,
                              delta=1E-5)

        # extract tot_curves
        aw = extract(self.calc.datastore, 'tot_curves?kind=stats&'
                     'loss_type=structural&absolute=1')
        tmp = gettemp(rst_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves1.csv', tmp)

        aw = extract(self.calc.datastore, 'tot_curves?kind=rlzs&'
                     'loss_type=structural&absolute=1')
        tmp = gettemp(rst_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves2.csv', tmp)

        aw = extract(self.calc.datastore, 'tot_curves?kind=stats&'
                     'loss_type=structural&absolute=0')
        tmp = gettemp(rst_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves3.csv', tmp)

        aw = extract(self.calc.datastore, 'tot_curves?kind=rlzs&'
                     'loss_type=structural&absolute=0')
        tmp = gettemp(rst_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves4.csv', tmp)

        # extract agg_curves with tags
        aw = extract(self.calc.datastore, 'agg_curves?kind=stats&'
                     'loss_type=structural&absolute=1&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves5.csv', tmp)

        aw = extract(self.calc.datastore, 'agg_curves?kind=rlzs&'
                     'loss_type=structural&absolute=1&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves6.csv', tmp)

        aw = extract(self.calc.datastore, 'agg_curves?kind=stats&'
                     'loss_type=structural&absolute=0&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves7.csv', tmp)

        aw = extract(self.calc.datastore, 'agg_curves?kind=rlzs&'
                     'loss_type=structural&absolute=0&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves8.csv', tmp)

        # test ct_independence
        loss4 = view('portfolio_losses', self.calc.datastore)
        self.run_calc(case_1.__file__, 'job_eb.ini', concurrent_tasks='0')
        loss0 = view('portfolio_losses', self.calc.datastore)
        self.assertEqual(loss0, loss4)
Example #32
    def test_case_1_eb(self):
        # this is a case with insured losses
        self.run_calc(case_1.__file__, 'job_eb.ini')

        [fname] = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)

        aw = extract(self.calc.datastore, 'agg_losses/structural')
        self.assertEqual(aw.stats, ['mean'])
        self.assertEqual(aw.array, numpy.float32([767.82324]))

        fnames = export(('agg_curves-stats', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/eb_%s' % strip_calc_id(fname),
                                  fname)

        fnames = export(('agg_losses-stats', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/%s' % strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        [fname] = export(('losses_by_event', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)

        # extract agg_curves, no tags
        aw = extract(
            self.calc.datastore, 'agg_curves?kind=stats&'
            'loss_type=structural&absolute=1')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves1.csv', tmp)

        aw = extract(self.calc.datastore, 'agg_curves?kind=rlzs&'
                     'loss_type=structural&absolute=1')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves2.csv', tmp)

        aw = extract(
            self.calc.datastore, 'agg_curves?kind=stats&'
            'loss_type=structural&absolute=0')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves3.csv', tmp)

        aw = extract(self.calc.datastore, 'agg_curves?kind=rlzs&'
                     'loss_type=structural&absolute=0')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves4.csv', tmp)

        # TODO: fix extract agg_curves for insured types

        # extract agg_curves with tags
        self.run_calc(case_1.__file__,
                      'job_eb.ini',
                      aggregate_by='policy,taxonomy',
                      hazard_calculation_id=str(self.calc.datastore.calc_id))

        aw = extract(
            self.calc.datastore, 'agg_curves?kind=stats&'
            'loss_type=structural&absolute=1&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves5.csv', tmp)

        aw = extract(
            self.calc.datastore, 'agg_curves?kind=rlzs&'
            'loss_type=structural&absolute=1&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves6.csv', tmp)

        aw = extract(
            self.calc.datastore, 'agg_curves?kind=stats&'
            'loss_type=structural&absolute=0&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves7.csv', tmp)

        aw = extract(
            self.calc.datastore, 'agg_curves?kind=rlzs&'
            'loss_type=structural&absolute=0&policy=A&taxonomy=RC')
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles('expected/agg_curves8.csv', tmp)
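The extract/gettemp/rst_table/assertEqualFiles sequence above is repeated for every combination of kind and absolute; a hypothetical helper method (not part of the engine API, names are illustrative) could factor the pattern out:

    def check_agg_curves(self, query, expected_csv):
        # query is an extract string such as
        # 'agg_curves?kind=stats&loss_type=structural&absolute=1'
        aw = extract(self.calc.datastore, query)
        tmp = gettemp(rst_table(aw.to_table()))
        self.assertEqualFiles(expected_csv, tmp)

Each block above would then reduce to a single call such as self.check_agg_curves('agg_curves?kind=rlzs&loss_type=structural&absolute=0', 'expected/agg_curves4.csv').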
Example #33
    def test_case_15(self):
        # this is a case with both splittable and unsplittable sources
        self.assert_curves_ok('''\
hazard_curve-max-PGA.csv
hazard_curve-max-SA(0.1).csv
hazard_curve-mean-PGA.csv
hazard_curve-mean-SA(0.1).csv
hazard_curve-std-PGA.csv
hazard_curve-std-SA(0.1).csv
hazard_uhs-max.csv
hazard_uhs-mean.csv
hazard_uhs-std.csv
'''.split(),
                              case_15.__file__,
                              delta=1E-6)

        # test UHS XML export
        fnames = [
            f for f in export(('uhs', 'xml'), self.calc.datastore)
            if 'mean' in f
        ]
        self.assertEqualFiles('expected/hazard_uhs-mean-0.01.xml', fnames[0])
        self.assertEqualFiles('expected/hazard_uhs-mean-0.1.xml', fnames[1])
        self.assertEqualFiles('expected/hazard_uhs-mean-0.2.xml', fnames[2])

        # npz exports
        [fname] = export(('hmaps', 'npz'), self.calc.datastore)
        arr = numpy.load(fname)['all']
        self.assertEqual(arr['mean'].dtype.names, ('PGA', 'SA(0.1)'))
        [fname] = export(('uhs', 'npz'), self.calc.datastore)
        arr = numpy.load(fname)['all']
        self.assertEqual(arr['mean'].dtype.names, ('0.01', '0.1', '0.2'))

        # here is the size of assoc_by_grp for a complex logic tree
        # grp_id gsim_idx rlzis
        # 0	0	 {0, 1}
        # 0	1	 {2, 3}
        # 1	0	 {0, 2}
        # 1	1	 {1, 3}
        # 2	0	 {4}
        # 2	1	 {5}
        # 3	0	 {6}
        # 3	1	 {7}
        # nbytes = (2 + 2 + 8) * 8 + 4 * 4 + 4 * 2 = 120
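        # checking the arithmetic: (2 + 2 + 8) * 8 = 96, 4 * 4 = 16,
        # 4 * 2 = 8 and 96 + 16 + 8 = 120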

        # full source model logic tree
        cinfo = self.calc.datastore['csm_info']
        ra0 = cinfo.get_rlzs_assoc()
        self.assertEqual(sorted(ra0.by_grp()),
                         ['grp-00', 'grp-01', 'grp-02', 'grp-03'])

        # reduction of the source model logic tree
        ra = cinfo.get_rlzs_assoc(sm_lt_path=['SM2', 'a3b1'])
        self.assertEqual(len(ra.by_grp()), 1)
        numpy.testing.assert_equal(len(ra.by_grp()['grp-02']),
                                   len(ra0.by_grp()['grp-02']))

        # more reduction of the source model logic tree
        ra = cinfo.get_rlzs_assoc(sm_lt_path=['SM1'])
        self.assertEqual(sorted(ra.by_grp()), ['grp-00', 'grp-01'])
        numpy.testing.assert_equal(ra.by_grp()['grp-00'],
                                   ra0.by_grp()['grp-00'])
        numpy.testing.assert_equal(ra.by_grp()['grp-01'],
                                   ra0.by_grp()['grp-01'])

        # reduction of the gsim logic tree
        ra = cinfo.get_rlzs_assoc(trts=['Stable Continental Crust'])
        self.assertEqual(sorted(ra.by_grp()), ['grp-00', 'grp-01'])
        numpy.testing.assert_equal(ra.by_grp()['grp-00'], [[0, 1]])
Example #34
 def test_case_1_ruptures(self):
     self.run_calc(case_1.__file__, 'job_ruptures.ini')
     self.assertEqual(len(self.calc.datastore['ruptures']), 2)
     [fname] = export(('events', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/events.csv', fname)
Example #35
 def test_case_6(self):
     # test with the international date line
     self.run_calc(case_6.__file__, 'job.ini')
     fnames = export(('disagg', 'csv'), self.calc.datastore)
     for fname in fnames:
         self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)
Example #36
 def test_case_10(self):
     # case with more IMTs in the imported GMFs than required
     self.run_calc(case_10.__file__, 'job.ini')
     fnames = export(('dmg_by_asset', 'csv'), self.calc.datastore)
     for i, fname in enumerate(fnames):
         self.assertEqualFiles('expected/dmg_by_asset-%d.csv' % i, fname)
Example #37
 def test_case_2_correlation(self):
     self.run_calc(case_2.__file__, 'job_loss.ini', asset_correlation=1.0)
     [fname] = export(('agg_loss_table', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/agg_losses.csv', fname)
Example #38
 def test_case_26_liq(self):
     # cali liquefaction simplified
     self.run_calc(case_26.__file__, 'job_liq.ini')
     [fname] = export(('avg_gmf', 'csv'), self.calc.datastore)
     self.assertEqualFiles('avg_gmf.csv', fname)
Example #39
 def test_case_9(self):
     # case with noDamageLimit==0 that had NaNs in the past
     self.run_calc(case_9.__file__, 'job.ini')
     fnames = export(('dmg_by_asset', 'csv'), self.calc.datastore)
     for i, fname in enumerate(fnames):
         self.assertEqualFiles('expected/dmg_by_asset-%d.csv' % i, fname)
Example #40
 def test_case_1g(self):
     # vulnerability function with PMF
     self.run_calc(case_1g.__file__, 'job_h.ini,job_r.ini')
     [fname] = export(('avg_losses-rlzs', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/avg_losses.csv', fname)
     os.remove(fname)
Example #41
 def test_continuous(self):
     self.run_calc(case_2.__file__, 'job_continuous.ini')
     [fname] = export(('damages-rlzs', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/damage_continuous.csv', fname)
Example #42
 def test_discrete(self):
     # a test producing NaNs
     self.run_calc(case_2.__file__, 'job_discrete.ini')
     [fname] = export(('damages-rlzs', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/damage_discrete.csv', fname)
Example #43
 def test_case_14(self):
     # test event_based_damage, aggregate_by=NAME_1
     self.run_calc(case_14.__file__, 'job.ini')
     [f] = export(('aggcurves', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/' + strip_calc_id(f), f, delta=5E-5)
Example #44
 def test_case_master(self):
     self.run_calc(case_master.__file__, 'job.ini', exports='npz')
     # check losses_by_tag
     fnames = export(('losses_by_tag-rlzs', 'csv'), self.calc.datastore)
     for fname in fnames:
         self.assertEqualFiles('expected/' + strip_calc_id(fname), fname)
Example #45
 def test_case_24(self):
     # This is a test for shift_hypo = true; the expected results are the
     # same ones defined for case_44 of the classical methodology
     self.run_calc(case_24.__file__, 'job.ini')
     [fname] = export(('hcurves', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/hazard_curve-mean-PGA.csv', fname)
Example #46
 def test_case_15(self):
     # an example for Japan that also tests the XML rupture exporter
     self.run_calc(case_15.__file__, 'job.ini')
     [fname] = export(('ruptures', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/ruptures.csv', fname)
Example #47
 def test_case_10(self):
     # this is a case with multiple files in the smlt uncertaintyModel
     # and with sampling
     self.run_calc(case_10.__file__, 'job.ini')
     [fname] = export(('realizations', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/realizations.csv', fname)
Example #48
    def test_case_master(self):
        if sys.platform == 'darwin':
            raise unittest.SkipTest('MacOSX')
        self.run_calc(case_master.__file__, 'job.ini', exports='csv')
        fnames = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        assert fnames, 'avg_losses-stats not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        # extract loss_curves/rlz-1 (with the first asset having zero losses)
        [fname] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname),
                              fname,
                              delta=1E-5)

        fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
        assert fnames, 'loss_maps-rlzs not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        # extract curves by tag
        tags = 'taxonomy=tax1&state=01&cresta=0.11'
        a = extract(self.calc.datastore, 'agg_curves/structural?' + tags)
        self.assertEqual(a.array.shape, (4, 3))  # 4 stats, 3 return periods

        fname = gettemp(view('portfolio_losses', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_losses.txt',
                              fname,
                              delta=1E-5)
        os.remove(fname)

        # check ruptures are stored correctly
        fname = gettemp(view('ruptures_events', self.calc.datastore))
        self.assertEqualFiles('expected/ruptures_events.txt', fname)
        os.remove(fname)

        # check losses_by_tag
        fnames = export(('aggregate_by/occupancy/avg_losses', 'csv'),
                        self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_occupancy.csv', fnames[0])

        self.check_multi_tag(self.calc.datastore)

        # ------------------------- ebrisk calculator ---------------------- #
        self.run_calc(case_master.__file__,
                      'job.ini',
                      hazard_calculation_id=str(self.calc.datastore.calc_id),
                      calculation_mode='ebrisk',
                      exports='',
                      aggregate_by='taxonomy',
                      insured_losses='false')

        # agg_losses-rlzs has shape (L=5, R=9)
        # agg_losses-stats has shape (L=5, S=4)
        fname = export(('agg_losses-stats', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/agglosses.csv', fname)

        fname = export(('avg_losses', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/avglosses.csv', fname, delta=1E-5)
Example #49
 def test_case_13a(self):
     # test event_based_damage, no aggregate_by
     self.run_calc(case_13.__file__, 'job_a.ini')
     [f1, f2] = export(('aggcurves', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/' + strip_calc_id(f1), f1, delta=1E-5)
     self.assertEqualFiles('expected/' + strip_calc_id(f2), f2, delta=1E-5)
Example #50
 def test_case_57(self):
     # AvgPoeGMPE
     self.run_calc(case_57.__file__, 'job.ini')
     f1, f2 = export(('hcurves/mean', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/hcurve_PGA.csv', f1)
     self.assertEqualFiles('expected/hcurve_SA.csv', f2)
Example #51
    def test_case_master(self):
        if sys.platform == 'darwin':
            raise unittest.SkipTest('MacOSX')
        self.run_calc(case_master.__file__, 'job.ini', exports='csv')
        fnames = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        assert fnames, 'avg_losses-stats not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        # check event loss table
        [fname] = export(('losses_by_event', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname),
                              fname,
                              delta=1E-5)

        # extract loss_curves/rlz-1 (with the first asset having zero losses)
        [fname] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname),
                              fname,
                              delta=1E-5)

        fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
        assert fnames, 'loss_maps-rlzs not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        fname = gettemp(view('portfolio_losses', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_losses.txt',
                              fname,
                              delta=1E-5)
        os.remove(fname)

        # check ruptures are stored correctly
        fname = gettemp(view('ruptures_events', self.calc.datastore))
        self.assertEqualFiles('expected/ruptures_events.txt', fname)
        os.remove(fname)

        # check losses_by_tag
        fnames = export(
            ('aggregate_by/avg_losses?tag=occupancy&kind=mean', 'csv'),
            self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_occupancy.csv', fnames[0])

        self.check_multi_tag(self.calc.datastore)

        # ------------------------- ebrisk calculator ---------------------- #
        self.run_calc(case_master.__file__,
                      'job.ini',
                      calculation_mode='ebrisk',
                      exports='',
                      aggregate_by='id')

        # agg_losses-rlzs has shape (L=5, R=9)
        # agg_losses-stats has shape (L=5, S=4)
        fname = export(('agg_losses-stats', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/agglosses.csv', fname, delta=1E-5)

        fname = export(('agg_curves-stats', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/aggcurves.csv', fname, delta=1E-5)

        fname = export(('agg_maps-stats', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/aggmaps.csv', fname, delta=1E-5)

        fname = export(('avg_losses', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/avg_losses-mean.csv',
                              fname,
                              delta=1E-5)

        fname = export(('losses_by_event', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/elt.csv', fname)
Example #52
 def post_execute(self, result):
     result = {str(imt): gmvs for imt, gmvs in result.items()}
     out = export('gmf_xml', self.oqparam.export_dir, self.sitecol,
                  self.etags, result)
     return out
Example #53
 def test_case_12(self):
     # 1 asset, 2 samples
     self.run_calc(case_master.__file__, 'job12.ini', exports='csv')
     # alt = extract(self.calc.datastore, 'asset_loss_table')
     [fname] = export(('avg_losses', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/avg_loss_12.csv', fname)
Example #54
 def test_case_62(self):
     # multisurface with kite faults
     self.run_calc(case_62.__file__, 'job.ini')
     [f] = export(('hcurves/mean', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/hcurve-mean.csv', f)
Example #55
 def test_case_2(self):
     self.run_calc(case_2.__file__, 'job.ini', exports='csv,geojson')
     [fname] = export(('hmaps', 'csv'), self.calc.datastore)
     self.assertEqualFiles(
         'expected/hazard_map-mean.csv', fname, delta=1E-6)
Example #56
 def test_case_5(self):
     # this exercises gridded nonparametric sources
     self.run_calc(case_5.__file__, 'job.ini')
     fnames = export(('disagg', 'csv'), self.calc.datastore)
     for fname in fnames:
         self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)
Example #57
 def test_case_59(self):
     # test NRCan15SiteTerm
     self.run_calc(case_59.__file__, 'job.ini')
     [f] = export(('hcurves/mean', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/hcurve-mean.csv', f)
Example #58
 def test_occupants(self):
     self.run_calc(occupants.__file__, 'job.ini')
     fnames = export(('agg_curves-rlzs', 'csv'), self.calc.datastore)
     for fname in fnames:
         self.assertEqualFiles('expected/' + strip_calc_id(fname),
                               fname, delta=1E-5)
Example #59
    def test_case_15(self):  # full enumeration
        self.assert_curves_ok('''\
hazard_curve-max-PGA.csv
hazard_curve-max-SA(0.1).csv
hazard_curve-mean-PGA.csv
hazard_curve-mean-SA(0.1).csv
hazard_uhs-max.csv
hazard_uhs-mean.csv
'''.split(), case_15.__file__, delta=1E-6)

        # test UHS XML export
        fnames = [f for f in export(('uhs', 'xml'), self.calc.datastore)
                  if 'mean' in f]
        self.assertEqualFiles('expected/hazard_uhs-mean-0.01.xml', fnames[0])
        self.assertEqualFiles('expected/hazard_uhs-mean-0.1.xml', fnames[1])
        self.assertEqualFiles('expected/hazard_uhs-mean-0.2.xml', fnames[2])

        # test hmaps geojson export
        fnames = [f for f in export(('hmaps', 'geojson'), self.calc.datastore)
                  if 'mean' in f]
        self.assertEqualFiles(
            'expected/hazard_map-mean-0.01-PGA.geojson', fnames[0])
        self.assertEqualFiles(
            'expected/hazard_map-mean-0.01-SA(0.1).geojson', fnames[1])
        self.assertEqualFiles(
            'expected/hazard_map-mean-0.1-PGA.geojson', fnames[2])
        self.assertEqualFiles(
            'expected/hazard_map-mean-0.1-SA(0.1).geojson', fnames[3])
        self.assertEqualFiles(
            'expected/hazard_map-mean-0.2-PGA.geojson', fnames[4])
        self.assertEqualFiles(
            'expected/hazard_map-mean-0.2-SA(0.1).geojson', fnames[5])

        # npz exports
        export(('hmaps', 'npz'), self.calc.datastore)
        export(('uhs', 'npz'), self.calc.datastore)

        # here is the size of assoc_by_grp for a complex logic tree
        # grp_id gsim_idx rlzis
        # 0	0	 {0, 1}
        # 0	1	 {2, 3}
        # 1	0	 {0, 2}
        # 1	1	 {1, 3}
        # 2	0	 {4}
        # 2	1	 {5}
        # 3	0	 {6}
        # 3	1	 {7}
        # nbytes = (2 + 2 + 8) * 8 + 4 * 4 + 4 * 2 = 120

        # full source model logic tree
        cinfo = self.calc.datastore['csm_info']
        ra0 = cinfo.get_rlzs_assoc()
        self.assertEqual(
            sorted(ra0.by_grp()), ['grp-00', 'grp-01', 'grp-02', 'grp-03'])

        # reduction of the source model logic tree
        ra = cinfo.get_rlzs_assoc(sm_lt_path=['SM2', 'a3b1'])
        self.assertEqual(len(ra.by_grp()), 1)
        numpy.testing.assert_equal(
            len(ra.by_grp()['grp-02']),
            len(ra0.by_grp()['grp-02']))

        # more reduction of the source model logic tree
        ra = cinfo.get_rlzs_assoc(sm_lt_path=['SM1'])
        self.assertEqual(sorted(ra.by_grp()), ['grp-00', 'grp-01'])
        numpy.testing.assert_equal(
            ra.by_grp()['grp-00'], ra0.by_grp()['grp-00'])
        numpy.testing.assert_equal(
            ra.by_grp()['grp-01'], ra0.by_grp()['grp-01'])

        # reduction of the gsim logic tree
        ra = cinfo.get_rlzs_assoc(trts=['Stable Continental Crust'])
        self.assertEqual(sorted(ra.by_grp()), ['grp-00', 'grp-01'])
        numpy.testing.assert_equal(ra.by_grp()['grp-00'][0], [0, [0, 1]])
Example #60
 def test_interpolation(self):
     self.run_calc(case_2.__file__, 'job_interpolation.ini')
     [fname] = export(('damages-rlzs', 'csv'), self.calc.datastore)
     self.assertEqualFiles('expected/damage_interpolation.csv', fname)