Example #1
0
    def test_case_master(self):
        # end-to-end check of the case_master demo: statistics, loss-map
        # exports, the portfolio_loss and ruptures_events views, and the
        # per-event loss-ratio exports
        self.assert_stats_ok(case_master, 'job.ini', individual_curves='false')

        fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname), fname)

        fname = writetmp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss.txt', fname, delta=1E-5)

        # check rup_data is stored correctly
        fname = writetmp(view('ruptures_events', self.calc.datastore))
        self.assertEqualFiles('expected/ruptures_events.txt', fname)

        # export a specific eid
        fnames = export(('all_loss_ratios:0', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname), fname)
        # NOTE(review): this re-checks the last file from the loop above
        # against a second expectation file; looks redundant — confirm
        self.assertEqualFiles('expected/losses-eid=0.csv', fname)

        # export a specific pair (sm_id, eid)
        fnames = export(('all_loss_ratios:1:0', 'csv'),
                        self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)
Example #2
0
    def test_event_based(self):
        # UCERF event based hazard, then a regular event_based run on top
        # of the same ruptures; the two must produce identical GMFs
        self.run_calc(ucerf.__file__, 'job.ini')
        gmv_uc = view('global_gmfs', self.calc.datastore)
        # check the distribution of the events
        self.assertEventsByRlz([2, 2, 2, 2, 6, 6, 2, 2, 2, 2, 6, 6, 2, 2, 3,
                                3, 6, 6, 1, 1, 1, 1, 6, 6, 2, 2, 3, 3, 2, 2,
                                2, 2, 3, 3, 2, 2, 3, 3, 3, 3, 2, 2, 3, 3,
                                3, 3, 3, 3])

        [fname] = export(('ruptures', 'csv'), self.calc.datastore)
        # check that we get the expected number of ruptures
        with open(fname) as f:
            self.assertEqual(len(f.readlines()), 72)
        self.assertEqualFiles('expected/ruptures.csv', fname, lastline=20,
                              delta=1E-5)

        # run a regular event based on top of the UCERF ruptures and
        # check the generated hazard maps
        self.run_calc(ucerf.__file__, 'job.ini',
                      calculation_mode='event_based',
                      hazard_calculation_id=str(self.calc.datastore.calc_id))

        # check ucerf_hazard and event_based produces the same GMFs
        gmv_eb = view('global_gmfs', self.calc.datastore)
        self.assertEqual(gmv_uc, gmv_eb)

        # check the mean hazard map
        [fname] = [f for f in export(('hmaps', 'csv'), self.calc.datastore)
                   if 'mean' in f]
        self.assertEqualFiles('expected/hazard_map-mean.csv', fname,
                              delta=1E-5)
    def test_case_8(self):
        # scenario_risk driven by GMFs whose hazard sites do not coincide
        # with the asset locations
        self.run_calc(case_8.__file__, 'job.ini')
        dstore = self.calc.datastore
        total = extract(dstore, 'agg_losses/structural')
        aac(total.array, [1159817.1])

        # extracting the fullreport must not raise
        view('fullreport', dstore)
Example #4
0
    def test_classical_time_dep(self):
        # run with a small block size so that source splitting is exercised
        ucerf_base.RUPTURES_PER_BLOCK = 10
        try:
            out = self.run_calc(ucerf.__file__,
                                'job_classical_time_dep_redux.ini',
                                exports='csv')
        finally:
            # restore the default even if the calculation fails, so a
            # failure here cannot pollute the other tests in the process
            ucerf_base.RUPTURES_PER_BLOCK = 1000
        fname = out['hcurves', 'csv'][0]
        self.assertEqualFiles('expected/hazard_curve-td-mean.csv', fname,
                              delta=1E-6)

        # make sure this runs
        view('fullreport', self.calc.datastore)
Example #5
0
    def test_classical(self):
        # run with a small block size so that source splitting is exercised
        ucerf_base.RUPTURES_PER_BLOCK = 50
        try:
            self.run_calc(ucerf.__file__, 'job_classical_redux.ini',
                          exports='csv')
        finally:
            # restore the default even if the calculation fails, so a
            # failure here cannot pollute the other tests in the process
            ucerf_base.RUPTURES_PER_BLOCK = 1000
        fnames = export(('hcurves/', 'csv'), self.calc.datastore)
        expected = ['hazard_curve-0-PGA.csv', 'hazard_curve-0-SA(0.1).csv',
                    'hazard_curve-1-PGA.csv', 'hazard_curve-1-SA(0.1).csv']
        for fname, exp in zip(fnames, expected):
            self.assertEqualFiles('expected/' + exp, fname)

        # make sure this runs
        view('fullreport', self.calc.datastore)
Example #6
0
    def test_case_miriam(self):
        # this is a case with a grid and asset-hazard association;
        # runs job.ini, checks totals/exports, then re-runs with job2.ini
        # to check the site/asset collection sizes
        self.run_calc(case_miriam.__file__, 'job.ini')

        # check minimum_magnitude >= 5.2
        minmag = self.calc.datastore['ruptures']['mag'].min()
        self.assertGreaterEqual(minmag, 5.2)

        # check asset_loss_table
        tot = self.calc.datastore['asset_loss_table'].value.sum()
        self.assertEqual(tot, 15787827.0)
        [fname] = export(('agg_loss_table', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/agg_losses-rlz000-structural.csv',
                              fname, delta=1E-5)
        fname = gettemp(view('portfolio_losses', self.calc.datastore))
        self.assertEqualFiles(
            'expected/portfolio_losses.txt', fname, delta=1E-5)

        # this is a case with exposure and region_grid_spacing=1
        self.run_calc(case_miriam.__file__, 'job2.ini')
        hcurves = dict(extract(self.calc.datastore, 'hcurves'))['all']
        sitecol = self.calc.datastore['sitecol']  # filtered sitecol
        # one hazard curve per filtered site
        self.assertEqual(len(hcurves), len(sitecol))
        assetcol = self.calc.datastore['assetcol']
        self.assertEqual(len(sitecol), 15)
        self.assertGreater(sitecol.vs30.sum(), 0)
        self.assertEqual(len(assetcol), 548)
    def test_case_1(self):
        # classical calculation with a single source and a single site;
        # also checks the job_info view, npz export, extraction and the
        # minimum_magnitude filtering error
        self.assert_curves_ok(
            ['hazard_curve-PGA.csv', 'hazard_curve-SA(0.1).csv'],
            case_1.__file__)

        if parallel.oq_distribute() != 'no':
            # job_info is meaningful only when running distributed
            info = view('job_info', self.calc.datastore)
            self.assertIn('task', info)
            self.assertIn('sent', info)
            self.assertIn('received', info)

        # there is a single source
        self.assertEqual(len(self.calc.datastore['source_info']), 1)

        # check npz export
        export(('hcurves', 'npz'), self.calc.datastore)

        # check extraction
        sitecol = extract(self.calc.datastore, 'sitecol')
        self.assertEqual(len(sitecol.array), 1)

        # check minimum_magnitude discards the source
        with self.assertRaises(RuntimeError) as ctx:
            self.run_calc(case_1.__file__, 'job.ini', minimum_magnitude='4.5')
        self.assertEqual(str(ctx.exception), 'All sources were filtered away!')
 def test_case_16(self):
     # a site model that raises warnings, plus an autogridded exposure;
     # first compute the hazard only, then run the risk on top of it
     self.run_calc(case_16.__file__, 'job.ini',
                   ground_motion_fields='false')
     parent_id = str(self.calc.datastore.calc_id)
     self.run_calc(case_16.__file__, 'job.ini',
                   hazard_calculation_id=parent_id)
     fname = gettemp(view('global_gmfs', self.calc.datastore))
     self.assertEqualFiles('expected/global_gmfs.txt', fname)
    def test_case_4(self):
        # NB: sensitive to the ordering of the epsilons in
        # openquake.riskinput.make_eps
        out = self.run_calc(case_4.__file__, 'job.ini', exports='csv')
        totfile = gettemp(view('totlosses', self.calc.datastore))
        self.assertEqualFiles('expected/totlosses.txt', totfile)

        [aggfile] = out['agglosses', 'csv']
        self.assertEqualFiles('expected/agglosses.csv', aggfile, delta=1E-6)
Example #10
0
 def test_case_miriam(self):
     # grid hazard with asset-hazard association
     out = self.run_calc(case_miriam.__file__, 'job.ini', exports='csv')
     [agg_csv] = out['agg_loss_table', 'csv']
     self.assertEqualFiles('expected/agg_losses-rlz000-structural.csv',
                           agg_csv)
     ploss = writetmp(view('portfolio_loss', self.calc.datastore))
     self.assertEqualFiles('expected/portfolio_loss.txt', ploss,
                           delta=1E-5)
Example #11
0
    def test_event_based_sampling(self):
        # UCERF event based hazard with logic-tree sampling
        self.run_calc(ucerf.__file__, 'job_ebh.ini')

        # event distribution across the realizations
        self.assertEventsByRlz([29, 25])

        # the mean hazard map rendered through the 'hmap' view
        hmap_txt = gettemp(view('hmap', self.calc.datastore))
        self.assertEqualFiles('expected/hmap.rst', hmap_txt)
Example #12
0
 def add(self, name, obj=None):
     """Add the view named `name` to the report text.

     :param name: key into `self.title`
     :param obj: optional object to render verbatim; when None, the
         text is produced by the view `name` on `self.dstore`
     """
     title = self.title[name]
     line = '-' * len(title)
     # test against the None sentinel, not truthiness: a falsy-but-real
     # obj (e.g. an empty container) must still be rendered, and objects
     # such as numpy arrays do not support bool() at all
     if obj is not None:
         text = '\n::\n\n' + indent(str(obj))
     else:
         text = views.view(name, self.dstore)
     self.text += '\n'.join(['\n\n' + title, line, text])
Example #13
0
    def test_case_2(self):
        # checks the pmap and inputs views and the hcurves CSV export
        self.run_calc(case_2.__file__, 'job.ini')

        # check view_pmap for a single realization
        got = view('pmap:grp-00', self.calc.datastore)
        self.assertEqual(got, '''\
{0: <ProbabilityCurve
[[2.26776679e-03 0.00000000e+00]
 [1.67915423e-05 0.00000000e+00]
 [0.00000000e+00 0.00000000e+00]
 [0.00000000e+00 0.00000000e+00]]>}''')

        # check view inputs
        lines = view('inputs', self.calc.datastore).splitlines()
        self.assertEqual(len(lines), 9)

        [fname] = export(('hcurves', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/hcurve.csv', fname)
Example #14
0
    def test_event_based_risk_sampling(self):
        # the fast calculator ucerf_risk
        raise unittest.SkipTest('ucerf_risk has been removed')
        # NB: everything below is intentionally unreachable; it is kept so
        # the test can be re-enabled if ucerf_risk is ever restored
        self.run_calc(ucerf.__file__, 'job_ebr.ini',
                      number_of_logic_tree_samples='2')

        # check the right number of events was stored
        self.assertEqual(len(self.calc.datastore['events']), 79)

        fname = gettemp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles(
            'expected/portfolio_loss2.txt', fname, delta=1E-5)

        # check the mean losses_by_period
        [fname] = export(('agg_curves-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_period2-mean.csv', fname)

        # make sure this runs
        view('fullreport', self.calc.datastore)
Example #15
0
    def test_case_master(self):
        # exercises a complex logic tree; it also prints the warning on
        # poe_agg very different from the expected poe
        self.run_calc(case_master.__file__, 'job.ini')
        tmpfile = gettemp(view('mean_disagg', self.calc.datastore))
        self.assertEqualFiles('expected/mean_disagg.rst', tmpfile)
        os.remove(tmpfile)

        # check the number of exported stat files
        # (2 sid x 8 keys x 2 poe x 2 imt)
        exported = export(('disagg-stats', 'csv'), self.calc.datastore)
        self.assertEqual(len(exported), 192)
Example #16
0
    def test_event_based_risk(self):
        # the fast calculator ucerf_risk
        raise unittest.SkipTest('ucerf_risk has been removed')
        # NB: everything below is intentionally unreachable; it is kept so
        # the test can be re-enabled if ucerf_risk is ever restored
        self.run_calc(ucerf.__file__, 'job_ebr.ini')

        fname = gettemp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss.txt', fname, delta=1E-5)

        # check the mean losses_by_period
        [fname] = export(('agg_curves-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_period-mean.csv', fname)
Example #17
0
    def test_case_5(self):
        # different curve resolutions for different taxonomies
        self.run_calc(case_5.__file__, 'job_h.ini,job_r.ini')
        dstore = self.calc.datastore

        # mean loss curves
        [xml_path] = export(('loss_curves-stats', 'xml'), dstore)
        self.assertEqualFiles('expected/loss_curves-mean.xml', xml_path)

        # individual average losses
        avg_path = writetmp(view('loss_curves_avg', dstore))
        self.assertEqualFiles('expected/loss_curves_avg.txt', avg_path)
Example #18
0
    def test_event_based_risk(self):
        # UCERF requires h5py with vlen-array support (>= 2.6.0); compare
        # numeric version components, since a plain string comparison
        # wrongly considers '2.10.0' to be older than '2.6.0'
        version = tuple(int(p) for p in h5py.__version__.split('.')[:2]
                        if p.isdigit())
        if version < (2, 6):
            raise unittest.SkipTest  # UCERF requires vlen arrays
        self.run_calc(ucerf.__file__, 'job_ebr.ini',
                      number_of_logic_tree_samples='2')

        fnames = export(('agg_loss_table', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)

        fname = writetmp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss.txt', fname)
Example #19
0
    def test_case_6a(self):
        # a case with two gsims
        out = self.run_calc(case_6a.__file__, 'job_haz.ini,job_risk.ini',
                            exports='csv')
        agg_b1, agg_b2 = out['agglosses-rlzs', 'csv']
        self.assertEqualFiles('expected/agg-gsimltp_b1_structural.csv',
                              agg_b1)
        self.assertEqualFiles('expected/agg-gsimltp_b2_structural.csv',
                              agg_b2)

        # exercise the totlosses view
        tot = writetmp(view('totlosses', self.calc.datastore))
        self.assertEqualFiles('expected/totlosses.txt', tot)
Example #20
0
    def test_case_2(self):
        self.assert_stats_ok(case_2, 'job.ini', individual_curves='true')
        avg = writetmp(view('mean_avg_losses', self.calc.datastore))
        self.assertEqualFiles('expected/mean_avg_losses.txt', avg)

        # with a huge minimum_intensity every GMF is discarded and the
        # calculation must fail with a clear error message
        with self.assertRaises(RuntimeError) as ctx:
            self.run_calc(case_2.__file__, 'job.ini', minimum_intensity='10.0')
        expected_msg = ('No GMFs were generated, perhaps they were all '
                        'below the minimum_intensity threshold')
        self.assertEqual(str(ctx.exception), expected_msg)
Example #21
0
def show(what, calc_id=-1):
    """
    Show the content of a datastore (by default the last one).

    :param what: 'all', 'rlzs', a view name or a datastore key
    :param calc_id: a calculation ID; -1 means the last calculation
    """
    if what == 'all':  # show all
        if not os.path.exists(datastore.DATADIR):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datastore.DATADIR):
            try:
                ds = datastore.read(calc_id)
                oq = ds['oqparam']
                cmode, descr = oq.calculation_mode, oq.description
            except Exception:
                # invalid datastore file, or missing calculation_mode
                # and description attributes, perhaps due to a manual kill;
                # NB: a bare `except:` here would also swallow
                # KeyboardInterrupt/SystemExit
                f = os.path.join(datastore.DATADIR, 'calc_%s.hdf5' % calc_id)
                # logging.warn is deprecated in favor of logging.warning
                logging.warning('Unreadable datastore %s', f)
                continue
            else:
                rows.append((calc_id, cmode, descr.encode('utf-8')))
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return

    ds = read(calc_id)
    try:
        # this part is experimental
        if what == 'rlzs' and 'hcurves' in ds:
            min_value = 0.01  # used in rmsep
            curves_by_rlz, mean_curves = get_hcurves_and_means(ds)
            dists = []
            for rlz, curves in curves_by_rlz.items():
                dist = sum(rmsep(mean_curves[imt], curves[imt], min_value)
                           for imt in mean_curves.dtype.fields)
                dists.append((dist, rlz))
            print('Realizations in order of distance from the mean curves')
            for dist, rlz in sorted(dists):
                print('%s: rmsep=%s' % (rlz, dist))
        elif view.keyfunc(what) in view:
            print(view(what, ds))
        elif what in ds:
            obj = ds[what]
            if hasattr(obj, 'value'):  # an array
                print(write_csv(io.StringIO(), obj.value))
            else:
                print(obj)
        else:
            print('%s not found' % what)
    finally:
        # close the datastore even if the display logic raises
        ds.close()
Example #22
0
    def test_case_master(self):
        # run case_master in three ways and compare:
        # 1) single-job event_based_risk
        self.run_calc(case_master.__file__, 'job.ini', insured_losses='false')
        calc0 = self.calc.datastore  # single file event_based_risk
        # 2) hazard-only event_based
        self.run_calc(case_master.__file__, 'job.ini', insured_losses='false',
                      calculation_mode='event_based')
        calc1 = self.calc.datastore  # event_based
        # 3) risk on top of the precomputed hazard (calc1)
        self.run_calc(case_master.__file__, 'job.ini', insured_losses='false',
                      hazard_calculation_id=str(calc1.calc_id),
                      source_model_logic_tree_file='',
                      gsim_logic_tree_file='')
        calc2 = self.calc.datastore  # two files event_based_risk

        check_csm_info(calc0, calc1)  # the csm_info arrays must be equal
        check_csm_info(calc0, calc2)  # the csm_info arrays must be equal

        if sys.platform == 'darwin':
            raise unittest.SkipTest('MacOSX')

        # compare the event loss table generated by a event_based_risk
        # case_master calculation from ruptures
        f0 = gettemp(view('elt', calc0))
        self.assertEqualFiles('expected/elt.txt', f0, delta=1E-5)
        f2 = gettemp(view('elt', calc2))
        self.assertEqualFiles('expected/elt.txt', f2, delta=1E-5)
Example #23
0
    def test_case_master_ebr(self):
        # case_master run as an ebrisk calculation over one year
        out = self.run_calc(case_master.__file__, 'job.ini',
                            calculation_mode='ebrisk',
                            investigation_time='1',
                            insured_losses='false',
                            exports='csv')
        # every exported CSV must match its expectation file
        for key in [('losses_by_taxon', 'csv'), ('agg_loss_table', 'csv')]:
            for fname in out[key]:
                self.assertEqualFiles('expected/%s' % strip_calc_id(fname),
                                      fname)

        fname = writetmp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss_ebr.txt', fname,
                              delta=1E-5)
Example #24
0
    def test_ebr(self):
        # a single `run_job` case, but the most complex one: event based
        # risk with post processing
        job_ini = os.path.join(os.path.dirname(case_master.__file__),
                               'job.ini')
        with Print.patch() as p:
            job_id = run_job(job_ini, log_level='error')
        self.assertIn('id | name', str(p))

        # sanity check on the performance view: the most relevant
        # information must be stored (it can be lost for instance due
        # to a wrong refactoring of the safely_call function)
        with read(job_id) as dstore:
            self.assertIn('total event_based_risk',
                          view('performance', dstore))
    def test_case_6a(self):
        # a case with two gsims
        self.run_calc(case_6a.__file__, 'job_haz.ini,job_risk.ini',
                      exports='csv')
        [agg] = export(('agglosses', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/agg_structural.csv', agg)

        # exercise the totlosses view
        tot = gettemp(view('totlosses', self.calc.datastore))
        self.assertEqualFiles('expected/totlosses.txt', tot)

        # a gsim logic tree containing two equal gsims must be rejected
        with self.assertRaises(InvalidLogicTree):
            self.run_calc(case_6a.__file__, 'job_haz.ini',
                          gsim_logic_tree_file='wrong_gmpe_logic_tree.xml')
Example #26
0
    def test_case_14(self):
        # test preclassical and OQ_SAMPLE_SOURCES
        with mock.patch.dict(os.environ, OQ_SAMPLE_SOURCES='1'):
            self.run_calc(
                case_14.__file__, 'job.ini', calculation_mode='preclassical')
        # the ruptures_per_trt view must render the expected table
        rpt = view('ruptures_per_trt', self.calc.datastore)
        self.assertEqual(rpt, """\
================ ====== ==================== ============ ============
source_model     grp_id trt                  eff_ruptures tot_ruptures
================ ====== ==================== ============ ============
simple_fault.xml 0      Active Shallow Crust 447          447         
================ ====== ==================== ============ ============""")
        # test classical
        self.assert_curves_ok([
            'hazard_curve-smltp_simple_fault-gsimltp_AbrahamsonSilva2008.csv',
            'hazard_curve-smltp_simple_fault-gsimltp_CampbellBozorgnia2008.csv'
        ], case_14.__file__)
Example #27
0
    def test_case_1(self):
        # checks the aggregate curve stats, loss curve/map exporters,
        # the portfolio_loss view and the rup/src loss table outputs
        self.run_calc(case_1.__file__, 'job.ini')
        ekeys = [('agg_curves-stats', 'csv')]
        for ekey in ekeys:
            for fname in export(ekey, self.calc.datastore):
                self.assertEqualFiles(
                    'expected/%s' % strip_calc_id(fname), fname)

        # make sure the agg_curves-stats has the right attrs
        self.check_attr('return_periods', [30, 60, 120, 240, 480, 960])
        self.check_attr('units', [b'EUR', b'EUR'])
        self.check_attr('nbytes', 96)

        # test the loss curves exporter
        [f1] = export(('loss_curves/rlz-0', 'csv'), self.calc.datastore)
        [f2] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves-rlz-000.csv', f1)
        self.assertEqualFiles('expected/loss_curves-rlz-001.csv', f2)

        [f] = export(('loss_curves/mean', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves-mean.csv', f)

        # test the loss maps exporter
        fnames = export(('loss_maps-stats', 'csv'), self.calc.datastore)
        assert fnames
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname, delta=1E-5)

        # test portfolio loss
        tmp = gettemp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss.txt', tmp)

        # test the rup_loss_table exporter
        fnames = export(('rup_loss_table', 'xml'), self.calc.datastore)
        self.assertEqual(len(fnames), 2)
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname)

        # test the src_loss_table extractor
        arr = extract(self.calc.datastore, 'src_loss_table/structural')
        tmp = gettemp(rst_table(arr))
        self.assertEqualFiles('expected/src_loss_table.txt', tmp)
Example #28
0
    def test_case_2(self):
        self.assert_stats_ok(case_2, 'job.ini', individual_curves='true')
        fname = writetmp(view('mean_avg_losses', self.calc.datastore))
        self.assertEqualFiles('expected/mean_avg_losses.txt', fname)

        # test the composite_risk_model keys (i.e. slash escaping)
        crm = sorted(self.calc.datastore.getitem('composite_risk_model'))
        self.assertEqual(crm, ['RC%2B', 'RM', 'W%2F1'])
        # export a specific eid
        [fname] = export(('all_loss_ratios:0', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/losses-eid=0.csv', fname)

        # test the case when all GMFs are filtered out: the calculation
        # must fail with a clear RuntimeError
        with self.assertRaises(RuntimeError) as ctx:
            self.run_calc(case_2.__file__, 'job.ini', minimum_intensity='10.0')
        self.assertEqual(
            str(ctx.exception),
            'No GMFs were generated, perhaps they were all below the '
            'minimum_intensity threshold')
Example #29
0
    def test_case_1(self):
        # test with a single event; checks the num_units view and the
        # agg_damages extractor
        self.assert_ok(case_1, 'job_risk.ini')
        got = view('num_units', self.calc.datastore)
        self.assertEqual('''\
======== =========
taxonomy num_units
======== =========
RC       2,000    
RM       4,000    
*ALL*    6,000    
======== =========''', got)

        # test agg_damages, 1 realization x 3 damage states
        [dmg] = extract(self.calc.datastore, 'agg_damages/structural?'
                        'taxonomy=RC&CRESTA=01.1')
        numpy.testing.assert_almost_equal(
            [1498.0121, 472.96616, 29.021801], dmg, decimal=4)
        # test no intersection
        dmg = extract(self.calc.datastore, 'agg_damages/structural?'
                      'taxonomy=RM&CRESTA=01.1')
        self.assertEqual(dmg.shape, ())
Example #30
0
 def test_case_1g(self):
     # a vulnerability function defined via a PMF
     self.run_calc(case_1g.__file__, 'job.ini')
     losses_txt = writetmp(view('mean_avg_losses', self.calc.datastore))
     self.assertEqualFiles('expected/avg_losses.txt', losses_txt)
     os.remove(losses_txt)
 def test_case_1f(self):
     # vulnerability function with BT
     self.run_calc(case_1f.__file__, 'job_h.ini,job_r.ini')
     fname = gettemp(view('portfolio_losses', self.calc.datastore))
     # NB: added the 'expected/' prefix for consistency: every other
     # assertEqualFiles call in this suite looks under expected/
     self.assertEqualFiles('expected/portfolio_losses.txt', fname,
                           delta=1E-6)
     os.remove(fname)
Example #32
0
    def test_case_master(self):
        # full event_based_risk run of case_master followed by an ebrisk
        # run; checks the stats exports, views and aggregate outputs
        if sys.platform == 'darwin':
            raise unittest.SkipTest('MacOSX')
        self.run_calc(case_master.__file__, 'job.ini', exports='csv')
        fnames = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        assert fnames, 'avg_losses-stats not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        # check event loss table
        [fname] = export(('losses_by_event', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname),
                              fname,
                              delta=1E-5)

        # extract loss_curves/rlz-1 (with the first asset having zero losses)
        [fname] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname),
                              fname,
                              delta=1E-5)

        fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
        assert fnames, 'loss_maps-rlzs not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        fname = gettemp(view('portfolio_losses', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_losses.txt',
                              fname,
                              delta=1E-5)
        os.remove(fname)

        # check ruptures are stored correctly
        fname = gettemp(view('ruptures_events', self.calc.datastore))
        self.assertEqualFiles('expected/ruptures_events.txt', fname)
        os.remove(fname)

        # check losses_by_tag
        fnames = export(
            ('aggregate_by/avg_losses?tag=occupancy&kind=mean', 'csv'),
            self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_occupancy.csv', fnames[0])

        self.check_multi_tag(self.calc.datastore)

        # ------------------------- ebrisk calculator ---------------------- #
        self.run_calc(case_master.__file__,
                      'job.ini',
                      calculation_mode='ebrisk',
                      exports='',
                      aggregate_by='id')

        # agg_losses-rlzs has shape (L=5, R=9)
        # agg_losses-stats has shape (L=5, S=4)
        fname = export(('agg_losses-stats', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/agglosses.csv', fname, delta=1E-5)

        fname = export(('agg_curves-stats', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/aggcurves.csv', fname, delta=1E-5)

        fname = export(('agg_maps-stats', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/aggmaps.csv', fname, delta=1E-5)

        fname = export(('avg_losses', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/avg_losses-mean.csv',
                              fname,
                              delta=1E-5)

        fname = export(('losses_by_event', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/elt.csv', fname)
Example #33
0
 def test_case_master(self):
     # only meaningful on the xenial platform (check_platform skips
     # or guards elsewhere)
     check_platform('xenial')
     self.assert_stats_ok(case_master, 'job.ini')
     ploss = writetmp(view('portfolio_loss', self.calc.datastore))
     self.assertEqualFiles('expected/portfolio_loss.txt', ploss,
                           delta=1E-5)
Example #34
0
def export_fullreport(ekey, dstore):
    """Write the 'fullreport' view to report.rst and return its path."""
    dest = dstore.export_path('report.rst')
    with open(dest, 'w') as f:
        f.write(view('fullreport', dstore))
    return [dest]
    def test_case_master(self):
        # event_based_risk run of case_master, then an ebrisk run reusing
        # the same hazard; checks exports, views and tag aggregation
        if sys.platform == 'darwin':
            raise unittest.SkipTest('MacOSX')
        self.run_calc(case_master.__file__, 'job.ini', exports='csv')
        fnames = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        assert fnames, 'avg_losses-stats not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        # extract loss_curves/rlz-1 (with the first asset having zero losses)
        [fname] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname),
                              fname,
                              delta=1E-5)

        fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
        assert fnames, 'loss_maps-rlzs not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        # extract curves by tag
        tags = 'taxonomy=tax1&state=01&cresta=0.11'
        a = extract(self.calc.datastore, 'agg_curves/structural?' + tags)
        self.assertEqual(a.array.shape, (4, 3))  # 4 stats, 3 return periods

        fname = gettemp(view('portfolio_losses', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_losses.txt',
                              fname,
                              delta=1E-5)
        os.remove(fname)

        # check ruptures are stored correctly
        fname = gettemp(view('ruptures_events', self.calc.datastore))
        self.assertEqualFiles('expected/ruptures_events.txt', fname)
        os.remove(fname)

        # check losses_by_tag
        fnames = export(('aggregate_by/occupancy/avg_losses', 'csv'),
                        self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_occupancy.csv', fnames[0])

        self.check_multi_tag(self.calc.datastore)

        # ------------------------- ebrisk calculator ---------------------- #
        # rerun as ebrisk on top of the hazard computed above
        self.run_calc(case_master.__file__,
                      'job.ini',
                      hazard_calculation_id=str(self.calc.datastore.calc_id),
                      calculation_mode='ebrisk',
                      exports='',
                      aggregate_by='taxonomy',
                      insured_losses='false')

        # agg_losses-rlzs has shape (L=5, R=9)
        # agg_losses-stats has shape (L=5, S=4)
        fname = export(('agg_losses-stats', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/agglosses.csv', fname)

        fname = export(('avg_losses', 'csv'), self.calc.datastore)[0]
        self.assertEqualFiles('expected/avglosses.csv', fname, delta=1E-5)