Example #1
0
    def test_wrong_sites_csv(self):
        sites_csv = general.gettemp(
            'site_id,lon,lat\n1,1.0,2.1\n2,3.0,4.1\n3,5.0,6.1')
        source = general.gettemp("""
[general]
calculation_mode = scenario
[geometry]
sites_csv = %s
[misc]
maximum_distance=1
truncation_level=3
random_seed=5
[site_params]
reference_vs30_type = measured
reference_vs30_value = 600.0
reference_depth_to_2pt5km_per_sec = 5.0
reference_depth_to_1pt0km_per_sec = 100.0
intensity_measure_types_and_levels = {'PGA': [0.1, 0.2]}
investigation_time = 50.
export_dir = %s
""" % (os.path.basename(sites_csv), TMP))
        oq = readinput.get_oqparam(source)
        with self.assertRaises(InvalidFile) as ctx:
            readinput.get_mesh(oq)
        self.assertIn('expected site_id=0, got 1', str(ctx.exception))
        os.unlink(sites_csv)
Example #2
0
def _assert_equal_sources(nodes):
    """Check that all *nodes* describe the same source, modulo the ids."""
    first = nodes[0]
    if hasattr(first, 'source_id'):
        # real source objects: compare attribute by attribute
        for node in nodes[1:]:
            node.assert_equal(first, ignore=('id', 'src_group_id'))
    else:  # assume source nodes: compare their string representations
        expected = first.to_str()
        for node in nodes[1:]:
            same = node.to_str() == expected
            if not same:
                # dump both representations so they can be diffed with meld
                fname0 = gettemp(expected)
                fname1 = gettemp(node.to_str())
            assert same, 'different parameters for source %s, run meld %s %s' % (
                node['id'], fname0, fname1)
Example #3
0
    def test_lr_eq_0_cov_gt_0(self):
        """Reject a vulnerability function with loss ratio 0 and CoV > 0."""
        # If a vulnerability function loss ratio is 0 and its corresponding CoV
        # is > 0, a ValueError should be raised
        vuln_content = gettemp(u"""\
<?xml version='1.0' encoding='utf-8'?>
<nrml xmlns="http://openquake.org/xmlns/nrml/0.4"
      xmlns:gml="http://www.opengis.net/gml">
    <vulnerabilityModel>
        <discreteVulnerabilitySet vulnerabilitySetID="PAGER"
                                  assetCategory="population"
                                  lossCategory="occupants">
            <IML IMT="PGV">0.005 0.007 0.0098 0.0137</IML>
            <discreteVulnerability vulnerabilityFunctionID="A"
                                   probabilisticDistribution="LN">
                <lossRatio>0.00 0.06 0.18 0.36</lossRatio>
                <coefficientsVariation>0.30 0.30 0.30 0.30
                </coefficientsVariation>
            </discreteVulnerability>
        </discreteVulnerabilitySet>
    </vulnerabilityModel>
</nrml>
""")
        # the first loss ratio is 0.00 while the first CoV is 0.30
        with self.assertRaises(ValueError) as ar:
            nrml.to_python(vuln_content)
        self.assertIn('It is not valid to define a loss ratio = 0.0 with a '
                      'corresponding coeff. of variation > 0.0',
                      str(ar.exception))
    def test_case_miriam(self):
        """Grid hazard with asset-hazard association, then a gridded exposure."""
        # this is a case with a grid and asset-hazard association
        self.run_calc(case_miriam.__file__, 'job.ini')

        # check minimum_magnitude >= 5.2
        minmag = self.calc.datastore['ruptures']['mag'].min()
        self.assertGreaterEqual(minmag, 5.2)

        # check asset_loss_table
        tot = self.calc.datastore['asset_loss_table'].value.sum()
        self.assertEqual(tot, 15787827.0)
        [fname] = export(('agg_loss_table', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/agg_losses-rlz000-structural.csv',
                              fname, delta=1E-5)
        fname = gettemp(view('portfolio_losses', self.calc.datastore))
        self.assertEqualFiles(
            'expected/portfolio_losses.txt', fname, delta=1E-5)

        # this is a case with exposure and region_grid_spacing=1
        self.run_calc(case_miriam.__file__, 'job2.ini')
        hcurves = dict(extract(self.calc.datastore, 'hcurves'))['all']
        sitecol = self.calc.datastore['sitecol']  # filtered sitecol
        # one hazard curve per filtered site
        self.assertEqual(len(hcurves), len(sitecol))
        assetcol = self.calc.datastore['assetcol']
        self.assertEqual(len(sitecol), 15)
        self.assertGreater(sitecol.vs30.sum(), 0)
        self.assertEqual(len(assetcol), 548)
 def test_case_16(self):
     """Site model with warnings and autogridded exposure, run twice."""
     # first run computes the hazard without ground motion fields
     self.run_calc(case_16.__file__, 'job.ini',
                   ground_motion_fields='false')
     parent_id = str(self.calc.datastore.calc_id)
     # second run starts from the previous calculation
     self.run_calc(case_16.__file__, 'job.ini',
                   hazard_calculation_id=parent_id)
     fname = gettemp(view('global_gmfs', self.calc.datastore))
     self.assertEqualFiles('expected/global_gmfs.txt', fname)
Example #6
0
    def test_get_oqparam_with_sites_csv(self):
        """Check the OqParam built from a job.ini pointing to a sites_csv."""
        sites_csv = general.gettemp('1.0,2.1\n3.0,4.1\n5.0,6.1')
        try:
            source = general.gettemp("""
[general]
calculation_mode = scenario
[geometry]
sites_csv = %s
[misc]
maximum_distance=1
truncation_level=3
random_seed=5
[site_params]
reference_vs30_type = measured
reference_vs30_value = 600.0
reference_depth_to_2pt5km_per_sec = 5.0
reference_depth_to_1pt0km_per_sec = 100.0
intensity_measure_types_and_levels = {'PGA': [0.1, 0.2]}
investigation_time = 50.
export_dir = %s
            """ % (os.path.basename(sites_csv), TMP))
            exp_base_path = os.path.dirname(
                os.path.join(os.path.abspath('.'), source))

            # each parameter of the job.ini must appear in the OqParam
            expected_params = {
                'export_dir': TMP,
                'base_path': exp_base_path,
                'calculation_mode': 'scenario',
                'truncation_level': 3.0,
                'random_seed': 5,
                'maximum_distance': {'default': 1.0},
                'inputs': {'job_ini': source,
                           'sites': sites_csv},
                'reference_depth_to_1pt0km_per_sec': 100.0,
                'reference_depth_to_2pt5km_per_sec': 5.0,
                'reference_vs30_type': 'measured',
                'reference_vs30_value': 600.0,
                'hazard_imtls': {'PGA': [0.1, 0.2]},
                'investigation_time': 50.0,
                'risk_investigation_time': 50.0,
            }

            params = getparams(readinput.get_oqparam(source))
            self.assertEqual(expected_params, params)
        finally:
            os.unlink(sites_csv)
Example #7
0
def get_path(fname_or_fileobject):
    """
    Return a filesystem path for the given argument.

    :param fname_or_fileobject: a path string, an object with a ``getvalue``
        method (e.g. a StringIO, whose contents are dumped to a temporary
        file) or an object with a ``name`` attribute (e.g. an open file)
    :returns: the corresponding path name
    :raises TypeError: if the argument is none of the above
    """
    if isinstance(fname_or_fileobject, str):
        return fname_or_fileobject
    elif hasattr(fname_or_fileobject, 'getvalue'):
        # in-memory buffer: materialize its contents as a temporary file
        return gettemp(fname_or_fileobject.getvalue())
    elif hasattr(fname_or_fileobject, 'name'):
        return fname_or_fileobject.name
    else:
        # bug fix: the original *returned* the TypeError instance instead of
        # raising it, so callers silently received an exception object
        raise TypeError(fname_or_fileobject)
Example #8
0
 def test_case_16(self):
     """Run hazard twice, the second run reusing the first calculation."""
     # an example with site model raising warnings and autogridded exposure
     self.run_calc(case_16.__file__,
                   'job.ini',
                   ground_motion_fields='false')
     hid = str(self.calc.datastore.calc_id)
     self.run_calc(case_16.__file__, 'job.ini', hazard_calculation_id=hid)
     tmp = gettemp(view('global_gmfs', self.calc.datastore))
     self.assertEqualFiles('expected/global_gmfs.txt', tmp)
    def test_event_based_sampling(self):
        """UCERF event based hazard with sampling of the logic tree."""
        self.run_calc(ucerf.__file__, 'job_ebh.ini')

        # check the distribution of the events
        self.assertEventsByRlz([29, 25])

        # check the mean hazard map
        fname = gettemp(view('hmap', self.calc.datastore))
        self.assertEqualFiles('expected/hmap.rst', fname)
Example #10
0
    def test(self):
        """Read NRML hazard curves and check sites, IMTLs and PoEs."""
        fname = general.gettemp('''\
<?xml version="1.0" encoding="utf-8"?>
<nrml xmlns:gml="http://www.opengis.net/gml"
      xmlns="http://openquake.org/xmlns/nrml/0.4">

    <!-- Spectral Acceleration (SA) example -->
    <hazardCurves sourceModelTreePath="b1_b2_b4" gsimTreePath="b1_b2" investigationTime="50.0" IMT="SA" saPeriod="0.025" saDamping="5.0">
        <IMLs>5.0000e-03 7.0000e-03 1.3700e-02</IMLs>

        <hazardCurve>
            <gml:Point>
                <gml:pos>-122.5000 37.5000</gml:pos>
            </gml:Point>
            <poEs>9.8728e-01 9.8266e-01 9.4957e-01</poEs>
        </hazardCurve>
        <hazardCurve>
            <gml:Point>
                <gml:pos>-123.5000 37.5000</gml:pos>
            </gml:Point>
            <poEs>9.8727e-02 9.8265e-02 9.4956e-02</poEs>
        </hazardCurve>
    </hazardCurves>

    <!-- Basic example, using PGA as IMT -->
    <hazardCurves sourceModelTreePath="b1_b2_b3" gsimTreePath="b1_b7" investigationTime="50.0" IMT="PGA">
        <IMLs>5.0000e-03 7.0000e-03 1.3700e-02 3.3700e-02</IMLs>

        <hazardCurve>
            <gml:Point>
                <gml:pos>-122.5000 37.5000</gml:pos>
            </gml:Point>
            <poEs>9.8728e-01 9.8226e-01 9.4947e-01 9.2947e-01</poEs>
        </hazardCurve>
        <hazardCurve>
            <gml:Point>
                <gml:pos>-123.5000 37.5000</gml:pos>
            </gml:Point>
            <poEs>9.8728e-02 9.8216e-02 9.4945e-02 9.2947e-02</poEs>
        </hazardCurve>
    </hazardCurves>
</nrml>
''', suffix='.xml')
        # build a bare OqParam, bypassing __init__ and its validations
        oqparam = object.__new__(oqvalidation.OqParam)
        oqparam.inputs = dict(hazard_curves=fname)
        sitecol = readinput.get_site_collection(oqparam)
        # the file above contains two distinct sites
        self.assertEqual(len(sitecol), 2)
        self.assertEqual(sorted(oqparam.hazard_imtls.items()),
                         [('PGA', [0.005, 0.007, 0.0137, 0.0337]),
                          ('SA(0.025)', [0.005, 0.007, 0.0137])])
        hcurves = readinput.pmap.convert(oqparam.imtls, 2)
        assert_allclose(hcurves['PGA'], numpy.array(
            [[0.098728, 0.098216, 0.094945, 0.092947],
             [0.98728, 0.98226, 0.94947, 0.92947]]))
        assert_allclose(hcurves['SA(0.025)'], numpy.array(
            [[0.098727, 0.098265, 0.094956],
             [0.98728, 0.98266, 0.94957]]))
    def test_case_4(self):
        """Scenario risk case sensitive to the ordering of the epsilons
        in openquake.riskinput.make_eps."""
        outputs = self.run_calc(case_4.__file__, 'job.ini', exports='csv')

        # total losses, via the 'totlosses' view
        tot_path = gettemp(view('totlosses', self.calc.datastore))
        self.assertEqualFiles('expected/totlosses.txt', tot_path)

        # aggregate losses, via the csv exporter
        [agg_path] = outputs['agglosses', 'csv']
        self.assertEqualFiles('expected/agglosses.csv', agg_path, delta=1E-6)
    def test_case_1(self):
        """Classical risk case: exercise all exporters and extractors."""
        self.run_calc(case_1.__file__, 'job.ini')
        ekeys = [('agg_curves-stats', 'csv')]
        for ekey in ekeys:
            for fname in export(ekey, self.calc.datastore):
                self.assertEqualFiles(
                    'expected/%s' % strip_calc_id(fname), fname)

        # make sure the agg_curves-stats has the right attrs
        self.check_attr('return_periods', [30, 60, 120, 240, 480, 960])
        self.check_attr('units', [b'EUR', b'EUR'])
        self.check_attr('nbytes', 96)

        # test the loss curves exporter
        [f1] = export(('loss_curves/rlz-0', 'csv'), self.calc.datastore)
        [f2] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves-rlz-000.csv', f1)
        self.assertEqualFiles('expected/loss_curves-rlz-001.csv', f2)

        [f] = export(('loss_curves/mean', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves-mean.csv', f)

        # test the loss maps exporter
        fnames = export(('loss_maps-stats', 'csv'), self.calc.datastore)
        assert fnames
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname, delta=1E-5)

        # test portfolio loss
        tmp = gettemp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss.txt', tmp)

        # test the rup_loss_table exporter
        fnames = export(('rup_loss_table', 'xml'), self.calc.datastore)
        self.assertEqual(len(fnames), 2)
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname)

        # test the src_loss_table extractor
        arr = extract(self.calc.datastore, 'src_loss_table/structural')
        tmp = gettemp(rst_table(arr))
        self.assertEqualFiles('expected/src_loss_table.txt', tmp)
Example #13
0
    def test_case_1_eb(self):
        """Event based risk without insured losses and without tags."""
        # this is a case with no insured losses, no tags
        self.run_calc(case_1.__file__, 'job_eb.ini', concurrent_tasks='4')

        [fname] = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname)

        aw = extract(self.calc.datastore, 'agg_losses/structural')
        self.assertEqual(aw.stats, ['mean'])
        numpy.testing.assert_allclose(aw.array, [662.6701])

        fnames = export(('tot_curves-stats', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/eb_%s' % strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        [fname] = export(('losses_by_event', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname),
                              fname,
                              delta=1E-5)

        # extract tot_curves, no tags
        aw = extract(
            self.calc.datastore, 'tot_curves?kind=stats&'
            'loss_type=structural&absolute=1')
        tmp = gettemp(rst_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves1.csv', tmp)

        aw = extract(self.calc.datastore, 'tot_curves?kind=rlzs&'
                     'loss_type=structural&absolute=1')
        tmp = gettemp(rst_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves2.csv', tmp)

        aw = extract(
            self.calc.datastore, 'tot_curves?kind=stats&'
            'loss_type=structural&absolute=0')
        tmp = gettemp(rst_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves3.csv', tmp)

        aw = extract(self.calc.datastore, 'tot_curves?kind=rlzs&'
                     'loss_type=structural&absolute=0')
        tmp = gettemp(rst_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves4.csv', tmp)
    def test_case_1(self):
        """Classical risk case: exercise all exporters and extractors."""
        self.run_calc(case_1.__file__, 'job.ini')
        ekeys = [('agg_curves-stats', 'csv')]
        for ekey in ekeys:
            for fname in export(ekey, self.calc.datastore):
                self.assertEqualFiles('expected/%s' % strip_calc_id(fname),
                                      fname)

        # make sure the agg_curves-stats has the right attrs
        self.check_attr('return_periods', [30, 60, 120, 240, 480, 960])
        self.check_attr('units', [b'EUR', b'EUR'])
        self.check_attr('nbytes', 96)

        # test the loss curves exporter
        [f1] = export(('loss_curves/rlz-0', 'csv'), self.calc.datastore)
        [f2] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves-rlz-000.csv', f1)
        self.assertEqualFiles('expected/loss_curves-rlz-001.csv', f2)

        [f] = export(('loss_curves/mean', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/loss_curves-mean.csv', f)

        # test the loss maps exporter
        fnames = export(('loss_maps-stats', 'csv'), self.calc.datastore)
        assert fnames
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        # test portfolio loss
        tmp = gettemp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss.txt', tmp)

        # test the rup_loss_table exporter
        fnames = export(('rup_loss_table', 'xml'), self.calc.datastore)
        self.assertEqual(len(fnames), 2)
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname), fname)

        # test the src_loss_table extractor
        arr = extract(self.calc.datastore, 'src_loss_table/structural')
        tmp = gettemp(rst_table(arr))
        self.assertEqualFiles('expected/src_loss_table.txt', tmp)
Example #15
0
    def test_case_master(self):
        """Master event based risk test: exporters, curves by tag, views."""
        # the expected files are platform-sensitive, so skip on macOS
        if sys.platform == 'darwin':
            raise unittest.SkipTest('MacOSX')
        self.run_calc(case_master.__file__, 'job.ini', exports='csv')
        fnames = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        assert fnames, 'avg_losses-stats not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        # extract loss_curves/rlz-1 (with the first asset having zero losses)
        [fname] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname),
                              fname,
                              delta=1E-5)

        fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
        assert fnames, 'loss_maps-rlzs not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        # extract curves by tag
        tags = 'taxonomy=tax1&state=01&cresta=0.11'
        a = extract(self.calc.datastore, 'agg_curves/structural?' + tags)
        self.assertEqual(a.array.shape, (4, 3))  # 4 stats, 3 return periods

        fname = gettemp(view('portfolio_losses', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_losses.txt',
                              fname,
                              delta=1E-5)
        os.remove(fname)

        # check ruptures are stored correctly
        fname = gettemp(view('ruptures_events', self.calc.datastore))
        self.assertEqualFiles('expected/ruptures_events.txt', fname)
        os.remove(fname)

        # check losses_by_tag
        fnames = export(('losses_by_tag/occupancy', 'csv'),
                        self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_occupancy.csv', fnames[0])
Example #16
0
    def test_missing_cost_types(self):
        job_ini = general.gettemp('''\
[general]
description = Exposure with missing cost_types
calculation_mode = scenario
exposure_file = %s''' % os.path.basename(self.exposure4))
        oqparam = readinput.get_oqparam(job_ini)
        with self.assertRaises(InvalidFile) as ctx:
            readinput.get_sitecol_assetcol(oqparam, cost_types=['structural'])
        self.assertIn("is missing", str(ctx.exception))
Example #17
0
 def __init__(self, sitecol, integration_distance, hdf5path=None):
     """Build an rtree spatial index over the given site collection.

     :raises ImportError: if the rtree library is not installed
     """
     if rtree is None:
         raise ImportError('rtree')
     super().__init__(sitecol, integration_distance, hdf5path)
     self.indexpath = gettemp()
     # insert each site as a degenerate (point) rectangle
     idx = rtree.index.Index(self.indexpath)
     for sid, (lon, lat) in enumerate(zip(sitecol.lons, sitecol.lats)):
         idx.insert(sid, (lon, lat, lon, lat))
     idx.close()
Example #18
0
    def test_invalid_magnitude_distance_filter(self):
        source = general.gettemp("""
[general]
maximum_distance=[(200, 8)]
""")
        with self.assertRaises(ValueError) as ctx:
            readinput.get_oqparam(source)
        self.assertIn('magnitude 200.0 is bigger than the maximum (11): '
                      'could not convert to maximum_distance:',
                      str(ctx.exception))
Example #19
0
 def get(cls, path, **data):
     """GET /v1/calc/<path> and return the decoded JSON ({} on bad JSON)."""
     response = cls.c.get('/v1/calc/%s' % path, data,
                          HTTP_HOST='127.0.0.1')
     assert response.content, 'No content from /v1/calc/%s' % path
     try:
         return json.loads(response.content.decode('utf8'))
     except Exception:
         # keep the offending payload in a temporary file for inspection
         print('Invalid JSON, see %s' % gettemp(response.content),
               file=sys.stderr)
         return {}
Example #20
0
    def test_missing_cost_types(self):
        """An exposure without the required cost_types must be rejected."""
        job_ini = general.gettemp('''\
[general]
description = Exposure with missing cost_types
calculation_mode = scenario
exposure_file = %s''' % os.path.basename(self.exposure4))
        oqparam = readinput.get_oqparam(job_ini)
        with self.assertRaises(InvalidFile) as ctx:
            readinput.get_sitecol_assetcol(oqparam, cost_types=['structural'])
        self.assertIn("is missing", str(ctx.exception))
Example #21
0
    def test_no_nrml(self):
        fname = gettemp('''\
<?xml version="1.0" encoding="UTF-8"?>
<fragilityModel id="Ethiopia" assetCategory="buildings"
lossCategory="structural" />
''')
        with self.assertRaises(ValueError) as ctx:
            read(fname)
        self.assertIn('expected a node of kind nrml, got fragilityModel',
                      str(ctx.exception))
Example #22
0
    def test_invalid_magnitude_distance_filter(self):
        """A magnitude above 11 in maximum_distance must be rejected."""
        source = general.gettemp("""
[general]
maximum_distance=[(200, 8)]
""")
        with self.assertRaises(ValueError) as ctx:
            readinput.get_oqparam(source)
        self.assertIn(
            'magnitude 200.0 is bigger than the maximum (11): '
            'could not convert to maximum_distance:', str(ctx.exception))
Example #23
0
 def test_case_23(self):
     """Implicit grid with a site model defined on a larger grid."""
     outputs = self.run_calc(case_23.__file__, 'job.ini', exports='csv')

     # the exported ruptures must match the expected file
     [rup_csv] = outputs['ruptures', 'csv']
     self.assertEqualFiles('expected/%s' % strip_calc_id(rup_csv),
                           rup_csv, delta=1E-6)

     # the stored site collection must match the expected file
     sites = self.calc.datastore.getitem('sitecol')
     site_csv = gettemp(write_csv(io.BytesIO(), sites).decode('utf8'))
     self.assertEqualFiles('expected/sitecol.csv', site_csv)
Example #24
0
    def test_no_nrml(self):
        """A file whose root element is not <nrml> must be rejected."""
        fname = gettemp('''\
<?xml version="1.0" encoding="UTF-8"?>
<fragilityModel id="Ethiopia" assetCategory="buildings"
lossCategory="structural" />
''')
        with self.assertRaises(ValueError) as ctx:
            read(fname)
        self.assertIn('expected a node of kind nrml, got fragilityModel',
                      str(ctx.exception))
 def test_long_code(self):
     """An amplification code longer than 4 characters must be rejected."""
     path = gettemp(long_ampl_code)
     dtypes = {'ampcode': ampcode_dt, None: numpy.float64}
     with self.assertRaises(InvalidFile) as exc:
         read_csv(path, dtypes, index='ampcode')
     self.assertIn("line 3: ampcode='long_code' has length 9 > 4",
                   str(exc.exception))
    def test_multipoint(self):
        """NRML and TOML round trip of a multiPointSource model."""
        smodel = self.check_round_trip(MULTIPOINT)

        # dump to TOML, reload it and compare the model names
        path = general.gettemp(suffix='.toml')
        with open(path, 'w') as out:
            tomldump(smodel, out)
        with open(path, 'r') as inp:
            loaded = toml.load(inp)['sourceModel']
        self.assertEqual(smodel.name, loaded['_name'])
Example #27
0
 def get(cls, path, **data):
     """GET /v1/calc/<path> and return the decoded JSON ({} on bad JSON)."""
     resp = cls.c.get('/v1/calc/%s' % path, data,
                      HTTP_HOST='127.0.0.1')
     assert resp.content, 'No content from /v1/calc/%s' % path
     try:
         return json.loads(resp.content.decode('utf8'))
     except Exception:
         # keep the offending payload in a temporary file for inspection
         print('Invalid JSON, see %s' % gettemp(resp.content),
               file=sys.stderr)
         return {}
Example #28
0
    def test_event_based_risk(self):
        """UCERF event based risk with the fast ucerf_risk calculator."""
        self.run_calc(ucerf.__file__, 'job_ebr.ini')

        # check the portfolio loss view
        portfolio = gettemp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss.txt', portfolio,
                              delta=1E-5)

        # check the mean losses_by_period
        [curves] = export(('agg_curves-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_period-mean.csv', curves)
Example #29
0
    def test_case_master(self):
        """Master event based risk test: exporters, views and tag exports."""
        # the expected files are platform-sensitive, so skip on macOS
        if sys.platform == 'darwin':
            raise unittest.SkipTest('MacOSX')
        self.run_calc(case_master.__file__, 'job.ini', exports='csv')
        fnames = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        assert fnames, 'avg_losses-stats not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname), fname,
                                  delta=1E-5)

        # check event loss table
        [fname] = export(('losses_by_event', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname), fname,
                              delta=1E-5)

        # extract loss_curves/rlz-1 (with the first asset having zero losses)
        [fname] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname), fname,
                              delta=1E-5)

        fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
        assert fnames, 'loss_maps-rlzs not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname, delta=1E-5)

        fname = gettemp(view('portfolio_losses', self.calc.datastore))
        self.assertEqualFiles(
            'expected/portfolio_losses.txt', fname, delta=1E-5)
        os.remove(fname)

        # check ruptures are stored correctly
        fname = gettemp(view('ruptures_events', self.calc.datastore))
        self.assertEqualFiles('expected/ruptures_events.txt', fname)
        os.remove(fname)

        # check losses_by_tag
        fnames = export(
            ('aggregate_by/avg_losses?tag=occupancy&kind=mean', 'csv'),
            self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_occupancy.csv', fnames[0])

        self.check_multi_tag(self.calc.datastore)
 def test_case_23(self):
     """Implicit grid with a site model defined on a larger grid."""
     outputs = self.run_calc(case_23.__file__, 'job.ini', exports='csv')
     [rup_csv] = outputs['ruptures', 'csv']
     self.assertEqualFiles('expected/%s' % strip_calc_id(rup_csv), rup_csv,
                           delta=1E-4)
     # serialize the site collection to CSV and compare it
     buf = io.StringIO()
     write_csv(buf, self.calc.datastore['sitecol'].array)
     site_csv = gettemp(buf.getvalue())
     self.assertEqualFiles('expected/sitecol.csv', site_csv)
 def test_dupl(self):
     """Duplicated amplification codes must raise a ValueError."""
     path = gettemp(dupl_ampl_func)
     dtypes = {'ampcode': ampcode_dt, None: numpy.float64}
     frame = read_csv(path, dtypes, index='ampcode')
     with self.assertRaises(ValueError) as exc:
         Amplifier(self.imtls, frame, self.soil_levels)
     self.assertEqual(str(exc.exception), "Found duplicates for b'A'")
    def test(self):
        """Split, serialize and round-trip a MultiPointSource."""
        npd = PMF([(0.5, NodalPlane(1, 20, 3)),
                   (0.5, NodalPlane(2, 2, 4))])
        hd = PMF([(1, 14)])
        mesh = Mesh(numpy.array([0, 1]), numpy.array([0.5, 1]))
        mmfd = MultiMFD('incrementalMFD',
                        size=2,
                        min_mag=[4.5],
                        bin_width=[2.0],
                        occurRates=[[.3, .1], [.4, .2, .1]])
        mps = MultiPointSource('mp1', 'multi point source',
                               'Active Shallow Crust',
                               mmfd, PeerMSR(), 1.0,
                               10, 20, npd, hd, mesh)
        # test the splitting
        splits = list(mps)
        self.assertEqual(len(splits), 2)
        for split in splits:
            self.assertEqual(split.src_group_id, mps.src_group_id)

        # test the conversion to a node and its string representation
        got = obj_to_node(mps).to_str()
        print(got)
        exp = '''\
multiPointSource{id='mp1', name='multi point source', tectonicRegion='Active Shallow Crust'}
  multiPointGeometry
    gml:posList [0.0, 0.5, 1.0, 1.0]
    upperSeismoDepth 10
    lowerSeismoDepth 20
  magScaleRel 'PeerMSR'
  ruptAspectRatio 1.0
  multiMFD{kind='incrementalMFD', size=2}
    bin_width [2.0]
    min_mag [4.5]
    occurRates [0.3, 0.1, 0.4, 0.2, 0.1]
    lengths [2, 3]
  nodalPlaneDist
    nodalPlane{dip=20, probability=0.5, rake=3, strike=1}
    nodalPlane{dip=2, probability=0.5, rake=4, strike=2}
  hypoDepthDist
    hypoDepth{depth=14, probability=1.0}
'''
        self.assertEqual(got, exp)

        # test serialization to and from hdf5
        tmp = general.gettemp(suffix='.hdf5')
        with hdf5.File(tmp, 'w') as f:
            f[mps.source_id] = mps
        with hdf5.File(tmp, 'r') as f:
            f[mps.source_id]

        # test the bounding box
        bbox = mps.get_bounding_box(maxdist=100)
        numpy.testing.assert_almost_equal(
            (-0.8994569916564479, -0.39932, 1.8994569916564479, 1.89932),
            bbox)
    def test(self):
        """Split, serialize and round-trip a MultiPointSource."""
        npd = PMF([(0.5, NodalPlane(1, 20, 3)),
                   (0.5, NodalPlane(2, 2, 4))])
        hd = PMF([(1, 14)])
        mesh = Mesh(numpy.array([0, 1]), numpy.array([0.5, 1]))
        mmfd = MultiMFD('incrementalMFD',
                        size=2,
                        min_mag=[4.5],
                        bin_width=[2.0],
                        occurRates=[[.3, .1], [.4, .2, .1]])
        mps = MultiPointSource('mp1', 'multi point source',
                               'Active Shallow Crust',
                               mmfd, PeerMSR(), 1.0,
                               10, 20, npd, hd, mesh)
        # test the splitting
        splits = list(mps)
        self.assertEqual(len(splits), 2)
        for split in splits:
            self.assertEqual(split.grp_id, mps.grp_id)

        # test the conversion to a node and its string representation
        got = obj_to_node(mps).to_str()
        print(got)
        exp = '''\
multiPointSource{id='mp1', name='multi point source'}
  multiPointGeometry
    gml:posList [0.0, 0.5, 1.0, 1.0]
    upperSeismoDepth 10
    lowerSeismoDepth 20
  magScaleRel 'PeerMSR'
  ruptAspectRatio 1.0
  multiMFD{kind='incrementalMFD', size=2}
    bin_width [2.0]
    min_mag [4.5]
    occurRates [0.3, 0.1, 0.4, 0.2, 0.1]
    lengths [2, 3]
  nodalPlaneDist
    nodalPlane{dip=20, probability=0.5, rake=3, strike=1}
    nodalPlane{dip=2, probability=0.5, rake=4, strike=2}
  hypoDepthDist
    hypoDepth{depth=14, probability=1.0}
'''
        self.assertEqual(got, exp)

        # test serialization to and from hdf5
        tmp = general.gettemp(suffix='.hdf5')
        with hdf5.File(tmp, 'w') as f:
            f[mps.source_id] = mps
        with hdf5.File(tmp, 'r') as f:
            f[mps.source_id]

        # test the bounding box
        bbox = mps.get_bounding_box(maxdist=100)
        numpy.testing.assert_almost_equal(
            (-0.8994569916564479, -0.39932, 1.8994569916564479, 1.89932),
            bbox)
Example #34
0
    def test_case_master(self):
        """Master event based risk test: exporters, tags, curve dataframes."""
        # the expected files are platform-sensitive, so skip on macOS
        if sys.platform == 'darwin':
            raise unittest.SkipTest('MacOSX')
        self.run_calc(case_master.__file__, 'job.ini', exports='csv')
        fnames = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        assert fnames, 'avg_losses-stats not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        # check event loss table
        [fname] = export(('losses_by_event', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname),
                              fname,
                              delta=1E-5)

        fname = gettemp(view('portfolio_losses', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_losses.txt',
                              fname,
                              delta=1E-5)
        os.remove(fname)

        # check ruptures are stored correctly
        fname = gettemp(view('ruptures_events', self.calc.datastore))
        self.assertEqualFiles('expected/ruptures_events.txt', fname)
        os.remove(fname)

        # check losses_by_tag
        fnames = export(
            ('aggregate_by/avg_losses?tag=occupancy&kind=mean', 'csv'),
            self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_occupancy.csv', fnames[0])

        self.check_multi_tag(self.calc.datastore)

        # check curves-rlzs and curves-stats are readable
        df1 = self.calc.datastore.read_df('curves-rlzs', 'assets')
        aae(df1.columns, ['rlzs', 'return_periods', 'loss_types', 'value'])

        df2 = self.calc.datastore.read_df('curves-stats', 'assets')
        aae(df2.columns, ['stats', 'return_periods', 'loss_types', 'value'])
    def test_case_master(self):
        """
        Event based risk run on case_master: check exported loss curves,
        loss maps, tag-based aggregate curves and the max_gmf_size attr.
        """
        # expected files are not reproducible on macOS — TODO confirm why
        if sys.platform == 'darwin':
            raise unittest.SkipTest('MacOSX')
        self.run_calc(case_master.__file__, 'job.ini', exports='csv')
        fnames = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        assert fnames, 'avg_losses-stats not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        # extract loss_curves/rlz-1 (with the first asset having zero losses)
        [fname] = export(('loss_curves/rlz-1', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname),
                              fname,
                              delta=1E-5)

        fnames = export(('loss_maps-rlzs', 'csv'), self.calc.datastore)
        assert fnames, 'loss_maps-rlzs not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname),
                                  fname,
                                  delta=1E-5)

        # extract curves by tag
        tags = 'taxonomy=tax1&state=01&cresta=0.11'
        a = extract(self.calc.datastore, 'aggcurves/structural?' + tags)
        self.assertEqual(a.array.shape, (4, 3))  # 4 stats, 3 return periods

        fname = gettemp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss.txt', fname, delta=1E-5)
        os.remove(fname)

        # check ruptures are stored correctly
        fname = gettemp(view('ruptures_events', self.calc.datastore))
        self.assertEqualFiles('expected/ruptures_events.txt', fname)
        os.remove(fname)

        # check max_gmf_size
        exp = self.calc.datastore.get_attr('events', 'max_gmf_size')
        # NOTE(review): `.value` is the legacy h5py dataset accessor,
        # removed in h5py 3.0 — confirm the pinned h5py version
        got = self.calc.datastore['gmf_data/data'].value.nbytes
        self.assertGreater(exp, got)  # there is minimum_intensity
    def test_case_master(self):
        """
        Exercise a complex logic tree; this also triggers the warning on
        poe_agg being very different from the expected poe.
        """
        self.run_calc(case_master.__file__, 'job.ini')
        dstore = self.calc.datastore
        tmp = gettemp(view('mean_disagg', dstore))
        self.assertEqualFiles('expected/mean_disagg.rst', tmp)
        os.remove(tmp)
        # the stats exporter must produce 2 sid x 8 keys x 2 poe x 2 imt files
        exported = export(('disagg-stats', 'csv'), dstore)
        self.assertEqual(len(exported), 192)
    def test_case_master(self):
        """
        Disaggregation with a complex logic tree: compare the mean_disagg
        view and count the exported disagg-stats files.
        """
        # this tests exercise the case of a complex logic tree; it also
        # prints the warning on poe_agg very different from the expected poe
        self.run_calc(case_master.__file__, 'job.ini')
        fname = gettemp(view('mean_disagg', self.calc.datastore))
        self.assertEqualFiles('expected/mean_disagg.rst', fname)
        os.remove(fname)

        # check stats
        fnames = export(('disagg-stats', 'csv'), self.calc.datastore)
        self.assertEqual(len(fnames), 192)  # 2 sid x 8 keys x 2 poe x 2 imt
Exemple #38
0
    def test_event_based_risk(self):
        """Disabled: exercised the ucerf_risk fast calculator, now removed."""
        # the fast calculator ucerf_risk
        raise unittest.SkipTest('ucerf_risk has been removed')
        # NOTE: everything below is unreachable; kept for reference in case
        # the calculator is ever restored
        self.run_calc(ucerf.__file__, 'job_ebr.ini')

        fname = gettemp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss.txt', fname, delta=1E-5)

        # check the mean losses_by_period
        [fname] = export(('agg_curves-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_period-mean.csv', fname)
    def test_multipoint(self):
        """Round-trip a multipoint source model through XML and HDF5."""
        original = self.check_round_trip(MULTIPOINT)

        # write the model into a temporary HDF5 file, then read it back
        path = general.gettemp(suffix='.hdf5')
        with hdf5.File(path, 'w') as h5:
            h5['/'] = original
        with hdf5.File(path, 'r') as h5:
            restored = h5['/']

        # the restored model must keep the name and the group structure
        self.assertEqual(original.name, restored.name)
        self.assertEqual(len(original.src_groups), len(restored.src_groups))
    def test_case_6a(self):
        """
        Hazard + risk run with two GSIMs: check exported agglosses, the
        totlosses and portfolio_losses views, and that a logic tree with
        two equal gsims is rejected.
        """
        # case with two gsims
        self.run_calc(case_6a.__file__, 'job_haz.ini,job_risk.ini',
                      exports='csv')
        [f] = export(('agglosses', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/agg_structural.csv', f)

        # testing the totlosses view
        dstore = self.calc.datastore
        fname = gettemp(view('totlosses', dstore))
        self.assertEqualFiles('expected/totlosses.txt', fname)

        # testing portfolio_losses
        fname = gettemp(view('portfolio_losses', dstore))
        self.assertEqualFiles('expected/portfolio_losses.txt', fname)

        # two equal gsims
        with self.assertRaises(InvalidLogicTree):
            self.run_calc(case_6a.__file__, 'job_haz.ini',
                          gsim_logic_tree_file='wrong_gmpe_logic_tree.xml')
Exemple #41
0
 def __init__(self, sitecol, integration_distance):
     """
     Build an on-disk rtree spatial index over the (lon, lat) points of
     the given site collection.

     :param sitecol: a site collection with .lons and .lats arrays
     :param integration_distance: maximum source-site distance, stored as-is
     :raises ImportError: if the optional rtree dependency is missing
     """
     if rtree is None:
         raise ImportError('rtree')
     self.integration_distance = integration_distance
     self.distribute = 'processpool'
     self.indexpath = gettemp()
     lonlats = zip(sitecol.lons, sitecol.lats)
     index = rtree.index.Index(self.indexpath)
     # each site is inserted as a degenerate (point) bounding box
     for i, (lon, lat) in enumerate(lonlats):
         index.insert(i, (lon, lat, lon, lat))
     index.close()
    def test_case_1(self):
        """
        Check the agg_curves-stats export, its attributes, the
        portfolio_loss view and the src_loss_table extractor.
        """
        self.run_calc(case_1.__file__, 'job.ini')
        ekeys = [('agg_curves-stats', 'csv')]
        for ekey in ekeys:
            for fname in export(ekey, self.calc.datastore):
                self.assertEqualFiles('expected/%s' % strip_calc_id(fname),
                                      fname)

        # make sure the agg_curves-stats has the right attrs
        self.check_attr('return_periods', [30, 60, 120, 240, 480, 960])
        self.check_attr('units', [b'EUR', b'EUR'])

        # test portfolio loss
        tmp = gettemp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles('expected/portfolio_loss.txt', tmp)

        # test the src_loss_table extractor
        arr = extract(self.calc.datastore, 'src_loss_table/structural')
        tmp = gettemp(rst_table(arr))
        self.assertEqualFiles('expected/src_loss_table.txt', tmp)
    def test_case_1_eb(self):
        """
        Event based run with insured losses and tags: check exports,
        tag-filtered agg_curves extraction, and that the results do not
        depend on the number of concurrent tasks.
        """
        # this is a case with insured losses and tags
        self.run_calc(case_1.__file__, 'job_eb.ini', concurrent_tasks='4')

        [fname] = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname,
                              delta=1E-5)

        aw = extract(self.calc.datastore, 'agg_losses/structural')
        self.assertEqual(aw.stats, ['mean'])
        numpy.testing.assert_allclose(aw.array, [685.5015], atol=.001)

        [fname] = export(('aggrisk', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname),
                              fname, delta=1E-5)

        fnames = export(('aggcurves', 'csv'), self.calc.datastore)
        for fname in fnames:
            self.assertEqualFiles('expected/eb_%s' % strip_calc_id(fname),
                                  fname, delta=1E-5)

        [fname] = export(('risk_by_event', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname,
                              delta=1E-5)

        # extract agg_curves with tags
        aw = extract(self.calc.datastore, 'agg_curves?kind=stats&'
                     'loss_type=structural&absolute=1&policy=A&taxonomy=RC')
        tmp = gettemp(text_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves5.csv', tmp)

        aw = extract(self.calc.datastore, 'agg_curves?kind=stats&'
                     'loss_type=structural&absolute=0&policy=A&taxonomy=RC')
        tmp = gettemp(text_table(aw.to_dframe()))
        self.assertEqualFiles('expected/agg_curves7.csv', tmp)

        # test ct_independence: rerunning with a different task count
        # must yield the same portfolio losses
        loss4 = view('portfolio_losses', self.calc.datastore)
        self.run_calc(case_1.__file__, 'job_eb.ini', concurrent_tasks='0')
        loss0 = view('portfolio_losses', self.calc.datastore)
        self.assertEqual(loss0, loss4)
Exemple #44
0
    def test_case_master1(self):
        """
        Same checks as the case_master test but with a looser tolerance
        (1E-4) because the GMFs are OS-dependent.
        """
        # needs a large tolerance: https://github.com/gem/oq-engine/issues/5825
        # it looks like the cholesky decomposition is OS-dependent, so
        # the GMFs are different of macOS/Ubuntu20/Ubuntu18
        self.run_calc(case_master.__file__, 'job.ini', exports='csv')
        fnames = export(('avg_losses-stats', 'csv'), self.calc.datastore)
        assert fnames, 'avg_losses-stats not exported?'
        for fname in fnames:
            self.assertEqualFiles('expected/' + strip_calc_id(fname), fname,
                                  delta=1E-4)

        # check event loss table
        [fname] = export(('losses_by_event', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/' + strip_calc_id(fname), fname,
                              delta=1E-4)

        fname = gettemp(view('portfolio_losses', self.calc.datastore))
        self.assertEqualFiles(
            'expected/portfolio_losses.txt', fname, delta=1E-4)
        os.remove(fname)

        # check ruptures are stored correctly
        fname = gettemp(view('ruptures_events', self.calc.datastore))
        self.assertEqualFiles('expected/ruptures_events.txt', fname)
        os.remove(fname)

        # check losses_by_tag
        fnames = export(
            ('aggregate_by/avg_losses?tag=occupancy&kind=mean', 'csv'),
            self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_occupancy.csv', fnames[0],
                              delta=1E-4)

        self.check_multi_tag(self.calc.datastore)

        # check curves-rlzs and curves-stats are readable
        df1 = self.calc.datastore.read_df('curves-rlzs', 'assets')
        aae(df1.columns, ['rlzs', 'return_periods', 'loss_types', 'value'])

        df2 = self.calc.datastore.read_df('curves-stats', 'assets')
        aae(df2.columns, ['stats', 'return_periods', 'loss_types', 'value'])
Exemple #45
0
 def test_international_date_line_2(self):
     """Regression test from a bug affecting a New Zealand calculation."""
     path = gettemp(characteric_source)
     [[source]] = nrml.to_python(path)
     os.remove(path)
     site = Site(location=Point(176.919, -39.489),
                 vs30=760, vs30measured=True, z1pt0=100, z2pt5=5)
     srcfilter = SourceFilter(SiteCollection([site]),
                              IntegrationDistance({'default': 200}))
     # the site is close enough, so filtering must not discard the source
     self.assertIsNotNone(srcfilter.get_close_sites(source))
Exemple #46
0
 def test(self):
     """
     The src_group_id must be transferred also for single split sources;
     losing it caused hard to track bugs in the past.
     """
     path = gettemp(characteric_source)
     [[original]] = nrml.to_python(path)
     os.remove(path)
     original.id = 1
     original.src_group_id = 1
     [piece], _ = split_sources([original])
     # the split piece must carry over the identifying attributes
     for attr in ('id', 'source_id', 'src_group_id'):
         self.assertEqual(getattr(original, attr), getattr(piece, attr))
Exemple #47
0
def validate_nrml(request):
    """
    Leverage oq-risklib to check if a given XML text is a valid NRML

    :param request:
        a `django.http.HttpRequest` object containing the mandatory
        parameter 'xml_text': the text of the XML to be validated as NRML

    :returns: a JSON object, containing:
        * 'valid': a boolean indicating if the provided text is a valid NRML
        * 'error_msg': the error message, if any error was found
                       (None otherwise)
        * 'error_line': line of the given XML where the error was found
                        (None if no error was found or if it was not a
                        validation error)
    """
    xml_text = request.POST.get('xml_text')
    if not xml_text:
        return HttpResponseBadRequest(
            'Please provide the "xml_text" parameter')
    xml_file = gettemp(xml_text, suffix='.xml')
    try:
        nrml.to_python(xml_file)
    except ExpatError as exc:
        # the XML is not even well-formed: expat reports the line directly
        return _make_response(error_msg=str(exc),
                              error_line=exc.lineno,
                              valid=False)
    except Exception as exc:
        # get the exception message; some exceptions are raised with no
        # args at all, in which case exc.args[0] would raise IndexError
        exc_msg = exc.args[0] if exc.args else str(exc)
        if isinstance(exc_msg, bytes):
            exc_msg = exc_msg.decode('utf-8')  # make it a unicode object
        elif isinstance(exc_msg, str):
            pass
        else:
            # if it is another kind of object, it is not obvious a priori how
            # to extract the error line from it
            return _make_response(error_msg=str(exc_msg),
                                  error_line=None,
                                  valid=False)
        # if the line is not mentioned, the whole message is taken
        error_msg = exc_msg.split(', line')[0]
        # check if the exc_msg contains a line number indication
        search_match = re.search(r'line \d+', exc_msg)
        if search_match:
            error_line = int(search_match.group(0).split()[1])
        else:
            error_line = None
        return _make_response(error_msg=error_msg,
                              error_line=error_line,
                              valid=False)
    else:
        return _make_response(error_msg=None, error_line=None, valid=True)
 def test_international_date_line_2(self):
     """Regression test from a bug affecting a New Zealand calculation."""
     # from a bug affecting a calculation in New Zealand
     fname = gettemp(characteric_source)
     [[src]] = nrml.to_python(fname)
     os.remove(fname)
     maxdist = IntegrationDistance({'default': 200})
     sitecol = SiteCollection([
         Site(location=Point(176.919, -39.489),
              vs30=760, vs30measured=True, z1pt0=100, z2pt5=5)])
     srcfilter = SourceFilter(sitecol, maxdist)
     # the site is within the integration distance, so the filter
     # must not discard the source
     sites = srcfilter.get_close_sites(src)
     self.assertIsNotNone(sites)
Exemple #49
0
    def test_no_absolute_path(self):
        """
        A job.ini referencing an input file via an absolute path must be
        rejected by readinput.get_params with a ValueError.
        """
        temp_dir = tempfile.mkdtemp()
        # gettemp returns an absolute path, which is exactly what the
        # configuration parser must refuse in site_model_file
        site_model_input = general.gettemp(dir=temp_dir, content="foo")
        job_config = general.gettemp(dir=temp_dir, content="""
[general]
calculation_mode = event_based
[foo]
bar = baz
[site]
sites = 0 0
site_model_file = %s
maximum_distance=1
truncation_level=0
random_seed=0
intensity_measure_types = PGA
investigation_time = 50
export_dir = %s
        """ % (site_model_input, TMP))
        with self.assertRaises(ValueError) as ctx:
            readinput.get_params([job_config])
        self.assertIn('is an absolute path', str(ctx.exception))
Exemple #50
0
    def test_no_absolute_path(self):
        """
        A job.ini referencing an input file via an absolute path must be
        rejected by readinput.get_params with a ValueError.
        """
        temp_dir = tempfile.mkdtemp()
        # gettemp returns an absolute path, triggering the validation error
        site_model_input = general.gettemp(dir=temp_dir, content="foo")
        job_config = general.gettemp(dir=temp_dir, content="""
[general]
calculation_mode = event_based
[foo]
bar = baz
[site]
sites = 0 0
site_model_file = %s
maximum_distance=1
truncation_level=0
random_seed=0
intensity_measure_types = PGA
investigation_time = 50
export_dir = %s
        """ % (site_model_input, TMP))
        with self.assertRaises(ValueError) as ctx:
            readinput.get_params([job_config])
        self.assertIn('is an absolute path', str(ctx.exception))
Exemple #51
0
def validate_nrml(request):
    """
    Leverage oq-risklib to check if a given XML text is a valid NRML

    :param request:
        a `django.http.HttpRequest` object containing the mandatory
        parameter 'xml_text': the text of the XML to be validated as NRML

    :returns: a JSON object, containing:
        * 'valid': a boolean indicating if the provided text is a valid NRML
        * 'error_msg': the error message, if any error was found
                       (None otherwise)
        * 'error_line': line of the given XML where the error was found
                        (None if no error was found or if it was not a
                        validation error)
    """
    xml_text = request.POST.get('xml_text')
    if not xml_text:
        return HttpResponseBadRequest(
            'Please provide the "xml_text" parameter')
    xml_file = gettemp(xml_text, suffix='.xml')
    try:
        nrml.to_python(xml_file)
    except ExpatError as exc:
        # the XML is not even well-formed: expat reports the line directly
        return _make_response(error_msg=str(exc),
                              error_line=exc.lineno,
                              valid=False)
    except Exception as exc:
        # get the exception message; some exceptions are raised with no
        # args at all, in which case exc.args[0] would raise IndexError
        exc_msg = exc.args[0] if exc.args else str(exc)
        if isinstance(exc_msg, bytes):
            exc_msg = exc_msg.decode('utf-8')   # make it a unicode object
        elif isinstance(exc_msg, str):
            pass
        else:
            # if it is another kind of object, it is not obvious a priori how
            # to extract the error line from it
            return _make_response(
                error_msg=str(exc_msg), error_line=None, valid=False)
        # if the line is not mentioned, the whole message is taken
        error_msg = exc_msg.split(', line')[0]
        # check if the exc_msg contains a line number indication
        search_match = re.search(r'line \d+', exc_msg)
        if search_match:
            error_line = int(search_match.group(0).split()[1])
        else:
            error_line = None
        return _make_response(
            error_msg=error_msg, error_line=error_line, valid=False)
    else:
        return _make_response(error_msg=None, error_line=None, valid=True)
Exemple #52
0
    def test_case_master(self):
        """
        Run case_master three ways (single-file ebrisk, pure event_based,
        then risk on precomputed hazard) and check that the csm_info and
        the event loss tables agree.
        """
        self.run_calc(case_master.__file__, 'job.ini', insured_losses='false')
        calc0 = self.calc.datastore  # single file event_based_risk
        self.run_calc(case_master.__file__, 'job.ini', insured_losses='false',
                      calculation_mode='event_based')
        calc1 = self.calc.datastore  # event_based
        self.run_calc(case_master.__file__, 'job.ini', insured_losses='false',
                      hazard_calculation_id=str(calc1.calc_id),
                      source_model_logic_tree_file='',
                      gsim_logic_tree_file='')
        calc2 = self.calc.datastore  # two files event_based_risk

        check_csm_info(calc0, calc1)  # the csm_info arrays must be equal
        check_csm_info(calc0, calc2)  # the csm_info arrays must be equal

        # the elt comparison is not reproducible on macOS — TODO confirm why
        if sys.platform == 'darwin':
            raise unittest.SkipTest('MacOSX')

        # compare the event loss table generated by a event_based_risk
        # case_master calculation from ruptures
        f0 = gettemp(view('elt', calc0))
        self.assertEqualFiles('expected/elt.txt', f0, delta=1E-5)
        f2 = gettemp(view('elt', calc2))
        self.assertEqualFiles('expected/elt.txt', f2, delta=1E-5)
 def test_pga(self):
     """
     Parse a PGA-only shakemap and check the resulting structured array:
     coordinates, vs30, values and standard deviations per site.
     """
     imt_dt = numpy.dtype([('PGA', F32)])
     array = get_shakemap_array(gettemp(example_pga))
     n = 3  # number of sites
     self.assertEqual(len(array), n)
     self.assertEqual(array.dtype.names,
                      ('lon', 'lat', 'vs30', 'val', 'std'))
     dec = 4  # four digits
     aae(array['lon'], [13.580, 13.5883, 13.5967], dec)
     aae(array['lat'], [39.3103, 39.3103, 39.3103], dec)
     aae(array['vs30'], [603, 603, 603], dec)
     # the fixture presumably carries zero PGA values and 0.51 stddevs —
     # verify against the example_pga content if it changes
     val = numpy.zeros(n, imt_dt)
     std = numpy.array([(0.51,), (0.51,), (0.51,)], imt_dt)
     for imt in imt_dt.names:
         aae(array['val'][imt], val[imt])
         aae(array['std'][imt], std[imt])
    def test_case_6a(self):
        """
        Hazard + risk run with two GSIMs: check the agglosses export, the
        totlosses view, and that two equal gsims are rejected.
        """
        # case with two gsims
        self.run_calc(case_6a.__file__, 'job_haz.ini,job_risk.ini',
                      exports='csv')
        [f] = export(('agglosses', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/agg_structural.csv', f)

        # testing the totlosses view
        dstore = self.calc.datastore
        fname = gettemp(view('totlosses', dstore))
        self.assertEqualFiles('expected/totlosses.txt', fname)

        # two equal gsims
        with self.assertRaises(InvalidLogicTree):
            self.run_calc(case_6a.__file__, 'job_haz.ini',
                          gsim_logic_tree_file='wrong_gmpe_logic_tree.xml')
    def test_splittable_events(self):
        """
        Loss ratios computed on all events at once must equal the ratios
        computed on two event blocks, since sampling is deterministic.
        """
        # split the events in two blocks and check that the ratios are
        # same: there is no randomness in VulnerabilityFunction.sample
        vuln_model = gettemp("""\
<?xml version='1.0' encoding='utf-8'?>
<nrml xmlns="http://openquake.org/xmlns/nrml/0.4"
      xmlns:gml="http://www.opengis.net/gml">
    <vulnerabilityModel>
        <discreteVulnerabilitySet vulnerabilitySetID="PAGER"
                                  assetCategory="Category"
                                  lossCategory="structural">
            <IML IMT="PGA">0.005 0.007 0.0098 0.0137</IML>
            <discreteVulnerability vulnerabilityFunctionID="RC/A"
                                   probabilisticDistribution="LN">
                <lossRatio>0.01 0.06 0.18 0.36</lossRatio>
                <coefficientsVariation>0.30 0.30 0.30 0.30
         </coefficientsVariation>
            </discreteVulnerability>
        </discreteVulnerabilitySet>
    </vulnerabilityModel>
</nrml>""")
        vfs = {('structural', 'vulnerability'):
               nrml.to_python(vuln_model)['PGA', 'RC/A']}
        rm = riskmodels.RiskModel('event_based_risk', "RC/A", vfs,
                                  ignore_covs=False)
        assets = [0, 1]
        eids = numpy.array([1, 2, 3, 4, 5])
        gmvs = numpy.array([.1, .2, .3, .4, .5])
        epsilons = numpy.array(
            [[.01, .02, .03, .04, .05], [.001, .002, .003, .004, .005]])

        # compute the ratios by considering all the events
        ratios = rm('structural', assets, gmvs, eids, epsilons)
        numpy.testing.assert_allclose(ratios, self.expected_ratios)

        # split the events in two blocks
        eids1 = numpy.array([1, 2])
        eids2 = numpy.array([3, 4, 5])
        gmvs1 = numpy.array([.1, .2])
        gmvs2 = numpy.array([.3, .4, .5])
        eps1 = numpy.array([[.01, .02], [.001, .002]])
        eps2 = numpy.array([[.03, .04, .05], [.003, .004, .005]])
        ratios1 = rm('structural', assets, gmvs1, eids1, eps1)
        ratios2 = rm('structural', assets, gmvs2, eids2, eps2)
        # block results must match the corresponding slices of the full run
        numpy.testing.assert_allclose(ratios1, self.expected_ratios[:, :2])
        numpy.testing.assert_allclose(ratios2, self.expected_ratios[:, 2:])
    def test_invalid(self):
        """
        tidy on a GMF file with a negative gmv must report the offending
        value and its line number instead of converting it.
        """
        fname = gettemp('''\
<?xml version="1.0" encoding="utf-8"?>
<nrml xmlns="http://openquake.org/xmlns/nrml/0.4"
      xmlns:gml="http://www.opengis.net/gml">
<gmfCollection gsimTreePath="" sourceModelTreePath="">
  <gmfSet stochasticEventSetId="1">
    <gmf IMT="PGA" ruptureId="0">
      <node gmv="0.012646" lon="12.12477995" lat="43.5812"/>
      <node gmv="-0.012492" lon="12.12478193" lat="43.5812"/>
    </gmf>
  </gmfSet>
</gmfCollection>
</nrml>''', suffix='.xml')
        with Print.patch() as p:
            tidy([fname])
        self.assertIn('Could not convert gmv->positivefloat: '
                      'float -0.012492 < 0, line 8', str(p))
Exemple #57
0
    def test_two_nodes_on_the_same_point(self):
        """
        Two GMF nodes collapsing onto the same point after coordinate
        rounding must raise InvalidFile.
        """
        # after rounding of the coordinates two points can collide
        fname = general.gettemp('''\
<?xml version="1.0" encoding="utf-8"?>
<nrml xmlns="http://openquake.org/xmlns/nrml/0.4"
      xmlns:gml="http://www.opengis.net/gml">
<gmfCollection gsimTreePath="" sourceModelTreePath="">
  <gmfSet stochasticEventSetId="1">
    <gmf IMT="PGA" ruptureId="0">
      <node gmv="0.0126515007046" lon="12.12477995" lat="43.5812"/>
      <node gmv="0.0124056290492" lon="12.12478193" lat="43.5812"/>
    </gmf>
  </gmfSet>
</gmfCollection>
</nrml>''')
        self.oqparam.imtls = {'PGA': None}
        with self.assertRaises(readinput.InvalidFile) as ctx:
            readinput.get_scenario_from_nrml(self.oqparam, fname)
        self.assertIn("Expected 1 sites, got 2 nodes in", str(ctx.exception))
Exemple #58
0
    def test_event_based_risk_sampling(self):
        """Disabled: exercised the ucerf_risk fast calculator, now removed."""
        # the fast calculator ucerf_risk
        raise unittest.SkipTest('ucerf_risk has been removed')
        # NOTE: everything below is unreachable; kept for reference in case
        # the calculator is ever restored
        self.run_calc(ucerf.__file__, 'job_ebr.ini',
                      number_of_logic_tree_samples='2')

        # check the right number of events was stored
        self.assertEqual(len(self.calc.datastore['events']), 79)

        fname = gettemp(view('portfolio_loss', self.calc.datastore))
        self.assertEqualFiles(
            'expected/portfolio_loss2.txt', fname, delta=1E-5)

        # check the mean losses_by_period
        [fname] = export(('agg_curves-stats', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/losses_by_period2-mean.csv', fname)

        # make sure this runs
        view('fullreport', self.calc.datastore)
Exemple #59
0
def submit_job(job_ini, username, hazard_job_id=None):
    """
    Create a job object from the given job.ini file in the job directory
    and run it in a new process. Returns the job ID and PID.

    :param job_ini: path to a job.ini configuration file
    :param username: name of the user submitting the job
    :param hazard_job_id: ID of a previous hazard calculation, if any
    :returns: a pair (job_id, pid) of the spawned calculation process
    """
    job_id = logs.init('job')
    oq = engine.job_from_file(
        job_ini, job_id, username, hazard_calculation_id=hazard_job_id)
    # the parameters are embedded in the generated script as pickled text
    pik = pickle.dumps(oq, protocol=0)  # human readable protocol
    code = RUNCALC % dict(job_id=job_id, hazard_job_id=hazard_job_id, pik=pik,
                          username=username)
    tmp_py = gettemp(code, suffix='.py')
    # print(code, tmp_py)  # useful when debugging
    devnull = subprocess.DEVNULL
    # run the calculation in a detached subprocess with all streams silenced
    popen = subprocess.Popen([sys.executable, tmp_py],
                             stdin=devnull, stdout=devnull, stderr=devnull)
    # wait for the child in a background thread so it does not become a zombie
    threading.Thread(target=popen.wait).start()
    logs.dbcmd('update_job', job_id, {'pid': popen.pid})
    return job_id, popen.pid
    def test_case_master(self):
        """
        Run case_master with two GSIMs and npz exports; check the
        realizations export and the tag-filtered agg_losses extractor.
        """
        # a case with two GSIMs
        self.run_calc(case_master.__file__, 'job.ini', exports='npz')

        # check realizations
        [fname] = export(('realizations', 'csv'), self.calc.datastore)
        self.assertEqualFiles('expected/realizations.csv', fname)

        # check losses by taxonomy
        agglosses = extract(self.calc.datastore, 'agg_losses/structural?'
                            'taxonomy=*').array  # shape (T, R) = (3, 2)
        self.assertEqualFiles('expected/agglosses_taxo.txt',
                              gettemp(str(agglosses)))

        # extract agglosses with a * and a selection
        obj = extract(self.calc.datastore, 'agg_losses/structural?'
                      'state=*&cresta=0.11')
        self.assertEqual(obj.selected, [b'state=*', b'cresta=0.11'])
        self.assertEqual(obj.tags, [b'state=01'])
        aac(obj.array, [[2493.7097, 2943.6640]])