Example #1
def HAZARD_MAP_QUANTILE_DATA():
    return [
        (Site(-121.7, 37.6),
         {'IML': 1.9266716959669603,
          'IMT': 'PGA',
          'investigationTimeSpan': '50.0',
          'poE': 0.01,
          'statistics': 'quantile',
          'quantileValue': 0.2,
          'vs30': 760.0}),
        (Site(-121.8, 38.0),
         {'IML': 1.9352164637194078,
          'IMT': 'PGA',
          'investigationTimeSpan': '50.0',
          'poE': 0.01,
          'statistics': 'quantile',
          'quantileValue': 0.2,
          'vs30': 760.0}),
        (Site(-122.1, 37.8),
         {'IML': 1.9459475420737888,
          'IMT': 'PGA',
          'investigationTimeSpan': '50.0',
          'poE': 0.01,
          'statistics': 'quantile',
          'quantileValue': 0.2,
          'vs30': 760.0}),
        (Site(-121.9, 37.7),
         {'IML': 1.9566716959669603,
          'IMT': 'PGA',
          'investigationTimeSpan': '50.0',
          'poE': 0.01,
          'statistics': 'quantile',
          'quantileValue': 0.2,
          'vs30': 760.0})]
Example #2
def GMF_DATA():
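    # Four neighbouring sites on a one-degree grid, each mapped to a single
    # ground-motion value.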
    return {
        Site(-117, 40): {'groundMotion': 0.0},
        Site(-116, 40): {'groundMotion': 0.1},
        Site(-116, 41): {'groundMotion': 0.2},
        Site(-117, 41): {'groundMotion': 0.3},
    }
Example #3
    def test_compute_uhs_task_pi_failure_counter(self):
        # Same as the previous test, except that we want to make sure task
        # failure counters are properly incremented if a task fails.

        cmpt_uhs = '%s.%s' % (self.UHS_CORE_MODULE, 'compute_uhs')
        with helpers.patch(cmpt_uhs) as compute_mock:

            # We want to force a failure to occur in the task:
            compute_mock.side_effect = RuntimeError('Mock exception')

            get_counter = lambda: stats.pk_get(self.job_id, "nhzrd_failed")

            # The counter should start out empty:
            self.assertEqual(0, get_counter())

            self.assertRaises(RuntimeError,
                              compute_uhs_task,
                              self.job_id,
                              0,
                              site=Site(0.0, 0.0))
            self.assertEqual(1, get_counter())

            # Create two more failures:
            self.assertRaises(RuntimeError,
                              compute_uhs_task,
                              self.job_id,
                              0,
                              site=Site(0.0, 0.0))
            self.assertRaises(RuntimeError,
                              compute_uhs_task,
                              self.job_id,
                              0,
                              site=Site(0.0, 0.0))
            self.assertEqual(3, get_counter())
Example #4
    def test_serialize_scenario(self):
        """
        All the records for scenario loss maps are inserted correctly.
        """

        output = self.writer.output

        # Call the function under test.
        data = SAMPLE_SCENARIO_LOSS_MAP_DATA
        self.writer.serialize(data)

        # Output record
        self.assertEqual(1, len(self.job.output_set.all()))
        output = self.job.output_set.get()
        self.assertTrue(output.db_backed)
        self.assertTrue(output.path is None)
        self.assertEqual(self.display_name, output.display_name)
        self.assertEqual("loss_map", output.output_type)

        # LossMap record
        self.assertEqual(1, len(output.lossmap_set.all()))
        metadata = output.lossmap_set.get()
        self.assertEqual(SCENARIO_LOSS_MAP_METADATA['scenario'],
                         metadata.scenario)
        self.assertEqual(SCENARIO_LOSS_MAP_METADATA['endBranchLabel'],
                         metadata.end_branch_label)
        self.assertEqual(SCENARIO_LOSS_MAP_METADATA['lossCategory'],
                         metadata.category)
        self.assertEqual(SCENARIO_LOSS_MAP_METADATA['unit'], metadata.unit)
        self.assertEqual(None, metadata.poe)

        # LossMapData records
        self.assertEqual(3, len(metadata.lossmapdata_set.all()))
        [data_a, data_b, data_c] = sorted(metadata.lossmapdata_set.all(),
                                          key=lambda d: d.id)

        self.assertEqual(SITE_A, Site(*data_a.location.coords))

        self.assertEqual(SAMPLE_SCENARIO_LOSS_MAP_DATA[1][1][0][1]['assetID'],
                         data_a.asset_ref)
        # self.assertEqual(self.asset_a_1.asset_ref, data_a.asset_ref)
        self.assertEqual(SITE_A_SCENARIO_LOSS_ONE['mean_loss'], data_a.value)
        self.assertEqual(SITE_A_SCENARIO_LOSS_ONE['stddev_loss'],
                         data_a.std_dev)

        self.assertEqual(SITE_A, Site(*data_b.location.coords))
        self.assertEqual(SAMPLE_SCENARIO_LOSS_MAP_DATA[1][1][1][1]['assetID'],
                         data_b.asset_ref)
        self.assertEqual(SITE_A_SCENARIO_LOSS_TWO['mean_loss'], data_b.value)
        self.assertEqual(SITE_A_SCENARIO_LOSS_TWO['stddev_loss'],
                         data_b.std_dev)

        self.assertEqual(SITE_B, Site(*data_c.location.coords))
        self.assertEqual(SAMPLE_SCENARIO_LOSS_MAP_DATA[2][1][0][1]['assetID'],
                         data_c.asset_ref)
        self.assertEqual(SITE_B_SCENARIO_LOSS_ONE['mean_loss'], data_c.value)
        self.assertEqual(SITE_B_SCENARIO_LOSS_ONE['stddev_loss'],
                         data_c.std_dev)
Example #5
    def setUp(self):
        path = os.path.join(helpers.SCHEMA_EXAMPLES_DIR, "LCB-exposure.yaml")
        inputs = [("exposure", path)]
        self.job = self.setup_classic_job(inputs=inputs)

        [input] = models.inputs4job(self.job.id,
                                    input_type="exposure",
                                    path=path)
        owner = models.OqUser.objects.get(user_name="openquake")
        emdl = input.model()
        if not emdl:
            emdl = models.ExposureModel(owner=owner,
                                        input=input,
                                        description="LCB exposure model",
                                        category="LCB cars",
                                        stco_unit="peanuts",
                                        stco_type="aggregated")
            emdl.save()

        asset_data = [
            (Site(-118.077721, 33.852034), {
                u'stco': 5.07,
                u'asset_ref': u'a5625',
                u'taxonomy': u'HAZUS_RM1L_LC'
            }),
            (Site(-118.077721, 33.852034), {
                u'stco': 5.63,
                u'asset_ref': u'a5629',
                u'taxonomy': u'HAZUS_URML_LC'
            }),
            (Site(-118.077721, 33.852034), {
                u'stco': 11.26,
                u'asset_ref': u'a5630',
                u'taxonomy': u'HAZUS_URML_LS'
            }),
            (Site(-118.077721, 33.852034), {
                u'stco': 5.5,
                u'asset_ref': u'a5636',
                u'taxonomy': u'HAZUS_C3L_MC'
            }),
        ]
        for idx, (site, adata) in enumerate(asset_data):
            location = GEOSGeometry(site.point.to_wkt())
            asset = models.ExposureData(exposure_model=emdl,
                                        site=location,
                                        **adata)
            asset.save()
            RISK_LOSS_CURVE_DATA[idx][1][1] = asset

        output_path = self.generate_output_path(self.job)
        self.display_name = os.path.basename(output_path)

        self.writer = LossCurveDBWriter(output_path, self.job.id)
        self.reader = LossCurveDBReader()
Example #6
    def test_site_keys(self):
        """Verify _sites_to_gmf_keys"""
        params = {
            'REGION_VERTEX': '40,-117, 42,-117, 42,-116, 40,-116',
            'REGION_GRID_SPACING': '1.0'
        }

        the_job = helpers.create_job(params, job_id=self.job.id)
        calculator = EventBasedRiskCalculator(the_job)

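        # _sites_to_gmf_keys maps each site to its position in the region
        # grid, encoded as a 'row!column' key.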
        keys = calculator._sites_to_gmf_keys([Site(-117, 40), Site(-116, 42)])

        self.assertEqual(["0!0", "2!1"], keys)
Example #7
    def test_read_curve(self):
        """Verify _get_db_curve."""
        the_job = helpers.create_job({}, job_id=self.job.id)
        calculator = ClassicalRiskCalculator(the_job)

        curve1 = calculator._get_db_curve(Site(-122.2, 37.5))
        self.assertEqual(list(curve1.abscissae),
                         [0.005, 0.007, 0.0098, 0.0137])
        self.assertEqual(list(curve1.ordinates), [0.354, 0.114, 0.023, 0.002])

        curve2 = calculator._get_db_curve(Site(-122.1, 37.5))
        self.assertEqual(list(curve2.abscissae),
                         [0.005, 0.007, 0.0098, 0.0137])
        self.assertEqual(list(curve2.ordinates), [0.454, 0.214, 0.123, 0.102])
Example #8
    def test_read_gmfs(self):
        """Verify _get_db_gmfs."""
        params = {
            'REGION_VERTEX': '40,-117, 42,-117, 42,-116, 40,-116',
            'REGION_GRID_SPACING': '1.0'
        }

        the_job = helpers.create_job(params, job_id=self.job.id)
        calculator = EventBasedRiskCalculator(the_job)

        self.assertEqual(3, len(calculator._gmf_db_list(self.job.id)))

        # only the keys in gmfs are used
        gmfs = calculator._get_db_gmfs([], self.job.id)
        self.assertEqual({}, gmfs)

        # build sites covering the whole region grid
        sites = [
            Site(lon, lat) for lon in xrange(-117, -115)
            for lat in xrange(40, 43)
        ]
        gmfs = calculator._get_db_gmfs(sites, self.job.id)
        # avoid rounding errors
        for k, v in gmfs.items():
            gmfs[k] = [round(i, 1) for i in v]

        self.assertEqual(
            {
                '0!0': [0.1, 0.5, 0.0],
                '0!1': [0.2, 0.6, 0.0],
                '1!0': [0.4, 0.8, 1.3],
                '1!1': [0.3, 0.7, 1.2],
                '2!0': [0.0, 0.0, 1.0],
                '2!1': [0.0, 0.0, 1.1],
            }, gmfs)
Example #9
    def test_uhs(self):
        # Kick off the engine and run the UHS demo job.
        # When that's done, query the database and check the UHS results.

        exp_results = self._load_expected_results()
        exp_site = Site(0.0, 0.0)  # This calculation operates on a single site

        run_job(self.UHS_DEMO_CONFIG)

        job = OqJob.objects.latest('id')

        uh_spectra = UhSpectra.objects.get(output__oq_job=job.id)

        self.assertEqual(1, uh_spectra.realizations)

        for poe, data in exp_results.items():
            uh_spectrum = UhSpectrum.objects.get(poe=poe,
                                                 uh_spectra=uh_spectra.id)
            uh_spectrum_data = UhSpectrumData.objects.get(
                uh_spectrum=uh_spectrum.id)

            self.assertTrue(
                numpy.allclose(data['sa_values'], uh_spectrum_data.sa_values))
            self.assertTrue(numpy.allclose(data['periods'],
                                           uh_spectra.periods))

            self.assertEqual(0, uh_spectrum_data.realization)
            self.assertEqual(exp_site.point.to_wkt(),
                             uh_spectrum_data.location.wkt)
Example #10
    def setUp(self):
        inputs = [("fragility", ""), ("exposure", "")]
        self.job = self.setup_classic_job(inputs=inputs)

        kvs.mark_job_as_current(self.job.id)
        kvs.cache_gc(self.job.id)

        self.site = Site(1.0, 1.0)
        block = Block(self.job.id, BLOCK_ID, [self.site])
        block.to_kvs()

        # this region contains a single site, that is exactly
        # a site with longitude == 1.0 and latitude == 1.0
        params = {"REGION_VERTEX": "1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0",
                "REGION_GRID_SPACING": "0.5", "BASE_PATH": ".",
                "OUTPUT_DIR": "."}

        self.job_ctxt = JobContext(params, self.job.id, oq_job=self.job)

        self.em = self._store_em()
        self._store_gmvs([0.40, 0.30, 0.45, 0.35, 0.40])

        self.calculator = ScenarioDamageRiskCalculator(self.job_ctxt)

        # just stubbing out some preprocessing stuff...
        ScenarioDamageRiskCalculator.store_exposure_assets = lambda self: None
        ScenarioDamageRiskCalculator.store_fragility_model = lambda self: None
        ScenarioDamageRiskCalculator.partition = lambda self: None
Example #11
    def test_get_gmvs_at(self):
        params = {
            "REGION_VERTEX": "40,-117.5, 42,-117.5, 42,-116, 40,-116",
            "REGION_GRID_SPACING": "1.0"
        }

        the_job = helpers.create_job(params, job_id=self.job.id)
        calculator = core.EventBasedRiskCalculator(the_job)

        self.assertEqual([0.1, 0.5, 1.0],
                         calculator._get_gmvs_at(Site(-117, 40)))

        self.assertEqual([0.2, 0.6, 1.1],
                         calculator._get_gmvs_at(Site(-117, 41)))

        self.assertEqual([], calculator._get_gmvs_at(Site(-117.5, 40)))
Example #12
    def test_compute_uhs_with_site_model(self):
        the_job = helpers.prepare_job_context(
            helpers.demo_file('uhs/config_with_site_model.gem'))
        the_job.to_kvs()

        site = Site(0, 0)

        helpers.store_hazard_logic_trees(the_job)

        get_sm_patch = helpers.patch(
            'openquake.calculators.hazard.general.get_site_model')
        get_closest_patch = helpers.patch(
            'openquake.calculators.hazard.general.get_closest_site_model_data')
        compute_patch = helpers.patch(
            'openquake.calculators.hazard.uhs.core._compute_uhs')

        get_sm_mock = get_sm_patch.start()
        get_closest_mock = get_closest_patch.start()
        compute_mock = compute_patch.start()

        get_closest_mock.return_value = SiteModel(
            vs30=800, vs30_type='measured', z1pt0=100, z2pt5=200)
        try:
            compute_uhs(the_job, site)

            self.assertEqual(1, get_sm_mock.call_count)
            self.assertEqual(1, get_closest_mock.call_count)
            self.assertEqual(1, compute_mock.call_count)
        finally:
            get_sm_patch.stop()
            get_closest_patch.stop()
            compute_patch.stop()
Example #13
    def test_compute_uhs_task_pi(self):
        # Test that progress indicators are working properly for
        # `compute_uhs_task`.

        # Mock out the two 'heavy' functions called by this task;
        # we don't need to do these and we don't want to waste the cycles.
        cmpt_uhs = '%s.%s' % (self.UHS_CORE_MODULE, 'compute_uhs')
        write_uhs_data = '%s.%s' % (self.UHS_CORE_MODULE,
                                    'write_uhs_spectrum_data')
        with helpers.patch(cmpt_uhs):
            with helpers.patch(write_uhs_data):

                get_counter = lambda: stats.get_counter(
                    self.job_id, 'h', 'compute_uhs_task', 'i')

                # First, check that the counter for `compute_uhs_task` is
                # `None`:
                self.assertIsNone(get_counter())

                realization = 0
                site = Site(0.0, 0.0)
                # execute the task as a plain old function
                compute_uhs_task(self.job_id, realization, site)
                self.assertEqual(1, get_counter())

                compute_uhs_task(self.job_id, realization, site)
                self.assertEqual(2, get_counter())
Example #14
    def test_write_uhs_spectrum_data(self):
        # Test `write_uhs_spectrum_data`.

        # To start with, we need to write the 'container' records for the UHS
        # results:
        write_uh_spectra(self.job_ctxt)

        uhs_results = []  # The results we want to write to HDF5
        uhs_result = java.jvm().JClass('org.gem.calc.UHSResult')

        # Build up a result list that we can pass to the function under test:
        for poe, uhs in self.UHS_RESULTS:
            uhs_results.append(uhs_result(poe, list_to_jdouble_array(uhs)))

        realization = 0
        test_site = Site(0.0, 0.0)

        # Call the function under test
        write_uhs_spectrum_data(
            self.job_ctxt, realization, test_site, uhs_results)

        uhs_data = UhSpectrumData.objects.filter(
            uh_spectrum__uh_spectra__output__oq_job=self.job.id)

        self.assertEqual(len(self.UHS_RESULTS), len(uhs_data))
        self.assertTrue(all([x.realization == 0 for x in uhs_data]))

        uhs_results_dict = dict(self.UHS_RESULTS)  # keyed by PoE
        for uhs_datum in uhs_data:
            self.assertTrue(
                numpy.allclose(uhs_results_dict[uhs_datum.uh_spectrum.poe],
                               uhs_datum.sa_values))
            self.assertEqual(test_site.point.to_wkt(), uhs_datum.location.wkt)
Example #15
    def test_serialize(self):
        """serialize() inserts the output and the gmf_data records."""
        # This job has no outputs before calling the function under test.
        self.assertEqual(0, len(self.job.output_set.all()))

        # Call the function under test.
        self.writer.serialize(GMF_DATA())

        # Reload job row.
        self.job = models.OqJob.objects.get(id=self.job.id)
        # After calling the function under test we see the expected output.
        self.assertEqual(1, len(self.job.output_set.all()))

        # After calling the function under test we see the expected map data.
        output = self.job.output_set.get()
        self.assertEqual(0, len(output.hazardcurve_set.all()))
        self.assertEqual(0, len(output.lossmap_set.all()))
        self.assertEqual(4, len(output.gmfdata_set.all()))

        # read data from the DB and check that it's equal to the original data
        inserted_data = []

        for gmfd in output.gmfdata_set.all():
            location = gmfd.location
            inserted_data.append((Site(location.x, location.y),
                                  {'groundMotion': gmfd.ground_motion}))

        self.assertEquals(self.normalize(GMF_DATA().items()),
                          self.normalize(inserted_data))
Example #16
def HAZARD_CURVE_DATA():
    return [
        (Site(-122.2, 37.5), {
            'investigationTimeSpan': '50.0',
            'IMLValues': [0.778, 1.09, 1.52, 2.13],
            'PoEValues': [0.354, 0.114, 0.023, 0.002],
            'IMT': 'PGA',
            'endBranchLabel': '1'
        }),
        (Site(-122.1, 37.5), {
            'investigationTimeSpan': '50.0',
            'IMLValues': [0.778, 1.09, 1.52, 2.13],
            'PoEValues': [0.454, 0.214, 0.123, 0.102],
            'IMT': 'PGA',
            'endBranchLabel': '1'
        }),
    ]
Example #17
def GMF_DATA(r1, r2):
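    # Build an r1-by-r2 grid of sites starting at lon -179, lat -90, each
    # with a zero ground-motion value.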
    data = {}

    for lon in xrange(-179, -179 + r1):
        for lat in xrange(-90, -90 + r2):
            data[Site(lon, lat)] = {'groundMotion': 0.0}

    return data
Example #18
def MEAN_CURVE_DATA():
    return [
        (Site(-122.2, 37.5), {
            'investigationTimeSpan': '50.0',
            'IMLValues': [0.778, 1.09, 1.52, 2.13],
            'PoEValues': [0.354, 0.114, 0.023, 0.002],
            'IMT': 'PGA',
            'statistics': 'mean'
        }),
        (Site(-122.1, 37.5), {
            'investigationTimeSpan': '50.0',
            'IMLValues': [0.778, 1.09, 1.52, 2.13],
            'PoEValues': [0.454, 0.214, 0.123, 0.102],
            'IMT': 'PGA',
            'statistics': 'mean'
        }),
    ]
Example #19
def QUANTILE_CURVE_DATA():
    return [
        (Site(-122.2, 37.5), {
            'investigationTimeSpan': '50.0',
            'IMLValues': [0.778, 1.09, 1.52, 2.13],
            'PoEValues': [0.354, 0.114, 0.023, 0.002],
            'IMT': 'PGA',
            'statistics': 'quantile',
            'quantileValue': 0.25
        }),
        (Site(-122.1, 37.5), {
            'investigationTimeSpan': '50.0',
            'IMLValues': [0.778, 1.09, 1.52, 2.13],
            'PoEValues': [0.454, 0.214, 0.123, 0.102],
            'IMT': 'PGA',
            'statistics': 'quantile',
            'quantileValue': 0.25
        }),
    ]
Example #20
    def setUp(self):
        self.job = self.setup_classic_job()

        gmfs = [{
            Site(-117, 40): {
                "groundMotion": 0.1
            },
            Site(-117, 41): {
                "groundMotion": 0.2
            },
        }, {
            Site(-117, 40): {
                "groundMotion": 0.5
            },
            Site(-117, 41): {
                "groundMotion": 0.6
            },
        }, {
            Site(-117, 40): {
                "groundMotion": 1.0
            },
            Site(-117, 41): {
                "groundMotion": 1.1
            },
        }]

        for gmf in gmfs:
            output_path = self.generate_output_path(self.job)
            hcw = GmfDBWriter(output_path, self.job.id)
            hcw.serialize(gmf)
Example #21
def HAZARD_CURVE_DATA(branches, r1, r2):
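    # For every site on an r1-by-r2 grid, emit one hazard curve per end-branch
    # label plus a mean curve, all sharing the same flat IML/PoE values.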
    data = []
    poes = imls = [0.1] * 20

    for lon in xrange(-179, -179 + r1):
        for lat in xrange(-90, -90 + r2):
            for branch in branches:
                data.append((Site(lon, lat),
                             {'investigationTimeSpan': '50.0',
                              'IMLValues': imls,
                              'PoEValues': poes,
                              'IMT': 'PGA',
                              'endBranchLabel': branch}))

            data.append((Site(lon, lat),
                         {'investigationTimeSpan': '50.0',
                          'IMLValues': imls,
                          'PoEValues': poes,
                          'IMT': 'PGA',
                          'statistics': 'mean'}))

    return data
Example #22
def LOSS_MAP_DATA(assets, r1, r2):
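    # The first element carries the map metadata; every following element
    # pairs a Site with one (loss, asset) tuple per asset in `assets`.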
    data = [{'scenario': True}]

    for lon in xrange(-179, -179 + r1):
        for lat in xrange(-90, -90 + r2):
            data.append((Site(lon, lat), []))

            for asset in assets:
                data[-1][-1].append(({'mean_loss': 120000.0,
                                      'stddev_loss': 2000.0},
                                     {'assetID': asset}))

    return data
Example #23
def HAZARD_MAP_DATA(r1, r2):
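    # One mean hazard-map node per site on an r1-by-r2 grid, all sharing the
    # same IML, PoE and vs30 values.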
    data = []

    for lon in xrange(-179, -179 + r1):
        for lat in xrange(-90, -90 + r2):
            data.append((Site(lon, lat),
                         {'IML': 1.9266716959669603,
                          'IMT': 'PGA',
                          'investigationTimeSpan': '50.0',
                          'poE': 0.01,
                          'statistics': 'mean',
                          'vs30': 760.0}))

    return data
Example #24
    def test_compute_uhs_task_calls_compute_and_write(self):
        # The celery task `compute_uhs_task` basically just calls a few other
        # functions to do the calculation and write results. Those functions
        # have their own test coverage; in this test, we just want to make
        # sure they get called.

        cmpt_uhs = '%s.%s' % (self.UHS_CORE_MODULE, 'compute_uhs')
        write_uhs_data = '%s.%s' % (self.UHS_CORE_MODULE,
                                    'write_uhs_spectrum_data')
        with helpers.patch(cmpt_uhs) as compute_mock:
            with helpers.patch(write_uhs_data) as write_mock:
                # Call the function under test as a normal function, not a
                # @task:
                compute_uhs_task(self.job_id, 0, Site(0.0, 0.0))

                self.assertEqual(1, compute_mock.call_count)
                self.assertEqual(1, write_mock.call_count)
Example #25
def LOSS_CURVE_DATA(r1, r2):
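    # Pair every site on an r1-by-r2 grid with a flat loss curve and a fixed
    # set of asset attributes.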
    data = []
    poes = imls = [0.1] * 20

    for lon in xrange(-179, -179 + r1):
        for lat in xrange(-90, -90 + r2):
            data.append((Site(lon, lat),
                         (Curve(zip(imls, poes)),
                          {'assetValue': 5.07,
                           'assetID': 'a5625',
                           'listDescription': 'Collection of exposure values',
                           'structureCategory': 'RM1L',
                           'lon': -118.077721,
                           'taxonomy': 'HAZUS_RM1L_LC',
                           'listID': 'LA01',
                           'assetValueUnit': 'EUR',
                           'lat': 33.852034})))

    return data
Example #26
    def test_serialize(self):
        """serialize() inserts the output and the hazard_map_data records."""
        # This job has no outputs before calling the function under test.
        self.assertEqual(0, len(self.job.output_set.all()))

        for hcd in HAZARD_CURVE_DATA():
            output_path = self.generate_output_path(self.job,
                                                    output_type="hazard_curve")
            writer = HazardCurveDBWriter(output_path, self.job.id)
            # Call the function under test.
            writer.serialize(hcd)

        # After calling the function under test we see the expected output.
        self.job = models.OqJob.objects.get(id=self.job.id)
        self.assertEqual(4, len(self.job.output_set.all()))

        # After calling the function under test we see the expected map data.
        hcs = models.HazardCurve.objects.filter(output__oq_job=self.job)
        self.assertEqual(4, len(hcs))

        # read data from the DB and check that it's equal to the original data

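        # Map each end-branch label to its index in HAZARD_CURVE_DATA();
        # curves stored with a statistics label (no end-branch label) live at
        # index 3.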
        ebl2index = {"1_1": 0, "1_2": 1, "2": 2}
        for hc in hcs:
            indb = []
            ebl = hc.end_branch_label
            hdidx = 3 if ebl is None else ebl2index[ebl]
            for hcd in hc.hazardcurvedata_set.all():
                location = hcd.location
                node = (Site(location.x, location.y),
                        {'PoEValues': hcd.poes})
                if hc.end_branch_label:
                    node[1]['endBranchLabel'] = hc.end_branch_label
                else:
                    node[1]['statistics'] = hc.statistic_type
                    if hc.quantile is not None:
                        node[1]['quantileValue'] = hc.quantile

                indb.append(node)
            self.assertEquals(self.normalize(HAZARD_CURVE_DATA()[hdidx]),
                              self.normalize(indb))
Example #27
    def test_serialize(self):
        """All the records are inserted correctly."""
        output = self.writer.output

        # Call the function under test.
        self.writer.serialize(RISK_LOSS_CURVE_DATA)

        # output record
        self.assertEqual(1, len(self.job.output_set.all()))

        output = self.job.output_set.get()
        self.assertTrue(output.db_backed)
        self.assertTrue(output.path is None)
        self.assertEqual(self.display_name, output.display_name)
        self.assertEqual("loss_curve", output.output_type)

        # loss curve record
        self.assertEqual(1, len(output.losscurve_set.all()))

        loss_curve = output.losscurve_set.get()

        self.assertEqual(loss_curve.unit, "peanuts")
        self.assertEqual(loss_curve.end_branch_label, None)
        self.assertEqual(loss_curve.category, "LCB cars")

        # loss curve data records
        self.assertEqual(4, len(loss_curve.losscurvedata_set.all()))

        inserted_data = []

        for lcd in loss_curve.losscurvedata_set.all():
            loc = lcd.location

            data = (Site(loc.x, loc.y), (Curve(zip(lcd.losses, lcd.poes)), {
                u'assetID': lcd.asset_ref
            }))

            inserted_data.append(data)

        self.assertEquals(self.normalize(RISK_LOSS_CURVE_DATA),
                          self.normalize(inserted_data))
Example #28
    def test_compute_uhs(self):
        # Test the :function:`openquake.hazard.uhs.core.compute_uhs`
        # function. This function makes use of the Java `UHSCalculator` and
        # performs the main UHS computation.

        # The results of the computation are a sequence of Java `UHSResult`
        # objects.
        the_job = helpers.job_from_file(UHS_DEMO_CONFIG_FILE)

        site = Site(0.0, 0.0)

        helpers.store_hazard_logic_trees(the_job)

        uhs_results = compute_uhs(the_job, site)

        for i, result in enumerate(uhs_results):
            poe = result.getPoe()
            uhs = result.getUhs()

            self.assertEquals(self.UHS_RESULTS[i][0], poe)
            self.assertTrue(numpy.allclose(self.UHS_RESULTS[i][1],
                                           [x.value for x in uhs]))
Example #29
    def test_serialize(self):
        """serialize() inserts the output and the hazard_map_data records."""
        # This job has no outputs before calling the function under test.
        self.assertEqual(0, len(self.job.output_set.all()))

        # Call the function under test.
        self.writer.serialize(HAZARD_CURVE_DATA())

        # After calling the function under test we see the expected output.
        self.job = models.OqJob.objects.get(id=self.job.id)
        self.assertEqual(1, len(self.job.output_set.all()))

        # After calling the function under test we see the expected map data.
        output = self.job.output_set.get()
        self.assertEqual(4, len(output.hazardcurve_set.all()))
        self.assertEqual(0, len(output.lossmap_set.all()))

        # read data from the DB and check that it's equal to the original data
        inserted_data = []

        for hc in output.hazardcurve_set.all():
            for hcd in hc.hazardcurvedata_set.all():
                location = hcd.location
                node = (Site(location.x, location.y),
                        {'PoEValues': hcd.poes})
                if hc.end_branch_label:
                    node[1]['endBranchLabel'] = hc.end_branch_label
                else:
                    node[1]['statistics'] = hc.statistic_type
                    if hc.quantile is not None:
                        node[1]['quantileValue'] = hc.quantile

                inserted_data.append(node)

        self.assertEquals(self.normalize(HAZARD_CURVE_DATA()),
                          self.normalize(inserted_data))
Example #30
def HAZARD_CURVE_DATA():
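    # Four groups of curve nodes: end-branch labels '1_1', '1_2' and '2',
    # plus a 0.6-quantile statistics group.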
    return [
        [(Site(-122.2, 37.5),
          {'investigationTimeSpan': '50.0',
           'IMLValues': [0.778, 1.09, 1.52, 2.13],
           'PoEValues': [0.354, 0.114, 0.023, 0.002],
           'IMT': 'PGA',
           'endBranchLabel': '1_1'}),
         (Site(-122.0, 37.5),
          {'investigationTimeSpan': '50.0',
           'IMLValues': [0.778, 1.09, 1.52, 2.13],
           'PoEValues': [0.354, 0.114, 0.023, 0.002],
           'IMT': 'PGA',
           'endBranchLabel': '1_1'})],
        [(Site(-122.1, 37.5),
          {'investigationTimeSpan': '50.0',
           'IMLValues': [0.778, 1.09, 1.52, 2.13],
           'PoEValues': [0.354, 0.114, 0.023, 0.002],
           'IMT': 'PGA',
           'endBranchLabel': '1_2'})],
        [(Site(-121.9, 37.5),
          {'investigationTimeSpan': '50.0',
           'IMLValues': [0.778, 1.09, 1.52, 2.13],
           'PoEValues': [0.354, 0.114, 0.023, 0.002],
           'IMT': 'PGA',
           'endBranchLabel': '2'})],
        [(Site(-122.0, 37.5),
          {'investigationTimeSpan': '50.0',
           'IMLValues': [0.778, 1.09, 1.52, 2.13],
           'PoEValues': [0.354, 0.114, 0.023, 0.002],
           'IMT': 'PGA',
           'quantileValue': 0.6,
           'statistics': 'quantile'}),
         (Site(-122.1, 37.5),
          {'investigationTimeSpan': '50.0',
           'IMLValues': [0.778, 1.09, 1.52, 2.13],
           'PoEValues': [0.354, 0.114, 0.023, 0.002],
           'IMT': 'PGA',
           'quantileValue': 0.6,
           'statistics': 'quantile'})],
    ]