Example #1
    def _create_job_profiles(self, user_name):
        uhs_cfg = helpers.demo_file('uhs/config.gem')
        job = engine.prepare_job()
        self.uhs_jp, _, _ = engine.import_job_profile(
            uhs_cfg, job, user_name=user_name)

        cpsha_cfg = helpers.demo_file('classical_psha_based_risk/config.gem')
        job = engine.prepare_job()
        self.cpsha_jp, _, _ = engine.import_job_profile(
            cpsha_cfg, job, user_name=user_name)
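All of the snippets on this page share one core pattern: `engine.prepare_job()` creates the job record, then `engine.import_job_profile()` parses a config file against it. A minimal sketch of that pattern, assuming the pre-1.0 openquake package layout these examples use (the `helpers` import path is a guess based on typical test layouts):

from openquake import engine            # as imported in Examples #35 and #38
from tests.utils import helpers         # hypothetical path to the test helpers

cfg = helpers.demo_file('uhs/config.gem')
job = engine.prepare_job()              # creates the job db record
# import_job_profile parses the config and returns a 3-tuple:
# (OqJobProfile record, dict of raw params, list of config sections)
job_profile, params, sections = engine.import_job_profile(cfg, job)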
Example #2
File: core_test.py Project: pslh/oq-engine
    def _create_job_profiles(self, user_name):
        uhs_cfg = helpers.demo_file('uhs/config.gem')
        job = engine.prepare_job()
        self.uhs_jp, _, _ = engine.import_job_profile(uhs_cfg,
                                                      job,
                                                      user_name=user_name)

        cpsha_cfg = helpers.demo_file('classical_psha_based_risk/config.gem')
        job = engine.prepare_job()
        self.cpsha_jp, _, _ = engine.import_job_profile(cpsha_cfg,
                                                        job,
                                                        user_name=user_name)
Example #3
    def setUp(self):
        client = kvs.get_client()

        # Delete managed job id info so we can predict the job key
        # which will be allocated for us
        client.delete(kvs.tokens.CURRENT_JOBS)

        self.generated_files = []

        job = engine.prepare_job()
        jp, params, sections = import_job_profile(helpers.get_data_path(CONFIG_FILE), job)
        self.job_ctxt = JobContext(params, job.id, sections=sections, oq_job_profile=jp, oq_job=job)

        job = engine.prepare_job()
        jp, params, sections = import_job_profile(helpers.get_data_path(CONFIG_WITH_INCLUDES), job)
        self.job_ctxt_with_includes = JobContext(params, job.id, sections=sections, oq_job_profile=jp, oq_job=job)
Example #4
    def setUp(self):
        # Test 'event-based' job
        cfg_path = helpers.testdata_path("simplecase/config.gem")

        self.job = engine.prepare_job()
        self.jp, self.params, self.sections = engine.import_job_profile(
            cfg_path, self.job)
Example #5
File: job_test.py Project: angri/openquake
    def setUp(self):
        # Test 'event-based' job
        cfg_path = helpers.testdata_path("simplecase/config.gem")

        self.job = engine.prepare_job()
        self.jp, self.params, self.sections = engine.import_job_profile(
            cfg_path, self.job)
Example #6
    def test_calculator_for_task(self):
        """Load up a sample calculation (into the db and cache) and make sure
        we can instantiate the correct calculator for a given calculation id.
        """
        from openquake.calculators.hazard.classical.core import (
            ClassicalHazardCalculator)
        job = engine.prepare_job()
        job_profile, params, sections = engine.import_job_profile(demo_file(
            'simple_fault_demo_hazard/config.gem'), job)

        job_ctxt = engine.JobContext(params, job.id,
                                     oq_job_profile=job_profile,
                                     oq_job=job)
        job_ctxt.to_kvs()

        with patch(
            'openquake.utils.tasks.get_running_job') as grc_mock:

            # Loading of the JobContext is done by
            # `get_running_job`, which is covered by other tests.
            # So, we just want to make sure that it's called here.
            grc_mock.return_value = job_ctxt

            calculator = tasks.calculator_for_task(job.id, 'hazard')

            self.assertTrue(isinstance(calculator, ClassicalHazardCalculator))
            self.assertEqual(1, grc_mock.call_count)
Example #7
    def setUpClass(cls):
        cls.job = engine.prepare_job()
        jp, _, _ = engine.import_job_profile(RISK_DEMO_CONFIG_FILE, cls.job)

        cls.job_ctxt = helpers.create_job({}, job_id=cls.job.id,
                                          oq_job_profile=jp, oq_job=cls.job)
        calc = ClassicalRiskCalculator(cls.job_ctxt)

        calc.store_exposure_assets()
        [input] = models.inputs4job(cls.job.id, input_type="exposure")
        model = input.model()
        assets = model.exposuredata_set.filter(taxonomy="af/ctc-D/LR")
        # Add some more assets.
        coos = [(10.000155392289116, 46.546194318563),
                (10.222034128255, 46.0071299176413),
                (10.520376165581, 46.247463385278)]
        for lat, lon in coos:
            site = shapes.Site(lat, lon)
            cls.sites.append(site)
            if assets:
                continue
            location = geos.GEOSGeometry(site.point.to_wkt())
            asset = models.ExposureData(
                exposure_model=model, taxonomy="af/ctc-D/LR",
                asset_ref=helpers.random_string(6), stco=lat * 2,
                site=location, reco=1.1 * lon)
            asset.save()
Example #8
    def test_calculator_for_task(self):
        """Load up a sample calculation (into the db and cache) and make sure
        we can instantiate the correct calculator for a given calculation id.
        """
        from openquake.calculators.hazard.classical.core import (
            ClassicalHazardCalculator)
        job_profile, params, sections = engine.import_job_profile(demo_file(
            'simple_fault_demo_hazard/config.gem'))

        calculation = OqCalculation(owner=job_profile.owner,
                                    oq_job_profile=job_profile)
        calculation.save()

        calc_proxy = engine.CalculationProxy(params, calculation.id,
                                             oq_job_profile=job_profile,
                                             oq_calculation=calculation)
        calc_proxy.to_kvs()

        with patch(
            'openquake.utils.tasks.get_running_calculation') as grc_mock:

            # Loading of the CalculationProxy is done by
            # `get_running_calculation`, which is covered by other tests.
            # So, we just want to make sure that it's called here.
            grc_mock.return_value = calc_proxy

            calculator = tasks.calculator_for_task(calculation.id, 'hazard')

            self.assertTrue(isinstance(calculator, ClassicalHazardCalculator))
            self.assertEqual(1, grc_mock.call_count)
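Note: Examples #8, #12, #32, #37, #41, #42 and #43 appear to come from an older revision of the engine, in which `OqCalculation`, `CalculationProxy` and `tasks.get_running_calculation` play the roles that `OqJob`, `JobContext` and `tasks.get_running_job` play in the newer snippets, and `import_job_profile` does not yet take a job argument. Apart from those renames, the calling pattern is the same.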
Example #9
    def setUpClass(cls):
        cls.job = engine.prepare_job()
        jp, _, _ = engine.import_job_profile(RISK_DEMO_CONFIG_FILE, cls.job)
        calc_proxy = helpers.create_job({}, job_id=cls.job.id,
                oq_job_profile=jp, oq_job=cls.job)

        # storing the basic exposure model
        ClassicalRiskCalculator(calc_proxy).store_exposure_assets()

        [em_input] = models.inputs4job(cls.job.id, input_type="exposure")
        [model] = em_input.exposuremodel_set.all()

        site = shapes.Site(1.0, 2.0)

        # more assets at same location
        models.ExposureData(
            exposure_model=model, taxonomy="NOT_USED",
            asset_ref="ASSET_1", stco=1,
            site=geos.GEOSGeometry(site.point.to_wkt()), reco=1).save()

        models.ExposureData(
            exposure_model=model, taxonomy="NOT_USED",
            asset_ref="ASSET_2", stco=1,
            site=geos.GEOSGeometry(site.point.to_wkt()), reco=1).save()

        site = shapes.Site(2.0, 2.0)

        # just one asset at location
        models.ExposureData(
            exposure_model=model, taxonomy="NOT_USED",
            asset_ref="ASSET_3", stco=1,
            site=geos.GEOSGeometry(site.point.to_wkt()), reco=1).save()
Example #10
    def setUp(self):
        kvs.get_client().flushall()

        base_path = helpers.testdata_path("scenario")
        job = engine.prepare_job()
        self.job_profile, self.params, self.sections = (
            engine.import_job_profile(SCENARIO_SMOKE_TEST, job))
        self.job_ctxt = JobContext(self.params,
                                   job.id,
                                   sections=self.sections,
                                   base_path=base_path,
                                   oq_job_profile=self.job_profile,
                                   oq_job=job)

        self.job_ctxt.params[NUMBER_OF_CALC_KEY] = "1"

        self.job_ctxt.params['SERIALIZE_RESULTS_TO'] = 'xml'
        self.job_ctxt.serialize_results_to = ["xml"]

        # saving the default java implementation
        self.default = (
            scenario.ScenarioHazardCalculator.compute_ground_motion_field)

        self.grid = self.job_ctxt.region.grid

        self.job_ctxt.to_kvs()
Example #11
    def setUpClass(cls):
        cls.job = engine.prepare_job()
        jp, _, _ = engine.import_job_profile(RISK_DEMO_CONFIG_FILE, cls.job)
        calc_proxy = helpers.create_job({}, job_id=cls.job.id,
                oq_job_profile=jp, oq_job=cls.job)

        # storing the basic exposure model
        ClassicalRiskCalculator(calc_proxy).store_exposure_assets()
        [input] = models.inputs4job(cls.job.id, input_type="exposure")
        model = input.model()
        assets = model.exposuredata_set.filter(taxonomy="aa/aatc-D/LR")

        if not assets:
            # This model did not exist in the database before.
            site = shapes.Site(1.0, 2.0)
            # more assets at same location
            models.ExposureData(
                exposure_model=model, taxonomy="aa/aatc-D/LR",
                asset_ref="ASSET_1", stco=1,
                site=geos.GEOSGeometry(site.point.to_wkt()), reco=1).save()

            models.ExposureData(
                exposure_model=model, taxonomy="aa/aatc-D/LR",
                asset_ref="ASSET_2", stco=1,
                site=geos.GEOSGeometry(site.point.to_wkt()), reco=1).save()

            site = shapes.Site(2.0, 2.0)
            # just one asset at location
            models.ExposureData(
                exposure_model=model, taxonomy="aa/aatc-D/LR",
                asset_ref="ASSET_3", stco=1,
                site=geos.GEOSGeometry(site.point.to_wkt()), reco=1).save()
Example #12
    def setUp(self):
        kvs.get_client().flushall()

        base_path = helpers.testdata_path("scenario")
        self.job_profile, self.params, self.sections = (
            engine.import_job_profile(SCENARIO_SMOKE_TEST))
        calculation = OqCalculation(owner=self.job_profile.owner,
                                    oq_job_profile=self.job_profile)
        calculation.save()
        self.calc_proxy = CalculationProxy(
            self.params, calculation.id, sections=self.sections,
            base_path=base_path, oq_job_profile=self.job_profile,
            oq_calculation=calculation)

        self.calc_proxy.params[NUMBER_OF_CALC_KEY] = "1"

        self.calc_proxy.params['SERIALIZE_RESULTS_TO'] = 'xml'

        # saving the default java implementation
        self.default = (
            scenario.ScenarioHazardCalculator.compute_ground_motion_field)

        self.grid = self.calc_proxy.region.grid

        self.calc_proxy.to_kvs()
Example #13
    def setUpClass(cls):
        cls.job = engine.prepare_job()
        jp, _, _ = engine.import_job_profile(RISK_DEMO_CONFIG_FILE, cls.job)

        cls.job_ctxt = helpers.create_job({},
                                          job_id=cls.job.id,
                                          oq_job_profile=jp,
                                          oq_job=cls.job)
        calc = ClassicalRiskCalculator(cls.job_ctxt)

        calc.store_exposure_assets()
        [input] = models.inputs4job(cls.job.id, input_type="exposure")
        model = input.model()
        assets = model.exposuredata_set.filter(taxonomy="af/ctc-D/LR")
        # Add some more assets.
        coos = [(10.000155392289116, 46.546194318563),
                (10.222034128255, 46.0071299176413),
                (10.520376165581, 46.247463385278)]
        for lat, lon in coos:
            site = shapes.Site(lat, lon)
            cls.sites.append(site)
            if assets:
                continue
            location = geos.GEOSGeometry(site.point.to_wkt())
            asset = models.ExposureData(exposure_model=model,
                                        taxonomy="af/ctc-D/LR",
                                        asset_ref=helpers.random_string(6),
                                        stco=lat * 2,
                                        site=location,
                                        reco=1.1 * lon)
            asset.save()
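The exposure lookup in this fixture (also used in Examples #7, #11 and #51) follows one pattern, pulled out here as a sketch; the import path for `models` is an assumption based on the snippets' style:

from openquake.db import models  # assumed import path for these examples

# `job` is a previously prepared OqJob, as in the fixtures above.
# Resolve the job's single exposure input, its ExposureModel, and any
# assets already stored under a given taxonomy:
[exposure_input] = models.inputs4job(job.id, input_type="exposure")
model = exposure_input.model()
assets = model.exposuredata_set.filter(taxonomy="af/ctc-D/LR")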
Example #14
    def test_calculator_for_task(self):
        """Load up a sample calculation (into the db and cache) and make sure
        we can instantiate the correct calculator for a given calculation id.
        """
        from openquake.calculators.hazard.classical.core import (
            ClassicalHazardCalculator)
        job = engine.prepare_job()
        job_profile, params, sections = engine.import_job_profile(
            demo_file('simple_fault_demo_hazard/config.gem'), job)

        job_ctxt = engine.JobContext(params,
                                     job.id,
                                     oq_job_profile=job_profile,
                                     oq_job=job)
        job_ctxt.to_kvs()

        with patch('openquake.utils.tasks.get_running_job') as grc_mock:

            # Loading of the JobContext is done by
            # `get_running_job`, which is covered by other tests.
            # So, we just want to make sure that it's called here.
            grc_mock.return_value = job_ctxt

            calculator = tasks.calculator_for_task(job.id, 'hazard')

            self.assertTrue(isinstance(calculator, ClassicalHazardCalculator))
            self.assertEqual(1, grc_mock.call_count)
Example #15
File: job_test.py Project: angri/openquake
    def setUp(self):
        self.job_from_file = engine._job_from_file
        self.init_logs_amqp_send = patch('openquake.logs.init_logs_amqp_send')
        self.init_logs_amqp_send.start()
        self.job = engine.prepare_job()
        self.job_profile, self.params, self.sections = (
            engine.import_job_profile(helpers.get_data_path(CONFIG_FILE),
                                      self.job))
Example #16
    def setUp(self):
        self.job_from_file = engine._job_from_file
        self.init_logs_amqp_send = patch('openquake.logs.init_logs_amqp_send')
        self.init_logs_amqp_send.start()
        self.job = engine.prepare_job()
        self.job_profile, self.params, self.sections = (
            engine.import_job_profile(helpers.get_data_path(CONFIG_FILE),
                                      self.job))
Example #17
    def setUp(self):
        # Test 'event-based' job
        cfg_path = helpers.testdata_path("simplecase/config.gem")
        base_path = helpers.testdata_path("simplecase")

        oq_job = engine.prepare_job()
        oq_job_profile, params, sections = engine.import_job_profile(cfg_path, oq_job)

        self.eb_job = JobContext(
            params, oq_job.id, sections=sections, base_path=base_path, oq_job_profile=oq_job_profile, oq_job=oq_job
        )
Example #18
File: core_test.py Project: pslh/oq-engine
    def setUp(self):
        self.job = engine.prepare_job()
        self.job_profile, params, sections = engine.import_job_profile(
            UHS_DEMO_CONFIG_FILE, self.job)

        self.job_ctxt = engine.JobContext(
            params, self.job.id, sections=sections,
            serialize_results_to=['db'], oq_job_profile=self.job_profile,
            oq_job=self.job)
        self.job_ctxt.to_kvs()
        self.job_id = self.job_ctxt.job_id
Example #19
    def setUp(self):
        client = kvs.get_client()

        # Delete managed job id info so we can predict the job key
        # which will be allocated for us
        client.delete(kvs.tokens.CURRENT_JOBS)

        self.generated_files = []

        job = engine.prepare_job()
        jp, params, sections = import_job_profile(helpers.get_data_path(
            CONFIG_FILE), job)
        self.job_ctxt = JobContext(
            params, job.id, sections=sections, oq_job_profile=jp, oq_job=job)

        job = engine.prepare_job()
        jp, params, sections = import_job_profile(helpers.get_data_path(
            CONFIG_WITH_INCLUDES), job)
        self.job_ctxt_with_includes = JobContext(
            params, job.id, sections=sections, oq_job_profile=jp, oq_job=job)
Example #20
    def test_default_validators_classical_bcr_risk(self):
        # For Classical BCR Hazard+Risk calculations, ensure that a
        # `ClassicalRiskValidator` is included in the default validators.
        cfg_path = helpers.demo_file('benefit_cost_ratio/config.gem')

        job_profile, params, sections = engine.import_job_profile(cfg_path)

        validators = config.default_validators(sections, params)

        self.assertTrue(any(
            isinstance(v, ClassicalRiskValidator) for v in validators))
Example #21
    def setUp(self):
        self.job = engine.prepare_job()
        self.job_profile, params, sections = engine.import_job_profile(
            UHS_DEMO_CONFIG_FILE, self.job)

        self.job_ctxt = engine.JobContext(
            params, self.job.id, sections=sections,
            serialize_results_to=['db'], oq_job_profile=self.job_profile,
            oq_job=self.job)
        self.job_ctxt.to_kvs()
        self.job_id = self.job_ctxt.job_id
Example #22
    def test_default_validators_event_based_bcr_risk(self):
        # For Event-Based BCR Risk calculations, ensure that an
        # `EventBasedRiskValidator` is included in the default validators.
        cfg_path = helpers.demo_file('benefit_cost_ratio/config_ebased.gem')

        job_profile, params, sections = engine.import_job_profile(
            cfg_path, self.job)

        validators = config.default_validators(sections, params)

        self.assertTrue(
            any(isinstance(v, EventBasedRiskValidator) for v in validators))
Example #23
    def setUp(self):
        # Test 'event-based' job
        cfg_path = helpers.testdata_path("simplecase/config.gem")
        base_path = helpers.testdata_path("simplecase")

        oq_job = engine.prepare_job()
        oq_job_profile, params, sections = engine.import_job_profile(
            cfg_path, oq_job)

        self.eb_job = JobContext(
            params, oq_job.id, sections=sections, base_path=base_path,
            oq_job_profile=oq_job_profile, oq_job=oq_job)
Example #24
    def test_default_validators_event_based_bcr_risk(self):
        # For Event-Based BCR Risk calculations, ensure that an
        # `EventBasedRiskValidator` is included in the default validators.
        cfg_path = helpers.demo_file(
            'event_based_bcr_risk/config.gem')

        job_profile, params, sections = engine.import_job_profile(cfg_path)

        validators = config.default_validators(sections, params)

        self.assertTrue(any(
            isinstance(v, EventBasedRiskValidator) for v in validators))
Example #25
    def test_default_validators_classical_bcr_risk(self):
        # For Classical BCR Hazard+Risk calculations, ensure that a
        # `ClassicalRiskValidator` is included in the default validators.
        cfg_path = helpers.demo_file('benefit_cost_ratio/config.gem')

        job_profile, params, sections = engine.import_job_profile(
            cfg_path, self.job)

        validators = config.default_validators(sections, params)

        self.assertTrue(
            any(isinstance(v, ClassicalRiskValidator) for v in validators))
Example #26
    def setUp(self):
        self.job = engine.prepare_job()
        self.job_profile, self.params, _sections = (
            engine.import_job_profile(demo_file(
                'simple_fault_demo_hazard/config.gem'), self.job))

        self.params['debug'] = 'warn'

        # Cache the calc proxy data into the kvs:
        job_ctxt = engine.JobContext(
            self.params, self.job.id, oq_job_profile=self.job_profile,
            oq_job=self.job)
        job_ctxt.to_kvs()
Example #27
    def setUp(self):
        self.job = engine.prepare_job()
        self.job_profile, self.params, _sections = (engine.import_job_profile(
            demo_file('simple_fault_demo_hazard/config.gem'), self.job))

        self.params['debug'] = 'warn'

        # Cache the calc proxy data into the kvs:
        job_ctxt = engine.JobContext(self.params,
                                     self.job.id,
                                     oq_job_profile=self.job_profile,
                                     oq_job=self.job)
        job_ctxt.to_kvs()
Example #28
    def test_compute_bcr_in_the_classical_psha_calculator(self):
        self._compute_risk_classical_psha_setup()
        helpers.delete_profile(self.job)
        bcr_config = helpers.demo_file('benefit_cost_ratio/config.gem')
        job_profile, params, sections = engine.import_job_profile(
            bcr_config, self.job)

        # We need to adjust a few of the parameters for this test:
        job_profile.imls = [
            0.005, 0.007, 0.0098, 0.0137, 0.0192, 0.0269, 0.0376, 0.0527,
            0.0738, 0.103, 0.145, 0.203, 0.284, 0.397, 0.556, 0.778]
        params['ASSET_LIFE_EXPECTANCY'] = '50'
        job_profile.asset_life_expectancy = 50
        params['REGION_VERTEX'] = '0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0, 0.0'
        job_profile.region = GEOSGeometry(shapes.polygon_ewkt_from_coords(
            params['REGION_VERTEX']))
        job_profile.save()

        job_ctxt = engine.JobContext(
            params, self.job_id, sections=sections, oq_job_profile=job_profile)

        calculator = classical_core.ClassicalRiskCalculator(job_ctxt)

        [input] = models.inputs4job(self.job.id, input_type="exposure")
        emdl = input.model()
        if not emdl:
            emdl = models.ExposureModel(
                owner=self.job.owner, input=input,
                description="c-psha test exposure model",
                category="c-psha power plants", stco_unit="watt",
                stco_type="aggregated", reco_unit="joule",
                reco_type="aggregated")
            emdl.save()

        assets = emdl.exposuredata_set.filter(asset_ref="rubcr")
        if not assets:
            asset = models.ExposureData(exposure_model=emdl, taxonomy="ID",
                                        asset_ref="rubcr", stco=1, reco=123.45,
                                        site=GEOSGeometry("POINT(1.0 1.0)"))
            asset.save()

        Block.from_kvs(self.job_id, self.block_id)
        calculator.compute_risk(self.block_id)

        result_key = kvs.tokens.bcr_block_key(self.job_id, self.block_id)
        res = kvs.get_value_json_decoded(result_key)
        expected_result = {'bcr': 0.0, 'eal_original': 0.003032,
                           'eal_retrofitted': 0.003032}

        helpers.assertDeepAlmostEqual(
            self, res, [[[1, 1], [[expected_result, "rubcr"]]]])
Example #29
    def test__serialize_xml_filenames(self):
        # Test that the file names of the loss XML artifacts are correct.
        # See https://bugs.launchpad.net/openquake/+bug/894706.
        expected_lrc_file_name = (
            'losscurves-block-#%(job_id)s-block#%(block)s.xml')
        expected_lr_file_name = (
            'losscurves-loss-block-#%(job_id)s-block#%(block)s.xml')

        cfg_file = helpers.demo_file('classical_psha_based_risk/config.gem')

        job = engine.prepare_job()
        job_profile, params, sections = engine.import_job_profile(
            cfg_file, job)

        job_ctxt = engine.JobContext(
            params, job.id, sections=sections,
            serialize_results_to=['xml', 'db'], oq_job_profile=job_profile,
            oq_job=job)

        calculator = ClassicalRiskCalculator(job_ctxt)

        with helpers.patch('openquake.writer.FileWriter.serialize'):
            # The 'curves' key in the kwargs just needs to be present;
            # because of the serialize mock in place above, it doesn't need
            # to have a real value.

            # First, we test loss ratio curve output,
            # then we'll do the same test for loss curve output.

            # We expect to get a single file path back.
            [file_path] = calculator._serialize(
                0, **dict(curve_mode='loss_ratio', curves=[]))

            _dir, file_name = os.path.split(file_path)

            self.assertEqual(
                expected_lrc_file_name % dict(job_id=job.id,
                                              block=0),
                file_name)

            # The same test again, except for loss curves this time.
            [file_path] = calculator._serialize(
                0, **dict(curve_mode='loss', curves=[]))

            _dir, file_name = os.path.split(file_path)

            self.assertEqual(
                expected_lr_file_name % dict(job_id=job.id,
                                             block=0),
                file_name)
Example #30
    def test__serialize_xml_filenames(self):
        # Test that the file names of the loss XML artifacts are correct.
        # See https://bugs.launchpad.net/openquake/+bug/894706.
        expected_lrc_file_name = (
            'losscurves-block-#%(job_id)s-block#%(block)s.xml')
        expected_lr_file_name = (
            'losscurves-loss-block-#%(job_id)s-block#%(block)s.xml')

        cfg_file = helpers.demo_file('classical_psha_based_risk/config.gem')

        job = engine.prepare_job()
        job_profile, params, sections = engine.import_job_profile(
            cfg_file, job)

        job_ctxt = engine.JobContext(params,
                                     job.id,
                                     sections=sections,
                                     serialize_results_to=['xml', 'db'],
                                     oq_job_profile=job_profile,
                                     oq_job=job)

        calculator = ClassicalRiskCalculator(job_ctxt)

        with helpers.patch('openquake.writer.FileWriter.serialize'):
            # The 'curves' key in the kwargs just needs to be present;
            # because of the serialize mock in place above, it doesn't need
            # to have a real value.

            # First, we test loss ratio curve output,
            # then we'll do the same test for loss curve output.

            # We expect to get a single file path back.
            [file_path] = calculator._serialize(
                0, **dict(curve_mode='loss_ratio', curves=[]))

            _dir, file_name = os.path.split(file_path)

            self.assertEqual(
                expected_lrc_file_name % dict(job_id=job.id, block=0),
                file_name)

            # The same test again, except for loss curves this time.
            [file_path] = calculator._serialize(
                0, **dict(curve_mode='loss', curves=[]))

            _dir, file_name = os.path.split(file_path)

            self.assertEqual(
                expected_lr_file_name % dict(job_id=job.id, block=0),
                file_name)
Example #31
    def test_compute_bcr(self):
        cfg_path = helpers.demo_file(
            'probabilistic_event_based_risk/config.gem')
        helpers.delete_profile(self.job)
        job_profile, params, sections = engine.import_job_profile(
            cfg_path, self.job)
        job_profile.calc_mode = 'event_based_bcr'
        job_profile.interest_rate = 0.05
        job_profile.asset_life_expectancy = 50
        job_profile.region = GEOSGeometry(shapes.polygon_ewkt_from_coords(
            '0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0, 0.0'))
        job_profile.region_grid_spacing = 0.1
        job_profile.maximum_distance = 200.0
        job_profile.gmf_random_seed = None
        job_profile.save()

        params.update(dict(CALCULATION_MODE='Event Based BCR',
                           INTEREST_RATE='0.05',
                           ASSET_LIFE_EXPECTANCY='50',
                           MAXIMUM_DISTANCE='200.0',
                           REGION_VERTEX=('0.0, 0.0, 0.0, 2.0, '
                                          '2.0, 2.0, 2.0, 0.0'),
                           REGION_GRID_SPACING='0.1'))

        job_ctxt = engine.JobContext(
            params, self.job_id, sections=sections, oq_job_profile=job_profile)

        calculator = eb_core.EventBasedRiskCalculator(job_ctxt)

        self.block_id = 7
        SITE = shapes.Site(1.0, 1.0)
        block = Block(self.job_id, self.block_id, (SITE, ))
        block.to_kvs()

        location = GEOSGeometry(SITE.point.to_wkt())
        asset = models.ExposureData(exposure_model=self.emdl, taxonomy="ID",
                                    asset_ref=22.61, stco=1, reco=123.45,
                                    site=location)
        asset.save()

        calculator.compute_risk(self.block_id)

        result_key = kvs.tokens.bcr_block_key(self.job_id, self.block_id)
        result = kvs.get_value_json_decoded(result_key)
        expected_result = {'bcr': 0.0, 'eal_original': 0.0,
                           'eal_retrofitted': 0.0}
        helpers.assertDeepAlmostEqual(
            self, [[[1, 1], [[expected_result, "22.61"]]]], result)
Example #32
    def setUp(self):
        cfg_path = helpers.demo_file(
            'probabilistic_event_based_risk/config.gem')

        job_profile, params, sections = engine.import_job_profile(cfg_path)

        calc_proxy = engine.CalculationProxy(
            params, 1, sections=sections, base_path='/tmp',
            serialize_results_to=['db', 'xml'],
            oq_job_profile=job_profile)
        calc_proxy.blocks_keys = []

        self.calculator = EventBasedRiskCalculator(calc_proxy)
        self.calculator.store_exposure_assets = lambda: None
        self.calculator.store_vulnerability_model = lambda: None
        self.calculator.partition = lambda: None
Example #33
    def setUp(self):
        cfg_path = helpers.demo_file(
            'probabilistic_event_based_risk/config.gem')

        job = engine.prepare_job()
        jp, params, sections = engine.import_job_profile(cfg_path, job)

        job_ctxt = engine.JobContext(
            params, 1, sections=sections, base_path='/tmp',
            serialize_results_to=['db', 'xml'], oq_job_profile=jp, oq_job=job)
        job_ctxt.blocks_keys = []

        self.calculator = EventBasedRiskCalculator(job_ctxt)
        self.calculator.store_exposure_assets = lambda: None
        self.calculator.store_vulnerability_model = lambda: None
        self.calculator.partition = lambda: None
Example #34
    def test_write_output(self):
        # Test that the loss map writers are properly called when
        # write_output is invoked.
        cfg_file = helpers.demo_file('classical_psha_based_risk/config.gem')

        job = engine.prepare_job()
        job_profile, params, sections = engine.import_job_profile(
            cfg_file, job)

        # Set conditional loss poe so that loss maps are created.
        # If this parameter is not specified, no loss maps will be serialized
        # at the end of the job.
        params['CONDITIONAL_LOSS_POE'] = '0.01'
        job_profile.conditional_loss_poe = [0.01]
        job_profile.save()

        job_ctxt = engine.JobContext(params,
                                     job.id,
                                     sections=sections,
                                     serialize_results_to=['xml', 'db'],
                                     oq_job_profile=job_profile,
                                     oq_job=job)

        calculator = ClassicalRiskCalculator(job_ctxt)

        # Mock the composed loss map serializer:
        with helpers.patch('openquake.writer.CompositeWriter'
                           '.serialize') as writer_mock:
            calculator.write_output()

            self.assertEqual(1, writer_mock.call_count)

            # Now test that the composite writer got the correct
            # 'serialize to' instructions. The composite writer should have
            # 1 DB and 1 XML loss map serializer:
            composite_writer = writer_mock.call_args[0][0]
            writers = composite_writer.writers

            self.assertEqual(2, len(writers))
            # We don't assume anything about the order of the writers,
            # and we don't care anyway in this test:
            self.assertTrue(
                any(isinstance(w, LossMapDBWriter) for w in writers))
            self.assertTrue(
                any(
                    isinstance(w, LossMapNonScenarioXMLWriter)
                    for w in writers))
Example #35
File: oqscript.py Project: bwyss/oq-engine
def main():
    arg_parser = set_up_arg_parser()
    args = arg_parser.parse_args()

    if args.version:
        print utils_version.info(__version__)
    elif args.config_file is not None:
        from openquake import job
        from openquake import engine

        try:
            if args.log_file is not None:
                # Capture logging messages to a file.
                try:
                    _touch_log_file(args.log_file)
                except IOError as e:
                    raise IOError("Error writing to log file %s: %s" % (args.log_file, e.strerror))

            user_name = getpass.getuser()
            ajob = engine.prepare_job(user_name)
            _, params, sections = engine.import_job_profile(args.config_file, ajob, user_name, args.force_inputs)
            engine.run_job(
                ajob,
                params,
                sections,
                output_type=args.output_type,
                log_level=args.log_level,
                force_inputs=args.force_inputs,
                log_file=args.log_file,
            )
        except job.config.ValidationException as e:
            print str(e)
        except IOError as e:
            print str(e)
        except Exception as e:
            raise
    elif args.list_calculations:
        list_calculations()
    elif args.list_outputs is not None:
        list_outputs(args.list_outputs)
    elif args.export is not None:
        output_id, target_dir = args.export
        output_id = int(output_id)

        do_export(output_id, target_dir)
    else:
        arg_parser.print_usage()
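Stripped of argument parsing and error handling, the job-launching branch of main() above reduces to this sequence (a sketch; the config path is a placeholder):

import getpass

from openquake import engine

user_name = getpass.getuser()
job = engine.prepare_job(user_name)          # job record owned by that user
_, params, sections = engine.import_job_profile(
    'config.gem', job, user_name, False)     # placeholder path; force_inputs off
engine.run_job(job, params, sections)        # optional kwargs as in the listing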
Example #36
    def test__launch_job_calls_core_calc_methods(self):
        # The `Calculator` interface defines 4 general methods:
        # - initialize
        # - pre_execute
        # - execute
        # - post_execute
        # When `_launch_job` is called, each of these methods should be
        # called once per job type (hazard, risk).

        # Calculation setup:
        cfg_file = helpers.demo_file('classical_psha_based_risk/config.gem')

        job = engine.prepare_job()
        job_profile, params, sections = engine.import_job_profile(
            cfg_file, job)

        job_ctxt = engine.JobContext(
            params, job.id, sections=sections,
            serialize_results_to=['xml', 'db'],
            oq_job_profile=job_profile, oq_job=job)

        # Mocking setup:
        cls_haz_calc = ('openquake.calculators.hazard.classical.core'
                        '.ClassicalHazardCalculator')
        cls_risk_calc = ('openquake.calculators.risk.classical.core'
                         '.ClassicalRiskCalculator')
        methods = ('initialize', 'pre_execute', 'execute', 'post_execute')
        haz_patchers = [helpers.patch('%s.%s' % (cls_haz_calc, m))
                        for m in methods]
        risk_patchers = [helpers.patch('%s.%s' % (cls_risk_calc, m))
                         for m in methods]

        haz_mocks = [p.start() for p in haz_patchers]
        risk_mocks = [p.start() for p in risk_patchers]

        # Call the function under test:
        engine._launch_job(job_ctxt, sections)

        self.assertTrue(all(x.call_count == 1 for x in haz_mocks))
        self.assertTrue(all(x.call_count == 1 for x in risk_mocks))

        # Tear down the mocks:
        for p in haz_patchers:
            p.stop()
        for p in risk_patchers:
            p.stop()
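The comments at the top of this test name the four-method `Calculator` interface without showing it; a hypothetical sketch of its shape, inferred from nothing more than the method names being patched:

class Calculator(object):
    """Hypothetical sketch of the interface exercised above; the real
    base class in openquake may define more than these hooks."""

    def initialize(self):
        """Set up calculation inputs."""

    def pre_execute(self):
        """Work done before the main calculation phase."""

    def execute(self):
        """The main calculation phase."""

    def post_execute(self):
        """Clean-up and result handling."""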
Example #37
    def setUp(self):
        self.job_profile, self.params, _sections = (
            engine.import_job_profile(demo_file(
                'simple_fault_demo_hazard/config.gem')))

        self.params['debug'] = 'warn'

        self.calculation = OqCalculation(
            owner=self.job_profile.owner,
            oq_job_profile=self.job_profile)
        self.calculation.save()

        # Cache the calc proxy data into the kvs:
        calc_proxy = engine.CalculationProxy(
            self.params, self.calculation.id, oq_job_profile=self.job_profile,
            oq_calculation=self.calculation)
        calc_proxy.to_kvs()
Example #38
File: oqscript.py Project: bwyss/oq-engine
def main():
    arg_parser = set_up_arg_parser()
    args = arg_parser.parse_args()

    if args.version:
        print utils_version.info(__version__)
    elif args.config_file is not None:
        from openquake import job
        from openquake import engine
        try:
            if args.log_file is not None:
                # Capture logging messages to a file.
                try:
                    _touch_log_file(args.log_file)
                except IOError as e:
                    raise IOError('Error writing to log file %s: %s' %
                                  (args.log_file, e.strerror))

            user_name = getpass.getuser()
            ajob = engine.prepare_job(user_name)
            _, params, sections = engine.import_job_profile(
                args.config_file, ajob, user_name, args.force_inputs)
            engine.run_job(ajob,
                           params,
                           sections,
                           output_type=args.output_type,
                           log_level=args.log_level,
                           force_inputs=args.force_inputs,
                           log_file=args.log_file)
        except job.config.ValidationException as e:
            print str(e)
        except IOError as e:
            print str(e)
        except Exception as e:
            raise
    elif args.list_calculations:
        list_calculations()
    elif args.list_outputs is not None:
        list_outputs(args.list_outputs)
    elif args.export is not None:
        output_id, target_dir = args.export
        output_id = int(output_id)

        do_export(output_id, target_dir)
    else:
        arg_parser.print_usage()
Example #39
    def setUp(self):
        cfg_path = helpers.demo_file(
            'probabilistic_event_based_risk/config.gem')

        job = engine.prepare_job()
        jp, params, sections = engine.import_job_profile(cfg_path, job)

        job_ctxt = engine.JobContext(
            params, 1, sections=sections, base_path='/tmp',
            serialize_results_to=['db', 'xml'], oq_job_profile=jp, oq_job=job)
        job_ctxt.blocks_keys = []

        self.calculator = EventBasedRiskCalculator(job_ctxt)
        self.calculator.store_exposure_assets = lambda: None
        self.calculator.store_fragility_model = lambda: None
        self.calculator.store_vulnerability_model = lambda: None
        self.calculator.partition = lambda: None
Example #40
    def test__launch_job_calls_core_calc_methods(self):
        # The `Calculator` interface defines 4 general methods:
        # - initialize
        # - pre_execute
        # - execute
        # - post_execute
        # When `_launch_job` is called, each of these methods should be
        # called once per job type (hazard, risk).

        # Calculation setup:
        cfg_file = helpers.demo_file('classical_psha_based_risk/config.gem')

        job = engine.prepare_job()
        job_profile, params, sections = engine.import_job_profile(
            cfg_file, job)

        job_ctxt = engine.JobContext(
            params, job.id, sections=sections,
            serialize_results_to=['xml', 'db'],
            oq_job_profile=job_profile, oq_job=job)

        # Mocking setup:
        cls_haz_calc = ('openquake.calculators.hazard.classical.core'
                        '.ClassicalHazardCalculator')
        cls_risk_calc = ('openquake.calculators.risk.classical.core'
                         '.ClassicalRiskCalculator')
        methods = ('initialize', 'pre_execute', 'execute', 'post_execute')
        haz_patchers = [helpers.patch('%s.%s' % (cls_haz_calc, m))
                        for m in methods]
        risk_patchers = [helpers.patch('%s.%s' % (cls_risk_calc, m))
                         for m in methods]

        haz_mocks = [p.start() for p in haz_patchers]
        risk_mocks = [p.start() for p in risk_patchers]

        # Call the function under test:
        engine._launch_job(job_ctxt, sections)

        self.assertTrue(all(x.call_count == 1 for x in haz_mocks))
        self.assertTrue(all(x.call_count == 1 for x in risk_mocks))

        # Tear down the mocks:
        for p in haz_patchers:
            p.stop()
        for p in risk_patchers:
            p.stop()
Example #41
    def test_write_output(self):
        # Test that the loss map writers are properly called when
        # write_output is invoked.
        cfg_file = demo_file("classical_psha_based_risk/config.gem")

        job_profile, params, sections = import_job_profile(cfg_file)

        # Set conditional loss poe so that loss maps are created.
        # If this parameter is not specified, no loss maps will be serialized
        # at the end of the calculation.
        params["CONDITIONAL_LOSS_POE"] = "0.01"
        job_profile.conditional_loss_poe = [0.01]
        job_profile.save()

        calculation = OqCalculation(owner=job_profile.owner, oq_job_profile=job_profile)
        calculation.save()

        calc_proxy = CalculationProxy(
            params,
            calculation.id,
            sections=sections,
            serialize_results_to=["xml", "db"],
            oq_job_profile=job_profile,
            oq_calculation=calculation,
        )

        calculator = ClassicalRiskCalculator(calc_proxy)

        # Mock the composed loss map serializer:
        with patch("openquake.writer.CompositeWriter" ".serialize") as writer_mock:
            calculator.write_output()

            self.assertEqual(1, writer_mock.call_count)

            # Now test that the composite writer got the correct
            # 'serialize to' instructions. The composite writer should have
            # 1 DB and 1 XML loss map serializer:
            composite_writer = writer_mock.call_args[0][0]
            writers = composite_writer.writers

            self.assertEqual(2, len(writers))
            # We don't assume anything about the order of the writers,
            # and we don't care anyway in this test:
            self.assertTrue(any(isinstance(w, LossMapDBWriter) for w in writers))
            self.assertTrue(any(isinstance(w, LossMapNonScenarioXMLWriter) for w in writers))
Example #42
    def test__serialize_xml_filenames(self):
        # Test that the file names of the loss XML artifacts are correct.
        # See https://bugs.launchpad.net/openquake/+bug/894706.
        expected_lrc_file_name = "losscurves-block-#%(calculation_id)s-block#%(block)s.xml"
        expected_lr_file_name = "losscurves-loss-block-#%(calculation_id)s-block#%(block)s.xml"

        cfg_file = demo_file("classical_psha_based_risk/config.gem")

        job_profile, params, sections = import_job_profile(cfg_file)

        calculation = OqCalculation(owner=job_profile.owner, oq_job_profile=job_profile)
        calculation.save()

        calc_proxy = CalculationProxy(
            params,
            calculation.id,
            sections=sections,
            serialize_results_to=["xml", "db"],
            oq_job_profile=job_profile,
            oq_calculation=calculation,
        )

        calculator = ClassicalRiskCalculator(calc_proxy)

        with patch("openquake.writer.FileWriter.serialize"):
            # The 'curves' key in the kwargs just needs to be present;
            # because of the serialize mock in place above, it doesn't need
            # to have a real value.

            # First, we test loss ratio curve output,
            # then we'll do the same test for loss curve output.

            # We expect to get a single file path back.
            [file_path] = calculator._serialize(0, **dict(curve_mode="loss_ratio", curves=[]))

            _dir, file_name = os.path.split(file_path)

            self.assertEqual(expected_lrc_file_name % dict(calculation_id=calculation.id, block=0), file_name)

            # The same test again, except for loss curves this time.
            [file_path] = calculator._serialize(0, **dict(curve_mode="loss", curves=[]))

            _dir, file_name = os.path.split(file_path)

            self.assertEqual(expected_lr_file_name % dict(calculation_id=calculation.id, block=0), file_name)
Example #43
    def setUp(self):
        # Test 'event-based' job
        cfg_path = helpers.testdata_path("simplecase/config.gem")
        base_path = helpers.testdata_path("simplecase")

        oq_job_profile, params, sections = engine.import_job_profile(cfg_path)

        oq_calculation = OqCalculation(owner=oq_job_profile.owner, description="", oq_job_profile=oq_job_profile)
        oq_calculation.save()

        self.eb_job = CalculationProxy(
            params,
            oq_calculation.id,
            sections=sections,
            base_path=base_path,
            oq_job_profile=oq_job_profile,
            oq_calculation=oq_calculation,
        )
Example #44
    def test_compute_risk_in_the_classical_psha_calculator(self):
        """
            tests ClassicalRiskCalculator.compute_risk by retrieving
            all the loss curves in the kvs and checks their presence
        """
        helpers.delete_profile(self.job)
        cls_risk_cfg = helpers.demo_file(
            'classical_psha_based_risk/config.gem')
        job_profile, params, sections = engine.import_job_profile(
            cls_risk_cfg, self.job)

        # We need to adjust a few of the parameters for this test:
        params['REGION_VERTEX'] = '0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0, 0.0'
        job_profile.region = GEOSGeometry(shapes.polygon_ewkt_from_coords(
            params['REGION_VERTEX']))
        job_profile.save()

        job_ctxt = engine.JobContext(
            params, self.job_id, sections=sections, oq_job_profile=job_profile)

        self._compute_risk_classical_psha_setup()

        calculator = classical_core.ClassicalRiskCalculator(job_ctxt)
        calculator.vuln_curves = {"ID": self.vuln_function}

        block = Block.from_kvs(self.job_id, self.block_id)

        # computes the loss curves and puts them in kvs
        calculator.compute_risk(self.block_id)

        for point in block.grid(job_ctxt.region):
            assets = BaseRiskCalculator.assets_for_cell(
                self.job_id, point.site)
            for asset in assets:
                loss_ratio_key = kvs.tokens.loss_ratio_key(
                    self.job_id, point.row, point.column, asset.asset_ref)

                self.assertTrue(kvs.get_client().get(loss_ratio_key))

                loss_key = kvs.tokens.loss_curve_key(
                    self.job_id, point.row, point.column, asset.asset_ref)

                self.assertTrue(kvs.get_client().get(loss_key))
Example #45
def prepare_job_context(path_to_cfg):
    """Given a path to a config file, prepare and return a
    :class:`openquake.engine.JobContext`. This is convenient because it can be
    immediately passed to a calculator constructor.

    This also creates the necessary job and oq_job_profile records.
    """
    job = engine.prepare_job()

    cfg = demo_file(path_to_cfg)

    job_profile, params, sections = engine.import_job_profile(
        cfg, job, force_inputs=True)

    job_ctxt = engine.JobContext(
        params, job.id, sections=sections, oq_job_profile=job_profile,
        oq_job=job)

    return job_ctxt
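Per the docstring, the returned context can be handed straight to a calculator constructor. For instance (the demo path and calculator class are taken from the other examples on this page):

from openquake.calculators.risk.classical.core import ClassicalRiskCalculator

job_ctxt = prepare_job_context('classical_psha_based_risk/config.gem')
calculator = ClassicalRiskCalculator(job_ctxt)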
Example #46
    def test_import_job_profile_as_specified_user(self):
        # Test importing of a job profile when a user is specified
        # The username will be randomly generated and unique to give
        # a clean set of test conditions.
        user_name = str(uuid.uuid4())

        # For sanity, check that the user does not exist to begin with.
        self.assertRaises(ObjectDoesNotExist, models.OqUser.objects.get,
                          user_name=user_name)

        cfg_path = helpers.demo_file('HazardMapTest/config.gem')

        job_profile, _params, _sections = engine.import_job_profile(
            cfg_path, self.job, user_name=user_name)

        self.assertEqual(user_name, job_profile.owner.user_name)
        # Check that the OqUser record for this user now exists.
        # If this fails, it will raise an `ObjectDoesNotExist` exception.
        models.OqUser.objects.get(user_name=user_name)
Example #47
    def test_import_job_profile_as_specified_user(self):
        # Test importing of a job profile when a user is specified
        # The username will be randomly generated and unique to give
        # a clean set of test conditions.
        user_name = str(uuid.uuid4())

        # For sanity, check that the user does not exist to begin with.
        self.assertRaises(ObjectDoesNotExist, models.OqUser.objects.get,
                          user_name=user_name)

        cfg_path = helpers.demo_file('HazardMapTest/config.gem')

        job_profile, _params, _sections = engine.import_job_profile(
            cfg_path, self.job, user_name=user_name)

        self.assertEqual(user_name, job_profile.owner.user_name)
        # Check that the OqUser record for this user now exists.
        # If this fails, it will raise an `ObjectDoesNotExist` exception.
        models.OqUser.objects.get(user_name=user_name)
Example #48
    def test_write_output(self):
        # Test that the loss map writers are properly called when
        # write_output is invoked.
        cfg_file = helpers.demo_file('classical_psha_based_risk/config.gem')

        job = engine.prepare_job()
        job_profile, params, sections = engine.import_job_profile(
            cfg_file, job)

        # Set conditional loss poe so that loss maps are created.
        # If this parameter is not specified, no loss maps will be serialized
        # at the end of the job.
        params['CONDITIONAL_LOSS_POE'] = '0.01'
        job_profile.conditional_loss_poe = [0.01]
        job_profile.save()

        job_ctxt = engine.JobContext(
            params, job.id, sections=sections,
            serialize_results_to=['xml', 'db'], oq_job_profile=job_profile,
            oq_job=job)

        calculator = ClassicalRiskCalculator(job_ctxt)

        # Mock the composed loss map serializer:
        with helpers.patch('openquake.writer.CompositeWriter'
                           '.serialize') as writer_mock:
            calculator.write_output()

            self.assertEqual(1, writer_mock.call_count)

            # Now test that the composite writer got the correct
            # 'serialize to' instructions. The composite writer should have
            # 1 DB and 1 XML loss map serializer:
            composite_writer = writer_mock.call_args[0][0]
            writers = composite_writer.writers

            self.assertEqual(2, len(writers))
            # We don't assume anything about the order of the writers,
            # and we don't care anyway in this test:
            self.assertTrue(any(
                isinstance(w, LossMapDBWriter) for w in writers))
            self.assertTrue(any(
                isinstance(w, LossMapNonScenarioXMLWriter) for w in writers))
Example #49
    def test_run_job_deletes_job_counters(self):
        # This test ensures that
        # :function:`openquake.utils.stats.delete_job_counters` is called
        cfg_path = helpers.demo_file('HazardMapTest/config.gem')

        job_profile, params, sections = engine.import_job_profile(
            cfg_path, self.job)

        # We don't want any of the supervisor/executor forking to happen; it's
        # not necessary. Also, forking should not happen in the context of a
        # test run.
        with helpers.patch('os.fork', mocksignature=False) as fork_mock:
            # Fake return val for fork:
            fork_mock.return_value = 0
            # And we don't actually want to run the job.
            with helpers.patch('openquake.engine._launch_job'):
                with helpers.patch(
                    'openquake.utils.stats.delete_job_counters') as djc_mock:
                    engine.run_job(self.job, params, sections)
                    self.assertEqual(1, djc_mock.call_count)
Example #50
File: helpers.py Project: bwyss/oq-engine
def prepare_job_context(path_to_cfg):
    """Given a path to a config file, prepare and return a
    :class:`openquake.engine.JobContext`. This is convenient because it can be
    immediately passed to a calculator constructor.

    This also creates the necessary job and oq_job_profile records.
    """
    job = engine.prepare_job()

    cfg = demo_file(path_to_cfg)

    job_profile, params, sections = engine.import_job_profile(
        cfg, job, force_inputs=True)

    job_ctxt = engine.JobContext(params,
                                 job.id,
                                 sections=sections,
                                 oq_job_profile=job_profile,
                                 oq_job=job)

    return job_ctxt
Example #51
    def setUpClass(cls):
        cls.job = engine.prepare_job()
        jp, _, _ = engine.import_job_profile(RISK_DEMO_CONFIG_FILE, cls.job)
        calc_proxy = helpers.create_job({},
                                        job_id=cls.job.id,
                                        oq_job_profile=jp,
                                        oq_job=cls.job)

        # storing the basic exposure model
        ClassicalRiskCalculator(calc_proxy).store_exposure_assets()
        [input] = models.inputs4job(cls.job.id, input_type="exposure")
        model = input.model()
        assets = model.exposuredata_set.filter(taxonomy="aa/aatc-D/LR")

        if not assets:
            # This model did not exist in the database before.
            site = shapes.Site(1.0, 2.0)
            # more assets at same location
            models.ExposureData(exposure_model=model,
                                taxonomy="aa/aatc-D/LR",
                                asset_ref="ASSET_1",
                                stco=1,
                                site=geos.GEOSGeometry(site.point.to_wkt()),
                                reco=1).save()

            models.ExposureData(exposure_model=model,
                                taxonomy="aa/aatc-D/LR",
                                asset_ref="ASSET_2",
                                stco=1,
                                site=geos.GEOSGeometry(site.point.to_wkt()),
                                reco=1).save()

            site = shapes.Site(2.0, 2.0)
            # just one asset at location
            models.ExposureData(exposure_model=model,
                                taxonomy="aa/aatc-D/LR",
                                asset_ref="ASSET_3",
                                stco=1,
                                site=geos.GEOSGeometry(site.point.to_wkt()),
                                reco=1).save()
Example #52
    def test_partition(self):
        job_cfg = helpers.demo_file('classical_psha_based_risk/config.gem')
        job_profile, params, sections = engine.import_job_profile(
            job_cfg, self.job, force_inputs=True)
        job_ctxt = engine.JobContext(params,
                                     self.job.id,
                                     sections=sections,
                                     oq_job_profile=job_profile)

        calc = general.BaseRiskCalculator(job_ctxt)
        calc.store_exposure_assets()

        calc.partition()

        expected_blocks_keys = [0]
        self.assertEqual(expected_blocks_keys, job_ctxt.blocks_keys)

        expected_sites = [shapes.Site(-122.0, 38.225)]
        expected_block = general.Block(self.job.id, 0, expected_sites)

        actual_block = general.Block.from_kvs(self.job.id, 0)
        self.assertEqual(expected_block, actual_block)
        self.assertEqual(expected_block.sites, actual_block.sites)
Example #53
    def test_import_job_profile(self):
        # Given a path to a demo config file, ensure that the appropriate
        # database record for OqJobProfile is created.

        # At the moment, the api function used to import the job profile also
        # returns a dict of the config params and a list of config file
        # sections.

        cfg_path = helpers.demo_file('HazardMapTest/config.gem')

        # Default 'openquake' user:
        owner = helpers.default_user()

        smlt_input = models.Input(
            owner=helpers.default_user(),
            path=os.path.abspath(helpers.demo_file(
                'HazardMapTest/source_model_logic_tree.xml')),
            input_type='lt_source', size=671,
            digest="4372d13cec89f2a1072a2c7c694656d0")

        gmpelt_input = models.Input(
            owner=helpers.default_user(),
            path=os.path.abspath(helpers.demo_file(
                'HazardMapTest/gmpe_logic_tree.xml')),
            input_type='lt_gmpe', size=709,
            digest="d9ece248a1e73ee25bd5964670282012")

        src_model_input = models.Input(
            owner=helpers.default_user(),
            path=os.path.abspath(helpers.demo_file(
                'HazardMapTest/source_model.xml')),
            input_type='source', size=1644,
            digest="3118538b30b69289e6ea47967e9f51aa")

        expected_inputs_map = dict(
            lt_source=smlt_input, lt_gmpe=gmpelt_input, source=src_model_input)

        expected_jp = models.OqJobProfile(
            owner=owner,
            calc_mode='classical',
            job_type=['hazard'],
            region=GEOSGeometry(
                    'POLYGON((-122.2 37.6, -122.2 38.2, '
                    '-121.5 38.2, -121.5 37.6, -122.2 37.6))'),
            region_grid_spacing=0.01,
            min_magnitude=5.0,
            investigation_time=50.0,
            maximum_distance=200.0,
            component='gmroti50',
            imt='pga',
            period=None,
            damping=None,
            truncation_type='twosided',
            truncation_level=3.0,
            imls=[
                0.005, 0.007, 0.0098, 0.0137, 0.0192, 0.0269, 0.0376, 0.0527,
                0.0738, 0.103, 0.145, 0.203, 0.284, 0.397, 0.556, 0.778, 1.09],
            poes=[0.1],
            realizations=1,
            depth_to_1pt_0km_per_sec=100.0,
            vs30_type='measured',
            source_model_lt_random_seed=23,
            gmpe_lt_random_seed=5,
            width_of_mfd_bin=0.1,
            standard_deviation_type='total',
            reference_vs30_value=760.0,
            reference_depth_to_2pt5km_per_sec_param=5.0,
            sadigh_site_type='rock',
            # area sources:
            include_area_sources=True,
            treat_area_source_as='pointsources',
            area_source_discretization=0.1,
            area_source_magnitude_scaling_relationship=(
                'W&C 1994 Mag-Length Rel.'),
            # point sources:
            include_grid_sources=False,
            treat_grid_source_as='pointsources',
            grid_source_magnitude_scaling_relationship=(
                'W&C 1994 Mag-Length Rel.'),
            # simple faults:
            include_fault_source=True,
            fault_rupture_offset=1.0,
            fault_surface_discretization=1.0,
            fault_magnitude_scaling_relationship='Wells & Coppersmith (1994)',
            fault_magnitude_scaling_sigma=0.0,
            rupture_aspect_ratio=2.0,
            rupture_floating_type='downdip',
            # complex faults:
            include_subduction_fault_source=False,
            subduction_fault_rupture_offset=10.0,
            subduction_fault_surface_discretization=10.0,
            subduction_fault_magnitude_scaling_relationship=(
                'W&C 1994 Mag-Length Rel.'),
            subduction_fault_magnitude_scaling_sigma=0.0,
            subduction_rupture_aspect_ratio=1.5,
            subduction_rupture_floating_type='downdip',
            quantile_levels=[],
            compute_mean_hazard_curve=True)

        expected_sections = ['HAZARD', 'general']
        expected_params = {
            'AREA_SOURCE_DISCRETIZATION': '0.1',
            'AREA_SOURCE_MAGNITUDE_SCALING_RELATIONSHIP':
                'W&C 1994 Mag-Length Rel.',
            'BASE_PATH': os.path.abspath(helpers.demo_file('HazardMapTest')),
            'CALCULATION_MODE': 'Classical',
            'COMPONENT': 'Average Horizontal (GMRotI50)',
            'COMPUTE_MEAN_HAZARD_CURVE': 'true',
            'DAMPING': '5.0',
            'DEPTHTO1PT0KMPERSEC': '100.0',
            'FAULT_MAGNITUDE_SCALING_RELATIONSHIP':
                'Wells & Coppersmith (1994)',
            'FAULT_MAGNITUDE_SCALING_SIGMA': '0.0',
            'FAULT_RUPTURE_OFFSET': '1.0',
            'FAULT_SURFACE_DISCRETIZATION': '1.0',
            'GMPE_LOGIC_TREE_FILE': os.path.abspath(
                helpers.demo_file('HazardMapTest/gmpe_logic_tree.xml')),
            'GMPE_LT_RANDOM_SEED': '5',
            'GMPE_TRUNCATION_TYPE': '2 Sided',
            'GRID_SOURCE_MAGNITUDE_SCALING_RELATIONSHIP':
                'W&C 1994 Mag-Length Rel.',
            'INCLUDE_AREA_SOURCES': 'true',
            'INCLUDE_FAULT_SOURCE': 'true',
            'INCLUDE_GRID_SOURCES': 'false',
            'INCLUDE_SUBDUCTION_FAULT_SOURCE': 'false',
            'INTENSITY_MEASURE_LEVELS': (
                '0.005, 0.007, 0.0098, 0.0137, 0.0192, 0.0269, 0.0376, 0.0527,'
                ' 0.0738, 0.103, 0.145, 0.203, 0.284, 0.397, 0.556, 0.778,'
                ' 1.09'),
            'INTENSITY_MEASURE_TYPE': 'PGA',
            'INVESTIGATION_TIME': '50.0',
            'MAXIMUM_DISTANCE': '200.0',
            'MINIMUM_MAGNITUDE': '5.0',
            'NUMBER_OF_LOGIC_TREE_SAMPLES': '1',
            'OUTPUT_DIR': 'computed_output',
            'PERIOD': '0.0',
            'POES': '0.1',
            'QUANTILE_LEVELS': '',
            'REFERENCE_DEPTH_TO_2PT5KM_PER_SEC_PARAM': '5.0',
            'REFERENCE_VS30_VALUE': '760.0',
            'REGION_GRID_SPACING': '0.01',
            'REGION_VERTEX':
                '37.6, -122.2, 38.2, -122.2, 38.2, -121.5, 37.6, -121.5',
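            # (REGION_VERTEX lists lat, lon pairs; the POLYGON in
            # expected_jp above is in WKT lon lat order)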
            'RUPTURE_ASPECT_RATIO': '2.0',
            'RUPTURE_FLOATING_TYPE': 'Along strike and down dip',
            'SADIGH_SITE_TYPE': 'Rock',
            'SOURCE_MODEL_LOGIC_TREE_FILE': os.path.abspath(
                helpers.demo_file(
                    'HazardMapTest/source_model_logic_tree.xml')),
            'SOURCE_MODEL_LT_RANDOM_SEED': '23',
            'STANDARD_DEVIATION_TYPE': 'Total',
            'SUBDUCTION_FAULT_MAGNITUDE_SCALING_RELATIONSHIP':
                'W&C 1994 Mag-Length Rel.',
            'SUBDUCTION_FAULT_MAGNITUDE_SCALING_SIGMA': '0.0',
            'SUBDUCTION_FAULT_RUPTURE_OFFSET': '10.0',
            'SUBDUCTION_FAULT_SURFACE_DISCRETIZATION': '10.0',
            'SUBDUCTION_RUPTURE_ASPECT_RATIO': '1.5',
            'SUBDUCTION_RUPTURE_FLOATING_TYPE': 'Along strike and down dip',
            'TREAT_AREA_SOURCE_AS': 'Point Sources',
            'TREAT_GRID_SOURCE_AS': 'Point Sources',
            'TRUNCATION_LEVEL': '3',
            'VS30_TYPE': 'measured',
            'WIDTH_OF_MFD_BIN': '0.1'}

        actual_jp, params, sections = engine.import_job_profile(
            cfg_path, self.job)
        self.assertEqual(expected_params, params)
        self.assertEqual(expected_sections, sections)

        # Test the OqJobProfile:
        self.assertTrue(
            models.model_equals(expected_jp, actual_jp, ignore=(
                'id', 'last_update', '_owner_cache')))

        # Test the Inputs:
        actual_inputs = models.inputs4job(self.job.id)
        self.assertEqual(3, len(actual_inputs))

        for act_inp in actual_inputs:
            exp_inp = expected_inputs_map[act_inp.input_type]
            self.assertTrue(
                models.model_equals(
                    exp_inp, act_inp, ignore=(
                        "id", "last_update", "path", "model", "_owner_cache",
                        "owner_id", "model_content_id")))
Example #54
    def test_generate_hazard_curves_using_classical_psha(self):
        def verify_realization_haz_curves_stored_to_kvs(the_job, keys):
            """ This just tests to make sure there something in the KVS
            for each key in given list of keys. This does NOT test the
            actual results. """
            # TODO (LB): At some point we need to test the actual
            # results to verify they are correct

            realizations = int(the_job.params['NUMBER_OF_LOGIC_TREE_SAMPLES'])

            for realization in xrange(0, realizations):
                for site in the_job.sites_to_compute():
                    key = tokens.hazard_curve_poes_key(the_job.job_id,
                                                       realization, site)
                    self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_mean_haz_curves_stored_to_kvs(the_job, keys):
            """ Make sure that the keys and non-empty values for mean
            hazard curves have been written to KVS."""

            if the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == 'true':

                LOG.debug("verifying KVS entries for mean hazard curves")
                for site in the_job.sites_to_compute():
                    key = tokens.mean_hazard_curve_key(the_job.job_id, site)
                    self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_mean_haz_maps_stored_to_kvs(the_job, calculator, keys):
            """ Make sure that the keys and non-empty values for mean
            hazard maps have been written to KVS."""

            if (the_job.params[hazard_general.POES_PARAM_NAME] != '' and
                the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == 'true'):

                LOG.debug("verifying KVS entries for mean hazard maps")

                for poe in calculator.poes_hazard_maps:
                    for site in the_job.sites_to_compute():
                        key = tokens.mean_hazard_map_key(
                            the_job.job_id, site, poe)
                        self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_quantile_haz_curves_stored_to_kvs(the_job, calculator,
                                                     keys):
            """ Make sure that the keys and non-empty values for quantile
            hazard curves have been written to KVS."""

            quantiles = calculator.quantile_levels

            LOG.debug("verifying KVS entries for quantile hazard curves, "\
                "%s quantile values" % len(quantiles))

            for quantile in quantiles:
                for site in the_job.sites_to_compute():
                    key = tokens.quantile_hazard_curve_key(
                        the_job.job_id, site, quantile)
                    self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_quantile_haz_maps_stored_to_kvs(the_job, calculator, keys):
            """ Make sure that the keys and non-empty values for quantile
            hazard maps have been written to KVS."""

            quantiles = calculator.quantile_levels

            if (the_job.params[hazard_general.POES_PARAM_NAME] != ''
                    and len(quantiles) > 0):

                poes = calculator.poes_hazard_maps

                LOG.debug("verifying KVS entries for quantile hazard maps, "\
                    "%s quantile values, %s PoEs" % (
                    len(quantiles), len(poes)))

                for quantile in quantiles:
                    for poe in poes:
                        for site in the_job.sites_to_compute():
                            key = tokens.quantile_hazard_map_key(
                                the_job.job_id, site, poe, quantile)
                            self.assertTrue(key in keys,
                                            "Missing key %s" % key)

        def verify_realization_haz_curves_stored_to_nrml(the_job, calculator):
            """Tests that a NRML file has been written for each realization,
            and that this file validates against the NRML schema.
            Does NOT test if results in NRML file are correct.
            """
            realizations = int(the_job.params['NUMBER_OF_LOGIC_TREE_SAMPLES'])
            for realization in xrange(0, realizations):

                nrml_path = os.path.join(
                    "demos/classical_psha_simple/computed_output",
                    calculator.hazard_curve_filename(realization))

                LOG.debug("validating NRML file %s" % nrml_path)

                self.assertTrue(xml.validates_against_xml_schema(
                    nrml_path, NRML_SCHEMA_PATH),
                    "NRML instance file %s does not validate against schema" \
                    % nrml_path)

        def verify_mean_haz_curves_stored_to_nrml(the_job, calculator):
            """Tests that a mean hazard curve NRML file has been written,
            and that this file validates against the NRML schema.
            Does NOT test if results in NRML file are correct.
            """

            if the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == 'true':
                nrml_path = os.path.join(
                    "demos/classical_psha_simple/computed_output",
                    calculator.mean_hazard_curve_filename())

                LOG.debug("validating NRML file %s" % nrml_path)

                self.assertTrue(xml.validates_against_xml_schema(
                    nrml_path, NRML_SCHEMA_PATH),
                    "NRML instance file %s does not validate against schema" \
                    % nrml_path)

        def verify_mean_haz_maps_stored_to_nrml(the_job, calculator):
            """Tests that a mean hazard map NRML file has been written,
            and that this file validates against the NRML schema.
            Does NOT test if results in NRML file are correct.
            """
            if (the_job.params[hazard_general.POES_PARAM_NAME] != '' and
                the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == 'true'):

                for poe in calculator.poes_hazard_maps:
                    nrml_path = os.path.join(
                        "demos/classical_psha_simple/computed_output",
                        calculator.mean_hazard_map_filename(poe))

                    LOG.debug("validating NRML file for mean hazard map %s" \
                        % nrml_path)

                    self.assertTrue(xml.validates_against_xml_schema(
                        nrml_path, NRML_SCHEMA_PATH),
                        "NRML instance file %s does not validate against "\
                        "schema" % nrml_path)

        def verify_quantile_haz_curves_stored_to_nrml(the_job, calculator):
            """Tests that quantile hazard curve NRML files have been written,
            and that these files validate against the NRML schema.
            Does NOT test if results in NRML files are correct.
            """

            for quantile in calculator.quantile_levels:

                nrml_path = os.path.join(
                    "demos/classical_psha_simple/computed_output",
                    calculator.quantile_hazard_curve_filename(quantile))

                LOG.debug("validating NRML file for quantile hazard curve: "\
                    "%s" % nrml_path)

                self.assertTrue(xml.validates_against_xml_schema(
                    nrml_path, NRML_SCHEMA_PATH),
                    "NRML instance file %s does not validate against schema" \
                    % nrml_path)

        def verify_quantile_haz_maps_stored_to_nrml(the_job, calculator):
            """Tests that quantile hazard map NRML files have been written,
            and that these files validate against the NRML schema.
            Does NOT test if results in NRML files are correct.
            """

            quantiles = calculator.quantile_levels

            if (the_job.params[hazard_general.POES_PARAM_NAME] != ''
                    and len(quantiles) > 0):

                for poe in calculator.poes_hazard_maps:
                    for quantile in quantiles:
                        nrml_path = os.path.join(
                            "demos/classical_psha_simple/computed_output",
                            calculator.quantile_hazard_map_filename(
                                quantile, poe))

                        LOG.debug("validating NRML file for quantile hazard "\
                            "map: %s" % nrml_path)

                        self.assertTrue(xml.validates_against_xml_schema(
                            nrml_path, NRML_SCHEMA_PATH),
                            "NRML instance file %s does not validate against "\
                            "schema" % nrml_path)

        base_path = helpers.testdata_path("classical_psha_simple")
        path = helpers.testdata_path("classical_psha_simple/config.gem")
        job = engine.prepare_job()
        job_profile, params, sections = engine.import_job_profile(path, job)

        the_job = JobContext(params,
                             job.id,
                             sections=sections,
                             base_path=base_path,
                             serialize_results_to=['db', 'xml'],
                             oq_job_profile=job_profile,
                             oq_job=job)
        the_job.to_kvs()

        calc_mode = job_profile.calc_mode
        calculator = CALCULATORS[calc_mode](the_job)

        used_keys = []
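        # execute() appends each KVS key it writes to used_keys; the verify_*
        # helpers defined above then check the list for expected entries.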
        calculator.execute(used_keys)

        verify_realization_haz_curves_stored_to_kvs(the_job, used_keys)
        verify_realization_haz_curves_stored_to_nrml(the_job, calculator)

        # hazard curves: check results of mean and quantile computation
        verify_mean_haz_curves_stored_to_kvs(the_job, used_keys)
        verify_quantile_haz_curves_stored_to_kvs(the_job, calculator,
                                                 used_keys)

        verify_mean_haz_curves_stored_to_nrml(the_job, calculator)
        verify_quantile_haz_curves_stored_to_nrml(the_job, calculator)

        # hazard maps: check results of mean and quantile computation
        verify_mean_haz_maps_stored_to_kvs(the_job, calculator, used_keys)
        verify_quantile_haz_maps_stored_to_kvs(the_job, calculator, used_keys)

        verify_mean_haz_maps_stored_to_nrml(the_job, calculator)
        verify_quantile_haz_maps_stored_to_nrml(the_job, calculator)