Example #1
def _job_from_file(config_file, output_type, owner_username='openquake'):
    """
    Create a job from external configuration files.

    NOTE: This function is deprecated. Please use
    :func:`openquake.engine.import_job_profile`.

    :param config_file:
        The external configuration file path
    :param output_type:
        Where to store results:
        * 'db' database
        * 'xml' XML files *plus* database
    :param owner_username:
        oq_user.user_name which defines the owner of all DB artifacts created
        by this function.
    """

    # output_type can be set, in addition to 'db' and 'xml', also to
    # 'xml_without_db', which has the effect of serializing only to XML
    # without requiring a database at all.
    # This allows tests to run without a database.
    # It is not documented in the public interface because it is
    # essentially a detail of our current tests and CI infrastructure.
    assert output_type in ('db', 'xml', 'xml_without_db')

    params, sections = _parse_config_file(config_file)
    params, sections = _prepare_config_parameters(params, sections)
    job_profile = _prepare_job(params, sections)

    validator = jobconf.default_validators(sections, params)
    is_valid, errors = validator.is_valid()

    if not is_valid:
        raise jobconf.ValidationException(errors)

    owner = OqUser.objects.get(user_name=owner_username)
    # openquake-server creates the calculation record in advance and stores
    # the calculation id in the config file
    calculation_id = params.get('OPENQUAKE_JOB_ID')
    if not calculation_id:
        # create the database record for this calculation
        calculation = OqCalculation(owner=owner, path=None)
        calculation.oq_job_profile = job_profile
        calculation.save()
        calculation_id = calculation.id

    if output_type == 'db':
        serialize_results_to = ['db']
    elif output_type == 'xml':
        serialize_results_to = ['db', 'xml']
    else:
        # 'xml_without_db': serialize to XML only.
        serialize_results_to = ['xml']

    base_path = params['BASE_PATH']

    job = CalculationProxy(params, calculation_id, sections=sections,
                           base_path=base_path,
                           serialize_results_to=serialize_results_to)
    job.to_kvs()

    return job
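For orientation, a minimal call sketch of the helper above (hypothetical
config path; assumes a configured OpenQuake database with the default
'openquake' owner):

# Hypothetical usage sketch: parse and validate the config, create the
# OqCalculation record, and cache the job parameters in the KVS.
job = _job_from_file('/path/to/config.gem', 'db')
print(job.job_id)  # id of the newly created calculation record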
Example #2
    def test_calculator_for_task(self):
        """Load up a sample calculation (into the db and cache) and make sure
        we can instantiate the correct calculator for a given calculation id.
        """
        from openquake.calculators.hazard.classical.core import (
            ClassicalHazardCalculator)
        job_profile, params, sections = engine.import_job_profile(demo_file(
            'simple_fault_demo_hazard/config.gem'))

        calculation = OqCalculation(owner=job_profile.owner,
                                    oq_job_profile=job_profile)
        calculation.save()

        calc_proxy = engine.CalculationProxy(params, calculation.id,
                                             oq_job_profile=job_profile,
                                             oq_calculation=calculation)
        calc_proxy.to_kvs()

        with patch(
            'openquake.utils.tasks.get_running_calculation') as grc_mock:

            # Loading of the CalculationProxy is done by
            # `get_running_calculation`, which is covered by other tests.
            # So, we just want to make sure that it's called here.
            grc_mock.return_value = calc_proxy

            calculator = tasks.calculator_for_task(calculation.id, 'hazard')

            self.assertTrue(isinstance(calculator, ClassicalHazardCalculator))
            self.assertEqual(1, grc_mock.call_count)
Example #3
    def setUp(self):
        kvs.get_client().flushall()

        base_path = helpers.testdata_path("scenario")
        self.job_profile, self.params, self.sections = (
            engine.import_job_profile(SCENARIO_SMOKE_TEST))
        calculation = OqCalculation(owner=self.job_profile.owner,
                                    oq_job_profile=self.job_profile)
        calculation.save()
        self.calc_proxy = CalculationProxy(
            self.params, calculation.id, sections=self.sections,
            base_path=base_path, oq_job_profile=self.job_profile,
            oq_calculation=calculation)

        self.calc_proxy.params[NUMBER_OF_CALC_KEY] = "1"

        self.calc_proxy.params['SERIALIZE_RESULTS_TO'] = 'xml'

        # Save the default Java implementation.
        self.default = (
            scenario.ScenarioHazardCalculator.compute_ground_motion_field)

        self.grid = self.calc_proxy.region.grid

        self.calc_proxy.to_kvs()
Example #4
class GetRunningCalculationTestCase(unittest.TestCase):
    """Tests for :function:`openquake.utils.tasks.get_running_calculation`."""

    def setUp(self):
        self.job_profile, self.params, _sections = (
            engine.import_job_profile(demo_file(
                'simple_fault_demo_hazard/config.gem')))

        self.params['debug'] = 'warn'

        self.calculation = OqCalculation(
            owner=self.job_profile.owner,
            oq_job_profile=self.job_profile)
        self.calculation.save()

        # Cache the calc proxy data into the kvs:
        calc_proxy = engine.CalculationProxy(
            self.params, self.calculation.id, oq_job_profile=self.job_profile,
            oq_calculation=self.calculation)
        calc_proxy.to_kvs()

    def test_get_running_calculation(self):
        self.calculation.status = 'pending'
        self.calculation.save()

        # No 'JobCompletedError' should be raised.
        calc_proxy = tasks.get_running_calculation(self.calculation.id)

        self.assertEqual(self.params, calc_proxy.params)
        self.assertTrue(model_equals(
            self.job_profile, calc_proxy.oq_job_profile,
            ignore=('_owner_cache',)))
        self.assertTrue(model_equals(
            self.calculation, calc_proxy.oq_calculation,
            ignore=('_owner_cache',)))

    def test_get_completed_calculation(self):
        self.calculation.status = 'succeeded'
        self.calculation.save()

        try:
            tasks.get_running_calculation(self.calculation.id)
        except tasks.JobCompletedError as exc:
            self.assertEqual(exc.message, self.calculation.id)
        else:
            self.fail("JobCompletedError wasn't raised")

    def test_completed_failure(self):
        self.calculation.status = 'failed'
        self.calculation.save()

        try:
            tasks.get_running_calculation(self.calculation.id)
        except tasks.JobCompletedError as exc:
            self.assertEqual(exc.message, self.calculation.id)
        else:
            self.fail("JobCompletedError wasn't raised")
Example #5
    def test__serialize_xml_filenames(self):
        # Test that the file names of the loss XML artifacts are correct.
        # See https://bugs.launchpad.net/openquake/+bug/894706.
        expected_lrc_file_name = "losscurves-block-#%(calculation_id)s-block#%(block)s.xml"
        expected_lr_file_name = "losscurves-loss-block-#%(calculation_id)s-block#%(block)s.xml"

        cfg_file = demo_file("classical_psha_based_risk/config.gem")

        job_profile, params, sections = import_job_profile(cfg_file)

        calculation = OqCalculation(owner=job_profile.owner, oq_job_profile=job_profile)
        calculation.save()

        calc_proxy = CalculationProxy(
            params,
            calculation.id,
            sections=sections,
            serialize_results_to=["xml", "db"],
            oq_job_profile=job_profile,
            oq_calculation=calculation,
        )

        calculator = ClassicalRiskCalculator(calc_proxy)

        with patch("openquake.writer.FileWriter.serialize"):
            # The 'curves' key in the kwargs just needs to be present;
            # because of the serialize mock in place above, it doesn't need
            # to have a real value.

            # First, we test loss ratio curve output,
            # then we'll do the same test for loss curve output.

            # We expect to get a single file path back.
            [file_path] = calculator._serialize(0, **dict(curve_mode="loss_ratio", curves=[]))

            _dir, file_name = os.path.split(file_path)

            self.assertEqual(expected_lrc_file_name % dict(calculation_id=calculation.id, block=0), file_name)

            # The same test again, except for loss curves this time.
            [file_path] = calculator._serialize(0, **dict(curve_mode="loss", curves=[]))

            _dir, file_name = os.path.split(file_path)

            self.assertEqual(expected_lr_file_name % dict(calculation_id=calculation.id, block=0), file_name)
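The assertions rely on plain %-interpolation of the two templates; a worked
example with calculation id 7 and block 0:

name = ("losscurves-block-#%(calculation_id)s-block#%(block)s.xml"
        % dict(calculation_id=7, block=0))
assert name == "losscurves-block-#7-block#0.xml"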
Example #6
    def test_write_output(self):
        # Test that the loss map writers are properly called when
        # write_output is invoked.
        cfg_file = demo_file("classical_psha_based_risk/config.gem")

        job_profile, params, sections = import_job_profile(cfg_file)

        # Set conditional loss poe so that loss maps are created.
        # If this parameter is not specified, no loss maps will be serialized
        # at the end of the calculation.
        params["CONDITIONAL_LOSS_POE"] = "0.01"
        job_profile.conditional_loss_poe = [0.01]
        job_profile.save()

        calculation = OqCalculation(owner=job_profile.owner, oq_job_profile=job_profile)
        calculation.save()

        calc_proxy = CalculationProxy(
            params,
            calculation.id,
            sections=sections,
            serialize_results_to=["xml", "db"],
            oq_job_profile=job_profile,
            oq_calculation=calculation,
        )

        calculator = ClassicalRiskCalculator(calc_proxy)

        # Mock the composed loss map serializer:
        with patch("openquake.writer.CompositeWriter" ".serialize") as writer_mock:
            calculator.write_output()

            self.assertEqual(1, writer_mock.call_count)

            # Now test that the composite writer got the correct
            # 'serialize to' instructions. The composite writer should have
            # 1 DB and 1 XML loss map serializer:
            composite_writer = writer_mock.call_args[0][0]
            writers = composite_writer.writers

            self.assertEqual(2, len(writers))
            # We don't assume anything about the order of the writers,
            # and we don't care anyway in this test:
            self.assertTrue(any(isinstance(w, LossMapDBWriter) for w in writers))
            self.assertTrue(any(isinstance(w, LossMapNonScenarioXMLWriter) for w in writers))
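The test leans on the composite pattern: a single writer fans each
serialize() call out to several wrapped writers (here, one DB and one XML
loss map writer). A generic sketch of that pattern, not the openquake
implementation:

class CompositeWriterSketch(object):
    """Fan a serialize() call out to each wrapped writer."""

    def __init__(self, *writers):
        self.writers = list(writers)

    def serialize(self, items):
        for writer in self.writers:
            writer.serialize(items)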
Example #7
    def test__launch_calculation_calls_core_calc_methods(self):
        # The `Calculator` interface defines 4 general methods:
        # - analyze
        # - pre_execute
        # - execute
        # - post_execute
        # When `_launch_calculation` is called, each of these methods should be
        # called once per job type (hazard, risk).

        # Calculation setup:
        cfg_file = demo_file('classical_psha_based_risk/config.gem')

        job_profile, params, sections = engine.import_job_profile(cfg_file)
        calculation = OqCalculation(owner=job_profile.owner,
                                    oq_job_profile=job_profile)
        calculation.save()

        calc_proxy = engine.CalculationProxy(
            params, calculation.id, sections=sections,
            serialize_results_to=['xml', 'db'],
            oq_job_profile=job_profile, oq_calculation=calculation)

        # Mocking setup:
        cls_haz_calc = ('openquake.calculators.hazard.classical.core'
                        '.ClassicalHazardCalculator')
        cls_risk_calc = ('openquake.calculators.risk.classical.core'
                         '.ClassicalRiskCalculator')
        methods = ('analyze', 'pre_execute', 'execute', 'post_execute')
        haz_patchers = [patch('%s.%s' % (cls_haz_calc, m)) for m in methods]
        risk_patchers = [patch('%s.%s' % (cls_risk_calc, m)) for m in methods]

        haz_mocks = [p.start() for p in haz_patchers]
        risk_mocks = [p.start() for p in risk_patchers]

        # Call the function under test:
        engine._launch_calculation(calc_proxy, sections)

        self.assertTrue(all(x.call_count == 1 for x in haz_mocks))
        self.assertTrue(all(x.call_count == 1 for x in risk_mocks))

        # Tear down the mocks:
        for p in haz_patchers:
            p.stop()
        for p in risk_patchers:
            p.stop()
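One caveat: if an assertion fails before the stop() loops run, the patchers
leak into subsequent tests. A safer variant (assuming Python 2.7's
unittest.TestCase.addCleanup) registers the teardown as soon as the
patchers start:

haz_mocks = [p.start() for p in haz_patchers]
risk_mocks = [p.start() for p in risk_patchers]
for p in haz_patchers + risk_patchers:
    self.addCleanup(p.stop)  # undone even if the test fails mid-way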
Example #8
    def setUp(self):
        # Test 'event-based' job
        cfg_path = helpers.testdata_path("simplecase/config.gem")
        base_path = helpers.testdata_path("simplecase")

        oq_job_profile, params, sections = engine.import_job_profile(cfg_path)

        oq_calculation = OqCalculation(owner=oq_job_profile.owner, description="", oq_job_profile=oq_job_profile)
        oq_calculation.save()

        self.eb_job = CalculationProxy(
            params,
            oq_calculation.id,
            sections=sections,
            base_path=base_path,
            oq_job_profile=oq_job_profile,
            oq_calculation=oq_calculation,
        )
Example #9
def run_calculation(job_profile, params, sections, output_type='db'):
    """Given an :class:`openquake.db.models.OqJobProfile` object, create a new
    :class:`openquake.db.models.OqCalculation` object and run the calculation.

    NOTE: The params and sections arguments are temporary; they remain
    required until we can run calculations purely from Django model objects
    as calculator input.

    Returns the calculation object when the calculation concludes.

    :param job_profile:
        :class:`openquake.db.models.OqJobProfile` instance.
    :param params:
        A dictionary of config parameters parsed from the calculation
        config file.
    :param sections:
        A list of sections parsed from the calculation config file.
    :param output_type:
        'db' or 'xml' (defaults to 'db')

    :returns:
        :class:`openquake.db.models.OqCalculation` instance.
    """
    if output_type not in ('db', 'xml'):
        raise RuntimeError("output_type must be 'db' or 'xml'")

    calculation = OqCalculation(owner=job_profile.owner)
    calculation.oq_job_profile = job_profile
    calculation.status = 'running'
    calculation.save()

    # Clear any counters for this calculation_id, prior to running the
    # calculation.
    # We do this just to make sure all of the counters behave properly and can
    # provide accurate data about a calculation in-progress.
    stats.delete_job_counters(calculation.id)

    # Make the job/calculation ID generally available.
    utils_config.Config().job_id = calculation.id

    serialize_results_to = ['db']
    if output_type == 'xml':
        serialize_results_to.append('xml')

    calc_proxy = CalculationProxy(params, calculation.id, sections=sections,
                                  serialize_results_to=serialize_results_to,
                                  oq_job_profile=job_profile,
                                  oq_calculation=calculation)

    # Close all db connections to make sure they're not shared between the
    # supervisor and job executor processes; otherwise, if one of them closes
    # the connection, it immediately becomes unavailable to the others.
    close_connection()

    calc_pid = os.fork()
    if not calc_pid:
        # calculation executor process
        try:
            logs.init_logs_amqp_send(level=FLAGS.debug, job_id=calculation.id)
            _launch_calculation(calc_proxy, sections)
        except Exception as ex:
            logs.LOG.critical("Calculation failed with exception: '%s'"
                              % str(ex))
            calculation.status = 'failed'
            calculation.save()
            raise
        else:
            calculation.status = 'succeeded'
            calculation.save()
        return
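The listing ends inside the forked child process; per the docstring, the
parent eventually returns the calculation object. A hedged usage sketch
(hypothetical config path; assumes a configured database):

job_profile, params, sections = import_job_profile('/path/to/config.gem')
calculation = run_calculation(job_profile, params, sections,
                              output_type='xml')
print(calculation.status)  # 'succeeded' or 'failed' once it concludes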
Example #10
    def setUp(self):
        owner = OqUser.objects.get(user_name="openquake")
        self.calculation = OqCalculation(owner=owner, path=None)
Example #11
class PrepareJobTestCase(unittest.TestCase, helpers.DbTestCase):

    """
    Unit tests for the _prepare_job helper function, which creates a new
    job entry with the associated parameters.

    Test data is a trimmed-down version of smoketest config files.

    As a side effect, this also tests that the inserted records satisfy
    the DB constraints.
    """

    BASE_CLASSICAL_PARAMS = {
        "CALCULATION_MODE": "Classical",
        "POES": "0.01 0.1",
        "SOURCE_MODEL_LT_RANDOM_SEED": "23",
        "GMPE_LT_RANDOM_SEED": "5",
        "INTENSITY_MEASURE_TYPE": "PGA",
        "MINIMUM_MAGNITUDE": "5.0",
        "INVESTIGATION_TIME": "50.0",
        "INCLUDE_GRID_SOURCES": "true",
        "TREAT_GRID_SOURCE_AS": "Point Sources",
        "INCLUDE_AREA_SOURCES": "true",
        "TREAT_AREA_SOURCE_AS": "Point Sources",
        "QUANTILE_LEVELS": "0.25 0.50",
        "INTENSITY_MEASURE_LEVELS": "0.005, 0.007, 0.0098, 0.0137, 0.0192",
        "GMPE_TRUNCATION_TYPE": "2 Sided",
        "STANDARD_DEVIATION_TYPE": "Total",
        "MAXIMUM_DISTANCE": "200.0",
        "NUMBER_OF_LOGIC_TREE_SAMPLES": "2",
        "PERIOD": "0.0",
        "DAMPING": "5.0",
        "AGGREGATE_LOSS_CURVE": "1",
        "INCLUDE_FAULT_SOURCE": "true",
        "FAULT_RUPTURE_OFFSET": "5.0",
        "FAULT_SURFACE_DISCRETIZATION": "1.0",
        "FAULT_MAGNITUDE_SCALING_SIGMA": "0.0",
        "FAULT_MAGNITUDE_SCALING_RELATIONSHIP": "W&C 1994 Mag-Length Rel.",
        "REFERENCE_VS30_VALUE": "760.0",
        "REFERENCE_DEPTH_TO_2PT5KM_PER_SEC_PARAM": "5.0",
        "COMPONENT": "Average Horizontal (GMRotI50)",
        "CONDITIONAL_LOSS_POE": "0.01",
        "TRUNCATION_LEVEL": "3",
        "COMPUTE_MEAN_HAZARD_CURVE": "true",
        "AREA_SOURCE_DISCRETIZATION": "0.1",
        "AREA_SOURCE_MAGNITUDE_SCALING_RELATIONSHIP": "W&C 1994 Mag-Length Rel.",
        "WIDTH_OF_MFD_BIN": "0.1",
        "SADIGH_SITE_TYPE": "Rock",
        "INCLUDE_SUBDUCTION_FAULT_SOURCE": "true",
        "SUBDUCTION_FAULT_RUPTURE_OFFSET": "10.0",
        "SUBDUCTION_FAULT_SURFACE_DISCRETIZATION": "10.0",
        "SUBDUCTION_FAULT_MAGNITUDE_SCALING_SIGMA": "0.0",
        "SUBDUCTION_RUPTURE_ASPECT_RATIO": "1.5",
        "SUBDUCTION_RUPTURE_FLOATING_TYPE": "Along strike and down dip",
        "SUBDUCTION_FAULT_MAGNITUDE_SCALING_RELATIONSHIP": "W&C 1994 Mag-Length Rel.",
        "RUPTURE_ASPECT_RATIO": "1.5",
        "RUPTURE_FLOATING_TYPE": "Along strike and down dip",
    }

    BASE_SCENARIO_PARAMS = {
        "CALCULATION_MODE": "Scenario",
        "GMPE_MODEL_NAME": "BA_2008_AttenRel",
        "GMF_RANDOM_SEED": "3",
        "RUPTURE_SURFACE_DISCRETIZATION": "0.1",
        "INTENSITY_MEASURE_TYPE": "PGA",
        "REFERENCE_VS30_VALUE": "759.0",
        "COMPONENT": "Average Horizontal (GMRotI50)",
        "PERIOD": "0.0",
        "DAMPING": "5.0",
        "NUMBER_OF_GROUND_MOTION_FIELDS_CALCULATIONS": "5",
        "TRUNCATION_LEVEL": "3",
        "GMPE_TRUNCATION_TYPE": "1 Sided",
        "GROUND_MOTION_CORRELATION": "true",
    }

    BASE_EVENT_BASED_PARAMS = {
        "CALCULATION_MODE": "Event Based",
        "SOURCE_MODEL_LT_RANDOM_SEED": "23",
        "GMPE_LT_RANDOM_SEED": "5",
        "INTENSITY_MEASURE_TYPE": "SA",
        "INCLUDE_GRID_SOURCES": "false",
        "INCLUDE_SUBDUCTION_FAULT_SOURCE": "false",
        "RUPTURE_ASPECT_RATIO": "1.5",
        "MINIMUM_MAGNITUDE": "5.0",
        "SUBDUCTION_FAULT_MAGNITUDE_SCALING_SIGMA": "0.0",
        "INVESTIGATION_TIME": "50.0",
        "TREAT_GRID_SOURCE_AS": "Point Sources",
        "INCLUDE_AREA_SOURCES": "true",
        "TREAT_AREA_SOURCE_AS": "Point Sources",
        "INTENSITY_MEASURE_LEVELS": "0.005, 0.007, 0.0098, 0.0137, 0.0192",
        "GROUND_MOTION_CORRELATION": "false",
        "GMPE_TRUNCATION_TYPE": "None",
        "STANDARD_DEVIATION_TYPE": "Total",
        "SUBDUCTION_FAULT_RUPTURE_OFFSET": "10.0",
        "RISK_CELL_SIZE": "0.0005",
        "NUMBER_OF_LOGIC_TREE_SAMPLES": "5",
        "PERIOD": "1.0",
        "DAMPING": "5.0",
        "AGGREGATE_LOSS_CURVE": "true",
        "NUMBER_OF_SEISMICITY_HISTORIES": "1",
        "INCLUDE_FAULT_SOURCE": "true",
        "FAULT_RUPTURE_OFFSET": "5.0",
        "FAULT_SURFACE_DISCRETIZATION": "1.0",
        "FAULT_MAGNITUDE_SCALING_SIGMA": "0.0",
        "FAULT_MAGNITUDE_SCALING_RELATIONSHIP": "W&C 1994 Mag-Length Rel.",
        "SUBDUCTION_RUPTURE_ASPECT_RATIO": "1.5",
        "REFERENCE_VS30_VALUE": "760.0",
        "REFERENCE_DEPTH_TO_2PT5KM_PER_SEC_PARAM": "5.0",
        "COMPONENT": "Average Horizontal",
        "CONDITIONAL_LOSS_POE": "0.01",
        "TRUNCATION_LEVEL": "3",
        "AREA_SOURCE_DISCRETIZATION": "0.1",
        "AREA_SOURCE_MAGNITUDE_SCALING_RELATIONSHIP": "W&C 1994 Mag-Length Rel.",
        "WIDTH_OF_MFD_BIN": "0.1",
        "SADIGH_SITE_TYPE": "Rock",
        "SUBDUCTION_FAULT_RUPTURE_OFFSET": "10.0",
        "SUBDUCTION_FAULT_SURFACE_DISCRETIZATION": "10.0",
        "SUBDUCTION_FAULT_MAGNITUDE_SCALING_SIGMA": "0.0",
        "SUBDUCTION_RUPTURE_ASPECT_RATIO": "1.5",
        "SUBDUCTION_RUPTURE_FLOATING_TYPE": "Along strike and down dip",
        "SUBDUCTION_FAULT_MAGNITUDE_SCALING_RELATIONSHIP": "W&C 1994 Mag-Length Rel.",
        "RUPTURE_ASPECT_RATIO": "1.5",
        "RUPTURE_FLOATING_TYPE": "Along strike and down dip",
        "GMF_RANDOM_SEED": "1",
    }

    def setUp(self):
        owner = OqUser.objects.get(user_name="openquake")
        self.calculation = OqCalculation(owner=owner, path=None)

    def tearDown(self):
        if hasattr(self, "calculation") and self.calculation and hasattr(self.calculation, "oq_job_profile"):
            self.teardown_job(self.calculation)

    def _reload_params(self):
        return OqJobProfile.objects.get(id=self.job.id)

    def assertFieldsEqual(self, expected, params):
        got_params = dict((k, getattr(params, k)) for k in expected.keys())

        self.assertEqual(expected, got_params)

    def _get_inputs(self, job):
        inputs = [dict(path=i.path, type=i.input_type)
                  for i in job.input_set.input_set.all()]

        return sorted(inputs, key=lambda i: (i["type"], i["path"]))

    def test_get_source_models(self):
        abs_path = partial(datapath, "classical_psha_simple")

        path = abs_path("source_model_logic_tree.xml")
        models = _get_source_models(path)
        expected_models = [abs_path("source_model1.xml"), abs_path("source_model2.xml")]

        self.assertEqual(expected_models, models)

    def test_prepare_classical_job(self):
        abs_path = partial(datapath, "classical_psha_simple")
        params = self.BASE_CLASSICAL_PARAMS.copy()
        params["REGION_VERTEX"] = "37.9, -121.9, 37.9, -121.6, 37.5, -121.6"
        params["REGION_GRID_SPACING"] = "0.1"
        params["SOURCE_MODEL_LOGIC_TREE_FILE"] = abs_path("source_model_logic_tree.xml")
        params["GMPE_LOGIC_TREE_FILE"] = abs_path("gmpe_logic_tree.xml")
        params["EXPOSURE"] = abs_path("small_exposure.xml")
        params["VULNERABILITY"] = abs_path("vulnerability.xml")
        params["SOURCE_MODEL_LT_RANDOM_SEED"] = "23"
        params["GMPE_LT_RANDOM_SEED"] = "5"
        params["LREM_STEPS_PER_INTERVAL"] = "5"

        self.job = _prepare_job(params, ["HAZARD", "RISK"])
        self.calculation.oq_job_profile = self.job
        self.calculation.save()
        self.job = self._reload_params()
        self.assertEquals(params["REGION_VERTEX"], _to_coord_list(self.job.region))
        self.assertFieldsEqual(
            {
                "calc_mode": "classical",
                "region_grid_spacing": 0.1,
                "min_magnitude": 5.0,
                "investigation_time": 50.0,
                "component": "gmroti50",
                "imt": "pga",
                "period": None,
                "truncation_type": "twosided",
                "truncation_level": 3.0,
                "reference_vs30_value": 760.0,
                "imls": [0.005, 0.007, 0.0098, 0.0137, 0.0192],
                "poes": [0.01, 0.1],
                "realizations": 2,
                "histories": None,
                "gm_correlated": None,
                "damping": None,
                "gmf_calculation_number": None,
                "rupture_surface_discretization": None,
                "subduction_rupture_floating_type": "downdip",
            },
            self.job,
        )
        self.assertEqual(
            [
                {"path": abs_path("small_exposure.xml"), "type": "exposure"},
                {"path": abs_path("gmpe_logic_tree.xml"), "type": "lt_gmpe"},
                {"path": abs_path("source_model_logic_tree.xml"), "type": "lt_source"},
                {"path": abs_path("source_model1.xml"), "type": "source"},
                {"path": abs_path("source_model2.xml"), "type": "source"},
                {"path": abs_path("vulnerability.xml"), "type": "vulnerability"},
            ],
            self._get_inputs(self.job),
        )

    def test_prepare_classical_job_over_sites(self):
        """
        Same as test_prepare_classical_job, but with geometry specified as
        a list of sites.
        """
        params = self.BASE_CLASSICAL_PARAMS.copy()
        params["SITES"] = "37.9, -121.9, 37.9, -121.6, 37.5, -121.6"
        params["LREM_STEPS_PER_INTERVAL"] = "5"

        self.job = _prepare_job(params, ["HAZARD", "RISK"])
        self.calculation.oq_job_profile = self.job
        self.calculation.save()
        self.job = self._reload_params()

        self.assertEquals(params["SITES"], _to_coord_list(self.job.sites))
        self.assertFieldsEqual(
            {
                "calc_mode": "classical",
                "min_magnitude": 5.0,
                "investigation_time": 50.0,
                "component": "gmroti50",
                "imt": "pga",
                "period": None,
                "truncation_type": "twosided",
                "truncation_level": 3.0,
                "reference_vs30_value": 760.0,
                "imls": [0.005, 0.007, 0.0098, 0.0137, 0.0192],
                "poes": [0.01, 0.1],
                "realizations": 2,
                "histories": None,
                "gm_correlated": None,
            },
            self.job,
        )

    def test_prepare_scenario_job(self):
        abs_path = partial(datapath, "scenario")
        params = self.BASE_SCENARIO_PARAMS.copy()
        params["REGION_VERTEX"] = "34.07, -118.25, 34.07, -118.22, 34.04, -118.22"
        params["REGION_GRID_SPACING"] = "0.02"
        params["SINGLE_RUPTURE_MODEL"] = abs_path("simple-fault-rupture.xml")
        params["EXPOSURE"] = abs_path("LA_small_portfolio.xml")
        params["VULNERABILITY"] = abs_path("vulnerability.xml")

        self.job = _prepare_job(params, ["HAZARD", "RISK"])
        self.calculation.oq_job_profile = self.job
        self.calculation.save()
        self.job = self._reload_params()

        self.assertEquals(params["REGION_VERTEX"], _to_coord_list(self.job.region))
        self.assertFieldsEqual(
            {
                "calc_mode": "scenario",
                "region_grid_spacing": 0.02,
                "min_magnitude": None,
                "investigation_time": None,
                "component": "gmroti50",
                "imt": "pga",
                "period": None,
                "truncation_type": "onesided",
                "truncation_level": 3.0,
                "reference_vs30_value": 759.0,
                "imls": None,
                "poes": None,
                "realizations": None,
                "histories": None,
                "gm_correlated": True,
                "damping": None,
                "gmf_calculation_number": 5,
                "rupture_surface_discretization": 0.1,
            },
            self.job,
        )
        self.assertEqual(
            [
                {"path": abs_path("LA_small_portfolio.xml"), "type": "exposure"},
                {"path": abs_path("simple-fault-rupture.xml"), "type": "rupture"},
                {"path": abs_path("vulnerability.xml"), "type": "vulnerability"},
            ],
            self._get_inputs(self.job),
        )

    def test_prepare_scenario_job_over_sites(self):
        """
        Same as test_prepare_scenario_job, but with geometry specified as
        a list of sites.
        """
        params = self.BASE_SCENARIO_PARAMS.copy()
        params["SITES"] = "34.07, -118.25, 34.07, -118.22, 34.04, -118.22"

        self.job = _prepare_job(params, ["HAZARD", "RISK"])
        self.calculation.oq_job_profile = self.job
        self.calculation.save()
        self.job = self._reload_params()

        self.assertEquals(params["SITES"], _to_coord_list(self.job.sites))
        self.assertFieldsEqual(
            {
                "calc_mode": "scenario",
                "min_magnitude": None,
                "investigation_time": None,
                "component": "gmroti50",
                "imt": "pga",
                "period": None,
                "truncation_type": "onesided",
                "truncation_level": 3.0,
                "reference_vs30_value": 759.0,
                "imls": None,
                "poes": None,
                "realizations": None,
                "histories": None,
                "gm_correlated": True,
            },
            self.job,
        )

    def test_prepare_event_based_job(self):
        abs_path = partial(datapath, "simplecase")
        params = self.BASE_EVENT_BASED_PARAMS.copy()
        params["REGION_VERTEX"] = "33.88, -118.3, 33.88, -118.06, 33.76, -118.06"
        params["REGION_GRID_SPACING"] = "0.02"
        params["SOURCE_MODEL_LOGIC_TREE_FILE"] = abs_path("source_model_logic_tree.xml")
        params["GMPE_LOGIC_TREE_FILE"] = abs_path("gmpe_logic_tree.xml")
        params["EXPOSURE"] = abs_path("small_exposure.xml")
        params["VULNERABILITY"] = abs_path("vulnerability.xml")
        params["GMF_RANDOM_SEED"] = "1"
        params["LOSS_HISTOGRAM_BINS"] = "25"

        self.job = _prepare_job(params, ["HAZARD", "RISK"])
        self.calculation.oq_job_profile = self.job
        self.calculation.save()
        self.job = self._reload_params()
        self.assertEqual(params["REGION_VERTEX"], _to_coord_list(self.job.region))
        self.assertFieldsEqual(
            {
                "calc_mode": "event_based",
                "region_grid_spacing": 0.02,
                "min_magnitude": 5.0,
                "investigation_time": 50.0,
                "component": "average",
                "imt": "sa",
                "period": 1.0,
                "truncation_type": "none",
                "truncation_level": 3.0,
                "reference_vs30_value": 760.0,
                "imls": [0.005, 0.007, 0.0098, 0.0137, 0.0192],
                "poes": None,
                "realizations": 5,
                "histories": 1,
                "gm_correlated": False,
                "damping": 5.0,
                "gmf_calculation_number": None,
                "rupture_surface_discretization": None,
            },
            self.job,
        )
        self.assertEqual(
            [
                {"path": abs_path("small_exposure.xml"), "type": "exposure"},
                {"path": abs_path("gmpe_logic_tree.xml"), "type": "lt_gmpe"},
                {"path": abs_path("source_model_logic_tree.xml"), "type": "lt_source"},
                {"path": abs_path("source_model1.xml"), "type": "source"},
                {"path": abs_path("source_model2.xml"), "type": "source"},
                {"path": abs_path("vulnerability.xml"), "type": "vulnerability"},
            ],
            self._get_inputs(self.job),
        )

    def test_prepare_event_based_job_over_sites(self):
        """
        Same as test_prepare_event_based_job, but with geometry specified as
        a list of sites.
        """

        params = self.BASE_EVENT_BASED_PARAMS.copy()
        params["SITES"] = "33.88, -118.3, 33.88, -118.06, 33.76, -118.06"
        params["LOSS_HISTOGRAM_BINS"] = "25"

        self.job = _prepare_job(params, ["HAZARD", "RISK"])
        self.calculation.oq_job_profile = self.job
        self.calculation.save()
        self.job = self._reload_params()
        self.assertEqual(params["SITES"], _to_coord_list(self.job.sites))
        self.assertFieldsEqual(
            {
                "calc_mode": "event_based",
                "min_magnitude": 5.0,
                "investigation_time": 50.0,
                "component": "average",
                "imt": "sa",
                "period": 1.0,
                "truncation_type": "none",
                "truncation_level": 3.0,
                "reference_vs30_value": 760.0,
                "imls": [0.005, 0.007, 0.0098, 0.0137, 0.0192],
                "poes": None,
                "realizations": 5,
                "histories": 1,
                "gm_correlated": False,
            },
            self.job,
        )
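The assertFieldsEqual checks above encode the normalization _prepare_job
applies to raw config strings: 'Average Horizontal (GMRotI50)' becomes
component 'gmroti50', '2 Sided'/'1 Sided'/'None' become truncation types
'twosided'/'onesided'/'none', and 'Classical'/'Scenario'/'Event Based'
become calc modes 'classical'/'scenario'/'event_based'. A lookup-table
sketch of that mapping (table names hypothetical, values taken from the
assertions above):

COMPONENT_MAP = {
    "Average Horizontal (GMRotI50)": "gmroti50",
    "Average Horizontal": "average",
}
TRUNCATION_TYPE_MAP = {"2 Sided": "twosided", "1 Sided": "onesided",
                       "None": "none"}
CALC_MODE_MAP = {"Classical": "classical", "Scenario": "scenario",
                 "Event Based": "event_based"}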
Example #12
    def test_generate_hazard_curves_using_classical_psha(self):

        def verify_realization_haz_curves_stored_to_kvs(the_job, keys):
            """ This just tests to make sure there something in the KVS
            for each key in given list of keys. This does NOT test the
            actual results. """
            # TODO (LB): At some point we need to test the actual
            # results to verify they are correct

            realizations = int(
                the_job.params['NUMBER_OF_LOGIC_TREE_SAMPLES'])

            for realization in xrange(0, realizations):
                for site in the_job.sites_to_compute():
                    key = tokens.hazard_curve_poes_key(
                        the_job.job_id, realization, site)
                    self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_mean_haz_curves_stored_to_kvs(the_job, keys):
            """ Make sure that the keys and non-empty values for mean
            hazard curves have been written to KVS."""

            if the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == 'true':

                LOG.debug("verifying KVS entries for mean hazard curves")
                for site in the_job.sites_to_compute():
                    key = tokens.mean_hazard_curve_key(the_job.job_id, site)
                    self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_mean_haz_maps_stored_to_kvs(the_job, calculator, keys):
            """ Make sure that the keys and non-empty values for mean
            hazard maps have been written to KVS."""

            if (the_job.params[hazard_general.POES_PARAM_NAME] != '' and
                the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == \
                'true'):

                LOG.debug("verifying KVS entries for mean hazard maps")

                for poe in calculator.poes_hazard_maps:
                    for site in the_job.sites_to_compute():
                        key = tokens.mean_hazard_map_key(
                            the_job.job_id, site, poe)
                        self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_quantile_haz_curves_stored_to_kvs(the_job, calculator,
                                                     keys):
            """ Make sure that the keys and non-empty values for quantile
            hazard curves have been written to KVS."""

            quantiles = calculator.quantile_levels

            LOG.debug("verifying KVS entries for quantile hazard curves, "\
                "%s quantile values" % len(quantiles))

            for quantile in quantiles:
                for site in the_job.sites_to_compute():
                    key = tokens.quantile_hazard_curve_key(
                        the_job.job_id, site, quantile)
                    self.assertTrue(key in keys, "Missing key %s" % key)

        def verify_quantile_haz_maps_stored_to_kvs(the_job, calculator, keys):
            """ Make sure that the keys and non-empty values for quantile
            hazard maps have been written to KVS."""

            quantiles = calculator.quantile_levels

            if (the_job.params[hazard_general.POES_PARAM_NAME] != '' and
                len(quantiles) > 0):

                poes = calculator.poes_hazard_maps

                LOG.debug("verifying KVS entries for quantile hazard maps, "\
                    "%s quantile values, %s PoEs" % (
                    len(quantiles), len(poes)))

                for quantile in quantiles:
                    for poe in poes:
                        for site in the_job.sites_to_compute():
                            key = tokens.quantile_hazard_map_key(
                                the_job.job_id, site, poe, quantile)
                            self.assertTrue(
                                key in keys, "Missing key %s" % key)

        def verify_realization_haz_curves_stored_to_nrml(the_job, calculator):
            """Tests that a NRML file has been written for each realization,
            and that this file validates against the NRML schema.
            Does NOT test if results in NRML file are correct.
            """
            realizations = int(
                the_job.params['NUMBER_OF_LOGIC_TREE_SAMPLES'])
            for realization in xrange(0, realizations):

                nrml_path = os.path.join(
                    "demos/classical_psha_simple/computed_output",
                    calculator.hazard_curve_filename(realization))

                LOG.debug("validating NRML file %s" % nrml_path)

                self.assertTrue(xml.validates_against_xml_schema(
                    nrml_path, NRML_SCHEMA_PATH),
                    "NRML instance file %s does not validate against schema" \
                    % nrml_path)

        def verify_mean_haz_curves_stored_to_nrml(the_job, calculator):
            """Tests that a mean hazard curve NRML file has been written,
            and that this file validates against the NRML schema.
            Does NOT test if results in NRML file are correct.
            """

            if the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == 'true':
                nrml_path = os.path.join(
                    "demos/classical_psha_simple/computed_output",
                    calculator.mean_hazard_curve_filename())

                LOG.debug("validating NRML file %s" % nrml_path)

                self.assertTrue(xml.validates_against_xml_schema(
                    nrml_path, NRML_SCHEMA_PATH),
                    "NRML instance file %s does not validate against schema" \
                    % nrml_path)

        def verify_mean_haz_maps_stored_to_nrml(the_job, calculator):
            """Tests that a mean hazard map NRML file has been written,
            and that this file validates against the NRML schema.
            Does NOT test if results in NRML file are correct.
            """
            if (the_job.params[hazard_general.POES_PARAM_NAME] != '' and
                the_job.params['COMPUTE_MEAN_HAZARD_CURVE'].lower() == \
                'true'):

                for poe in calculator.poes_hazard_maps:
                    nrml_path = os.path.join(
                        "demos/classical_psha_simple/computed_output",
                        calculator.mean_hazard_map_filename(poe))

                    LOG.debug("validating NRML file for mean hazard map %s" \
                        % nrml_path)

                    self.assertTrue(xml.validates_against_xml_schema(
                        nrml_path, NRML_SCHEMA_PATH),
                        "NRML instance file %s does not validate against "\
                        "schema" % nrml_path)

        def verify_quantile_haz_curves_stored_to_nrml(the_job, calculator):
            """Tests that quantile hazard curve NRML files have been written,
            and that these file validate against the NRML schema.
            Does NOT test if results in NRML files are correct.
            """

            for quantile in calculator.quantile_levels:

                nrml_path = os.path.join(
                    "demos/classical_psha_simple/computed_output",
                    calculator.quantile_hazard_curve_filename(quantile))

                LOG.debug("validating NRML file for quantile hazard curve: "\
                    "%s" % nrml_path)

                self.assertTrue(xml.validates_against_xml_schema(
                    nrml_path, NRML_SCHEMA_PATH),
                    "NRML instance file %s does not validate against schema" \
                    % nrml_path)

        def verify_quantile_haz_maps_stored_to_nrml(the_job, calculator):
            """Tests that quantile hazard map NRML files have been written,
            and that these file validate against the NRML schema.
            Does NOT test if results in NRML files are correct.
            """

            quantiles = calculator.quantile_levels

            if (the_job.params[hazard_general.POES_PARAM_NAME] != '' and
                len(quantiles) > 0):

                for poe in calculator.poes_hazard_maps:
                    for quantile in quantiles:
                        nrml_path = os.path.join(
                            "demos/classical_psha_simple/computed_output",
                            calculator.quantile_hazard_map_filename(
                                quantile, poe))

                        LOG.debug("validating NRML file for quantile hazard "\
                            "map: %s" % nrml_path)

                        self.assertTrue(xml.validates_against_xml_schema(
                            nrml_path, NRML_SCHEMA_PATH),
                            "NRML instance file %s does not validate against "\
                            "schema" % nrml_path)

        base_path = helpers.testdata_path("classical_psha_simple")
        path = helpers.testdata_path("classical_psha_simple/config.gem")
        job_profile, params, sections = engine.import_job_profile(path)

        calculation = OqCalculation(owner=job_profile.owner)
        calculation.oq_job_profile = job_profile
        calculation.save()

        the_job = CalculationProxy(
            params, calculation.id, sections=sections, base_path=base_path,
            serialize_results_to=['db', 'xml'], oq_job_profile=job_profile,
            oq_calculation=calculation)
        the_job.to_kvs()

        calc_mode = job_profile.calc_mode
        calculator = CALCULATORS[calc_mode](the_job)

        used_keys = []
        calculator.execute(used_keys)

        verify_realization_haz_curves_stored_to_kvs(the_job, used_keys)
        verify_realization_haz_curves_stored_to_nrml(the_job, calculator)

        # hazard curves: check results of mean and quantile computation
        verify_mean_haz_curves_stored_to_kvs(the_job, used_keys)
        verify_quantile_haz_curves_stored_to_kvs(the_job, calculator,
                                                 used_keys)

        verify_mean_haz_curves_stored_to_nrml(the_job, calculator)
        verify_quantile_haz_curves_stored_to_nrml(the_job, calculator)

        # hazard maps: check results of mean and quantile computation
        verify_mean_haz_maps_stored_to_kvs(the_job, calculator, used_keys)
        verify_quantile_haz_maps_stored_to_kvs(the_job, calculator, used_keys)

        verify_mean_haz_maps_stored_to_nrml(the_job, calculator)
        verify_quantile_haz_maps_stored_to_nrml(the_job, calculator)
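The schema checks above go through openquake's
xml.validates_against_xml_schema helper; a standalone equivalent built on
lxml (an assumption, not the project's code) could look like:

from lxml import etree

def validates_against_xml_schema(instance_path, schema_path):
    # Parse the schema once, then validate the instance document against it.
    schema = etree.XMLSchema(etree.parse(schema_path))
    return schema.validate(etree.parse(instance_path))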